diff --git a/.github/workflows/autotools.yml b/.github/workflows/autotools.yml
index 89afa405c43..d0cf5577738 100644
--- a/.github/workflows/autotools.yml
+++ b/.github/workflows/autotools.yml
@@ -52,3 +52,9 @@ jobs:
     uses: ./.github/workflows/intel-auto.yml
     with:
       build_mode: "production"
+
+  call-release-auto-nvhpc:
+    name: "Autotools nvhpc Workflows"
+    uses: ./.github/workflows/nvhpc-auto.yml
+    with:
+      build_mode: "production"
diff --git a/.github/workflows/cmake-ctest.yml b/.github/workflows/cmake-ctest.yml
index da99e7e02cc..ee4bcff4de3 100644
--- a/.github/workflows/cmake-ctest.yml
+++ b/.github/workflows/cmake-ctest.yml
@@ -191,8 +191,12 @@ jobs:

       # symlinks the compiler executables to a common location
       - name: Setup GNU Fortran
-        uses: modflowpy/install-gfortran-action@v1
-
+        uses: fortran-lang/setup-fortran@v1
+        id: setup-fortran
+        with:
+          compiler: gcc
+          version: 12
+
       - name: Run ctest (MacOS)
         id: run-ctest
         run: |
diff --git a/.github/workflows/cmake.yml b/.github/workflows/cmake.yml
index 75180c0c048..84fe01094d9 100644
--- a/.github/workflows/cmake.yml
+++ b/.github/workflows/cmake.yml
@@ -44,3 +44,9 @@ jobs:
     uses: ./.github/workflows/intel-cmake.yml
     with:
       build_mode: "Release"
+
+  call-release-cmake-nvhpc:
+    name: "CMake nvhpc Workflows"
+    uses: ./.github/workflows/nvhpc-cmake.yml
+    with:
+      build_mode: "Release"
diff --git a/.github/workflows/cve.yml b/.github/workflows/cve.yml
index 6756840981d..372518ade12 100644
--- a/.github/workflows/cve.yml
+++ b/.github/workflows/cve.yml
@@ -16,7 +16,7 @@ on:

 # Using concurrency to cancel any in-progress job or run
 concurrency:
-  group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }}
+  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
   cancel-in-progress: true

 permissions:
diff --git a/.github/workflows/h5py.yml b/.github/workflows/h5py.yml
index 10958441ab1..316a71d99b1 100644
--- a/.github/workflows/h5py.yml
+++ b/.github/workflows/h5py.yml
@@ -11,10 +11,11 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - name: Install gfortran
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y gfortran-12
+      - name: Install Fortran
+        uses: fortran-lang/setup-fortran@v1
+        with:
+          compiler: gcc
+          version: 13
       - name: Checkout Spack
         uses: actions/checkout@v4
         with:
diff --git a/.github/workflows/hdfeos5.yml b/.github/workflows/hdfeos5.yml
index 5faf74a9ac4..dad262d426f 100644
--- a/.github/workflows/hdfeos5.yml
+++ b/.github/workflows/hdfeos5.yml
@@ -16,7 +16,7 @@ on:

 # Using concurrency to cancel any in-progress job or run
 concurrency:
-  group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }}
+  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
   cancel-in-progress: true

 permissions:
diff --git a/.github/workflows/intel-auto.yml b/.github/workflows/intel-auto.yml
index f5249bdaf4a..d63262f28a1 100644
--- a/.github/workflows/intel-auto.yml
+++ b/.github/workflows/intel-auto.yml
@@ -13,14 +13,16 @@ permissions:

 jobs:
   Intel_build_and_test:
-    name: "Intel ${{ inputs.build_mode }} -Werror (build only)"
+    name: "Intel ${{ inputs.build_mode }}"
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
+
       - name: Install Dependencies
-        run: |
+        run: |
           sudo apt-get update
           sudo apt-get install autoconf automake libtool libtool-bin libaec-dev
+
       - name: Add oneAPI to apt
         shell: bash
         run: |
@@ -39,9 +41,9 @@ jobs:
           sudo apt install -y intel-oneapi-mpi-devel
           sudo apt-get install doxygen graphviz
           sudo apt install libssl3 libssl-dev libcurl4
libcurl4-openssl-dev - echo "CC=icx" >> $GITHUB_ENV - echo "CXX=icpx" >> $GITHUB_ENV - echo "FC=ifx" >> $GITHUB_ENV + echo "CC=icx" >> $GITHUB_ENV + echo "CXX=icpx" >> $GITHUB_ENV + echo "FC=ifx" >> $GITHUB_ENV - name: Install oneAPI MKL library shell: bash diff --git a/.github/workflows/intel-cmake.yml b/.github/workflows/intel-cmake.yml index 4bdda03c79d..9972376332e 100644 --- a/.github/workflows/intel-cmake.yml +++ b/.github/workflows/intel-cmake.yml @@ -14,7 +14,7 @@ permissions: jobs: Intel_build_and_test: - name: "Intel ${{ inputs.build_mode }} -Werror (build only)" + name: "Intel ${{ inputs.build_mode }}" runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -30,9 +30,9 @@ jobs: - name: Install Linux Dependencies run: | - sudo apt update - sudo apt-get install ninja-build doxygen graphviz - sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev + sudo apt update + sudo apt-get install ninja-build doxygen graphviz + sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev - name: install oneAPI dpcpp and fortran compiler shell: bash diff --git a/.github/workflows/linux-auto-aocc-ompi.yml b/.github/workflows/linux-auto-aocc-ompi.yml index 76b6452226c..435d93f95c7 100644 --- a/.github/workflows/linux-auto-aocc-ompi.yml +++ b/.github/workflows/linux-auto-aocc-ompi.yml @@ -16,12 +16,12 @@ on: # Using concurrency to cancel any in-progress job or run concurrency: - group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} + group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} cancel-in-progress: true permissions: contents: read - + jobs: build: runs-on: ubuntu-latest @@ -42,24 +42,31 @@ jobs: which clang which flang clang -v - - name: Install OpenMPI 4.1.5 + - name: Cache OpenMPI 4.1.5 installation + id: cache-openmpi-4_1_5 + uses: actions/cache@v3 + with: + path: /home/runner/work/hdf5/hdf5/openmpi-4.1.5-install + key: ${{ runner.os }}-${{ runner.arch }}-openmpi-4_1_5-cache + - if: ${{ steps.cache-openmpi-4_1_5.outputs.cache-hit != 'true' }} + name: Install OpenMPI 4.1.5 run: | export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/usr/local/lib wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.5.tar.gz tar zxvf openmpi-4.1.5.tar.gz cd openmpi-4.1.5 - ./configure CC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/clang FC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/flang --prefix=/usr/local + ./configure CC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/clang FC=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/bin/flang --prefix=/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install make - sudo make install + make install - name: Install HDF5 env: - NPROCS: 2 + NPROCS: 2 run: | - export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/usr/local/lib/openmpi:/usr/local/lib - export LD_RUN_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/usr/local/lib/openmpi:/usr/local/lib - export PATH=/usr/local/bin:$PATH + export LD_LIBRARY_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/lib:/usr/local/lib + export LD_RUN_PATH=/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib:/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/lib:/usr/local/lib + export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/bin:/usr/local/bin:$PATH ./autogen.sh - ./configure --prefix=/tmp --enable-parallel --enable-shared CC=/usr/local/bin/mpicc 
LDFLAGS="-L/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib -L/usr/local/lib/openmpi" + ./configure --prefix=/tmp --enable-parallel --enable-shared CC=mpicc LDFLAGS="-L/home/runner/work/hdf5/hdf5/aocc-compiler-4.1.0/lib -L/home/runner/work/hdf5/hdf5/openmpi-4.1.5-install/lib" make -j make check -j make install diff --git a/.github/workflows/linux-nvhpc-auto.yml b/.github/workflows/linux-nvhpc-auto.yml deleted file mode 100644 index 1281e979633..00000000000 --- a/.github/workflows/linux-nvhpc-auto.yml +++ /dev/null @@ -1,58 +0,0 @@ -name: linux autotools nvhpc - -on: - workflow_dispatch: - push: - pull_request: - branches: [ develop ] - paths-ignore: - - '.github/CODEOWNERS' - - '.github/FUNDING.yml' - - 'doc/**' - - 'release_docs/**' - - 'ACKNOWLEDGEMENTS' - - 'COPYING**' - - '**.md' - -# Using concurrency to cancel any in-progress job or run -concurrency: - group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} - cancel-in-progress: true - -permissions: - contents: read - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Install System dependencies - run: | - sudo apt update - sudo apt install -y libaec-dev zlib1g-dev automake autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev doxygen openssl libtool libtool-bin build-essential - - name: Install NVHPC - run: | - curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg - echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list - sudo apt-get update -y - sudo apt-get install -y nvhpc-23-7 - - name: Get Sources - uses: actions/checkout@v4 - - name: Test HDF5 - env: - NPROCS: 2 - run: | - export NVHPCSDK=/opt/nvidia/hpc_sdk - export OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc++ - export OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc - export OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvfortran - export LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/lib - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH - export DESTDIR=/tmp - ./autogen.sh - ./configure CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin/mpicc FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin/mpifort FCFLAGS="-fPIC -fortranlibs" --enable-fortran --enable-shared --enable-parallel - cat config.log - make -j - make check -j - make install - make uninstall diff --git a/.github/workflows/linux-nvhpc.yml b/.github/workflows/linux-nvhpc.yml deleted file mode 100644 index 06fd40a7c4f..00000000000 --- a/.github/workflows/linux-nvhpc.yml +++ /dev/null @@ -1,56 +0,0 @@ -name: linux CMake nvhpc - -on: - workflow_dispatch: - push: - pull_request: - branches: [ develop ] - paths-ignore: - - '.github/CODEOWNERS' - - '.github/FUNDING.yml' - - 'doc/**' - - 'release_docs/**' - - 'ACKNOWLEDGEMENTS' - - 'COPYING**' - - '**.md' - -# Using concurrency to cancel any in-progress job or run -concurrency: - group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }} - cancel-in-progress: true - -permissions: - contents: read - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Install System dependencies - run: | - sudo apt update - sudo apt install -y libaec-dev zlib1g-dev automake 
autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev doxygen openssl libtool libtool-bin build-essential
-      - name: Install NVHPC
-        run: |
-          curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg
-          echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list
-          sudo apt-get update -y
-          sudo apt-get install -y nvhpc-23-7
-      - name: Get Sources
-        uses: actions/checkout@v4
-      - name: Test HDF5
-        env:
-          FC: nvfortran
-          CC: nvc
-          FCFLAGS: -fPIC
-        run: |
-          export NVHPCSDK=/opt/nvidia/hpc_sdk
-          export OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc++
-          export OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvc
-          export OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin/nvfortran
-          export LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/cuda/12.2/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/lib
-          export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.7/compilers/bin:$PATH
-          cmake -B build -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF -DHDF5_ENABLE_PARALLEL:BOOL=ON -DHDF5_BUILD_FORTRAN:BOOL=ON
-          cat build/CMakeCache.txt
-          cmake --build build
-          ctest --test-dir build --output-on-failure
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index e472b8cff56..43513c51a26 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -17,7 +17,7 @@ on:

 # Using concurrency to cancel any in-progress job or run
 concurrency:
-  group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }}
+  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
   cancel-in-progress: true

 permissions:
diff --git a/.github/workflows/netcdf.yml b/.github/workflows/netcdf.yml
index f38608012c0..f08361f6ea0 100644
--- a/.github/workflows/netcdf.yml
+++ b/.github/workflows/netcdf.yml
@@ -19,7 +19,7 @@ permissions:

 # Using concurrency to cancel any in-progress job or run
 concurrency:
-  group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }}
+  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
   cancel-in-progress: true

 jobs:
diff --git a/.github/workflows/nvhpc-auto.yml b/.github/workflows/nvhpc-auto.yml
new file mode 100644
index 00000000000..0b6f64af0e9
--- /dev/null
+++ b/.github/workflows/nvhpc-auto.yml
@@ -0,0 +1,87 @@
+name: hdf5 dev autotools nvhpc
+
+on:
+  workflow_call:
+    inputs:
+      build_mode:
+        description: "release vs. debug build"
+        required: true
+        type: string
+
+permissions:
+  contents: read
+
+jobs:
+  nvhpc_build_and_test:
+    name: "nvhpc ${{ inputs.build_mode }}"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install Dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install autoconf automake libtool libtool-bin libaec-dev
+          sudo apt-get install doxygen graphviz
+          sudo apt install -y zlib1g-dev libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev openssl build-essential
+
+      - name: Install NVHPC
+        shell: bash
+        run: |
+          curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg
+          echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list
+          sudo apt-get update -y
+          sudo apt-get install -y nvhpc-23-9
+          echo "CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin/mpicc" >> $GITHUB_ENV
+          echo "FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin/mpifort" >> $GITHUB_ENV
+          echo "NVHPCSDK=/opt/nvidia/hpc_sdk" >> $GITHUB_ENV
+          echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvc++" >> $GITHUB_ENV
+          echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvc" >> $GITHUB_ENV
+          echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvfortran" >> $GITHUB_ENV
+          echo "LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/lib" >> $GITHUB_ENV
+          echo "DESTDIR=/tmp" >> $GITHUB_ENV
+
+      - name: Autotools Configure
+        shell: bash
+        run: |
+          export RUNPARALLEL="mpiexec -np 2"
+          export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH
+          sh ./autogen.sh
+          mkdir "${{ runner.workspace }}/build"
+          cd "${{ runner.workspace }}/build"
+          $GITHUB_WORKSPACE/configure \
+            FCFLAGS="-fPIC -fortranlibs" \
+            --enable-build-mode=${{ inputs.build_mode }} \
+            --enable-fortran \
+            --enable-shared \
+            --enable-parallel
+          #cat config.log
+
+      # BUILD
+      - name: Autotools Build
+        shell: bash
+        run: |
+          export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH
+          make -j3
+        working-directory: ${{ runner.workspace }}/build
+
+      # RUN TESTS
+      # NORMAL
+      - name: Autotools Run Tests
+        run: |
+          export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH
+          make check -j
+        working-directory: ${{ runner.workspace }}/build
+
+      # INSTALL (note that this runs even when we don't run the tests)
+      - name: Autotools Install
+        run: |
+          export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH
+          make install
+        working-directory: ${{ runner.workspace }}/build
+
+#      - name: Autotools Verify Install
+#        run: |
+#          export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH
+#          make check-install
+#        working-directory: ${{ runner.workspace }}/build
diff --git a/.github/workflows/nvhpc-cmake.yml b/.github/workflows/nvhpc-cmake.yml
new file mode 100644
index 00000000000..1b0dbebc19e
--- /dev/null
+++ b/.github/workflows/nvhpc-cmake.yml
@@ -0,0 +1,76 @@
+name: hdf5 dev CMake nvhpc
+
+on:
+  workflow_call:
+    inputs:
+      build_mode:
+        description: "release vs. debug build"
+        required: true
+        type: string
+
+permissions:
+  contents: read
+
+jobs:
+  nvhpc_build_and_test:
+    name: "nvhpc ${{ inputs.build_mode }}"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install Linux dependencies
+        shell: bash
+        run: |
+          sudo apt update
+          sudo apt-get install ninja-build doxygen graphviz
+          sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev
+          sudo apt install -y libaec-dev zlib1g-dev wget curl bzip2 flex bison cmake libzip-dev openssl build-essential
+
+      - name: Install NVHPC
+        shell: bash
+        run: |
+          curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg
+          echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list
+          sudo apt-get update -y
+          sudo apt-get install -y nvhpc-23-9
+          echo "CC=nvc" >> $GITHUB_ENV
+          echo "FC=nvfortran" >> $GITHUB_ENV
+          echo "NVHPCSDK=/opt/nvidia/hpc_sdk" >> $GITHUB_ENV
+          echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvc++" >> $GITHUB_ENV
+          echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvc" >> $GITHUB_ENV
+          echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin/nvfortran" >> $GITHUB_ENV
+          echo "LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/cuda/12.2/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/lib" >> $GITHUB_ENV
+          echo "DESTDIR=/tmp" >> $GITHUB_ENV
+
+      - name: CMake Configure with nvc
+        shell: bash
+        run: |
+          export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/23.9/compilers/bin:$PATH
+          mkdir "${{ runner.workspace }}/build"
+          cd "${{ runner.workspace }}/build"
+          cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake -G Ninja \
+            -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} \
+            -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF \
+            -DHDF5_ENABLE_PARALLEL:BOOL=ON \
+            -DHDF5_BUILD_CPP_LIB:BOOL=OFF \
+            -DLIBAEC_USE_LOCALCONTENT=OFF \
+            -DZLIB_USE_LOCALCONTENT=OFF \
+            -DHDF5_BUILD_FORTRAN:BOOL=ON \
+            -DHDF5_BUILD_JAVA:BOOL=OFF \
+            -DMPIEXEC_MAX_NUMPROCS:STRING="2" \
+            $GITHUB_WORKSPACE
+          cat src/libhdf5.settings
+
+      # BUILD
+      - name: CMake Build
+        shell: bash
+        run: |
+          cmake --build . --parallel 3 --config ${{ inputs.build_mode }}
+        working-directory: ${{ runner.workspace }}/build
+
+      # RUN TESTS
+      - name: CMake Run Tests
+        shell: bash
+        run: |
+          ctest . --parallel 2 -C ${{ inputs.build_mode }} -V
+        working-directory: ${{ runner.workspace }}/build
diff --git a/CMakeFilters.cmake b/CMakeFilters.cmake
index 042bfdc356b..72f7f459427 100644
--- a/CMakeFilters.cmake
+++ b/CMakeFilters.cmake
@@ -10,10 +10,9 @@
 # help@hdfgroup.org.
# option (USE_LIBAEC_STATIC "Use static AEC library " OFF) -option (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 0) -option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0) +option (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" OFF) +option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" OFF) -set (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 1) if (NOT ZLIB_USE_LOCALCONTENT) set (ZLIB_URL ${ZLIB_TGZ_ORIGPATH}/${ZLIB_TGZ_NAME}) else () @@ -23,7 +22,6 @@ if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0") message (VERBOSE "Filter ZLIB file is ${ZLIB_URL}") endif () -set (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 1) if (NOT LIBAEC_USE_LOCALCONTENT) set (SZIP_URL ${LIBAEC_TGZ_ORIGPATH}/${LIBAEC_TGZ_NAME}) else () @@ -38,8 +36,8 @@ include (ExternalProject) set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)") set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - set (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 1) - set (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 1) + set (ZLIB_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for ZLIB" FORCE) + set (SZIP_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for SZIP" FORCE) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") set (ZLIB_URL ${ZLIB_GIT_URL} CACHE STRING "Path to zlib git repository") set (ZLIB_BRANCH ${ZLIB_GIT_BRANCH}) @@ -62,8 +60,10 @@ if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MAT endif () endif () else () - set (ZLIB_USE_EXTERNAL 0) - set (SZIP_USE_EXTERNAL 0) + set (HDF5_ENABLE_Z_LIB_SUPPORT OFF CACHE BOOL "" FORCE) + set (ZLIB_USE_EXTERNAL OFF CACHE BOOL "Use External Library Building for ZLIB") + set (HDF5_ENABLE_SZIP_SUPPORT OFF CACHE BOOL "" FORCE) + set (SZIP_USE_EXTERNAL OFF CACHE BOOL "Use External Library Building for SZIP") endif () endif () @@ -107,6 +107,7 @@ if (HDF5_ENABLE_Z_LIB_SUPPORT) INCLUDE_DIRECTORIES (${ZLIB_INCLUDE_DIRS}) message (VERBOSE "Filter HDF5_ZLIB is ON") else () + set (HDF5_ENABLE_Z_LIB_SUPPORT OFF CACHE BOOL "" FORCE) message (WARNING " ZLib support in HDF5 was enabled but not found") endif () endif () @@ -157,6 +158,7 @@ if (HDF5_ENABLE_SZIP_SUPPORT) set (EXTERNAL_FILTERS "${EXTERNAL_FILTERS} ENCODE") endif () else () + set (HDF5_ENABLE_SZIP_SUPPORT OFF CACHE BOOL "" FORCE) message (WARNING "SZIP support in HDF5 was enabled but not found") endif () endif () diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake index d42142a6da0..02f8dc35058 100644 --- a/CMakeInstallation.cmake +++ b/CMakeInstallation.cmake @@ -141,6 +141,10 @@ install ( #----------------------------------------------------------------------------- option (HDF5_PACK_EXAMPLES "Package the HDF5 Library Examples Compressed File" OFF) if (HDF5_PACK_EXAMPLES) + if (DEFINED CMAKE_TOOLCHAIN_FILE) + get_filename_component(TOOLCHAIN ${CMAKE_TOOLCHAIN_FILE} NAME) + set(CTEST_TOOLCHAIN_FILE "\${CTEST_SOURCE_DIRECTORY}/config/toolchain/${TOOLCHAIN}") + endif () configure_file ( ${HDF_RESOURCES_DIR}/examples/HDF5_Examples.cmake.in ${HDF5_BINARY_DIR}/HDF5_Examples.cmake @ONLY @@ -151,9 +155,9 @@ if (HDF5_PACK_EXAMPLES) COMPONENT hdfdocuments ) - option (EXAMPLES_USE_RELEASE_NAME "Use the released examples artifact name" OFF) option (EXAMPLES_DOWNLOAD "Download to use released examples files" OFF) if (EXAMPLES_DOWNLOAD) 
+ option (EXAMPLES_USE_RELEASE_NAME "Use the released examples artifact name" OFF) if (EXAMPLES_USE_RELEASE_NAME) set (EXAMPLES_NAME ${EXAMPLES_TGZ_ORIGNAME}) else () @@ -295,10 +299,10 @@ endif () if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES) set (CPACK_PACKAGE_VENDOR "HDF_Group") set (CPACK_PACKAGE_NAME "${HDF5_PACKAGE_NAME}") - if (CDASH_LOCAL) - set (CPACK_PACKAGE_VERSION "${HDF5_PACKAGE_VERSION}") - else () + if (NOT WIN32 OR HDF5_VERS_SUBRELEASE MATCHES "^[0-9]+$") set (CPACK_PACKAGE_VERSION "${HDF5_PACKAGE_VERSION_STRING}") + else () + set (CPACK_PACKAGE_VERSION "${HDF5_PACKAGE_VERSION}") endif () set (CPACK_PACKAGE_VERSION_MAJOR "${HDF5_PACKAGE_VERSION_MAJOR}") set (CPACK_PACKAGE_VERSION_MINOR "${HDF5_PACKAGE_VERSION_MINOR}") diff --git a/CMakeLists.txt b/CMakeLists.txt index 6aa467d110b..c440c58b27f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -412,6 +412,13 @@ set (HDF5_PACKAGE_TARNAME "${HDF5_PACKAGE}${HDF_PACKAGE_EXT}") set (HDF5_PACKAGE_URL "http://www.hdfgroup.org") set (HDF5_PACKAGE_BUGREPORT "help@hdfgroup.org") +#----------------------------------------------------------------------------- +# Set variables needed for installation +#----------------------------------------------------------------------------- +set (HDF5_VERSION_STRING ${HDF5_PACKAGE_VERSION}) +set (HDF5_VERSION_MAJOR ${HDF5_PACKAGE_VERSION_MAJOR}) +set (HDF5_VERSION_MINOR ${HDF5_PACKAGE_VERSION_MINOR}) + #----------------------------------------------------------------------------- # Include some macros for reusable code #----------------------------------------------------------------------------- @@ -952,6 +959,13 @@ if (HDF5_BUILD_DOC AND EXISTS "${HDF5_DOXYGEN_DIR}" AND IS_DIRECTORY "${HDF5_DOX # check if Doxygen is installed find_package(Doxygen) if (DOXYGEN_FOUND) + option (HDF5_ENABLE_DOXY_WARNINGS "Enable fail if doxygen parsing has warnings." ON) + mark_as_advanced (HDF5_ENABLE_DOXY_WARNINGS) + if (HDF5_ENABLE_DOXY_WARNINGS) + set (HDF5_DOXY_WARNINGS "FAIL_ON_WARNINGS") + else () + set (HDF5_DOXY_WARNINGS "NO") + endif () message(STATUS "Doxygen version: ${DOXYGEN_VERSION}") add_subdirectory (doxygen) else () diff --git a/CMakePlugins.cmake b/CMakePlugins.cmake index b96d1ee0466..7fd332a2ebe 100644 --- a/CMakePlugins.cmake +++ b/CMakePlugins.cmake @@ -9,9 +9,8 @@ # If you do not have access to either file, you may request a copy from # help@hdfgroup.org. 
# -option (PLUGIN_USE_EXTERNAL "Use External Library Building for filter PLUGIN" 0) +option (PLUGIN_USE_EXTERNAL "Use External Library Building for filter PLUGIN" OFF) -set (PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGIN" 1) if (NOT PLUGIN_USE_LOCALCONTENT) set (PLUGIN_URL ${PLUGIN_TGZ_ORIGPATH}/${PLUGIN_TGZ_NAME}) else () @@ -27,7 +26,7 @@ include (ExternalProject) set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)") set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - set (PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGIN" 1) + set (PLUGIN_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for PLUGIN" FORCE) if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") set (PLUGIN_URL ${PLUGIN_GIT_URL} CACHE STRING "Path to PLUGIN git repository") set (PLUGIN_BRANCH ${PLUGIN_GIT_BRANCH}) @@ -42,7 +41,7 @@ if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MAT endif () endif () else () - set (PLUGIN_USE_EXTERNAL 0) + set (PLUGIN_USE_EXTERNAL OFF CACHE BOOL "Use External Library Building for PLUGIN") message (VERBOSE "Filter PLUGIN not built") endif () endif () diff --git a/CMakePresets.json b/CMakePresets.json index 6d1a12fdff6..48393df02ae 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -91,7 +91,7 @@ "HDF5_EXAMPLES_COMPRESSED": {"type": "STRING", "value": "hdf5-examples-master.tar.gz"}, "HDF5_EXAMPLES_COMPRESSED_DIR": {"type": "PATH", "value": "${sourceParentDir}/temp"}, "EXAMPLES_TGZ_ORIGPATH": {"type": "STRING", "value": "https://github.com/HDFGroup/hdf5-examples/releases/download/snapshot"}, - "EXAMPLES_TGZ_ORIGNAME": {"type": "STRING", "value": "snapshot.tar.gz"} + "EXAMPLES_TGZ_ORIGNAME": {"type": "STRING", "value": "hdf5-examples-2.0.4.tar.gz"} } }, { @@ -263,4 +263,5 @@ ] } ] -} \ No newline at end of file +} + diff --git a/CTestConfig.cmake b/CTestConfig.cmake index b780b86edae..62beafc0c12 100644 --- a/CTestConfig.cmake +++ b/CTestConfig.cmake @@ -22,20 +22,12 @@ set (CTEST_DROP_METHOD "https") if (CTEST_DROP_SITE_INIT) set (CTEST_DROP_SITE "${CTEST_DROP_SITE_INIT}") else () - if (CDASH_LOCAL) - set (CTEST_DROP_SITE "cdash-internal.hdfgroup.org") - else () - set (CTEST_DROP_SITE "cdash.hdfgroup.org") - endif () + set (CTEST_DROP_SITE "cdash.hdfgroup.org") endif () if (CTEST_DROP_LOCATION_INIT) set (CTEST_DROP_LOCATION "${CTEST_DROP_LOCATION_INIT}") else () - if (CDASH_LOCAL) - set (CTEST_DROP_LOCATION "/submit.php?project=HDF5Trunk") - else () - set (CTEST_DROP_LOCATION "/submit.php?project=HDF5") - endif () + set (CTEST_DROP_LOCATION "/submit.php?project=HDF5") endif () set (CTEST_DROP_SITE_CDASH TRUE) diff --git a/bin/h5cc.in b/bin/h5cc.in index 4eef3c95eee..e3dc988a576 100644 --- a/bin/h5cc.in +++ b/bin/h5cc.in @@ -116,15 +116,15 @@ usage() { # A wonderfully informative "usage" message. echo "usage: $prog_name [OPTIONS] " echo " OPTIONS:" - echo " -help This help message." - echo " -echo Show all the shell commands executed" - echo " -prefix=DIR Prefix directory to find HDF5 lib/ and include/" - echo " subdirectories [default: $prefix]" - echo " -show Show the commands without executing them" - echo " -showconfig Show the HDF5 library configuration summary" - echo " -shlib Compile with shared HDF5 libraries [default for hdf5 built" + echo " -help | --help | -h This help message." 
+ echo " -echo Show all the shell commands executed" + echo " -prefix=DIR Prefix directory to find HDF5 lib/ and include/" + echo " subdirectories [default: $prefix]" + echo " -show Show the commands without executing them" + echo " -showconfig Show the HDF5 library configuration summary" + echo " -shlib Compile with shared HDF5 libraries [default for hdf5 built" echo " without static libraries]" - echo " -noshlib Compile with static HDF5 libraries [default for hdf5 built" + echo " -noshlib Compile with static HDF5 libraries [default for hdf5 built" echo " with static libraries]" echo " " echo " - the normal compile line options for your compiler." @@ -256,6 +256,12 @@ for arg in $@ ; do -help) usage ;; + --help) + usage + ;; + -h) + usage + ;; *\"*) qarg="'"$arg"'" allargs="$allargs $qarg" diff --git a/c++/CMakeLists.txt b/c++/CMakeLists.txt index 2c0275bc586..b419c805880 100644 --- a/c++/CMakeLists.txt +++ b/c++/CMakeLists.txt @@ -13,6 +13,6 @@ endif () #----------------------------------------------------------------------------- # Build the CPP unit tests #----------------------------------------------------------------------------- -if (BUILD_TESTING) +if (NOT HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING) add_subdirectory (test) endif () diff --git a/config/apple b/config/apple index a8a219b6798..39ed454a11f 100644 --- a/config/apple +++ b/config/apple @@ -55,30 +55,19 @@ fi # Figure out C compiler flags . $srcdir/config/gnu-flags . $srcdir/config/clang-flags +. $srcdir/config/oneapi-flags . $srcdir/config/intel-flags -# temp patch: if GCC 4.2.1 is used in Lion or Mountain Lion systems, do not -# use -O option as it causes failures in test/dt_arith. -case "$host_os" in - darwin1[12].*) # lion & mountain lion - #echo cc_vendor=$cc_vendor'-'cc_version=$cc_version - case "$cc_vendor-$cc_version" in - gcc-4.2.1) - # Remove any -O flags - #echo PROD_CFLAGS=$PROD_CFLAGS - PROD_CFLAGS="`echo $PROD_CFLAGS | sed -e 's/-O[0-3]*//'`" - #echo new PROD_CFLAGS=$PROD_CFLAGS - ;; - esac - ;; -esac - if test "X-" = "X-$FC"; then case $CC_BASENAME in gcc*) FC=gfortran FC_BASENAME=gfortran ;; + icx*) + FC=ifx + FC_BASENAME=ifx + ;; icc*) FC=ifort FC_BASENAME=ifort @@ -97,6 +86,7 @@ fi # Figure out FORTRAN compiler flags . $srcdir/config/gnu-fflags +. $srcdir/config/oneapi-fflags . $srcdir/config/intel-fflags @@ -107,6 +97,10 @@ if test "X-" = "X-$CXX"; then CXX=g++ CXX_BASENAME=g++ ;; + icx) + CXX=icpx + CXX_BASENAME=icpx + ;; icc) CXX=icpc CXX_BASENAME=icpc @@ -123,6 +117,7 @@ if test "X-" = "X-$CXX"; then fi # Figure out C++ compiler flags +. $srcdir/config/oneapi-cxxflags . $srcdir/config/intel-cxxflags # Do this ahead of GNU to avoid icpc being detected as g++ . $srcdir/config/gnu-cxxflags . $srcdir/config/clang-cxxflags @@ -139,6 +134,11 @@ case $CC in grep 'GCC' | sed 's/.*\((GCC) [-a-z0-9\. ]*.*\)/\1/'` ;; + *icx*) + cc_version_info=`$CC $CCFLAGS $H5_CCFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; + *icc*) cc_version_info=`$CC $CCFLAGS $H5_CCFLAGS -V 2>&1 | grep 'Version' |\ sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` @@ -156,6 +156,11 @@ case $FC in grep 'GCC' | sed 's/\(.*(GCC) [-a-z0-9\. 
]*\).*/\1/'` ;; + *ifx*) + fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; + *ifc*|*ifort*) fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'Version' |\ sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` @@ -179,6 +184,11 @@ case $CXX in grep 'GCC' | sed 's/.*\((GCC) [-a-z0-9\. ]*.*\)/\1/'` ;; + *icpx*) + cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; + *icpc*) cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'Version' |\ sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` diff --git a/config/cmake-presets/hidden-presets.json b/config/cmake-presets/hidden-presets.json index bd36153c278..fad63b7b387 100644 --- a/config/cmake-presets/hidden-presets.json +++ b/config/cmake-presets/hidden-presets.json @@ -488,4 +488,5 @@ ] } ] -} \ No newline at end of file +} + diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake index f847457f5a0..3d4c23b362c 100644 --- a/config/cmake/ConfigureChecks.cmake +++ b/config/cmake/ConfigureChecks.cmake @@ -909,7 +909,7 @@ endmacro () #----------------------------------------------------------------------------- # ---------------------------------------------------------------------- -# Set the flag to indicate that the machine is using a special algorithm toconvert +# Set the flag to indicate that the machine is using a special algorithm to convert # 'long double' to '(unsigned) long' values. (This flag should only be set for # the IBM Power Linux. When the bit sequence of long double is # 0x4351ccf385ebc8a0bfcc2a3c3d855620, the converted value of (unsigned)long diff --git a/config/cmake/HDF5PluginCache.cmake b/config/cmake/HDF5PluginCache.cmake index 34a97d5902a..14075616173 100644 --- a/config/cmake/HDF5PluginCache.cmake +++ b/config/cmake/HDF5PluginCache.cmake @@ -6,7 +6,7 @@ # examples are the tests for plugins set (H5PL_BUILD_TESTING ON CACHE BOOL "Enable H5PL testing" FORCE) -set (BUILD_EXAMPLES ON CACHE BOOL "Build H5PL Examples" FORCE) +set (BUILD_EXAMPLES ${HDF5_BUILD_EXAMPLES} CACHE BOOL "Build H5PL Examples" FORCE) #preset HDF5 cache vars to this projects libraries instead of searching set (H5PL_HDF5_HEADER "H5pubconf.h" CACHE STRING "Name of HDF5 header" FORCE) diff --git a/config/cmake/HDF5PluginMacros.cmake b/config/cmake/HDF5PluginMacros.cmake index aa409f710a2..e2bdce3f33f 100644 --- a/config/cmake/HDF5PluginMacros.cmake +++ b/config/cmake/HDF5PluginMacros.cmake @@ -31,56 +31,6 @@ macro (EXTERNAL_PLUGIN_LIBRARY compress_type) include (${HDF_RESOURCES_DIR}/HDF5PluginCache.cmake) set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) add_subdirectory(${plugin_SOURCE_DIR} ${plugin_BINARY_DIR}) - if (ENABLE_BLOSC) - add_dependencies (h5blosc ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_blosc ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_blosc PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_BSHUF) - add_dependencies (h5bshuf ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_bshuf ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_bshuf PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_BZIP2) - add_dependencies (h5bz2 ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_bzip2 ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_bzip2 
PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_JPEG) - add_dependencies (h5jpeg ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_jpeg ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_jpeg PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_LZ4) - add_dependencies (h5lz4 ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_lz4 ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_lz4 PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_LZF) - add_dependencies (h5lzf ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_lzf ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_lzf PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_MAFISC) - add_dependencies (h5mafisc ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_mafisc ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_mafisc PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_SZ) - add_dependencies (h5sz ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_sz ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_sz PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_ZFP) - add_dependencies (h5zfp ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_zfp ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_zfp PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () - if (ENABLE_ZSTD) - add_dependencies (h5zstd ${HDF5_LIBSH_TARGET}) - add_dependencies (h5ex_d_zstd ${HDF5_LIBSH_TARGET}) - target_include_directories (h5ex_d_zstd PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR}") - endif () endif () message (VERBOSE "HDF5_INCLUDE_DIR=${HDF5_INCLUDE_DIR}") set (PLUGIN_BINARY_DIR "${plugin_BINARY_DIR}") @@ -94,10 +44,10 @@ macro (FILTER_OPTION plname) string(TOLOWER ${plname} PLUGIN_NAME) option (ENABLE_${plname} "Enable Library Building for ${plname} plugin" ON) if (ENABLE_${plname}) - option (HDF_${plname}_USE_EXTERNAL "Use External Library Building for ${PLUGIN_NAME} plugin" 0) + option (HDF_${plname}_USE_EXTERNAL "Use External Library Building for ${PLUGIN_NAME} plugin" OFF) mark_as_advanced (HDF_${plname}_USE_EXTERNAL) if (H5PL_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR H5PL_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ") - set (HDF_${plname}_USE_EXTERNAL 1 CACHE BOOL "Use External Library Building for ${PLUGIN_NAME} plugin" FORCE) + set (HDF_${plname}_USE_EXTERNAL ON CACHE BOOL "Use External Library Building for ${PLUGIN_NAME} plugin" FORCE) if (H5PL_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT") set (HDF_${plname}_URL ${HDF_${plname}_GIT_URL}) set (HDF_${plname}_BRANCH ${HDF_${plname}_GIT_BRANCH}) diff --git a/config/cmake/HDFCXXCompilerFlags.cmake b/config/cmake/HDFCXXCompilerFlags.cmake index e8a55ba779b..dd120c911cb 100644 --- a/config/cmake/HDFCXXCompilerFlags.cmake +++ b/config/cmake/HDFCXXCompilerFlags.cmake @@ -21,7 +21,7 @@ message (VERBOSE "Warnings Configuration: CXX default: ${CMAKE_CXX_FLAGS}") #----------------------------------------------------------------------------- # Compiler specific flags : Shouldn't there be compiler tests for these #----------------------------------------------------------------------------- -if (WIN32 AND CMAKE_CXX_COMPILER_ID STREQUAL "Intel") +if (WIN32 AND (CMAKE_CXX_COMPILER_ID STREQUAL "Intel" OR CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM")) set (_INTEL_WINDOWS 1) endif () @@ -49,6 +49,22 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL SunPro AND CMAKE_CXX_COMPILER_LOADED) endif () endif () +if 
(CMAKE_CXX_COMPILER_ID STREQUAL "NVHPC" AND CMAKE_CXX_COMPILER_LOADED) + if (NOT DEFINED CMAKE_CXX${CMAKE_CXX_STANDARD}_STANDARD_COMPILE_OPTION) + if (NOT CMAKE_CXX_STANDARD OR CMAKE_CXX_STANDARD EQUAL 11) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_C11_STANDARD_COMPILE_OPTION}") + endif () + endif () + if (NOT ${HDF_CFG_NAME} MATCHES "Debug" AND NOT ${HDF_CFG_NAME} MATCHES "Developer") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Minform=warn") + if (NOT ${HDF_CFG_NAME} MATCHES "RelWithDebInfo") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -s") + endif () + else () + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Mbounds -gopt -g") + endif () +endif () + if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_LOADED) set (CMAKE_CXX_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_CXX_FLAGS}") if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") @@ -97,7 +113,7 @@ if (HDF5_DISABLE_COMPILER_WARNINGS) endif () #----------------------------------------------------------------------------- -# HDF5 library compile options +# HDF5 library compile options - to be made available to all targets #----------------------------------------------------------------------------- if (${CMAKE_SYSTEM_NAME} MATCHES "SunOS") @@ -114,16 +130,16 @@ else () # warnings that are emitted. If you need it, add it at configure time. if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") if (_INTEL_WINDOWS) - ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/win-general") + ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-general") else () - ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/general") + ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/general") endif() if (NOT _INTEL_WINDOWS) if(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 15.0) - ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/15") + ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/15") endif() if(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 18.0) - ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/18") + ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/18") endif() endif() elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") @@ -133,7 +149,13 @@ else () ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/cxx-general") ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/cxx-error-general") endif () - elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-general") + else () + ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/general") + endif() + elseif (CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/general") elseif (CMAKE_CXX_COMPILER_ID STREQUAL "PGI") list (APPEND HDF5_CMAKE_CXX_FLAGS "-Minform=inform") @@ -148,18 +170,28 @@ endif () if (HDF5_ENABLE_DEV_WARNINGS) message (STATUS "....HDF5 developer group warnings are enabled") if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel") - ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/developer-general") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-developer-general") + else () + ADD_H5_FLAGS 
(H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/developer-general") + endif() elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # Use the C warnings as CXX warnings are the same ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/developer-general") - elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-developer-general") + else () + ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/developer-general") + endif() + elseif (CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/developer-general") endif () else () if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # Use the C warnings as CXX warnings are the same ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/no-developer-general") - elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CXXFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/no-developer-general") endif () endif () @@ -291,6 +323,8 @@ if (HDF5_ENABLE_SYMBOLS MATCHES "YES") if (CMAKE_CXX_COMPILER_LOADED) if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel" AND NOT _INTEL_WINDOWS) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g") + elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" AND NOT _INTEL_WINDOWS) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g") elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g") endif () @@ -299,6 +333,8 @@ elseif (HDF5_ENABLE_SYMBOLS MATCHES "NO") if (CMAKE_CXX_COMPILER_LOADED) if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel" AND NOT _INTEL_WINDOWS) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-s") + elseif (CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM" AND NOT _INTEL_WINDOWS) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-s") elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -s") endif () diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake index 1dca9103ef3..7bddad0f776 100644 --- a/config/cmake/HDFCompilerFlags.cmake +++ b/config/cmake/HDFCompilerFlags.cmake @@ -9,8 +9,8 @@ # If you do not have access to either file, you may request a copy from # help@hdfgroup.org. 
# -set(CMAKE_C_STANDARD 99) -set(CMAKE_C_STANDARD_REQUIRED TRUE) +set (CMAKE_C_STANDARD 99) +set (CMAKE_C_STANDARD_REQUIRED TRUE) set (CMAKE_C_FLAGS "${CMAKE_C99_STANDARD_COMPILE_OPTION} ${CMAKE_C_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_SANITIZER_FLAGS} ${CMAKE_C_FLAGS}") @@ -18,35 +18,84 @@ message (VERBOSE "Warnings Configuration: C default: ${CMAKE_C_FLAGS}") #----------------------------------------------------------------------------- # Compiler specific flags : Shouldn't there be compiler tests for these #----------------------------------------------------------------------------- -if(WIN32 AND CMAKE_C_COMPILER_ID STREQUAL "Intel") - set(_INTEL_WINDOWS 1) -endif() +if (WIN32 AND (CMAKE_C_COMPILER_ID STREQUAL "Intel" OR CMAKE_C_COMPILER_ID MATCHES "IntelLLVM")) + set (_INTEL_WINDOWS 1) +endif () -if(WIN32 AND CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" AND "x${CMAKE_C_SIMULATE_ID}" STREQUAL "xMSVC") - set(_CLANG_MSVC_WINDOWS 1) -endif() +if (WIN32 AND CMAKE_C_COMPILER_ID MATCHES "[Cc]lang" AND "x${CMAKE_C_SIMULATE_ID}" STREQUAL "xMSVC") + set (_CLANG_MSVC_WINDOWS 1) +endif () # Disable deprecation warnings for standard C functions. # really only needed for newer versions of VS, but should # not hurt other versions, and this will work into the # future -if(MSVC OR _INTEL_WINDOWS OR _CLANG_MSVC_WINDOWS) - add_definitions(-D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE) -endif() +if (MSVC OR _INTEL_WINDOWS OR _CLANG_MSVC_WINDOWS) + add_definitions (-D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE) +endif () -if(MSVC) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stack:10000000") -endif() +if (MSVC) + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stack:10000000") +endif () # MSVC 14.28 enables C5105, but the Windows SDK 10.0.18362.0 triggers it. -if(CMAKE_C_COMPILER_ID STREQUAL "MSVC" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 19.28) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd5105") -endif() +if (CMAKE_C_COMPILER_ID STREQUAL "MSVC" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 19.28) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd5105") +endif () if(_CLANG_MSVC_WINDOWS AND "x${CMAKE_C_COMPILER_FRONTEND_VARIANT}" STREQUAL "xGNU") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Xlinker -stack:20000000") endif() +if (CMAKE_C_COMPILER_ID STREQUAL "NVHPC" ) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Minform=warn") + if (NOT ${HDF_CFG_NAME} MATCHES "Debug" AND NOT ${HDF_CFG_NAME} MATCHES "Developer") + if (NOT ${HDF_CFG_NAME} MATCHES "RelWithDebInfo") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -s") + endif () + else () + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Mbounds -g") + endif () + + # With at least NVHPC 23.5 - 23.9, compiling with -O2 or higher and -DNDEBUG + # appears to have issues that manifest in the tests as incorrect metadata + # checksums being read or memory being corrupted. Compiling without -DNDEBUG + # does not appear to have these issues, but is not ideal due to compiling in + # asserts and other library debug code. Compiling with -O1 also does not appear + # to have these issues, so set maximum optimization level to -O1 for now until + # it can be determined whether these issues are compiler-specific or issues + # in the library. 
+ set (cmake_c_flags_minsizerel_edited "${CMAKE_C_FLAGS_MINSIZEREL}") + string (REPLACE "-O2" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") + string (REPLACE "-O3" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") + string (REPLACE "-O4" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") + string (REPLACE "-Ofast" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") + string (REPLACE "-fast" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") + string (STRIP "${cmake_c_flags_minsizerel_edited}" cmake_c_flags_minsizerel_edited) + string (PREPEND cmake_c_flags_minsizerel_edited "-O1 ") + set (CMAKE_C_FLAGS_MINSIZEREL "${cmake_c_flags_minsizerel_edited}") + + set (cmake_c_flags_release_edited "${CMAKE_C_FLAGS_RELEASE}") + string (REPLACE "-O2" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") + string (REPLACE "-O3" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") + string (REPLACE "-O4" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") + string (REPLACE "-Ofast" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") + string (REPLACE "-fast" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") + string (STRIP "${cmake_c_flags_release_edited}" cmake_c_flags_release_edited) + string (PREPEND cmake_c_flags_release_edited "-O1 ") + set (CMAKE_C_FLAGS_RELEASE "${cmake_c_flags_release_edited}") + + set (cmake_c_flags_relwithdebinfo_edited "${CMAKE_C_FLAGS_RELWITHDEBINFO}") + string (REPLACE "-O2" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") + string (REPLACE "-O3" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") + string (REPLACE "-O4" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") + string (REPLACE "-Ofast" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") + string (REPLACE "-fast" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") + string (STRIP "${cmake_c_flags_relwithdebinfo_edited}" cmake_c_flags_relwithdebinfo_edited) + string (PREPEND cmake_c_flags_relwithdebinfo_edited "-O1 ") + set (CMAKE_C_FLAGS_RELWITHDEBINFO "${cmake_c_flags_relwithdebinfo_edited}") +endif () + if (CMAKE_COMPILER_IS_GNUCC) set (CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS}") if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") @@ -106,7 +155,7 @@ if (HDF5_DISABLE_COMPILER_WARNINGS) endif () #----------------------------------------------------------------------------- -# HDF5 library compile options +# HDF5 library compile options - to be made available to all targets #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- @@ -135,20 +184,20 @@ else () # warnings that are emitted. If you need it, add it at configure time. 
if (CMAKE_C_COMPILER_ID STREQUAL "Intel") if (_INTEL_WINDOWS) - ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/win-general") + ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-general") else () - ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/general") + ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/general") endif() if (NOT _INTEL_WINDOWS) - if(NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 15.0) - ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/15") - endif() + if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 15.0) + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/15") + endif () # this is just a failsafe list (APPEND H5_CFLAGS "-finline-functions") - if(NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 18.0) - ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/18") - endif() - endif() + if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 18.0) + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/18") + endif () + endif () elseif (CMAKE_C_COMPILER_ID STREQUAL "GNU") # Add general CFlags for GCC versions 4.8 and above if (CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 4.8) @@ -158,7 +207,15 @@ else () # gcc automatically inlines based on the optimization level # this is just a failsafe list (APPEND H5_CFLAGS "-finline-functions") - elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-general") + else () + # this is just a failsafe + list (APPEND H5_CFLAGS "-finline-functions") + ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/general") + endif () + elseif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (HDF5_CMAKE_C_FLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/general") ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/error-general") elseif (CMAKE_C_COMPILER_ID STREQUAL "PGI") @@ -180,13 +237,19 @@ if (HDF5_ENABLE_DEV_WARNINGS) message (STATUS "....HDF5 developer group warnings are enabled") if (CMAKE_C_COMPILER_ID STREQUAL "Intel") if (_INTEL_WINDOWS) - ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/win-developer-general") + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-developer-general") else () - ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/developer-general") + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/developer-general") endif () elseif (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 4.8) ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/developer-general") - elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-developer-general") + else () + ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/developer-general") + endif () + elseif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/developer-general") endif () @@ -200,12 +263,16 @@ if (HDF5_ENABLE_DEV_WARNINGS) list (APPEND H5_CFLAGS "-Winline") elseif (CMAKE_C_COMPILER_ID 
STREQUAL "Intel" AND NOT _INTEL_WINDOWS) list (APPEND H5_CFLAGS "-Winline") + elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" AND NOT _INTEL_WINDOWS) + list (APPEND H5_CFLAGS "-Winline") + elseif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + list (APPEND H5_CFLAGS "-Winline") endif () endif () else () if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_GREATER_EQUAL 4.8) ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/no-developer-general") - elseif (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") + elseif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") ADD_H5_FLAGS (H5_CFLAGS "${HDF5_SOURCE_DIR}/config/clang-warnings/no-developer-general") endif () endif () diff --git a/config/cmake/HDFFortranCompilerFlags.cmake b/config/cmake/HDFFortranCompilerFlags.cmake index e08df05c52d..8ac3f490cc3 100644 --- a/config/cmake/HDFFortranCompilerFlags.cmake +++ b/config/cmake/HDFFortranCompilerFlags.cmake @@ -23,6 +23,8 @@ if (HDF5_DISABLE_COMPILER_WARNINGS) set (HDF5_WARNINGS_BLOCKED 1) if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel") set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} /warn:none") + elseif (CMAKE_Fortran_COMPILER_ID MATCHES "IntelLLVM") + set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} /warn:none") endif () endif () if (WIN32) @@ -41,7 +43,7 @@ if (HDF5_DISABLE_COMPILER_WARNINGS) endif () #----------------------------------------------------------------------------- -# HDF5 library compile options +# HDF5 library compile options - to be made available to all targets #----------------------------------------------------------------------------- if (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 10.0) if (HDF5_ENABLE_BUILD_DIAGS) @@ -56,11 +58,25 @@ if (CMAKE_Fortran_COMPILER_ID STREQUAL "NAG") message (STATUS "... 
Select IEEE floating-point mode full") list (APPEND HDF5_CMAKE_Fortran_FLAGS "-ieee=full") endif () +if (CMAKE_Fortran_COMPILER_ID STREQUAL "NVHPC") + if (NOT ${HDF_CFG_NAME} MATCHES "Debug" AND NOT ${HDF_CFG_NAME} MATCHES "Developer") + set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -Mnoframe") + if (NOT ${HDF_CFG_NAME} MATCHES "RelWithDebInfo") + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -s") + endif () + else () + set (CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -Mbounds -Mchkptr -Mdclchk -g") + endif () +endif () if (NOT MSVC AND NOT MINGW) # General flags if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel") - ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/ifort-general") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-ifort-general") + else () + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/ifort-general") + endif() list (APPEND HDF5_CMAKE_Fortran_FLAGS "-free") elseif (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU") ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/gfort-general") @@ -75,6 +91,13 @@ if (NOT MSVC AND NOT MINGW) else () list (APPEND HDF5_CMAKE_Fortran_FLAGS "-std=f2008") endif () + elseif (CMAKE_Fortran_COMPILER_ID MATCHES "IntelLLVM") + if (_INTEL_WINDOWS) + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-ifort-general") + else () + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/ifort-general") + endif() + list (APPEND HDF5_CMAKE_Fortran_FLAGS "-free") elseif (CMAKE_Fortran_COMPILER_ID STREQUAL "PGI") list (APPEND HDF5_CMAKE_Fortran_FLAGS "-Mfreeform" "-Mdclchk" "-Mstandard" "-Mallocatable=03") endif () @@ -125,7 +148,10 @@ if (NOT MSVC AND NOT MINGW) endif () else () if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel") - ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/win-ifort-general") + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/classic/win-ifort-general") + list (APPEND HDF5_CMAKE_Fortran_FLAGS "/stand:f03" "/free") + elseif (CMAKE_Fortran_COMPILER_ID MATCHES "IntelLLVM") + ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/oneapi/win-ifort-general") list (APPEND HDF5_CMAKE_Fortran_FLAGS "/stand:f03" "/free") endif () endif () diff --git a/config/cmake/LIBAEC/CMakeLists.txt b/config/cmake/LIBAEC/CMakeLists.txt index 53950d8942a..9ffb68d28b7 100644 --- a/config/cmake/LIBAEC/CMakeLists.txt +++ b/config/cmake/LIBAEC/CMakeLists.txt @@ -369,10 +369,10 @@ configure_file (${LIBAEC_SOURCE_DIR}/README.md ${LIBAEC_BINARY_DIR}/LIBAEC_READM if (NOT LIBAEC_EXTERNALLY_CONFIGURED) set (CPACK_PACKAGE_VENDOR "HDF_Group") set (CPACK_PACKAGE_NAME "${LIBAEC_PACKAGE_NAME}") - if (CDASH_LOCAL) - set (CPACK_PACKAGE_VERSION "${LIBAEC_PACKAGE_VERSION}") - else () + if (NOT WIN32 OR LIBAEC_VERS_SUBRELEASE MATCHES "^[0-9]+$") set (CPACK_PACKAGE_VERSION "${LIBAEC_PACKAGE_VERSION_STRING}") + else () + set (CPACK_PACKAGE_VERSION "${LIBAEC_PACKAGE_VERSION}") endif () set (CPACK_PACKAGE_VERSION_MAJOR "${LIBAEC_PACKAGE_VERSION_MAJOR}") set (CPACK_PACKAGE_VERSION_MINOR "${LIBAEC_PACKAGE_VERSION_MINOR}") diff --git a/config/cmake/ZLIB/CMakeLists.txt b/config/cmake/ZLIB/CMakeLists.txt index 12411ac9e07..5c06a544ce2 100644 --- a/config/cmake/ZLIB/CMakeLists.txt +++ b/config/cmake/ZLIB/CMakeLists.txt @@ -428,10 +428,10 @@ configure_file 
(${ZLIB_SOURCE_DIR}/README ${ZLIB_BINARY_DIR}/ZLIB_README @ONLY) if (NOT ZLIB_EXTERNALLY_CONFIGURED) set (CPACK_PACKAGE_VENDOR "HDF_Group") set (CPACK_PACKAGE_NAME "${ZLIB_PACKAGE_NAME}") - if (CDASH_LOCAL) - set (CPACK_PACKAGE_VERSION "${ZLIB_PACKAGE_VERSION}") - else () + if (NOT WIN32 OR ZLIB_VERS_SUBRELEASE MATCHES "^[0-9]+$") set (CPACK_PACKAGE_VERSION "${ZLIB_PACKAGE_VERSION_STRING}") + else () + set (CPACK_PACKAGE_VERSION "${ZLIB_PACKAGE_VERSION}") endif () set (CPACK_PACKAGE_VERSION_MAJOR "${ZLIB_PACKAGE_VERSION_MAJOR}") set (CPACK_PACKAGE_VERSION_MINOR "${ZLIB_PACKAGE_VERSION_MINOR}") diff --git a/config/cmake/examples/HDF5_Examples.cmake.in b/config/cmake/examples/HDF5_Examples.cmake.in index d77c16d49ec..2f3a6491a90 100644 --- a/config/cmake/examples/HDF5_Examples.cmake.in +++ b/config/cmake/examples/HDF5_Examples.cmake.in @@ -29,6 +29,7 @@ set(CTEST_DASHBOARD_ROOT ${CTEST_SCRIPT_DIRECTORY}) #INSTALLDIR - HDF5 root folder #CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo #CTEST_SOURCE_NAME - name of source folder; HDF5Examples +#CTEST_TOOLCHAIN_FILE - name and path in source of toolchain file if(DEFINED CTEST_SCRIPT_ARG) # transform ctest script arguments of the form # script.ctest,var1=value1,var2=value2 @@ -62,11 +63,6 @@ if(NOT DEFINED CTEST_SOURCE_NAME) set(CTEST_SOURCE_NAME "HDF5Examples") endif() -if(NOT DEFINED HDF_LOCAL) - set(CDASH_LOCAL "NO") -else() - set(CDASH_LOCAL "YES") -endif() if(NOT DEFINED CTEST_SITE) set(CTEST_SITE "local") endif() @@ -100,10 +96,13 @@ else() endif() ### default HDF5_PLUGIN_PATH to where the filter libraries are located set(ENV{HDF5_PLUGIN_PATH} "${INSTALLDIR}/lib/plugin") -if(${CDASH_LOCAL}) - set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCDASH_LOCAL:BOOL=ON") -endif() set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PACKAGE@@HDF_PACKAGE_EXT@") +### use a toolchain file (supported everywhere) #### +if(NOT DEFINED CTEST_TOOLCHAIN_FILE) + set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=@CTEST_TOOLCHAIN_FILE@") +else() + set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=${CTEST_TOOLCHAIN_FILE}") +endif() ############################################################################################################### # For any comments please contact cdashhelp@hdfgroup.org diff --git a/config/cmake/examples/HDF5_Examples_options.cmake b/config/cmake/examples/HDF5_Examples_options.cmake index cdd49eb13ad..684ec5bf641 100644 --- a/config/cmake/examples/HDF5_Examples_options.cmake +++ b/config/cmake/examples/HDF5_Examples_options.cmake @@ -28,6 +28,19 @@ ### build with shared libraries #set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=ON") +############################################################################################# +#### maximum parallel processor count for build and test #### +#set(MAX_PROC_COUNT 8) + +############################################################################################# +#### alternate toolsets (Windows usually) #### +#set(CMAKE_GENERATOR_TOOLSET "Intel C++ Compiler 17.0") + +############################################################################################# +### use a toolchain file (supported everywhere) #### +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=config/toolchain/clang.cmake") +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=config/toolchain/intel.cmake") + 
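For reference, the config/toolchain files named in the options above are ordinary CMake scripts that pin which compilers the examples build should use. The sketch below is illustrative only — it assumes clang, clang++ and gfortran are on PATH and is not the exact content of the shipped config/toolchain/clang.cmake:

# hypothetical minimal toolchain file; adjust compiler names to the local installation
set(CMAKE_C_COMPILER clang)
set(CMAKE_CXX_COMPILER clang++)
set(CMAKE_Fortran_COMPILER gfortran)

Because the path is appended to ADD_BUILD_OPTIONS as -DCMAKE_TOOLCHAIN_FILE, it is then passed to the configure step run by the ctest driver script.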
############################################################################################# #### languages #### ### disable C builds diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in index e5bd2406735..c20e18f54a4 100644 --- a/config/cmake/hdf5-config.cmake.in +++ b/config/cmake/hdf5-config.cmake.in @@ -32,29 +32,40 @@ set (${HDF5_PACKAGE_NAME}_VALID_COMPONENTS #----------------------------------------------------------------------------- # User Options #----------------------------------------------------------------------------- -set (${HDF5_PACKAGE_NAME}_ENABLE_PARALLEL @HDF5_ENABLE_PARALLEL@) -set (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN @HDF5_BUILD_FORTRAN@) -set (${HDF5_PACKAGE_NAME}_BUILD_CPP_LIB @HDF5_BUILD_CPP_LIB@) -set (${HDF5_PACKAGE_NAME}_BUILD_JAVA @HDF5_BUILD_JAVA@) -set (${HDF5_PACKAGE_NAME}_BUILD_TOOLS @HDF5_BUILD_TOOLS@) -set (${HDF5_PACKAGE_NAME}_BUILD_HL_LIB @HDF5_BUILD_HL_LIB@) -set (${HDF5_PACKAGE_NAME}_BUILD_HL_GIF_TOOLS @HDF5_BUILD_HL_GIF_TOOLS@) -set (${HDF5_PACKAGE_NAME}_ENABLE_THREADSAFE @HDF5_ENABLE_THREADSAFE@) +# Languages: +set (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN @HDF5_BUILD_FORTRAN@) +set (${HDF5_PACKAGE_NAME}_BUILD_CPP_LIB @HDF5_BUILD_CPP_LIB@) +set (${HDF5_PACKAGE_NAME}_BUILD_JAVA @HDF5_BUILD_JAVA@) +set (${HDF5_PACKAGE_NAME}_INSTALL_MOD_FORTRAN "@HDF5_INSTALL_MOD_FORTRAN@") +#----------------------------------------------------------------------------- +# Features: +set (${HDF5_PACKAGE_NAME}_ENABLE_PARALLEL @HDF5_ENABLE_PARALLEL@) +set (${HDF5_PACKAGE_NAME}_PARALLEL_FILTERED_WRITES @PARALLEL_FILTERED_WRITES@) +set (${HDF5_PACKAGE_NAME}_LARGE_PARALLEL_IO @LARGE_PARALLEL_IO@) +set (${HDF5_PACKAGE_NAME}_BUILD_HL_LIB @HDF5_BUILD_HL_LIB@) +set (${HDF5_PACKAGE_NAME}_BUILD_DIMENSION_SCALES_WITH_NEW_REF @DIMENSION_SCALES_WITH_NEW_REF@) +set (${HDF5_PACKAGE_NAME}_BUILD_TOOLS @HDF5_BUILD_TOOLS@) +set (${HDF5_PACKAGE_NAME}_BUILD_HL_GIF_TOOLS @HDF5_BUILD_HL_GIF_TOOLS@) +set (${HDF5_PACKAGE_NAME}_ENABLE_THREADSAFE @HDF5_ENABLE_THREADSAFE@) +set (${HDF5_PACKAGE_NAME}_DEFAULT_API_VERSION "@DEFAULT_API_VERSION@") +set (${HDF5_PACKAGE_NAME}_ENABLE_DEPRECATED_SYMBOLS @HDF5_ENABLE_DEPRECATED_SYMBOLS@) +set (${HDF5_PACKAGE_NAME}_ENABLE_Z_LIB_SUPPORT @HDF5_ENABLE_Z_LIB_SUPPORT@) +set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_SUPPORT @HDF5_ENABLE_SZIP_SUPPORT@) +set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_ENCODING @HDF5_ENABLE_SZIP_ENCODING@) +set (${HDF5_PACKAGE_NAME}_ENABLE_MAP_API @H5_HAVE_MAP_API@) +set (${HDF5_PACKAGE_NAME}_ENABLE_DIRECT_VFD @H5_HAVE_DIRECT@) +set (${HDF5_PACKAGE_NAME}_ENABLE_MIRROR_VFD @H5_HAVE_MIRROR_VFD@) +set (${HDF5_PACKAGE_NAME}_ENABLE_SUBFILING_VFD @HDF5_ENABLE_SUBFILING_VFD@) +set (${HDF5_PACKAGE_NAME}_ENABLE_ROS3_VFD @HDF5_ENABLE_ROS3_VFD@) +set (${HDF5_PACKAGE_NAME}_ENABLE_HDFS_VFD @H5_HAVE_LIBHDFS@) set (${HDF5_PACKAGE_NAME}_ENABLE_PLUGIN_SUPPORT @HDF5_ENABLE_PLUGIN_SUPPORT@) -set (${HDF5_PACKAGE_NAME}_ENABLE_Z_LIB_SUPPORT @HDF5_ENABLE_Z_LIB_SUPPORT@) -set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_SUPPORT @HDF5_ENABLE_SZIP_SUPPORT@) -set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_ENCODING @HDF5_ENABLE_SZIP_ENCODING@) -set (${HDF5_PACKAGE_NAME}_ENABLE_ROS3_VFD @HDF5_ENABLE_ROS3_VFD@) -set (${HDF5_PACKAGE_NAME}_ENABLE_SUBFILING_VFD @HDF5_ENABLE_SUBFILING_VFD@) +#----------------------------------------------------------------------------- set (${HDF5_PACKAGE_NAME}_BUILD_SHARED_LIBS @H5_ENABLE_SHARED_LIB@) set (${HDF5_PACKAGE_NAME}_BUILD_STATIC_LIBS @H5_ENABLE_STATIC_LIB@) set (${HDF5_PACKAGE_NAME}_PACKAGE_EXTLIBS @HDF5_PACKAGE_EXTLIBS@) -set 
(${HDF5_PACKAGE_NAME}_EXPORT_LIBRARIES @HDF5_LIBRARIES_TO_EXPORT@) -set (${HDF5_PACKAGE_NAME}_ARCHITECTURE "@CMAKE_GENERATOR_ARCHITECTURE@") -set (${HDF5_PACKAGE_NAME}_TOOLSET "@CMAKE_GENERATOR_TOOLSET@") -set (${HDF5_PACKAGE_NAME}_DEFAULT_API_VERSION "@DEFAULT_API_VERSION@") -set (${HDF5_PACKAGE_NAME}_PARALLEL_FILTERED_WRITES @PARALLEL_FILTERED_WRITES@) -set (${HDF5_PACKAGE_NAME}_INSTALL_MOD_FORTRAN "@HDF5_INSTALL_MOD_FORTRAN@") +set (${HDF5_PACKAGE_NAME}_EXPORT_LIBRARIES @HDF5_LIBRARIES_TO_EXPORT@) +set (${HDF5_PACKAGE_NAME}_ARCHITECTURE "@CMAKE_GENERATOR_ARCHITECTURE@") +set (${HDF5_PACKAGE_NAME}_TOOLSET "@CMAKE_GENERATOR_TOOLSET@") #----------------------------------------------------------------------------- # Dependencies diff --git a/config/cmake/runTest.cmake b/config/cmake/runTest.cmake index 1304d36735b..d21765a8e36 100644 --- a/config/cmake/runTest.cmake +++ b/config/cmake/runTest.cmake @@ -218,14 +218,6 @@ if (NOT TEST_SKIP_COMPARE) file (READ ${TEST_FOLDER}/${TEST_REFERENCE} TEST_STREAM) list (LENGTH TEST_STREAM test_len) if (test_len GREATER 0) - # if (WIN32) # no longer needed for CMake > 3.15 - # configure_file(${TEST_FOLDER}/${TEST_REFERENCE} ${TEST_FOLDER}/${TEST_REFERENCE}.tmp NEWLINE_STYLE CRLF) - # if (EXISTS "${TEST_FOLDER}/${TEST_REFERENCE}.tmp") - # file(RENAME ${TEST_FOLDER}/${TEST_REFERENCE}.tmp ${TEST_FOLDER}/${TEST_REFERENCE}) - # endif () - # #file (READ ${TEST_FOLDER}/${TEST_REFERENCE} TEST_STREAM) - # #file (WRITE ${TEST_FOLDER}/${TEST_REFERENCE} "${TEST_STREAM}") - # endif () if (NOT TEST_SORT_COMPARE) # now compare the output with the reference @@ -293,14 +285,6 @@ if (NOT TEST_SKIP_COMPARE) file (READ ${TEST_FOLDER}/${TEST_ERRREF} TEST_STREAM) list (LENGTH TEST_STREAM test_len) if (test_len GREATER 0) - # if (WIN32) # no longer needed for CMake > 3.15 - # configure_file(${TEST_FOLDER}/${TEST_ERRREF} ${TEST_FOLDER}/${TEST_ERRREF}.tmp NEWLINE_STYLE CRLF) - # if (EXISTS "${TEST_FOLDER}/${TEST_ERRREF}.tmp") - # file(RENAME ${TEST_FOLDER}/${TEST_ERRREF}.tmp ${TEST_FOLDER}/${TEST_ERRREF}) - # endif () - # #file (READ ${TEST_FOLDER}/${TEST_ERRREF} TEST_STREAM) - # #file (WRITE ${TEST_FOLDER}/${TEST_ERRREF} "${TEST_STREAM}") - # endif () # now compare the error output with the error reference execute_process ( diff --git a/config/cmake/scripts/CTestScript.cmake b/config/cmake/scripts/CTestScript.cmake index f277864b7ee..2a57db8db7b 100644 --- a/config/cmake/scripts/CTestScript.cmake +++ b/config/cmake/scripts/CTestScript.cmake @@ -202,14 +202,14 @@ endforeach () # Initialize the CTEST commands #------------------------------ if (CMAKE_GENERATOR_TOOLSET) - set (CTEST_CONFIGURE_TOOLSET "-T${CMAKE_GENERATOR_TOOLSET}") + set (CTEST_CONFIGURE_TOOLSET "\"-T${CMAKE_GENERATOR_TOOLSET}\"") else () - set (CTEST_CONFIGURE_TOOLSET "") + set (CTEST_CONFIGURE_TOOLSET) endif() if (CMAKE_GENERATOR_ARCHITECTURE) - set (CTEST_CONFIGURE_ARCHITECTURE "-A${CMAKE_GENERATOR_ARCHITECTURE}") + set (CTEST_CONFIGURE_ARCHITECTURE "\"-A${CMAKE_GENERATOR_ARCHITECTURE}\"") else () - set (CTEST_CONFIGURE_ARCHITECTURE "") + set (CTEST_CONFIGURE_ARCHITECTURE) endif() if (LOCAL_MEMCHECK_TEST) if(LOCAL_USE_VALGRIND) @@ -217,7 +217,7 @@ if (LOCAL_MEMCHECK_TEST) find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind) endif() set (CTEST_CONFIGURE_COMMAND - "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/mccacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_CONFIGURE_ARCHITECTURE}\" 
\"${CTEST_CONFIGURE_TOOLSET}\" \"${CTEST_SOURCE_DIRECTORY}\"" + "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/mccacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" ${CTEST_CONFIGURE_ARCHITECTURE} ${CTEST_CONFIGURE_TOOLSET} \"${CTEST_SOURCE_DIRECTORY}\"" ) else () if (LOCAL_COVERAGE_TEST) @@ -226,7 +226,7 @@ else () endif () endif () set (CTEST_CONFIGURE_COMMAND - "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_CONFIGURE_ARCHITECTURE}\" \"${CTEST_CONFIGURE_TOOLSET}\" \"${CTEST_SOURCE_DIRECTORY}\"" + "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" ${CTEST_CONFIGURE_ARCHITECTURE} ${CTEST_CONFIGURE_TOOLSET} \"${CTEST_SOURCE_DIRECTORY}\"" ) endif () diff --git a/config/cmake/scripts/HDF5options.cmake b/config/cmake/scripts/HDF5options.cmake index 92bfd37ecbe..5d078461be9 100644 --- a/config/cmake/scripts/HDF5options.cmake +++ b/config/cmake/scripts/HDF5options.cmake @@ -26,7 +26,6 @@ ############################################################################################# ### use a toolchain file (supported everywhere) #### - #set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_TOOLCHAIN_FILE:STRING=config/toolchain/intel.cmake") ############################################################################################# diff --git a/config/freebsd b/config/freebsd index 2fb962fe308..b0e825a9e26 100644 --- a/config/freebsd +++ b/config/freebsd @@ -29,7 +29,10 @@ fi # Figure out GNU C compiler flags . $srcdir/config/gnu-flags -# Figure out Intel C compiler flags +# Figure out Intel oneAPI C compiler flags +. $srcdir/config/oneapi-flags + +# Figure out Intel classic C compiler flags . $srcdir/config/intel-flags # The default Fortran 90 compiler @@ -43,6 +46,10 @@ if test "X-" = "X-$FC"; then FC=gfortran FC_BASENAME=gfortran ;; + icx*) + FC=ifx + FC_BASENAME=ifx + ;; icc*) FC=ifort FC_BASENAME=ifort @@ -57,8 +64,11 @@ fi # Figure out FORTRAN compiler flags . $srcdir/config/gnu-fflags -# Figure out Intel F90 compiler flags -. $srcdir/config/intel-fflags +# Figure out Intel oneAPI FC compiler flags +. $srcdir/config/oneapi-fflags + +# Figure out Intel classic FC compiler flags +. $srcdir/config/classic-fflags # The default C++ compiler diff --git a/config/gnu-warnings/developer-general b/config/gnu-warnings/developer-general index 79ecd6a054b..af701725200 100644 --- a/config/gnu-warnings/developer-general +++ b/config/gnu-warnings/developer-general @@ -7,11 +7,3 @@ -Wswitch-enum -Wunsafe-loop-optimizations -Wunused-macros -# -Winline warnings aren't included here because, for at least -# GNU compilers, this flag appears to conflict specifically with -# the -Og optimization level flag added for Debug and Developer -# builds and will produce warnings about functions not being -# considered for inlining. 
The flag will be added to the list -# of compiler flags separately if developer warnings are enabled -# and the build type is not Debug or Developer -#-Winline diff --git a/config/intel-cxxflags b/config/intel-cxxflags index 107b08757a9..40a3f0e9d34 100644 --- a/config/intel-cxxflags +++ b/config/intel-cxxflags @@ -129,15 +129,15 @@ if test "X-icpc" = "X-$cxx_vendor"; then # Add various general warning flags in intel-warnings. # Use the C warnings as CXX warnings are the same - H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments general)" + H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments classic/general)" ###################### # Developer warnings # ###################### # Use the C warnings as CXX warnings are the same - #NO_DEVELOPER_WARNING_CXXFLAGS=$(load_intel_arguments no-developer-general) - #DEVELOPER_WARNING_CXXFLAGS=$(load_intel_arguments developer-general) + #NO_DEVELOPER_WARNING_CXXFLAGS=$(load_intel_arguments classic/no-developer-general) + #DEVELOPER_WARNING_CXXFLAGS=$(load_intel_arguments classic/developer-general) ############################# # Version-specific warnings # @@ -157,19 +157,19 @@ if test "X-icpc" = "X-$cxx_vendor"; then # intel >= 15 if test $cxx_vers_major -ge 15; then # Use the C warnings as CXX warnings are the same - H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments 15)" + H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments classic/15)" fi # intel >= 18 if test $cxx_vers_major -ge 18; then # Use the C warnings as CXX warnings are the same - H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments 18)" + H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments classic/18)" fi # intel <= 19 if test $cxx_vers_major -le 19; then # Use the C warnings as CXX warnings are the same - H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments general-19)" + H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments classic/general-19)" fi ################# diff --git a/config/intel-fflags b/config/intel-fflags index ad1ce7c4bb5..b6307c1bedb 100644 --- a/config/intel-fflags +++ b/config/intel-fflags @@ -123,7 +123,7 @@ if test "X-ifort" = "X-$f9x_vendor"; then ########### H5_FCFLAGS="$H5_FCFLAGS -free" - H5_FCFLAGS="$H5_FCFLAGS $(load_intel_arguments ifort-general)" + H5_FCFLAGS="$H5_FCFLAGS $(load_intel_arguments classic/ifort-general)" ############################# # Version-specific warnings # diff --git a/config/intel-flags b/config/intel-flags index fbec7efdf6b..134452cc11d 100644 --- a/config/intel-flags +++ b/config/intel-flags @@ -127,14 +127,14 @@ if test "X-icc" = "X-$cc_vendor"; then ########### # Add various general warning flags in intel-warnings. 
- H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments general)" + H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments classic/general)" ###################### # Developer warnings # ###################### - #NO_DEVELOPER_WARNING_CFLAGS=$(load_intel_arguments no-developer-general) - #DEVELOPER_WARNING_CFLAGS=$(load_intel_arguments developer-general) + #NO_DEVELOPER_WARNING_CFLAGS=$(load_intel_arguments classic/no-developer-general) + #DEVELOPER_WARNING_CFLAGS=$(load_intel_arguments classic/developer-general) ############################# # Version-specific warnings # @@ -153,18 +153,18 @@ if test "X-icc" = "X-$cc_vendor"; then # intel >= 15 if test $cc_vers_major -ge 15; then - H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments 15)" + H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments classic/15)" fi # intel >= 18 if test $cc_vers_major -ge 18; then - H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments 18)" + H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments classic/18)" fi # intel <= 19 # this file has warnings only available before oneapi versions if test $cc_vers_major -le 19; then - H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments general-19)" + H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments classic/general-19)" fi ################# diff --git a/config/intel-warnings/15 b/config/intel-warnings/classic/15 similarity index 100% rename from config/intel-warnings/15 rename to config/intel-warnings/classic/15 diff --git a/config/intel-warnings/18 b/config/intel-warnings/classic/18 similarity index 100% rename from config/intel-warnings/18 rename to config/intel-warnings/classic/18 diff --git a/config/intel-warnings/classic/developer-general b/config/intel-warnings/classic/developer-general new file mode 100644 index 00000000000..6f4e9e9f4b8 --- /dev/null +++ b/config/intel-warnings/classic/developer-general @@ -0,0 +1,3 @@ +-Wreorder +-Wport +-Wstrict-aliasing diff --git a/config/intel-warnings/general b/config/intel-warnings/classic/general similarity index 100% rename from config/intel-warnings/general rename to config/intel-warnings/classic/general diff --git a/config/intel-warnings/ifort-general b/config/intel-warnings/classic/ifort-general similarity index 100% rename from config/intel-warnings/ifort-general rename to config/intel-warnings/classic/ifort-general diff --git a/config/intel-warnings/win-developer-general b/config/intel-warnings/classic/win-developer-general similarity index 100% rename from config/intel-warnings/win-developer-general rename to config/intel-warnings/classic/win-developer-general diff --git a/config/intel-warnings/win-general b/config/intel-warnings/classic/win-general similarity index 100% rename from config/intel-warnings/win-general rename to config/intel-warnings/classic/win-general diff --git a/config/intel-warnings/win-ifort-general b/config/intel-warnings/classic/win-ifort-general similarity index 100% rename from config/intel-warnings/win-ifort-general rename to config/intel-warnings/classic/win-ifort-general diff --git a/config/intel-warnings/developer-general b/config/intel-warnings/developer-general deleted file mode 100644 index 861218eecb9..00000000000 --- a/config/intel-warnings/developer-general +++ /dev/null @@ -1,11 +0,0 @@ --Wreorder --Wport --Wstrict-aliasing -# -Winline warnings aren't included here because, for at least -# GNU compilers, this flag appears to conflict specifically with -# the -Og optimization level flag added for Debug and Developer -# builds and will produce warnings about functions not being -# considered for inlining. 
The flag will be added to the list -# of compiler flags separately if developer warnings are enabled -# and the build type is not Debug or Developer -#-Winline diff --git a/config/intel-warnings/oneapi/developer-general b/config/intel-warnings/oneapi/developer-general new file mode 100644 index 00000000000..122c33d14fa --- /dev/null +++ b/config/intel-warnings/oneapi/developer-general @@ -0,0 +1,2 @@ +-Wreorder +-Wstrict-aliasing diff --git a/config/intel-warnings/oneapi/general b/config/intel-warnings/oneapi/general new file mode 100644 index 00000000000..bd866b6966d --- /dev/null +++ b/config/intel-warnings/oneapi/general @@ -0,0 +1 @@ +-Wall diff --git a/config/intel-warnings/oneapi/ifort-general b/config/intel-warnings/oneapi/ifort-general new file mode 100644 index 00000000000..1644c7cb82f --- /dev/null +++ b/config/intel-warnings/oneapi/ifort-general @@ -0,0 +1 @@ +-warn all diff --git a/config/intel-warnings/oneapi/win-developer-general b/config/intel-warnings/oneapi/win-developer-general new file mode 100644 index 00000000000..ba86a0f1916 --- /dev/null +++ b/config/intel-warnings/oneapi/win-developer-general @@ -0,0 +1,2 @@ +/Wreorder +/Wstrict-aliasing diff --git a/config/intel-warnings/oneapi/win-general b/config/intel-warnings/oneapi/win-general new file mode 100644 index 00000000000..ef54b2b6116 --- /dev/null +++ b/config/intel-warnings/oneapi/win-general @@ -0,0 +1 @@ +/Wall diff --git a/config/intel-warnings/oneapi/win-ifort-general b/config/intel-warnings/oneapi/win-ifort-general new file mode 100644 index 00000000000..a3359590c74 --- /dev/null +++ b/config/intel-warnings/oneapi/win-ifort-general @@ -0,0 +1 @@ +/warn:all diff --git a/config/linux-gnulibc1 b/config/linux-gnulibc1 index 7f3c3398048..92f2be63df5 100644 --- a/config/linux-gnulibc1 +++ b/config/linux-gnulibc1 @@ -38,12 +38,18 @@ fi # Figure out CCE C compiler flags . $srcdir/config/cce-flags -# Figure out Intel C compiler flags +# Figure out Intel oneAPI C compiler flags +. $srcdir/config/oneapi-flags + +# Figure out Intel classic C compiler flags . $srcdir/config/intel-flags # Figure out Clang C compiler flags . $srcdir/config/clang-flags +# Figure out NVHPC C compiler flags +. $srcdir/config/nvidia-flags + # Use default Fortran 90 compiler according to what C compiler is used. if test "X-" = "X-$FC"; then case $CC_BASENAME in @@ -55,6 +61,14 @@ if test "X-" = "X-$FC"; then FC=pgf90 FC_BASENAME=pgf90 ;; + nvc*) + FC=nvfortran + FC_BASENAME=nvfortran + ;; + icx*) + FC=ifx + FC_BASENAME=ifx + ;; icc*) FC=ifort FC_BASENAME=ifort @@ -72,7 +86,7 @@ if test "X-" = "X-$FC"; then else case $FC in # The PGI and Intel compilers are automatically detected below - ifc*|ifort*|pgf90*) + ifc*|ifort*|pgf90*|nvfortran*) ;; *f95*) @@ -119,12 +133,18 @@ fi # Figure out CCE FC compiler flags . $srcdir/config/cce-fflags -# Figure out Intel FC compiler flags +# Figure out Intel oneAPI FC compiler flags +. $srcdir/config/oneapi-fflags + +# Figure out Intel classic FC compiler flags . $srcdir/config/intel-fflags # Figure out Clang FC compiler flags . $srcdir/config/clang-fflags +# Figure out NVHPC FC compiler flags +. $srcdir/config/nvidia-fflags + case $FC_BASENAME in # # Absoft compiler @@ -200,7 +220,10 @@ if test -z "$CXX"; then CXX_BASENAME=g++ fi -# Figure out Intel CXX compiler flags +# Figure out Intel oneAPI CXX compiler flags +. $srcdir/config/oneapi-cxxflags + +# Figure out Intel classic CXX compiler flags # Do this ahead of GNU to avoid icpc being detected as g++ . 
$srcdir/config/intel-cxxflags @@ -213,6 +236,9 @@ fi # Figure out Clang CXX compiler flags . $srcdir/config/clang-cxxflags +# Figure out NVHPC CXX compiler flags +. $srcdir/config/nvidia-cxxflags + # compiler version strings # check if the compiler_version_info is already set @@ -235,7 +261,16 @@ case $CC in sed 's/\"/\\\"/g' |\ sed 's/^\([a-z]* \)/ built with \1/1'` cc_version_info=`echo $cc_version_info` - ;; + ;; + + *nvc*) + cc_version_info=`$CC $CFLAGS $H5_CFLAGS -V 2>&1 | grep 'nvc'` + ;; + + *icx*) + cc_version_info=`$CC $CCFLAGS $H5_CCFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; *icc*) cc_version_info=`$CC $CCFLAGS $H5_CCFLAGS -V 2>&1 | grep 'Version' |\ @@ -271,6 +306,11 @@ case $FC in fc_version_info=`echo $fc_version_info` ;; + *ifx*) + fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; + *ifc*|*ifort*) fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'Version' |\ sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` @@ -298,6 +338,11 @@ case $FC in *pgf90*) fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'pgf90'` ;; + + *nvfortran*) + fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'nvfortran'` + ;; + *nagfor*|*nagftn*) RM='rm -f' tmpfile=/tmp/cmpver.$$ @@ -322,6 +367,9 @@ fi # check if the compiler_version_info is already set if test -z "$cxx_version_info"; then case $CXX in + *nvc++*) + cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'nvc++'` + ;; *pgc++*) cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'pgc++'` ;; @@ -329,6 +377,10 @@ case $CXX in cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS --version 2>&1 |\ grep 'GCC' | sed 's/\(.*(GCC) [-a-z0-9\. ]*\).*/\1/'` ;; + *icpx*) + cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'Version' |\ + sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` + ;; *icpc*) cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 | grep 'Version' |\ sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'` diff --git a/config/netbsd b/config/netbsd index 04761f294a8..0ed84f7b3d2 100644 --- a/config/netbsd +++ b/config/netbsd @@ -26,7 +26,10 @@ fi # Figure out C compiler flags . $srcdir/config/gnu-flags -# Figure out Intel C compiler flags +# Figure out Intel oneAPI C compiler flags +. $srcdir/config/oneapi-flags + +# Figure out Intel classic C compiler flags . $srcdir/config/intel-flags # The default Fortran 90 compiler @@ -36,6 +39,10 @@ if test "X-" = "X-$FC"; then FC=gfortran FC_BASENAME=gfortran ;; + icx*) + FC=ifx + FC_BASENAME=ifx + ;; icc*) FC=ifort FC_BASENAME=ifort @@ -50,6 +57,8 @@ fi # Figure out FORTRAN compiler flags . $srcdir/config/gnu-fflags -# Figure out Intel F90 compiler flags +# Figure out Intel oneAPI FC compiler flags . $srcdir/config/intel-fflags +# Figure out Intel classic FC compiler flags +. $srcdir/config/oneapi-fflags diff --git a/config/nvidia-cxxflags b/config/nvidia-cxxflags new file mode 100644 index 00000000000..6becd26887a --- /dev/null +++ b/config/nvidia-cxxflags @@ -0,0 +1,101 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. 
The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# NVIDIA nvc++ compiler or a derivative. It is careful not to do anything +# if the compiler is not NVIDIA; otherwise `cxx_flags_set' is set to `yes' +# + +# Get the compiler version in a way that works for NVIDIA nvc++ +# unless a compiler version is already known +# +# cxx_vendor: The compiler name: nvc++ +# cxx_version: Version number: 5.0-2, 5.2-2 +# +if test X = "X$cxx_flags_set"; then + cxx_version="`$CXX $CXXFLAGS -V 2>&1 |grep '^nvc++ '`" + if test X != "X$cxx_version"; then + cxx_vendor=`echo $cxx_version |sed 's/\([a-z]*++\).*/\1/'` + cxx_version=`echo $cxx_version |sed 's/nvc++ \([-a-z0-9\.\-]*\).*/\1/'` + echo "compiler '$CXX' is NVIDIA $cxx_vendor-$cxx_version" + + # Some version numbers + # NVIDIA version numbers are of the form: "major.minor-patch" + cxx_vers_major=`echo $cxx_version | cut -f1 -d.` + cxx_vers_minor=`echo $cxx_version | cut -f2 -d. | cut -f1 -d-` + cxx_vers_patch=`echo $cxx_version | cut -f2 -d. | cut -f2 -d-` + test -n "$cxx_vers_major" || cxx_vers_major=0 + test -n "$cxx_vers_minor" || cxx_vers_minor=0 + test -n "$cxx_vers_patch" || cxx_vers_patch=0 + cxx_vers_all=`expr $cxx_vers_major '*' 1000000 + $cxx_vers_minor '*' 1000 + $cxx_vers_patch` + fi +fi + +# Common PGI flags for various situations +if test "X-nvc++" = "X-$cxx_vendor"; then + + ########### + # General # + ########### + + # Default to C++11 standard + H5_CXXFLAGS="$H5_CXXFLAGS -std=c++11 -Minform=warn" + + ############## + # Production # + ############## + + PROD_CXXFLAGS= + + ######### + # Debug # + ######### + + # NDEBUG is handled explicitly in configure + # -g is handled by the symbols flags + DEBUG_CXXFLAGS="-Mbounds" + + ########### + # Symbols # + ########### + + NO_SYMBOLS_CXXFLAGS="-s" + SYMBOLS_CXXFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_CXXFLAGS="-Mprof=func,line" + # Use this for profiling with gprof + #PROFILE_CXXFLAGS="-pg" + + ################ + # Optimization # + ################ + + HIGH_OPT_CXXFLAGS="-O4" + DEBUG_OPT_CXXFLAGS="-gopt -O2" + NO_OPT_CXXFLAGS="-O0" + + ################# + # Flags are set # + ################# + cxx_flags_set=yes +fi + +# Clear cxx info if no flags set +if test "X-$cxx_flags_set" = "X-"; then + cxx_vendor= + cxx_version= +fi diff --git a/config/nvidia-fflags b/config/nvidia-fflags new file mode 100644 index 00000000000..77677e1036a --- /dev/null +++ b/config/nvidia-fflags @@ -0,0 +1,139 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# NVIDIA nvfortran compiler or a derivative. 
It is careful not to do anything +# if the compiler is not NVIDIA; otherwise `f9x_flags_set' is set to `yes' +# + +# Get the compiler version in a way that works for NVIDIA nvfortran +# unless a compiler version is already known +# +# f9x_vendor: The compiler name: nvfortran +# f9x_version: Version number: +# +if test X = "X$f9x_flags_set"; then + f9x_version="`$FC $FCFLAGS -V 2>&1 |grep '^nvfortran '`" + if test X != "X$f9x_version"; then + is_mpi="`$FC $FCFLAGS -help 2>&1 |grep 'link MPI'`" + f9x_vendor=`echo $f9x_version |sed 's/\([a-z0-9]*\).*/\1/'` + f9x_version=`echo $f9x_version |sed 's/nvfortran \([-a-z0-9\.\-]*\).*/\1/'` + echo "compiler '$FC' is NVIDIA $f9x_vendor-$f9x_version" + + # Some version numbers + # NVIDIA version numbers are of the form: "major.minor-patch" + f9x_vers_major=`echo $f9x_version | cut -f1 -d.` + f9x_vers_minor=`echo $f9x_version | cut -f2 -d. | cut -f1 -d-` + f9x_vers_patch=`echo $f9x_version | cut -f2 -d. | cut -f2 -d-` + test -n "$f9x_vers_major" || f9x_vers_major=0 + test -n "$f9x_vers_minor" || f9x_vers_minor=0 + test -n "$f9x_vers_patch" || f9x_vers_patch=0 + f9x_vers_all=`expr $f9x_vers_major '*' 1000000 + $f9x_vers_minor '*' 1000 + $f9x_vers_patch` + fi +fi + +# Common NVIDIA flags for various situations +if test "X-nvfortran" = "X-$f9x_vendor"; then + + ############################### + # Architecture-specific flags # + ############################### + + arch= + # Nothing currently. (Uncomment code below and modify to add any) + #case "$host_os-$host_cpu" in + # *-i686) + # arch="-march=i686" + # ;; + #esac + + # Host-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "`hostname`" in + # sleipnir.ncsa.uiuc.edu) + # arch="$arch -pipe" + # ;; + #esac + + ############## + # Production # + ############## + + # Check for MPI wrapper being used and tweak down compiler options + # Comment out the Tweaking since it caused problems to mpich1.2.6. + # Need to investigate the reasons to tweak. + #if test "X-" == "X-$is_mpi"; then + # PROD_FCFLAGS="-fast -s -Mnoframe" + #else + # PROD_FCFLAGS="-O2 -s" + #fi + PROD_FCFLAGS="-fast -Mnoframe -fPIC" + + ######### + # Debug # + ######### + + DEBUG_FCFLAGS="-Mbounds -Mchkptr -Mdclchk -fPIC" + + ########### + # Symbols # + ########### + + NO_SYMBOLS_FCFLAGS="-s" + SYMBOLS_FCFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_FCFLAGS="-Mprof=func,line" + # Use this for profiling with gprof + #PROFILE_FCFLAGS="-pg" + + ################ + # Optimization # + ################ + + HIGH_OPT_FCFLAGS= + DEBUG_OPT_FCFLAGS= + NO_OPT_FCFLAGS= + + ############ + # Warnings # + ############ + + ########### + # General # + ########### + + FC_BASENAME=nvfortran + Fortran_COMPILER_ID=NVIDIA + F9XSUFFIXFLAG="" + FSEARCH_DIRS="" + # Uncomment the following to add something specific for FCFLAGS. + #FCFLAGS="$FCFLAGS" + + + + ################# + # Flags are set # + ################# + f9x_flags_set=yes +fi + +# Clear f9x info if no flags set +if test "X-$f9x_flags_set" = "X-"; then + f9x_vendor= + f9x_version= +fi + diff --git a/config/nvidia-flags b/config/nvidia-flags new file mode 100644 index 00000000000..c140edd9830 --- /dev/null +++ b/config/nvidia-flags @@ -0,0 +1,124 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. 
The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# NVIDIA nvc compiler or a derivative. It is careful not to do anything +# if the compiler is not nvcc; otherwise `cc_flags_set' is set to `yes' +# + +# Get the compiler version in a way that works for nvc +# unless a compiler version is already known +# +# cc_vendor: The compiler name: nvc +# cc_version: Version number: 5.0-2, 5.2-2 +# +if test X = "X$cc_flags_set"; then + cc_version="`$CC $CFLAGS -V 2>&1 |grep '^nvc '`" + if test X != "X$cc_version"; then + is_mpi="`$CC $CFLAGS -help 2>&1 |grep 'MPI'`" + cc_vendor=`echo $cc_version |sed 's/\([a-z]*\).*/\1/'` + cc_version=`echo $cc_version |sed 's/nvc \([-a-z0-9\.\-]*\).*/\1/'` + echo "compiler '$CC' is NVIDIA $cc_vendor-$cc_version" + + # Some version numbers + # NVIDIA version numbers are of the form: "major.minor-patch" + cc_vers_major=`echo $cc_version | cut -f1 -d.` + cc_vers_minor=`echo $cc_version | cut -f2 -d. | cut -f1 -d-` + cc_vers_patch=`echo $cc_version | cut -f2 -d. | cut -f2 -d-` + test -n "$cc_vers_major" || cc_vers_major=0 + test -n "$cc_vers_minor" || cc_vers_minor=0 + test -n "$cc_vers_patch" || cc_vers_patch=0 + cc_vers_all=`expr $cc_vers_major '*' 1000000 + $cc_vers_minor '*' 1000 + $cc_vers_patch` + fi +fi + +# Common PGI flags for various situations +if test "X-nvc" = "X-$cc_vendor" -o "X-nvcc" = "X-$cc_vendor"; then + # Insert section about version specific problems from compiler flags here, + # if necessary. + + arch= + # Architecture-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "$host_os-$host_cpu" in + # *-i686) + # arch="-march=i686" + # ;; + #esac + + # Host-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "`hostname`" in + # sleipnir.ncsa.uiuc.edu) + # arch="$arch -pipe" + # ;; + #esac + + ########### + # General # + ########### + + # Default to C99 standard. + H5_CFLAGS="$H5_CFLAGS $arch -c99 -Minform=warn" + + ############## + # Production # + ############## + + # NDEBUG is handled explicitly by the configure script + #PROD_CFLAGS="-fast" + PROD_CFLAGS="" # -fast implies -O2 and -O2+ currently has test failures. + + ######### + # Debug # + ######### + + # NDEBUG is handled explicitly by the configure script + # -g is handled by the symbols flags + DEBUG_CFLAGS="-Mbounds" + + ########### + # Symbols # + ########### + + NO_SYMBOLS_CFLAGS="-s" + SYMBOLS_CFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_CFLAGS="-Mprof=func,line" + # Use this for profiling with gprof + #PROFILE_CFLAGS="-pg" + + ################ + # Optimization # + ################ + + HIGH_OPT_CFLAGS="-O1" # -O2+ currently has test failures. + #DEBUG_OPT_CFLAGS="-gopt -O2" + DEBUG_OPT_CFLAGS="-gopt -O1" # -O2+ currently has test failures. 
+ NO_OPT_CFLAGS="-O0" + + ################# + # Flags are set # + ################# + cc_flags_set=yes +fi + +# Clear cc info if no flags set +if test "X-$cc_flags_set" = "X-"; then + cc_vendor= + cc_version= +fi diff --git a/config/oneapi-cxxflags b/config/oneapi-cxxflags new file mode 100644 index 00000000000..d9819b94c44 --- /dev/null +++ b/config/oneapi-cxxflags @@ -0,0 +1,155 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# Intel icpx compiler or a derivative. It is careful not to do anything +# if the compiler is not Intel; otherwise `cxx_flags_set' is set to `yes' +# + +# +# Prepend `$srcdir/config/intel-warnings/` to the filename suffix(es) given as +# subroutine argument(s), remove comments starting with # and ending +# at EOL, replace spans of whitespace (including newlines) with spaces, +# and re-emit the file(s) thus filtered on the standard output stream. +# +load_intel_arguments() +{ + set -- $(for arg; do + sed 's,#.*$,,' $srcdir/config/intel-warnings/${arg} + done) + IFS=' ' echo "$*" +} + +# Get the compiler version in a way that works for icpx +# icpx unless a compiler version is already known +# +# cxx_vendor: The compiler name: icpx +# cxx_version: Version number: 2023.2.0 +# +if test X = "X$cxx_flags_set"; then + cxx_version="`$CXX $CXXFLAGS $H5_CXXFLAGS -V 2>&1 |grep 'oneAPI'`" + if test X != "X$cxx_version"; then + cxx_vendor=icpx + cxx_version=`echo $cxx_version |sed 's/.*Version \([-a-z0-9\.\-]*\).*/\1/'` + echo "compiler '$CXX' is Intel oneAPI $cxx_vendor-$cxx_version" + + # Some version numbers + # Intel oneAPI version numbers are of the form: "major.minor.patch" + cxx_vers_major=`echo $cxx_version | cut -f1 -d.` + cxx_vers_minor=`echo $cxx_version | cut -f2 -d.` + cxx_vers_patch=`echo $cxx_version | cut -f2 -d.` + test -n "$cxx_vers_major" || cxx_vers_major=0 + test -n "$cxx_vers_minor" || cxx_vers_minor=0 + test -n "$cxx_vers_patch" || cxx_vers_patch=0 + cxx_vers_all=`expr $cxx_vers_major '*' 1000000 + $cxx_vers_minor '*' 1000 + $cxx_vers_patch` + fi +fi + +# Common Intel flags for various situations +if test "X-icpx" = "X-$cxx_vendor"; then + # Insert section about version specific problems from compiler flags here, + # if necessary. + + arch= + # Architecture-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "$host_os-$host_cpu" in + # *-i686) + # arch="-march=i686" + # ;; + #esac + + # Host-specific flags + # Nothing currently. 
(Uncomment code below and modify to add any) + #case "`hostname`" in + # sleipnir.ncsa.uiuc.edu) + # arch="$arch -pipe" + # ;; + #esac + + ########### + # General # + ########### + + # Default to C++11 standard + H5_CXXFLAGS="$H5_CXXFLAGS $arch -std=c++11" + + ############## + # Production # + ############## + + PROD_CXXFLAGS= + + ######### + # Debug # + ######### + + # NDEBUG is handled explicitly in configure + # -g is handled by the symbols flags + DEBUG_CXXFLAGS= + + ########### + # Symbols # + ########### + + NO_SYMBOLS_CXXFLAGS="-Wl,-s" + SYMBOLS_CXXFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_CXXFLAGS="-p" + + ################ + # Optimization # + ################ + + HIGH_OPT_CXXFLAGS="-O3" + DEBUG_OPT_CXXFLAGS="-O0" + NO_OPT_CXXFLAGS="-O0" + + ############ + # Warnings # + ############ + + ########### + # General # + ########### + + # Add various general warning flags in intel-warnings. + # Use the C warnings as CXX warnings are the same + H5_CXXFLAGS="$H5_CXXFLAGS $(load_intel_arguments oneapi/general)" + + ###################### + # Developer warnings # + ###################### + + # Use the C warnings as CXX warnings are the same + DEVELOPER_WARNING_CXXFLAGS=$(load_intel_arguments oneapi/developer-general) + + ############################# + # Version-specific warnings # + ############################# + + ################# + # Flags are set # + ################# + cxx_flags_set=yes +fi + +# Clear cxx info if no flags set +if test "X-$cxx_flags_set" = "X-"; then + cxx_vendor= + cxx_version= +fi diff --git a/config/oneapi-fflags b/config/oneapi-fflags new file mode 100644 index 00000000000..a63108d0b99 --- /dev/null +++ b/config/oneapi-fflags @@ -0,0 +1,145 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# Intel oneAPI ifx compiler or a derivative. It is careful not to do anything +# if the compiler is not Intel; otherwise `f9x_flags_set' is set to `yes' +# + +# +# Prepend `$srcdir/config/intel-warnings/` to the filename suffix(es) given as +# subroutine argument(s), remove comments starting with # and ending +# at EOL, replace spans of whitespace (including newlines) with spaces, +# and re-emit the file(s) thus filtered on the standard output stream. 
+# +load_intel_arguments() +{ + set -- $(for arg; do + sed 's,#.*$,,' $srcdir/config/intel-warnings/${arg} + done) + IFS=' ' echo "$*" +} + +# Get the compiler version in a way that works for ifx +# ifx unless a compiler version is already known +# +# f9x_vendor: The compiler name: ifx +# f9x_version: Version number: 2023.2.0 +# +if test X = "X$f9x_flags_set"; then + f9x_version="`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 |grep '^Intel'`" + if test X != "X$f9x_version"; then + f9x_vendor=ifx + f9x_version="`echo $f9x_version |sed 's/.*Version \([-a-z0-9\.\-]*\).*/\1/'`" + echo "compiler '$FC' is Intel oneAPI $f9x_vendor-$f9x_version" + + # Some version numbers + # Intel oneAPI version numbers are of the form: "major.minor.patch" + f9x_vers_major=`echo $f9x_version | cut -f1 -d.` + f9x_vers_minor=`echo $f9x_version | cut -f2 -d.` + f9x_vers_patch=`echo $f9x_version | cut -f2 -d.` + test -n "$f9x_vers_major" || f9x_vers_major=0 + test -n "$f9x_vers_minor" || f9x_vers_minor=0 + test -n "$f9x_vers_patch" || f9x_vers_patch=0 + f9x_vers_all=`expr $f9x_vers_major '*' 1000000 + $f9x_vers_minor '*' 1000 + $f9x_vers_patch` + fi +fi + +if test "X-ifx" = "X-$f9x_vendor"; then + + FC_BASENAME=ifx + F9XSUFFIXFLAG="" + FSEARCH_DIRS="" + + ############################### + # Architecture-specific flags # + ############################### + + arch= + # Nothing currently. (Uncomment code below and modify to add any) + #case "$host_os-$host_cpu" in + # *-i686) + # arch="-march=i686" + # ;; + #esac + + # Host-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "`hostname`" in + # sleipnir.ncsa.uiuc.edu) + # arch="$arch -pipe" + # ;; + #esac + + ############## + # Production # + ############## + + PROD_FCFLAGS= + + ######### + # Debug # + ######### + + # Don't use -check uninit or you'll get false positives from H5_buildiface + DEBUG_FCFLAGS="-check all,nouninit" + + ########### + # Symbols # + ########### + + NO_SYMBOLS_FCFLAGS= + SYMBOLS_FCFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_FCFLAGS="-p" + + ################ + # Optimization # + ################ + + HIGH_OPT_FCFLAGS="-O3" + DEBUG_OPT_FCFLAGS="-O0" + NO_OPT_FCFLAGS="-O0" + + ############ + # Warnings # + ############ + + ########### + # General # + ########### + + H5_FCFLAGS="$H5_FCFLAGS -free" + H5_FCFLAGS="$H5_FCFLAGS $(load_intel_arguments oneapi/ifort-general)" + + ############################# + # Version-specific warnings # + ############################# + + + ################# + # Flags are set # + ################# + f9x_flags_set=yes +fi + +# Clear f9x info if no flags set +if test "X-$f9x_flags_set" = "X-"; then + f9x_vendor= + f9x_version= +fi + diff --git a/config/oneapi-flags b/config/oneapi-flags new file mode 100644 index 00000000000..629e93f02f1 --- /dev/null +++ b/config/oneapi-flags @@ -0,0 +1,151 @@ +# -*- shell-script -*- +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. + + +# This file should be sourced into configure if the compiler is the +# Intel icx compiler or a derivative. 
It is careful not to do anything +# if the compiler is not Intel; otherwise `cc_flags_set' is set to `yes' +# + +# +# Prepend `$srcdir/config/intel-warnings/` to the filename suffix(es) given as +# subroutine argument(s), remove comments starting with # and ending +# at EOL, replace spans of whitespace (including newlines) with spaces, +# and re-emit the file(s) thus filtered on the standard output stream. +# +load_intel_arguments() +{ + set -- $(for arg; do + sed 's,#.*$,,' $srcdir/config/intel-warnings/${arg} + done) + IFS=' ' echo "$*" +} + +# Get the compiler version in a way that works for icx +# icx unless a compiler version is already known +# cc_vendor: The compiler name: icx +# cc_version: Version number: 2023.2.0 +# +if test X = "X$cc_flags_set"; then + cc_version="`$CC $CFLAGS $H5_CFLAGS -V 2>&1 |grep 'oneAPI'`" + if test X != "X$cc_version"; then + cc_vendor=icx + cc_version=`echo $cc_version |sed 's/.*Version \([-a-z0-9\.\-]*\).*/\1/'` + echo "compiler '$CC' is Intel oneAPI $cc_vendor-$cc_version" + + # Some version numbers + # Intel oneAPI version numbers are of the form: "major.minor.patch" + cc_vers_major=`echo $cc_version | cut -f1 -d.` + cc_vers_minor=`echo $cc_version | cut -f2 -d.` + cc_vers_patch=`echo $cc_version | cut -f2 -d.` + test -n "$cc_vers_major" || cc_vers_major=0 + test -n "$cc_vers_minor" || cc_vers_minor=0 + test -n "$cc_vers_patch" || cc_vers_patch=0 + cc_vers_all=`expr $cc_vers_major '*' 1000000 + $cc_vers_minor '*' 1000 + $cc_vers_patch` + fi +fi + +# Common Intel flags for various situations +if test "X-icx" = "X-$cc_vendor"; then + # Insert section about version specific problems from compiler flags here, + # if necessary. + + arch= + # Architecture-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "$host_os-$host_cpu" in + # *-i686) + # arch="-march=i686" + # ;; + #esac + + # Host-specific flags + # Nothing currently. (Uncomment code below and modify to add any) + #case "`hostname`" in + # sleipnir.ncsa.uiuc.edu) + # arch="$arch -pipe" + # ;; + #esac + + ########### + # General # + ########### + + # Default to C99 standard. + H5_CFLAGS="$H5_CFLAGS $arch -std=c99" + + ############## + # Production # + ############## + + PROD_CFLAGS= + + ######### + # Debug # + ######### + + # NDEBUG is handled explicitly in configure + DEBUG_CFLAGS= + + ########### + # Symbols # + ########### + + NO_SYMBOLS_CFLAGS="-Wl,-s" + SYMBOLS_CFLAGS="-g" + + ############# + # Profiling # + ############# + + PROFILE_CFLAGS="-p" + + ################ + # Optimization # + ################ + + HIGH_OPT_CFLAGS="-O3" + DEBUG_OPT_CFLAGS="-O0" + NO_OPT_CFLAGS="-O0" + + ############ + # Warnings # + ############ + + ########### + # General # + ########### + + # Add various general warning flags in intel-warnings. 
+ H5_CFLAGS="$H5_CFLAGS $(load_intel_arguments oneapi/general)" + + ###################### + # Developer warnings # + ###################### + + DEVELOPER_WARNING_CFLAGS=$(load_intel_arguments oneapi/developer-general) + + ############################# + # Version-specific warnings # + ############################# + + ################# + # Flags are set # + ################# + cc_flags_set=yes +fi + +# Clear cc info if no flags set +if test "X-$cc_flags_set" = "X-"; then + cc_vendor= + cc_version= +fi diff --git a/config/sanitizer/README.md b/config/sanitizer/README.md index 308f9c393aa..e3141455401 100644 --- a/config/sanitizer/README.md +++ b/config/sanitizer/README.md @@ -304,4 +304,5 @@ file(GLOB_RECURSE CMAKE_FILES ) cmake_format(TARGET_NAME ${CMAKE_FILES}) -``` \ No newline at end of file +``` + diff --git a/configure.ac b/configure.ac index 02c47d61002..44ab43a4664 100644 --- a/configure.ac +++ b/configure.ac @@ -1207,6 +1207,30 @@ AC_ARG_ENABLE([doxygen], AC_MSG_RESULT([$HDF5_DOXYGEN]) +## Check if they would like to enable doxygen warnings as errors +## + +## This needs to be exposed for the library info file. +AC_SUBST([HDF5_DOXY_WARNINGS]) + +## Default is to consider doxygen warnings as errors +DOXY_ERR=yes + +AC_MSG_CHECKING([if doxygen warnings as errors is enabled]) + +AC_ARG_ENABLE([doxygen-errors], + [AS_HELP_STRING([--enable-doxygen-errors], + [Error on HDF5 doxygen warnings [default=yes]])], + [DOXY_ERR=$enableval]) + +if test "X$DOXY_ERR" = "Xyes"; then + HDF5_DOXY_WARNINGS="FAIL_ON_WARNINGS" +else + HDF5_DOXY_WARNINGS="NO" + +fi +AC_MSG_RESULT([$HDF5_DOXY_WARNINGS]) + if test "X$HDF5_DOXYGEN" = "Xyes"; then DX_DOXYGEN_FEATURE(ON) DX_DOT_FEATURE(OFF) @@ -1269,6 +1293,7 @@ if test "X$HDF5_DOXYGEN" = "Xyes"; then DOXYGEN_STRIP_FROM_PATH='$(SRCDIR)' DOXYGEN_STRIP_FROM_INC_PATH='$(SRCDIR)' DOXYGEN_PREDEFINED='H5_HAVE_DIRECT H5_HAVE_LIBHDFS H5_HAVE_MAP_API H5_HAVE_PARALLEL H5_HAVE_ROS3_VFD H5_DOXYGEN H5_HAVE_SUBFILING_VFD H5_HAVE_IOC_VFD H5_HAVE_MIRROR_VFD' + DOXYGEN_WARN_AS_ERROR=${HDF5_DOXY_WARNINGS} DX_INIT_DOXYGEN([HDF5], [./doxygen/Doxyfile], [hdf5lib_docs]) fi diff --git a/doxygen/CMakeLists.txt b/doxygen/CMakeLists.txt index 7bfbe74906a..7dd7660621d 100644 --- a/doxygen/CMakeLists.txt +++ b/doxygen/CMakeLists.txt @@ -30,6 +30,7 @@ if (DOXYGEN_FOUND) set (DOXYGEN_STRIP_FROM_PATH ${HDF5_SOURCE_DIR}) set (DOXYGEN_STRIP_FROM_INC_PATH ${HDF5_SOURCE_DIR}) set (DOXYGEN_PREDEFINED "H5_HAVE_DIRECT H5_HAVE_LIBHDFS H5_HAVE_MAP_API H5_HAVE_PARALLEL H5_HAVE_ROS3_VFD H5_DOXYGEN H5_HAVE_SUBFILING_VFD H5_HAVE_IOC_VFD H5_HAVE_MIRROR_VFD") + set (DOXYGEN_WARN_AS_ERROR ${HDF5_DOXY_WARNINGS}) # This configure and individual custom targets work together # Replace variables inside @@ with the current values @@ -38,7 +39,7 @@ if (DOXYGEN_FOUND) install ( DIRECTORY ${HDF5_BINARY_DIR}/hdf5lib_docs/html DESTINATION ${HDF5_INSTALL_DOC_DIR} - COMPONENT Documents + COMPONENT hdfdocuments ) if (NOT TARGET doxygen) diff --git a/doxygen/Doxyfile.in b/doxygen/Doxyfile.in index b24b9f9e509..464e09a7201 100644 --- a/doxygen/Doxyfile.in +++ b/doxygen/Doxyfile.in @@ -625,7 +625,7 @@ WARN_NO_PARAMDOC = NO # a warning is encountered. # The default value is: NO. -WARN_AS_ERROR = FAIL_ON_WARNINGS +WARN_AS_ERROR = @DOXYGEN_WARN_AS_ERROR@ # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. 
The string should contain the $file, $line, and $text tags, which diff --git a/doxygen/dox/LearnBasics2.dox b/doxygen/dox/LearnBasics2.dox index 87bbe87fe47..f436a027d81 100644 --- a/doxygen/dox/LearnBasics2.dox +++ b/doxygen/dox/LearnBasics2.dox @@ -728,12 +728,12 @@ There are numerous datatype functions that allow a user to alter a pre-defined d Refer to the \ref H5T in the \ref RM. Example functions are #H5Tset_size and #H5Tset_precision. \section secLBDtypeSpec Specific Datatypes -On the Examples by API -page under Datatypes +On the \ref ExAPI +page under \ref sec_exapi_dtypes you will find many example programs for creating and reading datasets with different datatypes. Below is additional information on some of the datatypes. See -the Examples by API +the \ref ExAPI page for examples of these datatypes. \subsection subsecLBDtypeSpec Array Datatype vs Array Dataspace diff --git a/doxygen/dox/LearnBasics3.dox b/doxygen/dox/LearnBasics3.dox index 748745827f2..a91368b00fc 100644 --- a/doxygen/dox/LearnBasics3.dox +++ b/doxygen/dox/LearnBasics3.dox @@ -152,13 +152,13 @@ To use the compact storage layout, call: #H5Pset_layout \li Create a dataset with the modified property list. (See #H5Dcreate) \li Close the property list. (See #H5Pclose) For example code, see the \ref HDF5Examples page. -Specifically look at the Examples by API. +Specifically look at the \ref ExAPI. There are examples for different languages. The C example to create a chunked dataset is: -h5ex_d_chunk.c +h5ex_d_chunk.c The C example to create a compact dataset is: -h5ex_d_compact.c +h5ex_d_compact.c \section secLBDsetLayoutChange Changing the Layout after Dataset Creation The dataset layout is a Dataset Creation Property List. This means that once the dataset has been @@ -290,7 +290,7 @@ is met, at a certain point in the future.) \subsection subsecLBContentsProgUsing Using #H5Literate, #H5Lvisit and #H5Ovisit For example code, see the \ref HDF5Examples page. -Specifically look at the Examples by API. +Specifically look at the \ref ExAPI. There are examples for different languages, where examples of using #H5Literate and #H5Ovisit/#H5Lvisit are included. The h5ex_g_traverse example traverses a file using H5Literate: diff --git a/doxygen/dox/Overview.dox b/doxygen/dox/Overview.dox index 54cc638d9d6..438788eb028 100644 --- a/doxygen/dox/Overview.dox +++ b/doxygen/dox/Overview.dox @@ -23,8 +23,8 @@ documents cover a mix of tasks, concepts, and reference, to help a specific \par Versions Version-specific documentation (see the version in the title area) can be found here: - - HDF5 develop branch (this site) - - HDF5 1.14.x + - HDF5 develop branch (this site) + - HDF5 1.14.x - HDF5 1.12.x - HDF5 1.10.x - HDF5 1.8.x diff --git a/doxygen/dox/ViewTools.dox b/doxygen/dox/ViewTools.dox index 66b2def0624..951605674be 100644 --- a/doxygen/dox/ViewTools.dox +++ b/doxygen/dox/ViewTools.dox @@ -53,7 +53,7 @@ packages, which can be obtained from the HDF-EOS and Tools and Information Center. Specifically, the following examples are used in this tutorial topic: \li HDF5 Files created from compiling the \ref LBExamples -\li HDF5 Files on the Examples by API page +\li HDF5 Files on the \ref ExAPI page \li NPP JPSS files, SVM01_npp.. (gzipped) and SVM09_npp.. (gzipped) \li HDF-EOS OMI-Aura file @@ -497,7 +497,7 @@ In the file shown below the dataset / \endcode You can obtain the h5ex_d_gzip.c program that created this file, as well as the file created, -from the Examples by API page. +from the \ref ExAPI page. 
\subsection subsecViewToolsViewDset_h5ls h5ls Specific datasets can be specified with h5ls by simply adding the dataset path and dataset after the @@ -925,7 +925,7 @@ The output of the above command is shown below: \subsection subsecViewToolsViewDtypes_h5dump h5dump The following datatypes are discussed, using the output of h5dump with HDF5 files from the -Examples by API page: +\ref ExAPI page:
 \li @ref subsubsecViewToolsViewDtypes_array
 \li @ref subsubsecViewToolsViewDtypes_objref
  • @@ -1036,7 +1036,7 @@ the elements or slab that is selected. A dataset with a Region Reference datatyp one or more Region References. An example of a Region Reference dataset (h5ex_t_regref.h5) can be found on the -Examples by API page, +\ref ExAPI page, under Datatypes. If you examine this dataset with h5dump you will see that /DS1 is a Region Reference dataset as indicated by its datatype, highlighted in bold below: \code diff --git a/doxygen/dox/ViewTools2.dox b/doxygen/dox/ViewTools2.dox index 4d8788a81fc..71d74652d4a 100644 --- a/doxygen/dox/ViewTools2.dox +++ b/doxygen/dox/ViewTools2.dox @@ -727,7 +727,7 @@ GROUP "/" { \endcode The second example imports string data. The example program that creates this file can be downloaded -from the Examples by API page. +from the \ref ExAPI page. Note that string data requires use of the h5dump -y option to exclude indexes and the h5dump --width=1 diff --git a/doxygen/examples/H5D_examples.c b/doxygen/examples/H5D_examples.c index ae483ee5566..1ad27947ee6 100644 --- a/doxygen/examples/H5D_examples.c +++ b/doxygen/examples/H5D_examples.c @@ -10,7 +10,7 @@ int chunk_cb(const hsize_t *offset, unsigned filter_mask, haddr_t addr, hsize_t size, void *op_data) { // only print the allocated chunk size only - printf("%ld\n", size); + printf("%" PRIuHSIZE "\n", size); return EXIT_SUCCESS; } //! diff --git a/doxygen/examples/tables/propertyLists.dox b/doxygen/examples/tables/propertyLists.dox index 2f74c03770f..340e13c26a5 100644 --- a/doxygen/examples/tables/propertyLists.dox +++ b/doxygen/examples/tables/propertyLists.dox @@ -711,6 +711,10 @@ of the library for reading or writing the actual data. Gets the cause for not performing selection or vector I/O on the last parallel I/O call. +#H5Pget_actual_selection_io_mode +Gets the type(s) (scalar, vector, selection) of raw data I/O performed on the last I/O call. + + #H5Pset_modify_write_buf/#H5Pget_modify_write_buf Sets/gets a flag allowing the library to modify the contents of the write buffer. diff --git a/fortran/CMakeLists.txt b/fortran/CMakeLists.txt index cf9b427b3a6..efaf963c108 100644 --- a/fortran/CMakeLists.txt +++ b/fortran/CMakeLists.txt @@ -22,7 +22,7 @@ endif () #----------------------------------------------------------------------------- # Testing #----------------------------------------------------------------------------- -if (BUILD_TESTING) +if (NOT HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING) add_subdirectory (test) if (MPI_Fortran_FOUND) add_subdirectory (testpar) diff --git a/fortran/src/CMakeLists.txt b/fortran/src/CMakeLists.txt index 199a0c8f148..57e17e5a675 100644 --- a/fortran/src/CMakeLists.txt +++ b/fortran/src/CMakeLists.txt @@ -332,7 +332,7 @@ if (BUILD_STATIC_LIBS) target_compile_options(${HDF5_F90_LIB_TARGET} PRIVATE "${HDF5_CMAKE_Fortran_FLAGS}") target_compile_definitions(${HDF5_F90_LIB_TARGET} PRIVATE - "$<$:HDF5F90_WINDOWS"> + "$<$:HDF5F90_WINDOWS>" "$<$:${WIN_COMPILE_FLAGS}>" ) target_link_libraries (${HDF5_F90_LIB_TARGET} diff --git a/fortran/src/H5Fff.F90 b/fortran/src/H5Fff.F90 index cfae7652e63..d31117784e5 100644 --- a/fortran/src/H5Fff.F90 +++ b/fortran/src/H5Fff.F90 @@ -43,6 +43,7 @@ MODULE H5F ! Number of objects opened in H5open_f INTEGER(SIZE_T) :: H5OPEN_NUM_OBJ + #ifndef H5_DOXYGEN INTERFACE INTEGER(C_INT) FUNCTION h5fis_accessible(name, & @@ -71,12 +72,6 @@ END FUNCTION h5fis_accessible INTEGER(HSIZE_T) :: tot_space !< Amount of free space in the file END TYPE H5F_info_free_t -!> @brief H5_ih_info_t derived type. 
- TYPE, BIND(C) :: H5_ih_info_t - INTEGER(HSIZE_T) :: index_size !< btree and/or list - INTEGER(HSIZE_T) :: heap_size !< Heap size - END TYPE H5_ih_info_t - !> @brief H5F_info_t_sohm derived type. TYPE, BIND(C) :: H5F_info_sohm_t INTEGER(C_INT) :: version !< Version # of shared object header info diff --git a/fortran/src/H5Lff.F90 b/fortran/src/H5Lff.F90 index bedfb8c6acc..004e5b23fdd 100644 --- a/fortran/src/H5Lff.F90 +++ b/fortran/src/H5Lff.F90 @@ -753,6 +753,9 @@ END FUNCTION H5Lexists hdferr = 0 IF(link_exists_c.LT.0_C_INT) hdferr = -1 + hdferr = 0 + IF(link_exists_c.LT.0) hdferr = -1 + END SUBROUTINE h5lexists_f !> diff --git a/fortran/src/H5Off.F90 b/fortran/src/H5Off.F90 index 4a0a1632e78..9c8b09141b8 100644 --- a/fortran/src/H5Off.F90 +++ b/fortran/src/H5Off.F90 @@ -110,12 +110,6 @@ MODULE H5O TYPE(mesg_t) :: mesg END TYPE c_hdr_t -!> @brief Extra metadata storage for obj & attributes - TYPE, BIND(C) :: H5_ih_info_t - INTEGER(hsize_t) :: index_size !< btree and/or list - INTEGER(hsize_t) :: heap_size !< heap - END TYPE H5_ih_info_t - !> @brief meta_size_t derived type TYPE, BIND(C) :: meta_size_t TYPE(H5_ih_info_t) :: obj !< v1/v2 B-tree & local/fractal heap for groups, B-tree for chunked datasets @@ -1269,7 +1263,7 @@ SUBROUTINE h5oget_info_by_idx_f(loc_id, group_name, index_field, order, n, & INTERFACE INTEGER FUNCTION h5oget_info_by_idx_c(loc_id, group_name, namelen, & index_field, order, n, lapl_id_default, object_info, fields) BIND(C, NAME='h5oget_info_by_idx_c') - IMPORT :: c_char, c_ptr, c_funptr + IMPORT :: c_char, c_ptr IMPORT :: HID_T, SIZE_T, HSIZE_T INTEGER(HID_T) , INTENT(IN) :: loc_id CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: group_name diff --git a/fortran/src/H5Pf.c b/fortran/src/H5Pf.c index 3a97d7eab4a..87e6bfbebe6 100644 --- a/fortran/src/H5Pf.c +++ b/fortran/src/H5Pf.c @@ -69,30 +69,6 @@ h5pcreate_c(hid_t_f *cls, hid_t_f *prp_id) return ret_value; } -/****if* H5Pf/h5pclose_c - * NAME - * h5pclose_c - * PURPOSE - * Call H5Pclose to close property lis - * INPUTS - * prp_id - identifier of the property list to be closed - * RETURNS - * 0 on success, -1 on failure - * SOURCE - */ - -int_f -h5pclose_c(hid_t_f *prp_id) -/******/ -{ - int_f ret_value = 0; - - if (H5Pclose((hid_t)*prp_id) < 0) - ret_value = -1; - - return ret_value; -} - /****if* H5Pf/h5pcopy_c * NAME * h5pcopy_c @@ -2252,52 +2228,6 @@ h5pget_hyper_vector_size_c(hid_t_f *prp_id, size_t_f *size) return ret_value; } -/****if* H5Pf/h5pcreate_class_c - * NAME - * h5pcreate_class_c - * PURPOSE - * Call H5Pcreate_class ito create a new property class - * INPUTS - * parent - property list class identifier - * name - name of the new class - * name_len - length of the "name" buffer - * OUTPUTS - * class - new class identifier - * RETURNS - * 0 on success, -1 on failure - * SOURCE - */ -int_f -h5pcreate_class_c(hid_t_f *parent, _fcd name, int_f *name_len, hid_t_f *cls, H5P_cls_create_func_t create, - void *create_data, H5P_cls_copy_func_t copy, void *copy_data, H5P_cls_close_func_t close, - void *close_data) -/******/ -{ - int ret_value = -1; - hid_t c_class; - char *c_name; - - c_name = (char *)HD5f2cstring(name, (size_t)*name_len); - if (c_name == NULL) - goto DONE; - - /* - * Call H5Pcreate_class function. 
- */ - c_class = - H5Pcreate_class((hid_t)*parent, c_name, create, create_data, copy, copy_data, close, close_data); - - if (c_class < 0) - goto DONE; - *cls = (hid_t_f)c_class; - ret_value = 0; - -DONE: - if (c_name != NULL) - free(c_name); - return ret_value; -} - /****if* H5Pf/h5pregister_c * NAME * h5pregister_c diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90 index bbc7a9dc066..576509534ae 100644 --- a/fortran/src/H5Pff.F90 +++ b/fortran/src/H5Pff.F90 @@ -400,15 +400,16 @@ SUBROUTINE h5pclose_f(prp_id, hdferr) INTEGER(HID_T), INTENT(IN) :: prp_id INTEGER, INTENT(OUT) :: hdferr INTERFACE - INTEGER FUNCTION h5pclose_c(prp_id) & - BIND(C,NAME='h5pclose_c') + INTEGER(C_INT) FUNCTION H5Pclose(prp_id) & + BIND(C,NAME='H5Pclose') + IMPORT :: C_INT IMPORT :: HID_T IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: prp_id - END FUNCTION h5pclose_c + INTEGER(HID_T), VALUE :: prp_id + END FUNCTION H5Pclose END INTERFACE - hdferr = h5pclose_c(prp_id) + hdferr = INT(H5Pclose(prp_id)) END SUBROUTINE h5pclose_f !> @@ -5005,31 +5006,32 @@ SUBROUTINE h5pcreate_class_f(parent, name, class, hdferr, create, create_data, & INTEGER , INTENT(OUT) :: hdferr TYPE(C_PTR) , OPTIONAL, INTENT(IN) :: create_data, copy_data, close_data TYPE(C_FUNPTR) , OPTIONAL, INTENT(IN) :: create, copy, close - INTEGER :: name_len - TYPE(C_PTR) :: create_data_default, copy_data_default, close_data_default + TYPE(C_PTR) :: create_data_default, copy_data_default, close_data_default TYPE(C_FUNPTR) :: create_default, copy_default, close_default + + CHARACTER(LEN=LEN_TRIM(name)+1,KIND=C_CHAR) :: c_name + INTERFACE - INTEGER FUNCTION h5pcreate_class_c(parent, name, name_len, class, & + INTEGER(HID_T) FUNCTION H5Pcreate_class(parent, name, & create, create_data, copy, copy_data, close, close_data) & - BIND(C, NAME='h5pcreate_class_c') - IMPORT :: c_char, c_ptr, c_funptr + BIND(C, NAME='H5Pcreate_class') + IMPORT :: C_CHAR, C_PTR, C_FUNPTR IMPORT :: HID_T - INTEGER(HID_T), INTENT(IN) :: parent - CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(IN) :: name - INTEGER, INTENT(IN) :: name_len - INTEGER(HID_T), INTENT(OUT) :: class - TYPE(C_PTR), VALUE :: create_data, copy_data, close_data - TYPE(C_FUNPTR), VALUE :: create, copy, close - END FUNCTION h5pcreate_class_c + INTEGER(HID_T), VALUE :: parent + CHARACTER(KIND=C_CHAR), DIMENSION(*) :: name + TYPE(C_PTR), VALUE :: create_data, copy_data, close_data + TYPE(C_FUNPTR), VALUE :: create, copy, close + END FUNCTION H5Pcreate_class END INTERFACE - name_len = LEN(name) - create_default = c_null_funptr !fix:scot - create_data_default = c_null_ptr - copy_default = c_null_funptr !fix:scot - copy_data_default = c_null_ptr - close_default = c_null_funptr !fix:scot - close_data_default = c_null_ptr + c_name = TRIM(name)//C_NULL_CHAR + + create_default = C_NULL_FUNPTR + create_data_default = C_NULL_PTR + copy_default = C_NULL_FUNPTR + copy_data_default = C_NULL_PTR + close_default = C_NULL_FUNPTR + close_data_default = C_NULL_PTR IF(PRESENT(create)) create_default = create IF(PRESENT(create_data)) create_data_default = create_data @@ -5038,11 +5040,14 @@ END FUNCTION h5pcreate_class_c IF(PRESENT(close)) close_default = close IF(PRESENT(close_data)) close_data_default = close_data - hdferr = h5pcreate_class_c(parent, name , name_len, class, & + class = H5Pcreate_class(parent, c_name, & create_default, create_data_default, & copy_default, copy_data_default, & close_default, close_data_default) + hdferr = 0 + IF(class.LT.0) hdferr = -1 + END SUBROUTINE h5pcreate_class_f !> @@ -6405,7 +6410,7 @@ END 
SUBROUTINE H5Pset_file_space_strategy_f !! \brief Gets the file space handling strategy and persisting free-space values for a file creation property list. !! !! \param plist_id File creation property list identifier -!! \param strategy The file space handling strategy to be used. +!! \param strategy The file space handling strategy to be used !! \param persist Indicate whether free space should be persistent or not !! \param threshold The free-space section size threshold value !! \param hdferr \fortran_error @@ -6507,6 +6512,42 @@ END FUNCTION H5Pget_file_space_page_size hdferr = INT(h5pget_file_space_page_size(prp_id, fsp_size)) END SUBROUTINE h5pget_file_space_page_size_f +!> +!! \ingroup FH5P +!! +!! \brief Retrieves the type(s) of I/O that HDF5 actually performed on raw data +!! during the last I/O call. +!! +!! \param plist_id File creation property list identifier +!! \param actual_selection_io_mode A bitwise set value indicating the type(s) of I/O performed +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5Pget_actual_selection_io_mode() +!! + SUBROUTINE h5pget_actual_selection_io_mode_f(plist_id, actual_selection_io_mode, hdferr) + + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: plist_id + INTEGER , INTENT(OUT) :: actual_selection_io_mode + INTEGER , INTENT(OUT) :: hdferr + + INTEGER(C_INT32_T) :: c_actual_selection_io_mode + + INTERFACE + INTEGER(C_INT) FUNCTION H5Pget_actual_selection_io_mode(plist_id, actual_selection_io_mode) & + BIND(C, NAME='H5Pget_actual_selection_io_mode') + IMPORT :: HID_T, C_INT32_T, C_INT + IMPLICIT NONE + INTEGER(HID_T), VALUE :: plist_id + INTEGER(C_INT32_T) :: actual_selection_io_mode + END FUNCTION H5Pget_actual_selection_io_mode + END INTERFACE + + hdferr = INT(H5Pget_actual_selection_io_mode(plist_id, c_actual_selection_io_mode)) + + actual_selection_io_mode = INT(c_actual_selection_io_mode) + + END SUBROUTINE h5pget_actual_selection_io_mode_f END MODULE H5P diff --git a/fortran/src/H5VLff.F90 b/fortran/src/H5VLff.F90 index 5a1fa9fd3b5..4467a59b171 100644 --- a/fortran/src/H5VLff.F90 +++ b/fortran/src/H5VLff.F90 @@ -401,4 +401,68 @@ END FUNCTION H5VLunregister_connector END SUBROUTINE H5VLunregister_connector_f +!> +!! \ingroup FH5VL +!! +!! \brief Retrieves the token representation from an address for a location identifier. +!! +!! \param loc_id Specifies a location identifier +!! \param addr Address for object in the file +!! \param token Token representing the object in the file +!! \param hdferr \fortran_error +!! +!! See C API: @ref H5VLnative_addr_to_token() +!! + SUBROUTINE h5vlnative_addr_to_token_f(loc_id, addr, token, hdferr) + IMPLICIT NONE + INTEGER(HID_T) , INTENT(IN) :: loc_id + INTEGER(HADDR_T) , INTENT(IN) :: addr + TYPE(H5O_TOKEN_T_F), INTENT(OUT) :: token + INTEGER , INTENT(OUT) :: hdferr + + INTERFACE + INTEGER(C_INT) FUNCTION H5VLnative_addr_to_token(loc_id, addr, token) BIND(C, NAME='H5VLnative_addr_to_token') + IMPORT :: HID_T, C_INT, HADDR_T, H5O_TOKEN_T_F + INTEGER(HID_T) , VALUE :: loc_id + INTEGER(HADDR_T), VALUE :: addr + TYPE(H5O_TOKEN_T_F) :: token + END FUNCTION H5VLnative_addr_to_token + END INTERFACE + + hdferr = INT(H5VLnative_addr_to_token(loc_id, addr, token)) + + END SUBROUTINE h5vlnative_addr_to_token_f + +!> +!! \ingroup FH5VL +!! +!! \brief Retrieves the object address from a token representation for a location identifier. +!! +!! \param loc_id Specifies a location identifier +!! \param token Token representing the object in the file +!! \param addr Address for object in the file +!! 
\param hdferr \fortran_error +!! +!! See C API: @ref H5VLnative_token_to_addr() +!! + SUBROUTINE h5vlnative_token_to_addr_f(loc_id, token, addr, hdferr) + IMPLICIT NONE + INTEGER(HID_T) , INTENT(IN) :: loc_id + TYPE(H5O_TOKEN_T_F), INTENT(IN) :: token + INTEGER(HADDR_T) , INTENT(OUT) :: addr + INTEGER , INTENT(OUT) :: hdferr + + INTERFACE + INTEGER(C_INT) FUNCTION H5VLnative_token_to_addr(loc_id, token, addr) BIND(C, NAME='H5VLnative_token_to_addr') + IMPORT :: HID_T, C_INT, HADDR_T, H5O_TOKEN_T_F + INTEGER(HID_T) , VALUE :: loc_id + TYPE(H5O_TOKEN_T_F), VALUE :: token + INTEGER(HADDR_T) :: addr + END FUNCTION H5VLnative_token_to_addr + END INTERFACE + + hdferr = INT(H5VLnative_token_to_addr(loc_id, token, addr)) + + END SUBROUTINE h5vlnative_token_to_addr_f + END MODULE H5VL diff --git a/fortran/src/H5_f.c b/fortran/src/H5_f.c index 181047b5454..0392c2bdfa6 100644 --- a/fortran/src/H5_f.c +++ b/fortran/src/H5_f.c @@ -477,6 +477,10 @@ h5init_flags_c(int_f *h5d_flags, size_t_f *h5d_size_flags, int_f *h5e_flags, hid h5d_flags[55] = (int_f)H5D_MPIO_LINK_CHUNK; h5d_flags[56] = (int_f)H5D_MPIO_MULTI_CHUNK; + h5d_flags[57] = (int_f)H5D_SCALAR_IO; + h5d_flags[58] = (int_f)H5D_VECTOR_IO; + h5d_flags[59] = (int_f)H5D_SELECTION_IO; + /* * H5E flags */ diff --git a/fortran/src/H5_ff.F90 b/fortran/src/H5_ff.F90 index 68b3dd874bb..53156731946 100644 --- a/fortran/src/H5_ff.F90 +++ b/fortran/src/H5_ff.F90 @@ -74,7 +74,7 @@ MODULE H5LIB ! ! H5D flags declaration ! - INTEGER, PARAMETER :: H5D_FLAGS_LEN = 57 + INTEGER, PARAMETER :: H5D_FLAGS_LEN = 60 INTEGER, DIMENSION(1:H5D_FLAGS_LEN) :: H5D_flags INTEGER, PARAMETER :: H5D_SIZE_FLAGS_LEN = 2 INTEGER(SIZE_T), DIMENSION(1:H5D_SIZE_FLAGS_LEN) :: H5D_size_flags @@ -467,6 +467,9 @@ END FUNCTION h5init1_flags_c H5D_MPIO_NO_CHUNK_OPTIMIZATION_F = H5D_flags(55) H5D_MPIO_LINK_CHUNK_F = H5D_flags(56) H5D_MPIO_MULTI_CHUNK_F = H5D_flags(57) + H5D_SCALAR_IO_F = H5D_flags(58) + H5D_VECTOR_IO_F = H5D_flags(59) + H5D_SELECTION_IO_F = H5D_flags(60) H5D_CHUNK_CACHE_NSLOTS_DFLT_F = H5D_size_flags(1) H5D_CHUNK_CACHE_NBYTES_DFLT_F = H5D_size_flags(2) diff --git a/fortran/src/H5config_f.inc.cmake b/fortran/src/H5config_f.inc.cmake index 34fb091c787..71bce0e18c2 100644 --- a/fortran/src/H5config_f.inc.cmake +++ b/fortran/src/H5config_f.inc.cmake @@ -23,6 +23,12 @@ #undef H5_HAVE_SUBFILING_VFD #endif +! Define if on APPLE +#cmakedefine01 H5_HAVE_DARWIN +#if H5_HAVE_DARWIN == 0 +#undef H5_HAVE_DARWIN +#endif + ! Define if the intrinsic function STORAGE_SIZE exists #define H5_FORTRAN_HAVE_STORAGE_SIZE @H5_FORTRAN_HAVE_STORAGE_SIZE@ @@ -81,4 +87,4 @@ #cmakedefine01 H5_NO_DEPRECATED_SYMBOLS #if H5_NO_DEPRECATED_SYMBOLS == 0 #undef H5_NO_DEPRECATED_SYMBOLS -#endif \ No newline at end of file +#endif diff --git a/fortran/src/H5config_f.inc.in b/fortran/src/H5config_f.inc.in index 7fb76e12449..991e4b0750b 100644 --- a/fortran/src/H5config_f.inc.in +++ b/fortran/src/H5config_f.inc.in @@ -20,6 +20,9 @@ ! Define if we have subfiling support #undef HAVE_SUBFILING_VFD +! Define if on APPLE +#undef HAVE_DARWIN + ! Define if the intrinsic function STORAGE_SIZE exists #undef FORTRAN_HAVE_STORAGE_SIZE diff --git a/fortran/src/H5f90global.F90 b/fortran/src/H5f90global.F90 index e60f1e83320..aa046235eb3 100644 --- a/fortran/src/H5f90global.F90 +++ b/fortran/src/H5f90global.F90 @@ -25,6 +25,12 @@ MODULE H5GLOBAL IMPLICIT NONE +!> @brief H5_ih_info_t derived type. 
+ TYPE, BIND(C) :: H5_ih_info_t + INTEGER(HSIZE_T) :: index_size !< btree and/or list + INTEGER(HSIZE_T) :: heap_size !< Heap size + END TYPE H5_ih_info_t + !> \addtogroup FH5 !> @{ ! Parameters used in the function 'h5kind_to_type' located in H5_ff.F90. @@ -368,6 +374,12 @@ MODULE H5GLOBAL !DEC$ATTRIBUTES DLLEXPORT :: H5D_MPIO_NO_CHUNK_OPTIMIZATION_F !DEC$ATTRIBUTES DLLEXPORT :: H5D_MPIO_LINK_CHUNK_F !DEC$ATTRIBUTES DLLEXPORT :: H5D_MPIO_MULTI_CHUNK_F + + !DEC$ATTRIBUTES DLLEXPORT :: H5D_SCALAR_IO_F + !DEC$ATTRIBUTES DLLEXPORT :: H5D_VECTOR_IO_F + !DEC$ATTRIBUTES DLLEXPORT :: H5D_SELECTION_IO_F + + !DEC$endif !> \addtogroup FH5D !> @{ @@ -444,6 +456,10 @@ MODULE H5GLOBAL INTEGER :: H5D_MPIO_NO_CHUNK_OPTIMIZATION_F !< H5D_MPIO_NO_CHUNK_OPTIMIZATION INTEGER :: H5D_MPIO_LINK_CHUNK_F !< H5D_MPIO_LINK_CHUNK INTEGER :: H5D_MPIO_MULTI_CHUNK_F !< H5D_MPIO_MULTI_CHUNK + + INTEGER :: H5D_SCALAR_IO_F !< Scalar (or legacy MPIO) I/O was performed + INTEGER :: H5D_VECTOR_IO_F !< Vector I/O was performed + INTEGER :: H5D_SELECTION_IO_F !< Selection I/O was performed ! ! H5E flags declaration ! diff --git a/fortran/src/H5f90proto.h b/fortran/src/H5f90proto.h index 28a4fa66e7b..0fe1b2017a1 100644 --- a/fortran/src/H5f90proto.h +++ b/fortran/src/H5f90proto.h @@ -368,7 +368,6 @@ H5_FCDLL int_f h5otoken_cmp_c(hid_t_f *loc_id, H5O_token_t *token1, H5O_token_t * Functions from H5Pf.c */ H5_FCDLL int_f h5pcreate_c(hid_t_f *cls, hid_t_f *prp_id); -H5_FCDLL int_f h5pclose_c(hid_t_f *prp_id); H5_FCDLL int_f h5pcopy_c(hid_t_f *prp_id, hid_t_f *new_prp_id); H5_FCDLL int_f h5pequal_c(hid_t_f *plist1_id, hid_t_f *plist2_id, int_f *c_flag); H5_FCDLL int_f h5pget_class_c(hid_t_f *prp_id, hid_t_f *classtype); @@ -451,9 +450,6 @@ H5_FCDLL int_f h5pset_small_data_block_size_c(hid_t_f *plist, hsize_t_f *size); H5_FCDLL int_f h5pget_small_data_block_size_c(hid_t_f *plist, hsize_t_f *size); H5_FCDLL int_f h5pset_hyper_vector_size_c(hid_t_f *plist, size_t_f *size); H5_FCDLL int_f h5pget_hyper_vector_size_c(hid_t_f *plist, size_t_f *size); -H5_FCDLL int_f h5pcreate_class_c(hid_t_f *parent, _fcd name, int_f *name_len, hid_t_f *cls, - H5P_cls_create_func_t create, void *create_data, H5P_cls_copy_func_t copy, - void *copy_data, H5P_cls_close_func_t close, void *close_data); H5_FCDLL int_f h5pregister_c(hid_t_f *cls, _fcd name, int_f *name_len, size_t_f *size, void *value); H5_FCDLL int_f h5pinsert_c(hid_t_f *plist, _fcd name, int_f *name_len, size_t_f *size, void *value); H5_FCDLL int_f h5pset_c(hid_t_f *prp_id, _fcd name, int_f *name_len, void *value); diff --git a/fortran/src/h5fc.in b/fortran/src/h5fc.in index b793648d854..c5da815f3f6 100644 --- a/fortran/src/h5fc.in +++ b/fortran/src/h5fc.in @@ -110,15 +110,15 @@ usage() { # A wonderfully informative "usage" message. echo "usage: $prog_name [OPTIONS] " echo " OPTIONS:" - echo " -help This help message." - echo " -echo Show all the shell commands executed" - echo " -prefix=DIR Prefix directory to find HDF5 lib/ and include/" - echo " subdirectories [default: $prefix]" - echo " -show Show the commands without executing them" - echo " -showconfig Show the HDF5 library configuration summary" - echo " -shlib Compile with shared HDF5 libraries [default for hdf5 built" + echo " -help | --help | -h This help message." 
+ echo " -echo Show all the shell commands executed" + echo " -prefix=DIR Prefix directory to find HDF5 lib/ and include/" + echo " subdirectories [default: $prefix]" + echo " -show Show the commands without executing them" + echo " -showconfig Show the HDF5 library configuration summary" + echo " -shlib Compile with shared HDF5 libraries [default for hdf5 built" echo " without static libraries]" - echo " -noshlib Compile with static HDF5 libraries [default for hdf5 built" + echo " -noshlib Compile with static HDF5 libraries [default for hdf5 built" echo " with static libraries]" echo " " echo " - the normal compile line options for your compiler." @@ -230,6 +230,12 @@ for arg in $@ ; do -help) usage ;; + --help) + usage + ;; + -h) + usage + ;; *\"*) qarg="'"$arg"'" allargs="$allargs $qarg" diff --git a/fortran/src/hdf5_fortrandll.def.in b/fortran/src/hdf5_fortrandll.def.in index 3b6600c061e..a8399a38ca7 100644 --- a/fortran/src/hdf5_fortrandll.def.in +++ b/fortran/src/hdf5_fortrandll.def.in @@ -258,7 +258,6 @@ H5P_mp_H5PSET_PRESERVE_F H5P_mp_H5PGET_PRESERVE_F H5P_mp_H5PGET_CLASS_F H5P_mp_H5PCOPY_F -H5P_mp_H5PCLOSE_F H5P_mp_H5PSET_CHUNK_F H5P_mp_H5PGET_CHUNK_F H5P_mp_H5PSET_DEFLATE_F @@ -331,7 +330,6 @@ H5P_mp_H5PCOPY_PROP_F H5P_mp_H5PREMOVE_F H5P_mp_H5PUNREGISTER_F H5P_mp_H5PCLOSE_CLASS_F -H5P_mp_H5PCREATE_CLASS_F H5P_mp_H5PREGISTER_INTEGER H5P_mp_H5PREGISTER_CHAR H5P_mp_H5PINSERT_CHAR @@ -417,6 +415,7 @@ H5P_mp_H5PSET_FILE_SPACE_STRATEGY_F H5P_mp_H5PGET_FILE_SPACE_STRATEGY_F H5P_mp_H5PSET_FILE_SPACE_PAGE_SIZE_F H5P_mp_H5PGET_FILE_SPACE_PAGE_SIZE_F +H5P_mp_H5PGET_ACTUAL_SELECTION_IO_MODE_F ; Parallel @H5_NOPAREXP@H5P_mp_H5PSET_FAPL_MPIO_F @H5_NOPAREXP@H5P_mp_H5PGET_FAPL_MPIO_F @@ -554,6 +553,8 @@ H5VL_mp_H5VLGET_CONNECTOR_ID_BY_VALUE_F H5VL_mp_H5VLGET_CONNECTOR_NAME_F H5VL_mp_H5VLCLOSE_F H5VL_mp_H5VLUNREGISTER_CONNECTOR_F +H5VL_mp_H5VLNATIVE_ADDR_TO_TOKEN_F +H5VL_mp_H5VLNATIVE_TOKEN_TO_ADDR_F ; H5Z H5Z_mp_H5ZUNREGISTER_F H5Z_mp_H5ZFILTER_AVAIL_F diff --git a/fortran/test/tH5D.F90 b/fortran/test/tH5D.F90 index c38123518fe..fc774144f14 100644 --- a/fortran/test/tH5D.F90 +++ b/fortran/test/tH5D.F90 @@ -38,6 +38,7 @@ MODULE TH5D USE HDF5 ! 
This module contains all necessary modules USE TH5_MISC USE TH5_MISC_GEN + USE ISO_C_BINDING CONTAINS SUBROUTINE datasettest(cleanup, total_error) @@ -514,8 +515,6 @@ END SUBROUTINE extenddsettest SUBROUTINE test_userblock_offset(cleanup, total_error) - USE ISO_C_BINDING - IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(OUT) :: total_error @@ -631,8 +630,6 @@ END SUBROUTINE test_userblock_offset SUBROUTINE test_dset_fill(cleanup, total_error) - USE ISO_C_BINDING - IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(OUT) :: total_error @@ -640,7 +637,6 @@ SUBROUTINE test_dset_fill(cleanup, total_error) INTEGER, PARAMETER :: DIM0=10 INTEGER, PARAMETER :: int_kind_1 = SELECTED_INT_KIND(2) !should map to INTEGER*1 on most modern processors INTEGER, PARAMETER :: int_kind_4 = SELECTED_INT_KIND(4) !should map to INTEGER*2 on most modern processors - INTEGER, PARAMETER :: int_kind_8 = SELECTED_INT_KIND(9) !should map to INTEGER*4 on most modern processors INTEGER, PARAMETER :: int_kind_16 = SELECTED_INT_KIND(18) !should map to INTEGER*8 on most modern processors INTEGER(KIND=int_kind_1) , DIMENSION(1:DIM0), TARGET :: data_i1 INTEGER(KIND=int_kind_4) , DIMENSION(1:DIM0), TARGET :: data_i4 @@ -991,8 +987,6 @@ END SUBROUTINE test_dset_fill SUBROUTINE test_direct_chunk_io(cleanup, total_error) - USE ISO_C_BINDING - IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup diff --git a/fortran/test/tH5E_F03.F90 b/fortran/test/tH5E_F03.F90 index 2d8dd331bfe..c2bf74be061 100644 --- a/fortran/test/tH5E_F03.F90 +++ b/fortran/test/tH5E_F03.F90 @@ -92,17 +92,15 @@ END FUNCTION my_hdf5_error_handler_nodata END MODULE test_my_hdf5_error_handler - - MODULE TH5E_F03 + USE ISO_C_BINDING + USE test_my_hdf5_error_handler + CONTAINS SUBROUTINE test_error(total_error) - USE ISO_C_BINDING - USE test_my_hdf5_error_handler - IMPLICIT NONE INTEGER(hid_t), PARAMETER :: FAKE_ID = -1 diff --git a/fortran/test/tH5F.F90 b/fortran/test/tH5F.F90 index a5b67acac9a..b4d973e6844 100644 --- a/fortran/test/tH5F.F90 +++ b/fortran/test/tH5F.F90 @@ -44,8 +44,6 @@ MODULE TH5F CONTAINS SUBROUTINE h5openclose(total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error @@ -141,8 +139,6 @@ SUBROUTINE h5openclose(total_error) END SUBROUTINE h5openclose SUBROUTINE mountingtest(cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error @@ -502,8 +498,6 @@ END SUBROUTINE mountingtest ! SUBROUTINE reopentest(cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error @@ -690,8 +684,6 @@ END SUBROUTINE reopentest ! correct output for a given obj_id and filename. ! SUBROUTINE check_get_name(obj_id, fix_filename, len_filename, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE INTEGER(HID_T) :: obj_id ! Object identifier CHARACTER(LEN=80), INTENT(IN) :: fix_filename ! Expected filename @@ -780,8 +772,6 @@ END SUBROUTINE check_get_name ! SUBROUTINE get_name_test(cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error @@ -846,8 +836,6 @@ END SUBROUTINE get_name_test ! created using the got property lists SUBROUTINE plisttest(cleanup, total_error) - USE HDF5 ! 
This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error @@ -946,8 +934,6 @@ END SUBROUTINE plisttest ! SUBROUTINE file_close(cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error @@ -1075,8 +1061,6 @@ END SUBROUTINE file_close ! SUBROUTINE file_space(filename, cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE CHARACTER(*), INTENT(IN) :: filename LOGICAL, INTENT(IN) :: cleanup @@ -1168,8 +1152,6 @@ END SUBROUTINE file_space ! SUBROUTINE test_file_info(filename, cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE CHARACTER(*), INTENT(IN) :: filename LOGICAL, INTENT(IN) :: cleanup diff --git a/fortran/test/tH5G.F90 b/fortran/test/tH5G.F90 index 83e0101bd3b..36c51df54a1 100644 --- a/fortran/test/tH5G.F90 +++ b/fortran/test/tH5G.F90 @@ -26,6 +26,9 @@ MODULE TH5G + USE HDF5 ! This module contains all necessary modules + USE TH5_MISC + CONTAINS SUBROUTINE group_test(cleanup, total_error) @@ -35,9 +38,6 @@ SUBROUTINE group_test(cleanup, total_error) ! h5glink(2)_f, h5gunlink_f, h5gmove(2)_f, h5gget_linkval_f, h5gset_comment_f, ! h5gget_comment_f - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC - IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error diff --git a/fortran/test/tH5O_F03.F90 b/fortran/test/tH5O_F03.F90 index e608a30d65c..a4cf282ecca 100644 --- a/fortran/test/tH5O_F03.F90 +++ b/fortran/test/tH5O_F03.F90 @@ -73,6 +73,8 @@ INTEGER FUNCTION compare_h5o_info_t( loc_id, oinfo_f, oinfo_c, field, full_f_fie INTEGER :: cmp_value INTEGER :: i INTEGER :: ierr + INTEGER(HADDR_T) :: addr + TYPE(H5O_TOKEN_T_F) :: token status = 0 @@ -82,7 +84,7 @@ INTEGER FUNCTION compare_h5o_info_t( loc_id, oinfo_f, oinfo_c, field, full_f_fie RETURN ENDIF token_c%token = oinfo_c%token%token - CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr); + CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr) IF( (ierr .EQ. -1) .OR. (cmp_value .NE. 0) ) THEN status = -1 RETURN @@ -96,6 +98,22 @@ INTEGER FUNCTION compare_h5o_info_t( loc_id, oinfo_f, oinfo_c, field, full_f_fie RETURN ENDIF + CALL h5vlnative_token_to_addr_f(loc_id, oinfo_f%token, addr, ierr) + IF( ierr .EQ. -1) THEN + status = -1 + RETURN + ENDIF + CALL h5vlnative_addr_to_token_f(loc_id, addr, token, ierr) + IF( ierr .EQ. -1) THEN + status = -1 + RETURN + ENDIF + CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token, cmp_value, ierr) + IF( (ierr .EQ. -1) .OR. (cmp_value .NE. 0) ) THEN + status = -1 + RETURN + ENDIF + ENDIF IF((field .EQ. H5O_INFO_TIME_F).OR.(field .EQ. H5O_INFO_ALL_F))THEN @@ -132,7 +150,7 @@ INTEGER FUNCTION compare_h5o_info_t( loc_id, oinfo_f, oinfo_c, field, full_f_fie status = 0 IF( oinfo_c%fileno .NE. oinfo_f%fileno) status = status + 1 token_c%token = oinfo_c%token%token - CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr); + CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr) IF( (ierr .EQ. -1) .OR. (cmp_value .NE. 0) ) THEN status = -1 RETURN @@ -156,7 +174,7 @@ INTEGER FUNCTION compare_h5o_info_t( loc_id, oinfo_f, oinfo_c, field, full_f_fie status = 0 IF( oinfo_c%fileno .NE. 
oinfo_f%fileno) status = status + 1 token_c%token = oinfo_c%token%token - CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr); + CALL H5Otoken_cmp_f(loc_id, oinfo_f%token, token_c, cmp_value, ierr) IF( (ierr .EQ. -1) .OR. (cmp_value .NE. 0) ) THEN status = -1 RETURN @@ -234,25 +252,24 @@ INTEGER FUNCTION visit_obj_cb( group_id, name, oinfo_c, op_data) bind(C) ENDIF ! Check H5Oget_info_by_name_f; if partial field values were filled correctly - CALL H5Oget_info_by_name_f(group_id, name2, oinfo_f, ierr); + CALL H5Oget_info_by_name_f(group_id, name2, oinfo_f, ierr) visit_obj_cb = compare_h5o_info_t( group_id, oinfo_f, oinfo_c, op_data%field, .TRUE. ) IF(visit_obj_cb.EQ.-1) RETURN ! Check H5Oget_info_by_name_f, only check field values - CALL H5Oget_info_by_name_f(group_id, name2, oinfo_f, ierr, fields = op_data%field); + CALL H5Oget_info_by_name_f(group_id, name2, oinfo_f, ierr, fields = op_data%field) visit_obj_cb = compare_h5o_info_t(group_id, oinfo_f, oinfo_c, op_data%field, .FALSE. ) IF(visit_obj_cb.EQ.-1) RETURN - IF(op_data%idx.EQ.1)THEN ! Check H5Oget_info_f, only check field values - CALL H5Oget_info_f(group_id, oinfo_f, ierr, fields = op_data%field); + CALL H5Oget_info_f(group_id, oinfo_f, ierr, fields = op_data%field) visit_obj_cb = compare_h5o_info_t(group_id, oinfo_f, oinfo_c, op_data%field, .FALSE. ) IF(visit_obj_cb.EQ.-1) RETURN ! Check H5Oget_info_f; if partial field values where filled correctly - CALL H5Oget_info_f(group_id, oinfo_f, ierr); + CALL H5Oget_info_f(group_id, oinfo_f, ierr) visit_obj_cb = compare_h5o_info_t(group_id, oinfo_f, oinfo_c, op_data%field, .TRUE. ) IF(visit_obj_cb.EQ.-1) RETURN @@ -267,6 +284,10 @@ END MODULE visit_cb MODULE TH5O_F03 + USE HDF5 + USE TH5_MISC + USE ISO_C_BINDING + CONTAINS !*************************************************************** !** @@ -276,9 +297,6 @@ MODULE TH5O_F03 SUBROUTINE test_h5o_refcount(total_error) - USE HDF5 - USE TH5_MISC - USE ISO_C_BINDING IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error @@ -415,11 +433,8 @@ END SUBROUTINE test_h5o_refcount SUBROUTINE test_obj_visit(total_error) - USE HDF5 - USE TH5_MISC - USE visit_cb - USE ISO_C_BINDING + IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error @@ -553,9 +568,6 @@ END SUBROUTINE test_obj_visit SUBROUTINE test_obj_info(total_error) - USE HDF5 - USE TH5_MISC - USE ISO_C_BINDING IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error @@ -682,7 +694,6 @@ SUBROUTINE test_obj_info(total_error) CALL check("h5oget_info_by_idx_f", -1, total_error) ENDIF - ! Close objects CALL h5dclose_f(did, error) CALL check("h5dclose_f", error, total_error) @@ -702,8 +713,6 @@ END SUBROUTINE test_obj_info SUBROUTINE build_visit_file(fid) - USE HDF5 - USE TH5_MISC IMPLICIT NONE INTEGER(hid_t) :: fid ! File ID diff --git a/fortran/test/tH5P.F90 b/fortran/test/tH5P.F90 index c73016bc7cf..78d665f0aa1 100644 --- a/fortran/test/tH5P.F90 +++ b/fortran/test/tH5P.F90 @@ -869,6 +869,7 @@ SUBROUTINE test_in_place_conversion(cleanup, total_error) REAL(KIND=C_DOUBLE), DIMENSION(1:array_len) :: wbuf_d_org REAL(KIND=C_FLOAT), DIMENSION(1:array_len), TARGET :: rbuf INTEGER :: i + INTEGER :: actual_selection_io_mode TYPE(C_PTR) :: f_ptr ! create the data @@ -919,6 +920,10 @@ SUBROUTINE test_in_place_conversion(cleanup, total_error) ! Should not be equal for in-place buffer use CALL VERIFY("h5dwrite_f -- in-place", wbuf_d(1), wbuf_d_org(1), total_error, .FALSE.) 
+ CALL h5pget_actual_selection_io_mode_f(plist_id, actual_selection_io_mode, error) + CALL check("h5pget_actual_selection_io_mode_f", error, total_error) + CALL VERIFY("h5pget_actual_selection_io_mode_f", actual_selection_io_mode, H5D_SCALAR_IO_F, total_error) + f_ptr = C_LOC(rbuf) CALL h5dread_f(dset_id, h5kind_to_type(KIND(rbuf(1)), H5_REAL_KIND), f_ptr, error) CALL check("h5dread_f", error, total_error) diff --git a/fortran/test/tH5P_F03.F90 b/fortran/test/tH5P_F03.F90 index 24934eb3e05..c962d52821b 100644 --- a/fortran/test/tH5P_F03.F90 +++ b/fortran/test/tH5P_F03.F90 @@ -439,7 +439,6 @@ END SUBROUTINE test_genprop_class_callback SUBROUTINE test_h5p_file_image(total_error) - USE, INTRINSIC :: iso_c_binding IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error INTEGER(hid_t) :: fapl_1 = -1 @@ -653,7 +652,6 @@ END SUBROUTINE external_test_offset ! SUBROUTINE test_vds(total_error) - USE ISO_C_BINDING IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error diff --git a/fortran/test/tH5Sselect.F90 b/fortran/test/tH5Sselect.F90 index 6dfd7e69f06..b6d28d32e07 100644 --- a/fortran/test/tH5Sselect.F90 +++ b/fortran/test/tH5Sselect.F90 @@ -319,9 +319,6 @@ END SUBROUTINE test_select_hyperslab SUBROUTINE test_select_element(cleanup, total_error) - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC - IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup INTEGER, INTENT(INOUT) :: total_error diff --git a/fortran/test/tH5T.F90 b/fortran/test/tH5T.F90 index 953d6d07020..a38cbeadf53 100644 --- a/fortran/test/tH5T.F90 +++ b/fortran/test/tH5T.F90 @@ -819,8 +819,6 @@ END SUBROUTINE basic_data_type_test SUBROUTINE enumtest(cleanup, total_error) - USE HDF5 - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(IN) :: cleanup diff --git a/fortran/test/tH5T_F03.F90 b/fortran/test/tH5T_F03.F90 index 661a0cc05f9..39845971644 100644 --- a/fortran/test/tH5T_F03.F90 +++ b/fortran/test/tH5T_F03.F90 @@ -3407,8 +3407,6 @@ SUBROUTINE multiple_dset_rw(total_error) ! Failure: number of errors !------------------------------------------------------------------------- ! - USE iso_c_binding - USE hdf5 IMPLICIT NONE INTEGER, INTENT(INOUT) :: total_error ! number of errors diff --git a/fortran/test/tH5Z.F90 b/fortran/test/tH5Z.F90 index 3ac51d6a977..c6ab3832d18 100644 --- a/fortran/test/tH5Z.F90 +++ b/fortran/test/tH5Z.F90 @@ -25,15 +25,15 @@ !***** MODULE TH5Z + USE HDF5 ! This module contains all necessary modules + USE TH5_MISC + CONTAINS SUBROUTINE filters_test(total_error) ! This subroutine tests following functionalities: h5zfilter_avail_f, h5zunregister_f - USE HDF5 ! This module contains all necessary modules - USE TH5_MISC - IMPLICIT NONE INTEGER, INTENT(OUT) :: total_error LOGICAL :: status @@ -164,8 +164,6 @@ SUBROUTINE filters_test(total_error) END SUBROUTINE filters_test SUBROUTINE szip_test(szip_flag, cleanup, total_error) - USE HDF5 ! 
This module contains all necessary modules - USE TH5_MISC IMPLICIT NONE LOGICAL, INTENT(OUT) :: szip_flag diff --git a/fortran/test/vol_connector.F90 b/fortran/test/vol_connector.F90 index e2235f4bbbf..7394a31d6af 100644 --- a/fortran/test/vol_connector.F90 +++ b/fortran/test/vol_connector.F90 @@ -227,7 +227,6 @@ END MODULE VOL_TMOD PROGRAM vol_connector - USE HDF5 USE VOL_TMOD IMPLICIT NONE diff --git a/fortran/testpar/CMakeTests.cmake b/fortran/testpar/CMakeTests.cmake index 8c157241500..473049fb976 100644 --- a/fortran/testpar/CMakeTests.cmake +++ b/fortran/testpar/CMakeTests.cmake @@ -17,3 +17,4 @@ ############################################################################## add_test (NAME MPI_TEST_FORT_parallel_test COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $ ${MPIEXEC_POSTFLAGS}) add_test (NAME MPI_TEST_FORT_subfiling_test COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $ ${MPIEXEC_POSTFLAGS}) +add_test (NAME MPI_TEST_FORT_async_test COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $ ${MPIEXEC_POSTFLAGS}) diff --git a/fortran/testpar/Makefile.am b/fortran/testpar/Makefile.am index 7f9f2846928..1c374090601 100644 --- a/fortran/testpar/Makefile.am +++ b/fortran/testpar/Makefile.am @@ -36,7 +36,7 @@ TEST_PROG_PARA=parallel_test subfiling_test async_test check_PROGRAMS=$(TEST_PROG_PARA) # Temporary files -CHECK_CLEANFILES+=parf[12].h5 subf.h5* +CHECK_CLEANFILES+=parf[12].h5 h5*_tests.h5 subf.h5* *.mod # Test source files parallel_test_SOURCES=ptest.F90 hyper.F90 mdset.F90 multidsetrw.F90 diff --git a/fortran/testpar/async.F90 b/fortran/testpar/async.F90 index 88ecc3edecd..02045cb7d58 100644 --- a/fortran/testpar/async.F90 +++ b/fortran/testpar/async.F90 @@ -1240,10 +1240,6 @@ END MODULE test_async_APIs ! PROGRAM async_test USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_INT64_T - USE HDF5 - USE MPI - USE TH5_MISC - USE TH5_MISC_GEN USE test_async_APIs IMPLICIT NONE diff --git a/fortran/testpar/hyper.F90 b/fortran/testpar/hyper.F90 index edd93cf9b8f..ec3a657afbb 100644 --- a/fortran/testpar/hyper.F90 +++ b/fortran/testpar/hyper.F90 @@ -55,6 +55,7 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors) INTEGER :: local_no_collective_cause INTEGER :: global_no_collective_cause INTEGER :: no_selection_io_cause + INTEGER :: actual_selection_io_mode ! ! initialize the array data between the processes (3) @@ -236,6 +237,20 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors) CALL h5dwrite_f(dset_id,H5T_NATIVE_INTEGER,wbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id) CALL check("h5dwrite_f", hdferror, nerrors) + CALL h5pget_actual_selection_io_mode_f(dxpl_id, actual_selection_io_mode, hdferror) + CALL check("h5pget_actual_selection_io_mode_f", hdferror, nerrors) + IF(do_collective)THEN + IF(actual_selection_io_mode .NE. H5D_SELECTION_IO_F)THEN + PRINT*, "Incorrect actual selection io mode" + nerrors = nerrors + 1 + ENDIF + ELSE + IF(actual_selection_io_mode .NE. IOR(H5D_SELECTION_IO_F, H5D_SCALAR_IO_F))THEN + PRINT*, "Incorrect actual selection io mode" + nerrors = nerrors + 1 + ENDIF + ENDIF + ! 
Check h5pget_mpio_actual_io_mode_f function CALL h5pget_mpio_actual_io_mode_f(dxpl_id, actual_io_mode, hdferror) CALL check("h5pget_mpio_actual_io_mode_f", hdferror, nerrors) diff --git a/fortran/testpar/subfiling.F90 b/fortran/testpar/subfiling.F90 index 043ac6cb771..a677bea6121 100644 --- a/fortran/testpar/subfiling.F90 +++ b/fortran/testpar/subfiling.F90 @@ -54,6 +54,7 @@ PROGRAM subfiling_test INTEGER(HID_T) :: driver_id CHARACTER(len=8) :: hex1, hex2 + CHARACTER(len=1) :: arg ! ! initialize MPI @@ -336,10 +337,14 @@ PROGRAM subfiling_test WRITE(*,"(A,A)") "Failed to find the stub subfile ",TRIM(filename) nerrors = nerrors + 1 ENDIF - - CALL EXECUTE_COMMAND_LINE("stat --format='%i' "//filename//" >> tmp_inode", EXITSTAT=i) +#ifdef H5_HAVE_DARWIN + arg(1:1)="f" +#else + arg(1:1)="c" +#endif + CALL EXECUTE_COMMAND_LINE("stat -"//arg(1:1)//" %i "//filename//" >> tmp_inode", EXITSTAT=i) IF(i.ne.0)THEN - WRITE(*,"(A,A)") "Failed to stat the stub subfile ",TRIM(filename) + WRITE(*,"(A,A)") "Failed to stat the stub subfile ",TRIM(filename) nerrors = nerrors + 1 ENDIF diff --git a/hl/CMakeLists.txt b/hl/CMakeLists.txt index a777b72f540..45a9a22dded 100644 --- a/hl/CMakeLists.txt +++ b/hl/CMakeLists.txt @@ -26,6 +26,6 @@ if (HDF5_BUILD_EXAMPLES) endif () #-- Build the Unit testing if requested -if (BUILD_TESTING AND HDF5_TEST_SERIAL) +if (HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING AND HDF5_TEST_SERIAL) add_subdirectory (test) endif () diff --git a/hl/c++/examples/ptExampleFL.cpp b/hl/c++/examples/ptExampleFL.cpp index 0e280f0355b..cb407e21f65 100644 --- a/hl/c++/examples/ptExampleFL.cpp +++ b/hl/c++/examples/ptExampleFL.cpp @@ -72,7 +72,7 @@ main(void) if (err < 0) fprintf(stderr, "Error getting packet count."); - printf("Number of packets in packet table after five appends: %llu\n", count); + printf("Number of packets in packet table after five appends: %" PRIuHSIZE "\n", count); /* Initialize packet table's "current record" */ ptable.ResetIndex(); diff --git a/hl/fortran/src/H5TBfc.c b/hl/fortran/src/H5TBfc.c index 9c257777b2f..d339def6f66 100644 --- a/hl/fortran/src/H5TBfc.c +++ b/hl/fortran/src/H5TBfc.c @@ -92,10 +92,12 @@ h5tbmake_table_c(size_t_f *namelen1, _fcd name1, hid_t_f *loc_id, size_t_f *name /* * call H5TBmake_table function. */ + H5_GCC_CLANG_DIAG_OFF("cast-qual") if (H5TBmake_table(c_name1, (hid_t)*loc_id, c_name, c_nfields, (hsize_t)*nrecords, (size_t)*type_size, (const char **)c_field_names, c_field_offset, c_field_types, (hsize_t)*chunk_size, NULL, *compress, NULL) < 0) HGOTO_DONE(FAIL); + H5_GCC_CLANG_DIAG_ON("cast-qual") done: if (c_name) @@ -193,10 +195,12 @@ h5tbmake_table_ptr_c(size_t_f *namelen1, _fcd name1, hid_t_f *loc_id, size_t_f * /* * call H5TBmake_table function. */ + H5_GCC_CLANG_DIAG_OFF("cast-qual") if (H5TBmake_table(c_name1, (hid_t)*loc_id, c_name, c_nfields, (hsize_t)*nrecords, (size_t)*type_size, (const char **)c_field_names, c_field_offset, c_field_types, (hsize_t)*chunk_size, fill_data, *compress, data) < 0) HGOTO_DONE(FAIL); + H5_GCC_CLANG_DIAG_ON("cast-qual") done: if (c_name) diff --git a/hl/test/Makefile.am b/hl/test/Makefile.am index 1d1cb0f92fb..6f66291b25d 100644 --- a/hl/test/Makefile.am +++ b/hl/test/Makefile.am @@ -20,7 +20,7 @@ include $(top_srcdir)/config/commence.am # Add include directories to C preprocessor flags AM_CPPFLAGS+=-I. 
-I$(srcdir) -I$(top_builddir)/src -I$(top_srcdir)/src -I$(top_builddir)/test -I$(top_srcdir)/test -I$(top_srcdir)/hl/src -# The tests depend on the hdf5, hdf5 test, and hdf5_hl libraries +# The tests depend on the hdf5, hdf5 test, and hdf5_hl libraries LDADD=$(LIBH5_HL) $(LIBH5TEST) $(LIBHDF5) # Test programs. These are our main targets. They should be listed in the diff --git a/hl/test/test_table.c b/hl/test/test_table.c index c475e7fee95..c6614343037 100644 --- a/hl/test/test_table.c +++ b/hl/test/test_table.c @@ -198,7 +198,7 @@ test_table(hid_t fid, int do_write) hsize_t chunk_size = 10; int compress = 0; int *fill = NULL; - particle_t fill1[1] = {{"no data", -1, -99.0, -99.0, -1}}; + particle_t fill1[1] = {{"no data", -1, -99.0F, -99.0, -1}}; int fill1_new[1] = {-100}; hsize_t position; char tname[20]; @@ -226,23 +226,23 @@ test_table(hid_t fid, int do_write) particle2_t rbuf2[NRECORDS]; particle3_t rbuf3[NRECORDS]; particle_t rbufc[NRECORDS * 2]; - particle_t abuf[2] = {{"eight", 80, 8.0, 80.0, 80}, {"nine", 90, 9.0, 90.0, 90}}; - particle_t ibuf[2] = {{"zero", 0, 0.0, 0.0, 0}, {"zero", 0, 0.0, 0.0, 0}}; + particle_t abuf[2] = {{"eight", 80, 8.0F, 80.0, 80}, {"nine", 90, 9.0F, 90.0, 90}}; + particle_t ibuf[2] = {{"zero", 0, 0.0F, 0.0, 0}, {"zero", 0, 0.0F, 0.0, 0}}; particle_t wbufd[NRECORDS]; particle_t wbuf[NRECORDS] = {{ "zero", 0, - 0.0, + 0.0F, 0.0, 0, }, - {"one", 10, 1.0, 10.0, 10}, - {"two", 20, 2.0, 20.0, 20}, - {"three", 30, 3.0, 30.0, 30}, - {"four", 40, 4.0, 40.0, 40}, - {"five", 50, 5.0, 50.0, 50}, - {"six", 60, 6.0, 60.0, 60}, - {"seven", 70, 7.0, 70.0, 70}}; + {"one", 10, 1.0F, 10.0, 10}, + {"two", 20, 2.0F, 20.0, 20}, + {"three", 30, 3.0F, 30.0, 30}, + {"four", 40, 4.0F, 40.0, 40}, + {"five", 50, 5.0F, 50.0, 50}, + {"six", 60, 6.0F, 60.0, 60}, + {"seven", 70, 7.0F, 70.0, 70}}; /* buffers for the field "Pressure" and "New_field" */ float pressure_in[NRECORDS] = {0.0F, 1.0F, 2.0F, 3.0F, 4.0F, 5.0F, 6.0F, 7.0F}; float pressure_out[NRECORDS]; diff --git a/java/CMakeLists.txt b/java/CMakeLists.txt index ae37ceb9ad8..4965f2c9584 100644 --- a/java/CMakeLists.txt +++ b/java/CMakeLists.txt @@ -49,7 +49,7 @@ endif () #----------------------------------------------------------------------------- # Testing #----------------------------------------------------------------------------- -if (BUILD_TESTING) +if (NOT HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING) add_subdirectory (test) endif () diff --git a/java/test/Makefile.am b/java/test/Makefile.am index 9f39be9ca1c..7f6ab0169a8 100644 --- a/java/test/Makefile.am +++ b/java/test/Makefile.am @@ -90,7 +90,8 @@ noinst_DATA = $(jarfile) check_SCRIPTS = junit.sh TEST_SCRIPT = $(check_SCRIPTS) -CLEANFILES = classnoinst.stamp $(jarfile) $(JAVAROOT)/$(pkgpath)/*.class junit.sh +CLEANFILES = classnoinst.stamp $(jarfile) $(JAVAROOT)/$(pkgpath)/*.class junit.sh \ + *.h5 testExport*.txt clean: rm -rf $(JAVAROOT)/* diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index 835892471c2..c837d1c4e02 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -859,6 +859,7 @@ HDF5_ENABLE_ANALYZER_TOOLS "enable the use of Clang tools" HDF5_ENABLE_SANITIZERS "execute the Clang sanitizer" OFF HDF5_ENABLE_FORMATTERS "format source files" OFF HDF5_DIMENSION_SCALES_NEW_REF "Use new-style references with dimension scale APIs" OFF +HDF5_ENABLE_DOXY_WARNINGS "Enable fail if doxygen parsing has warnings." 
ON ---------------- HDF5 Advanced Test Options --------------------- if (BUILD_TESTING) @@ -885,19 +886,19 @@ HDF5_ENABLE_PLUGIN_SUPPORT "Enable PLUGIN Filters" HDF5_ENABLE_SZIP_SUPPORT "Use SZip Filter" ON HDF5_ENABLE_Z_LIB_SUPPORT "Enable Zlib Filters" ON -ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 0 -ZLIB_TGZ_ORIGPATH "Use ZLIB from original location" "https://github.com/madler/zlib/releases/download/v1.2.13" -ZLIB_TGZ_NAME "Use ZLIB from original compressed file" "zlib-1.2.13.tar.gz" -ZLIB_USE_LOCALCONTENT "Use local file for ZLIB FetchContent" ON +ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" OFF +ZLIB_TGZ_ORIGPATH "Use ZLIB from original location" "https://github.com/madler/zlib/releases/download/v1.2.13" +ZLIB_TGZ_NAME "Use ZLIB from original compressed file" "zlib-1.2.13.tar.gz" +ZLIB_USE_LOCALCONTENT "Use local file for ZLIB FetchContent" ON -SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0 +SZIP_USE_EXTERNAL "Use External Library Building for SZIP" OFF if (HDF5_ENABLE_SZIP_SUPPORT) - HDF5_ENABLE_SZIP_ENCODING "Use SZip Encoding" ON -LIBAEC_TGZ_ORIGPATH "Use LIBAEC from original location" "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6/libaec-1.0.6.tar.gz" -LIBAEC_TGZ_NAME "Use LIBAEC from original compressed file" "libaec-v1.0.6.tar.gz" + HDF5_ENABLE_SZIP_ENCODING "Use SZip Encoding" ON +LIBAEC_TGZ_ORIGPATH "Use LIBAEC from original location" "https://github.com/MathisRosenhauer/libaec/releases/download/v1.0.6/libaec-1.0.6.tar.gz" +LIBAEC_TGZ_NAME "Use LIBAEC from original compressed file" "libaec-v1.0.6.tar.gz" LIBAEC_USE_LOCALCONTENT "Use local file for LIBAEC FetchContent" ON -PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGINS" 0 +PLUGIN_USE_EXTERNAL "Use External Library Building for PLUGINS" OFF if (WINDOWS) H5_DEFAULT_PLUGINDIR "%ALLUSERSPROFILE%/hdf5/lib/plugin" else () diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index b50fe611213..5dd36ea101c 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -47,6 +47,26 @@ New Features Configuration: ------------- + - Improved support for Intel oneAPI + + * Separates the old 'classic' Intel compiler settings and warnings + from the oneAPI settings + * Uses `-check nouninit` in debug builds to avoid false positives + when building H5_buildiface with `-check all` + * Both Autotools and CMake + + - Added new options for CMake and Autotools to control the Doxygen + warnings as errors setting. + + * HDF5_ENABLE_DOXY_WARNINGS: ON/OFF (Default: ON) + * --enable-doxygen-errors: enable/disable (Default: enable) + + The default will fail compile if the doxygen parsing generates warnings. + The option can be disabled if certain versions of doxygen have parsing + issues. i.e. 1.9.5, 1.9.8. + + Addresses GitHub issue #3398 + - Added support for AOCC and classic Flang w/ the Autotools * Adds a config/clang-fflags options file to support Flang @@ -73,6 +93,7 @@ New Features Removed HDF options for using FETCH_CONTENT explicitly: BUILD_SZIP_WITH_FETCHCONTENT:BOOL BUILD_ZLIB_WITH_FETCHCONTENT:BOOL + - Thread-safety + static library disabled on Windows w/ CMake The thread-safety feature requires hooks in DllMain(), which is only @@ -215,6 +236,22 @@ New Features Library: -------- + - Added a simple cache to the read-only S3 (ros3) VFD + + The read-only S3 VFD now caches the first N bytes of a file stored + in S3 to avoid a lot of small I/O operations when opening files. 
+ This cache is per-file and created when the file is opened. + + N is currently 16 MiB or the size of the file, whichever is smaller. + + Addresses GitHub issue #3381 + + - Added new API function H5Pget_actual_selection_io_mode() + + This function allows the user to determine if the library performed + selection I/O, vector I/O, or scalar (legacy) I/O during the last HDF5 + operation performed with the provided DXPL. + - Added support for in-place type conversion in most cases In-place type conversion allows the library to perform type conversion @@ -283,10 +320,25 @@ New Features performing I/O on all the filtered datasets at once and then performing I/O on all the unfiltered datasets at once. + - Changed H5Pset_evict_on_close so that it can be called with a parallel + build of HDF5 + + Previously, H5Pset_evict_on_close would always fail when called from a + parallel build of HDF5, stating that the feature is not supported with + parallel HDF5. This failure would occur even if a parallel build of HDF5 + was used with a serial HDF5 application. H5Pset_evict_on_close can now + be called regardless of the library build type and the library will + instead fail during H5Fcreate/H5Fopen if the "evict on close" property + has been set to true and the file is being opened for parallel access + with more than 1 MPI process. + Fortran Library: ---------------- + - Added Fortran APIs: + h5vlnative_addr_to_token_f and h5vlnative_token_to_address_f + - Fixed an uninitialized error return value for hdferr to return the error state of the h5aopen_by_idx_f API. @@ -296,7 +348,8 @@ New Features - Fortran async APIs H5A, H5D, H5ES, H5G, H5F, H5L and H5O were added. - Added Fortran APIs: - h5pset_selection_io_f, h5pget_selection_io_f + h5pset_selection_io_f, h5pget_selection_io_f, + h5pget_actual_selection_io_mode_f, h5pset_modify_write_buf_f, h5pget_modify_write_buf_f - Added Fortran APIs: @@ -355,6 +408,72 @@ Bug Fixes since HDF5-1.14.0 release =================================== Library ------- + - Fixed a file handle leak in the core VFD + + When opening a file with the core VFD and a file image, if the file + already exists, the file check would leak the POSIX file handle. + + Fixes GitHub issue #635 + + - Fixed some issues with chunk index metadata not getting read + collectively when collective metadata reads are enabled + + When looking up dataset chunks during I/O, the parallel library + temporarily disables collective metadata reads since it's generally + unlikely that the application will read the same chunks from all + MPI ranks. Leaving collective metadata reads enabled during + chunk lookups can lead to hangs or other bad behavior depending + on the chunk indexing structure used for the dataset in question. + However, due to the way that dataset chunk index metadata was + previously loaded in a deferred manner, this could mean that + the metadata for the main chunk index structure or its + accompanying pieces of metadata (e.g., fixed array data blocks) + could end up being read independently if these chunk lookup + operations are the first chunk index-related operation that + occurs on a dataset. This behavior is generally observed when + opening a dataset for which the metadata isn't in the metadata + cache yet and then immediately performing I/O on that dataset. 
+ This behavior is not generally observed when creating a dataset + and then performing I/O on it, as the relevant metadata will + usually be in the metadata cache as a side effect of creating + the chunk index structures during dataset creation. + + This issue has been fixed by adding callbacks to the different + chunk indexing structure classes that allow more explicit control + over when chunk index metadata gets loaded. When collective + metadata reads are enabled, the necessary index metadata will now + get loaded collectively by all MPI ranks at the start of dataset + I/O to ensure that the ranks don't unintentionally read this + metadata independently further on. These changes fix collective + loading of the main chunk index structure, as well as v2 B-tree + root nodes, extensible array index blocks and fixed array data + blocks. There are still pieces of metadata that cannot currently + be loaded collectively, however, such as extensible array data + blocks, data block pages and super blocks, as well as fixed array + data block pages. These pieces of metadata are not necessarily + read in by all MPI ranks since this depends on which chunks the + ranks have selected in the dataset. Therefore, reading of these + pieces of metadata remains an independent operation. + + - Fixed potential hangs in parallel library during collective I/O with + independent metadata writes + + When performing collective parallel writes to a dataset where metadata + writes are requested as (or left as the default setting of) independent, + hangs could potentially occur during metadata cache sync points. This + was due to incorrect management of the internal state tracking whether + an I/O operation should be collective or not, causing the library to + attempt collective writes of metadata when they were meant to be + independent writes. During the metadata cache sync points, if the number + of cache entries being flushed was a multiple of the number of MPI ranks + in the MPI communicator used to access the HDF5 file, an equal amount of + collective MPI I/O calls were made and the dataset write call would be + successful. However, when the number of cache entries being flushed was + NOT a multiple of the number of MPI ranks, the ranks with more entries + than others would get stuck in an MPI_File_set_view call, while other + ranks would get stuck in a post-write MPI_Barrier call. This issue has + been fixed by correctly switching to independent I/O temporarily when + writing metadata independently during collective dataset I/O. - Dropped support for MPI-2 @@ -700,6 +819,16 @@ Bug Fixes since HDF5-1.14.0 release Configuration ------------- + - Fixed an issue where the h5tools_test_utils test program was being + installed on the system for Autotools builds of HDF5 + + The h5tools_test_utils test program was mistakenly added to bin_PROGRAMS + in its Makefile.am configuration file, causing the executable to be + installed on the system. The executable is now added to noinst_PROGRAMS + instead and will no longer be installed on the system for Autotools builds + of HDF5. The CMake configuration code already avoids installing the + executable on the system. 
+ - Fixed a configuration issue that prevented building of the Subfiling VFD on macOS Checks were added to the CMake and Autotools code to verify that CLOCK_MONOTONIC_COARSE, @@ -791,6 +920,19 @@ Bug Fixes since HDF5-1.14.0 release Tools ----- + - Fixed an issue with unmatched MPI messages in ph5diff + + The "manager" MPI rank in ph5diff was unintentionally sending "program end" + messages to its workers twice, leading to an error from MPICH similar to the + following: + + Abort(810645519) on node 1 (rank 1 in comm 0): Fatal error in internal_Finalize: Other MPI error, error stack: + internal_Finalize(50)...........: MPI_Finalize failed + MPII_Finalize(394)..............: + MPIR_Comm_delete_internal(1224).: Communicator (handle=44000000) being freed has 1 unmatched message(s) + MPIR_Comm_release_always(1250)..: + MPIR_finalize_builtin_comms(154): + - Fixed an issue in h5repack for variable-length typed datasets When repacking datasets into a new file, h5repack tries to determine whether @@ -831,6 +973,7 @@ Bug Fixes since HDF5-1.14.0 release ----------- - + High-Level Library ------------------ - @@ -1007,20 +1150,40 @@ Platforms Tested x86_64; Version 19.10-0 - Windows 10 x64 Visual Studio 2015 w/ Intel C/C++/Fortran 18 (cmake) - Visual Studio 2017 w/ Intel C/C++/Fortran 19 (cmake) - Visual Studio 2019 w/ clang 12.0.0 + Windows 10 x64 Visual Studio 2019 w/ clang 12.0.0 with MSVC-like command-line (C/C++ only - cmake) - Visual Studio 2019 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) + Visual Studio 2019 w/ Intel C/C++ only cmake) Visual Studio 2022 w/ clang 15.0.1 with MSVC-like command-line (C/C++ only - cmake) - Visual Studio 2022 w/ Intel C/C++/Fortran oneAPI 2022 (cmake) + Visual Studio 2022 w/ Intel C/C++/Fortran oneAPI 2023 (cmake) Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake) Known Problems ============== + When HDF5 is compiled with NVHPC versions 23.5 - 23.9 (additional versions may + also be applicable) and with -O2 (or higher) and -DNDEBUG, test failures occur + in the following tests: + + H5PLUGIN-filter_plugin + H5TEST-flush2 + H5TEST-testhdf5-base + MPI_TEST_t_filters_parallel + + Since these tests pass with an optimization level of -O1 (and -O0) and it is + currently unclear whether the test failures are due to issues in HDF5 or issues + in the 'nvc' compiler, the maximum optimization level for NVHPC has been set + to -O1 until the test failures can be resolved. Note that even at -O1 optimization + level, there still appears to be a sporadic test failure in the Java JUnit tests + that has occasionally been seen in JUnit-TestH5Pfapl and JUnit-TestH5D. It is also + unclear whether this is an issue in HDF5 or with the 'nvc' compiler. Finally, note + that NVHPC 23.9 will fail to compile the test/tselect.c test file with a compiler + error of 'use of undefined value' when the optimization level is -O2 or higher. + Nvidia is aware of this issue and has suggested lowering the optimization level to + -O1 for the time being: + https://forums.developer.nvidia.com/t/hdf5-no-longer-compiles-with-nv-23-9/269045. + IEEE standard arithmetic enables software to raise exceptions such as overflow, division by zero, and other illegal operations without interrupting or halting the program flow. The HDF5 C library intentionally performs these exceptions. 
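
For reference, the new H5Pget_actual_selection_io_mode() API described in the release notes above can be exercised from C roughly as follows. This is a minimal sketch, not part of the patch: it assumes an already-open dataset identifier (dset_id) and a write buffer sized to the full dataset extent, and the helper name is illustrative. The Fortran wrapper h5pget_actual_selection_io_mode_f added by this patch exposes the same query.

    /* Sketch only: query which raw data I/O path HDF5 took on the last
     * transfer made with this DXPL. Error checking trimmed for brevity. */
    #include <stdio.h>
    #include "hdf5.h"

    static void report_io_mode(hid_t dset_id, const int *buf)
    {
        hid_t    dxpl_id = H5Pcreate(H5P_DATASET_XFER);
        uint32_t mode    = 0;

        /* Perform a write with this DXPL (buf must cover the whole dataset) */
        H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, buf);

        /* Ask what kind(s) of I/O were actually performed; the result is a
         * bitmask, so more than one flag may be set */
        H5Pget_actual_selection_io_mode(dxpl_id, &mode);

        if (mode & H5D_SELECTION_IO)
            printf("selection I/O\n");
        if (mode & H5D_VECTOR_IO)
            printf("vector I/O\n");
        if (mode & H5D_SCALAR_IO)
            printf("scalar (legacy) I/O\n");

        H5Pclose(dxpl_id);
    }
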
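The new Fortran wrappers h5vlnative_addr_to_token_f and h5vlnative_token_to_addr_f map onto the existing C routines H5VLnative_addr_to_token() and H5VLnative_token_to_addr(). Below is a hedged C sketch of the round trip the tH5O_F03.F90 test performs; it assumes loc_id refers to an object accessed through the native VOL connector, the function name is illustrative, and error handling is trimmed.

    /* Sketch only: convert an object token to a native-VFD file address and
     * back, then verify the round trip with H5Otoken_cmp(). */
    #include "hdf5.h"

    static int roundtrip_token(hid_t loc_id)
    {
        H5O_info2_t oinfo;
        H5O_token_t token2;
        haddr_t     addr = HADDR_UNDEF;
        int         cmp  = -1;

        /* Token of the object itself (meaningful only with the native VOL) */
        if (H5Oget_info3(loc_id, &oinfo, H5O_INFO_BASIC) < 0)
            return -1;

        /* token -> file address -> token */
        if (H5VLnative_token_to_addr(loc_id, oinfo.token, &addr) < 0)
            return -1;
        if (H5VLnative_addr_to_token(loc_id, addr, &token2) < 0)
            return -1;

        /* The round trip should reproduce the original token */
        H5Otoken_cmp(loc_id, &oinfo.token, &token2, &cmp);
        return (cmp == 0) ? 0 : -1;
    }
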
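The collective metadata read/write fixes described in the release notes above apply when an application opts in through the file access property list. A minimal sketch of that configuration follows; it assumes a parallel (MPI) build of HDF5, the helper name is hypothetical, and error checks are omitted.

    /* Sketch only: a parallel FAPL that requests collective metadata
     * reads and writes before opening the file. */
    #include <stdbool.h>
    #include <mpi.h>
    #include "hdf5.h"

    static hid_t open_with_coll_metadata(const char *filename)
    {
        hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
        hid_t file_id;

        H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);
        H5Pset_all_coll_metadata_ops(fapl_id, true); /* collective metadata reads */
        H5Pset_coll_metadata_write(fapl_id, true);   /* collective metadata writes */

        file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
        H5Pclose(fapl_id);
        return file_id;
    }
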
diff --git a/src/H5CX.c b/src/H5CX.c index aa9d0b5aece..c46c58af35e 100644 --- a/src/H5CX.c +++ b/src/H5CX.c @@ -299,6 +299,11 @@ typedef struct H5CX_t { bool no_selection_io_cause_set; /* Whether reason for not performing selection I/O is set */ bool no_selection_io_cause_valid; /* Whether reason for not performing selection I/O is valid */ + uint32_t + actual_selection_io_mode; /* Actual selection I/O mode used (H5D_ACTUAL_SELECTION_IO_MODE_NAME) */ + hbool_t actual_selection_io_mode_set; /* Whether actual selection I/O mode is set */ + hbool_t actual_selection_io_mode_valid; /* Whether actual selection I/O mode is valid */ + /* Cached LCPL properties */ H5T_cset_t encoding; /* Link name character encoding */ bool encoding_valid; /* Whether link name character encoding is valid */ @@ -380,6 +385,8 @@ typedef struct H5CX_dxpl_cache_t { H5D_selection_io_mode_t selection_io_mode; /* Selection I/O mode (H5D_XFER_SELECTION_IO_MODE_NAME) */ uint32_t no_selection_io_cause; /* Reasons for not performing selection I/O (H5D_XFER_NO_SELECTION_IO_CAUSE_NAME) */ + uint32_t actual_selection_io_mode; /* Actual selection I/O mode + (H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME) */ bool modify_write_buf; /* Whether the library can modify write buffers */ } H5CX_dxpl_cache_t; @@ -571,13 +578,18 @@ H5CX_init(void) /* Get the selection I/O mode */ if (H5P_get(dx_plist, H5D_XFER_SELECTION_IO_MODE_NAME, &H5CX_def_dxpl_cache.selection_io_mode) < 0) - HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve parallel transfer method"); + HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve selection I/O mode"); /* Get the local & global reasons for breaking selection I/O values */ if (H5P_get(dx_plist, H5D_XFER_NO_SELECTION_IO_CAUSE_NAME, &H5CX_def_dxpl_cache.no_selection_io_cause) < 0) HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve cause for no selection I/O"); + /* Get the actual selection I/O mode */ + if (H5P_get(dx_plist, H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME, + &H5CX_def_dxpl_cache.actual_selection_io_mode) < 0) + HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve actual selection I/O mode"); + /* Get the modify write buffer property */ if (H5P_get(dx_plist, H5D_XFER_MODIFY_WRITE_BUF_NAME, &H5CX_def_dxpl_cache.modify_write_buf) < 0) HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "Can't retrieve modify write buffer property"); @@ -778,6 +790,11 @@ H5CX__push_common(H5CX_node_t *cnode) cnode->ctx.tag = H5AC__INVALID_TAG; cnode->ctx.ring = H5AC_RING_USER; +#ifdef H5_HAVE_PARALLEL + cnode->ctx.btype = MPI_BYTE; + cnode->ctx.ftype = MPI_BYTE; +#endif + /* Push context node onto stack */ cnode->next = *head; *head = cnode; @@ -2509,6 +2526,47 @@ H5CX_get_no_selection_io_cause(uint32_t *no_selection_io_cause) FUNC_LEAVE_NOAPI(ret_value) } /* end H5CX_get_no_selection_io_cause() */ +/*------------------------------------------------------------------------- + * Function: H5CX_get_actual_selection_io_mode + * + * Purpose: Retrieves the actual I/O mode (scalar, vector, and/or selection) for the current API call + *context. 
+ * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5CX_get_actual_selection_io_mode(uint32_t *actual_selection_io_mode) +{ + H5CX_node_t **head = NULL; /* Pointer to head of API context list */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + assert(actual_selection_io_mode); + head = H5CX_get_my_context(); /* Get the pointer to the head of the API context, for this thread */ + assert(head && *head); + assert(H5P_DEFAULT != (*head)->ctx.dxpl_id); + + /* This property is a special case - we want to wipe out any previous setting. Copy the default setting + * if it has not been set yet. */ + if ((*head)->ctx.dxpl_id != H5P_DATASET_XFER_DEFAULT && !(*head)->ctx.actual_selection_io_mode_set && + !(*head)->ctx.actual_selection_io_mode_valid) { + (*head)->ctx.actual_selection_io_mode = H5CX_def_dxpl_cache.actual_selection_io_mode; + (*head)->ctx.actual_selection_io_mode_set = true; + } + H5CX_RETRIEVE_PROP_VALID_SET(dxpl, H5P_DATASET_XFER_DEFAULT, H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME, + actual_selection_io_mode) + + /* Get the value */ + *actual_selection_io_mode = (*head)->ctx.actual_selection_io_mode; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5CX_get_actual_selection_io_mode() */ + /*------------------------------------------------------------------------- * Function: H5CX_get_modify_write_buf * @@ -3438,7 +3496,7 @@ H5CX_test_set_mpio_coll_rank0_bcast(bool mpio_coll_rank0_bcast) #endif /* H5_HAVE_PARALLEL */ /*------------------------------------------------------------------------- - * Function: H5CX_set_no_selecction_io_cause + * Function: H5CX_set_no_selection_io_cause * * Purpose: Sets the reason for not performing selection I/O for * the current API call context. @@ -3467,7 +3525,39 @@ H5CX_set_no_selection_io_cause(uint32_t no_selection_io_cause) } /* end if */ FUNC_LEAVE_NOAPI_VOID -} /* end H5CX_set_no_selectiion_io_cause() */ +} /* end H5CX_set_no_selection_io_cause() */ + +/*------------------------------------------------------------------------- + * Function: H5CX_set_actual_selection_io_mode + * + * Purpose: Sets the actual selection I/O mode for the current API + * call context. 
+ * + * Return: + * + *------------------------------------------------------------------------- + */ +void +H5CX_set_actual_selection_io_mode(uint32_t actual_selection_io_mode) +{ + H5CX_node_t **head = NULL; /* Pointer to head of API context list */ + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + /* Sanity checks */ + head = H5CX_get_my_context(); /* Get the pointer to the head of the API context, for this thread */ + assert(head && *head); + assert((*head)->ctx.dxpl_id != H5P_DEFAULT); + + /* If we're using the default DXPL, don't modify it */ + if ((*head)->ctx.dxpl_id != H5P_DATASET_XFER_DEFAULT) { + /* Cache the value for later, marking it to set in DXPL when context popped */ + (*head)->ctx.actual_selection_io_mode = actual_selection_io_mode; + (*head)->ctx.actual_selection_io_mode_set = true; + } + + FUNC_LEAVE_NOAPI_VOID +} /* end H5CX_set_actual_selection_io_mode() */ /*------------------------------------------------------------------------- * Function: H5CX_get_ohdr_flags @@ -3524,7 +3614,17 @@ H5CX__pop_common(bool update_dxpl_props) /* Check for cached DXPL properties to return to application */ if (update_dxpl_props) { + /* actual_selection_io_mode is a special case - we always want to set it in the property list even if + * it was never set by the library, in that case it indicates no I/O was performed and we don't want + * to leave the (possibly incorrect) old value in the property list, so set from the default property + * list */ + if ((*head)->ctx.dxpl_id != H5P_DATASET_XFER_DEFAULT && !(*head)->ctx.actual_selection_io_mode_set) { + (*head)->ctx.actual_selection_io_mode = H5CX_def_dxpl_cache.actual_selection_io_mode; + (*head)->ctx.actual_selection_io_mode_set = true; + } + H5CX_SET_PROP(H5D_XFER_NO_SELECTION_IO_CAUSE_NAME, no_selection_io_cause) + H5CX_SET_PROP(H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME, actual_selection_io_mode) #ifdef H5_HAVE_PARALLEL H5CX_SET_PROP(H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, mpio_actual_chunk_opt) H5CX_SET_PROP(H5D_MPIO_ACTUAL_IO_MODE_NAME, mpio_actual_io_mode) diff --git a/src/H5CXprivate.h b/src/H5CXprivate.h index aa6883b62ef..76812ee55ef 100644 --- a/src/H5CXprivate.h +++ b/src/H5CXprivate.h @@ -116,6 +116,7 @@ H5_DLL herr_t H5CX_get_vlen_alloc_info(H5T_vlen_alloc_info_t *vl_alloc_info); H5_DLL herr_t H5CX_get_dt_conv_cb(H5T_conv_cb_t *cb_struct); H5_DLL herr_t H5CX_get_selection_io_mode(H5D_selection_io_mode_t *selection_io_mode); H5_DLL herr_t H5CX_get_no_selection_io_cause(uint32_t *no_selection_io_cause); +H5_DLL herr_t H5CX_get_actual_selection_io_mode(uint32_t *actual_selection_io_mode); H5_DLL herr_t H5CX_get_modify_write_buf(bool *modify_write_buf); /* "Getter" routines for LCPL properties cached in API context */ @@ -162,6 +163,7 @@ H5_DLL herr_t H5CX_init(void); /* "Setter" routines for cached DXPL properties that must be returned to application */ H5_DLL void H5CX_set_no_selection_io_cause(uint32_t no_selection_io_cause); +H5_DLL void H5CX_set_actual_selection_io_mode(uint32_t actual_selection_io_mode); #ifdef H5_HAVE_PARALLEL H5_DLL void H5CX_set_mpio_actual_chunk_opt(H5D_mpio_actual_chunk_opt_mode_t chunk_opt); diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index d7bf5b1dbda..c8db5352ff6 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -154,8 +154,9 @@ herr_t H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, haddr_t *candidates_list_ptr, int mpi_rank, int mpi_size) { - unsigned first_entry_to_flush; - unsigned last_entry_to_flush; + H5FD_mpio_xfer_t orig_xfer_mode; + unsigned first_entry_to_flush; + unsigned 
last_entry_to_flush; #ifndef NDEBUG unsigned total_entries_to_clear = 0; unsigned total_entries_to_flush = 0; @@ -169,11 +170,12 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha haddr_t last_addr; #endif /* H5C_DO_SANITY_CHECKS */ #if H5C_APPLY_CANDIDATE_LIST__DEBUG - char tbl_buf[1024]; + char *tbl_buf = NULL; #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ unsigned m, n; - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + unsigned u; /* Local index variable */ + bool restore_io_mode = false; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -185,21 +187,57 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha assert(0 <= mpi_rank); assert(mpi_rank < mpi_size); + /* Get I/O transfer mode */ + if (H5CX_get_io_xfer_mode(&orig_xfer_mode) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't get MPI-I/O transfer mode"); + /* Initialize the entries_to_flush and entries_to_clear arrays */ memset(entries_to_flush, 0, sizeof(entries_to_flush)); memset(entries_to_clear, 0, sizeof(entries_to_clear)); #if H5C_APPLY_CANDIDATE_LIST__DEBUG - fprintf(stdout, "%s:%d: setting up candidate assignment table.\n", __func__, mpi_rank); + { + const char *const table_header = "candidate list = "; + size_t tbl_buf_size; + size_t tbl_buf_left; + size_t entry_nchars; + int bytes_printed; - memset(tbl_buf, 0, sizeof(tbl_buf)); + fprintf(stdout, "%s:%d: setting up candidate assignment table.\n", __func__, mpi_rank); + + /* Calculate maximum number of characters printed for each + * candidate entry, including the leading space and "0x" + */ + entry_nchars = (sizeof(long long) * CHAR_BIT / 4) + 3; + + tbl_buf_size = strlen(table_header) + (num_candidates * entry_nchars) + 1; + if (NULL == (tbl_buf = H5MM_malloc(tbl_buf_size))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate debug buffer"); + tbl_buf_left = tbl_buf_size; + + if ((bytes_printed = snprintf(tbl_buf, tbl_buf_left, table_header)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left -= (size_t)bytes_printed; + + for (u = 0; u < num_candidates; u++) { + if ((bytes_printed = snprintf(&(tbl_buf[tbl_buf_size - tbl_buf_left]), tbl_buf_left, " 0x%llx", + (long long)(*(candidates_list_ptr + u)))) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left -= (size_t)bytes_printed; + } - snprintf(tbl_buf, sizeof(tbl_buf), "candidate list = "); - for (u = 0; u < num_candidates; u++) - sprintf(&(tbl_buf[strlen(tbl_buf)]), " 0x%llx", (long long)(*(candidates_list_ptr + u))); - sprintf(&(tbl_buf[strlen(tbl_buf)]), "\n"); + if ((bytes_printed = snprintf(&(tbl_buf[tbl_buf_size - tbl_buf_left]), tbl_buf_left, "\n")) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left -= (size_t)bytes_printed + 1; /* NUL terminator */ - fprintf(stdout, "%s", tbl_buf); + fprintf(stdout, "%s", tbl_buf); + + H5MM_free(tbl_buf); + tbl_buf = NULL; + } #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ if (f->shared->coll_md_write) { @@ -258,18 +296,50 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha last_entry_to_flush = candidate_assignment_table[mpi_rank + 1] - 1; #if H5C_APPLY_CANDIDATE_LIST__DEBUG - for (u = 0; u < 1024; u++) - tbl_buf[u] = '\0'; - 
snprintf(tbl_buf, sizeof(tbl_buf), "candidate assignment table = "); - for (u = 0; u <= (unsigned)mpi_size; u++) - sprintf(&(tbl_buf[strlen(tbl_buf)]), " %u", candidate_assignment_table[u]); - sprintf(&(tbl_buf[strlen(tbl_buf)]), "\n"); - fprintf(stdout, "%s", tbl_buf); - - fprintf(stdout, "%s:%d: flush entries [%u, %u].\n", __func__, mpi_rank, first_entry_to_flush, - last_entry_to_flush); - - fprintf(stdout, "%s:%d: marking entries.\n", __func__, mpi_rank); + { + const char *const table_header = "candidate assignment table = "; + unsigned umax = UINT_MAX; + size_t tbl_buf_size; + size_t tbl_buf_left; + size_t entry_nchars; + int bytes_printed; + + /* Calculate the maximum number of characters printed for each entry */ + entry_nchars = (size_t)(log10(umax) + 1) + 1; + + tbl_buf_size = strlen(table_header) + ((size_t)mpi_size * entry_nchars) + 1; + if (NULL == (tbl_buf = H5MM_malloc(tbl_buf_size))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate debug buffer"); + tbl_buf_left = tbl_buf_size; + + if ((bytes_printed = snprintf(tbl_buf, tbl_buf_left, table_header)) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left -= (size_t)bytes_printed; + + for (u = 0; u <= (unsigned)mpi_size; u++) { + if ((bytes_printed = snprintf(&(tbl_buf[tbl_buf_size - tbl_buf_left]), tbl_buf_left, " %u", + candidate_assignment_table[u])) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left -= (size_t)bytes_printed; + } + + if ((bytes_printed = snprintf(&(tbl_buf[tbl_buf_size - tbl_buf_left]), tbl_buf_left, "\n")) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSERRSTR, FAIL, "can't add to candidate list"); + assert((size_t)bytes_printed < tbl_buf_left); + tbl_buf_left -= (size_t)bytes_printed + 1; /* NUL terminator */ + + fprintf(stdout, "%s", tbl_buf); + + H5MM_free(tbl_buf); + tbl_buf = NULL; + + fprintf(stdout, "%s:%d: flush entries [%u, %u].\n", __func__, mpi_rank, first_entry_to_flush, + last_entry_to_flush); + + fprintf(stdout, "%s:%d: marking entries.\n", __func__, mpi_rank); + } #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ for (u = 0; u < num_candidates; u++) { @@ -354,6 +424,19 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha num_candidates, total_entries_to_clear, total_entries_to_flush); #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ + /* + * If collective I/O was requested, but collective metadata + * writes were not requested, temporarily disable collective + * I/O while flushing candidate entries so that we don't cause + * a hang in the case where the number of candidate entries + * to flush isn't a multiple of mpi_size. + */ + if ((orig_xfer_mode == H5FD_MPIO_COLLECTIVE) && !f->shared->coll_md_write) { + if (H5CX_set_io_xfer_mode(H5FD_MPIO_INDEPENDENT) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set MPI-I/O transfer mode"); + restore_io_mode = true; + } + /* We have now marked all the entries on the candidate list for * either flush or clear -- now scan the LRU and the pinned list * for these entries and do the deed. 
Do this via a call to @@ -367,6 +450,13 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha if (H5C__flush_candidate_entries(f, entries_to_flush, entries_to_clear) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates failed"); + /* Restore collective I/O if we temporarily disabled it */ + if (restore_io_mode) { + if (H5CX_set_io_xfer_mode(orig_xfer_mode) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set MPI-I/O transfer mode"); + restore_io_mode = false; + } + /* If we've deferred writing to do it collectively, take care of that now */ if (f->shared->coll_md_write) { /* Sanity check */ @@ -378,6 +468,10 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha } /* end if */ done: + /* Restore collective I/O if we temporarily disabled it */ + if (restore_io_mode && (H5CX_set_io_xfer_mode(orig_xfer_mode) < 0)) + HDONE_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set MPI-I/O transfer mode"); + if (candidate_assignment_table != NULL) candidate_assignment_table = (unsigned *)H5MM_xfree((void *)candidate_assignment_table); if (cache_ptr->coll_write_list) { diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c index d79f7d0b031..4f8a867974e 100644 --- a/src/H5Dbtree.c +++ b/src/H5Dbtree.c @@ -24,30 +24,32 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5Bprivate.h" /* B-link trees */ -#include "H5Dpkg.h" /* Datasets */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5Fprivate.h" /* Files */ -#include "H5FDprivate.h" /* File drivers */ +#include "H5private.h" /* Generic Functions */ +#include "H5Bprivate.h" /* B-link trees */ +#include "H5Dpkg.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5Fprivate.h" /* Files */ +#include "H5FDprivate.h" /* File drivers */ #include "H5FLprivate.h" /* Free Lists */ -#include "H5Iprivate.h" /* IDs */ -#include "H5MFprivate.h" /* File space management */ -#include "H5MMprivate.h" /* Memory management */ -#include "H5Oprivate.h" /* Object headers */ +#include "H5Iprivate.h" /* IDs */ +#include "H5MFprivate.h" /* File space management */ +#include "H5MMprivate.h" /* Memory management */ +#include "H5Oprivate.h" /* Object headers */ #include "H5Sprivate.h" /* Dataspaces */ -#include "H5VMprivate.h" /* Vector and array functions */ +#include "H5VMprivate.h" /* Vector and array functions */ /****************/ /* Local Macros */ /****************/ +#define H5D_BTREE_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.btree.shared) + /******************/ /* Local Typedefs */ /******************/ /* - * B-tree key. A key contains the minimum logical N-dimensional coordinates and + * B-tree key. A key contains the minimum logical N-dimensional coordinates and * the logical size of the chunk to which this key refers. The * fastest-varying dimension is assumed to reference individual bytes of the * array, so a 100-element 1-d array of 4-byte integers would really be a 2-d @@ -61,9 +63,9 @@ * The chunk's file address is part of the B-tree and not part of the key. 
*/ typedef struct H5D_btree_key_t { - hsize_t scaled[H5O_LAYOUT_NDIMS]; /*logical offset to start*/ - uint32_t nbytes; /*size of stored data */ - unsigned filter_mask; /*excluded filters */ + hsize_t scaled[H5O_LAYOUT_NDIMS]; /*logical offset to start */ + uint32_t nbytes; /*size of stored data */ + unsigned filter_mask; /*excluded filters */ } H5D_btree_key_t; /* B-tree callback info for iteration over chunks */ @@ -111,10 +113,14 @@ static herr_t H5D__btree_debug_key(FILE *stream, int indent, int fwidth, const v static herr_t H5D__btree_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, haddr_t dset_ohdr_addr); static herr_t H5D__btree_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__btree_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__btree_idx_close(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__btree_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open); static bool H5D__btree_idx_is_space_alloc(const H5O_storage_chunk_t *storage); static herr_t H5D__btree_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, const H5D_t *dset); static herr_t H5D__btree_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +static herr_t H5D__btree_idx_load_metadata(const H5D_chk_idx_info_t *idx_info); static int H5D__btree_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); static herr_t H5D__btree_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata); @@ -137,9 +143,13 @@ const H5D_chunk_ops_t H5D_COPS_BTREE[1] = {{ false, /* v1 B-tree indices does not support SWMR access */ H5D__btree_idx_init, /* insert */ H5D__btree_idx_create, /* create */ + H5D__btree_idx_open, /* open */ + H5D__btree_idx_close, /* close */ + H5D__btree_idx_is_open, /* is_open */ H5D__btree_idx_is_space_alloc, /* is_space_alloc */ H5D__btree_idx_insert, /* insert */ H5D__btree_idx_get_addr, /* get_addr */ + H5D__btree_idx_load_metadata, /* load_metadata */ NULL, /* resize */ H5D__btree_idx_iterate, /* iterate */ H5D__btree_idx_remove, /* remove */ @@ -158,21 +168,21 @@ const H5D_chunk_ops_t H5D_COPS_BTREE[1] = {{ /* inherits B-tree like properties from H5B */ static H5B_class_t H5B_BTREE[1] = {{ - H5B_CHUNK_ID, /*id */ - sizeof(H5D_btree_key_t), /*sizeof_nkey */ - H5D__btree_get_shared, /*get_shared */ - H5D__btree_new_node, /*new */ - H5D__btree_cmp2, /*cmp2 */ - H5D__btree_cmp3, /*cmp3 */ - H5D__btree_found, /*found */ - H5D__btree_insert, /*insert */ - false, /*follow min branch? */ - false, /*follow max branch? */ - H5B_LEFT, /*critical key */ - H5D__btree_remove, /*remove */ - H5D__btree_decode_key, /*decode */ - H5D__btree_encode_key, /*encode */ - H5D__btree_debug_key /*debug */ + H5B_CHUNK_ID, /* id */ + sizeof(H5D_btree_key_t), /* sizeof_nkey */ + H5D__btree_get_shared, /* get_shared */ + H5D__btree_new_node, /* new */ + H5D__btree_cmp2, /* cmp2 */ + H5D__btree_cmp3, /* cmp3 */ + H5D__btree_found, /* found */ + H5D__btree_insert, /* insert */ + false, /* follow min branch? */ + false, /* follow max branch? 
*/ + H5B_LEFT, /* critical key */ + H5D__btree_remove, /* remove */ + H5D__btree_decode_key, /* decode */ + H5D__btree_encode_key, /* encode */ + H5D__btree_debug_key /* debug */ }}; /*******************/ @@ -183,13 +193,13 @@ static H5B_class_t H5B_BTREE[1] = {{ H5FL_DEFINE_STATIC(H5O_layout_chunk_t); /*------------------------------------------------------------------------- - * Function: H5D__btree_get_shared + * Function: H5D__btree_get_shared * - * Purpose: Returns the shared B-tree info for the specified UDATA. + * Purpose: Returns the shared B-tree info for the specified UDATA. * - * Return: Success: Pointer to the raw B-tree page for this dataset + * Return: Success: Pointer to the raw B-tree page for this dataset * - * Failure: Can't fail + * Failure: Can't fail * *------------------------------------------------------------------------- */ @@ -210,17 +220,17 @@ H5D__btree_get_shared(const H5F_t H5_ATTR_UNUSED *f, const void *_udata) } /* end H5D__btree_get_shared() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_new_node + * Function: H5D__btree_new_node * - * Purpose: Adds a new entry to an i-storage B-tree. We can assume that - * the domain represented by UDATA doesn't intersect the domain - * already represented by the B-tree. + * Purpose: Adds a new entry to an i-storage B-tree. We can assume + * that the domain represented by UDATA doesn't intersect the + * domain already represented by the B-tree. * - * Return: Success: Non-negative. The address of leaf is returned - * through the ADDR argument. It is also added - * to the UDATA. + * Return: Success: Non-negative. The address of leaf is returned + * through the ADDR argument. It is also added + * to the UDATA. * - * Failure: Negative + * Failure: Negative * *------------------------------------------------------------------------- */ @@ -275,18 +285,18 @@ H5D__btree_new_node(H5F_t H5_ATTR_NDEBUG_UNUSED *f, H5B_ins_t op, void *_lt_key, } /* end H5D__btree_new_node() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_cmp2 + * Function: H5D__btree_cmp2 * - * Purpose: Compares two keys sort of like strcmp(). The UDATA pointer - * is only to supply extra information not carried in the keys - * (in this case, the dimensionality) and is not compared - * against the keys. + * Purpose: Compares two keys sort of like strcmp(). The UDATA pointer + * is only to supply extra information not carried in the keys + * (in this case, the dimensionality) and is not compared + * against the keys. * - * Return: Success: -1 if LT_KEY is less than RT_KEY; - * 1 if LT_KEY is greater than RT_KEY; - * 0 if LT_KEY and RT_KEY are equal. + * Return: Success: -1 if LT_KEY is less than RT_KEY; + * 1 if LT_KEY is greater than RT_KEY; + * 0 if LT_KEY and RT_KEY are equal. 
* - * Failure: FAIL (same as LT_KEYstorage); + assert(H5D_CHUNK_IDX_BTREE == idx_info->storage->idx_type); + assert(is_open); + + *is_open = H5D_BTREE_IDX_IS_OPEN(idx_info); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__btree_idx_is_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__btree_idx_is_space_alloc + * + * Purpose: Query if space is allocated for index method + * + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -886,11 +960,11 @@ H5D__btree_idx_is_space_alloc(const H5O_storage_chunk_t *storage) } /* end H5D__btree_idx_is_space_alloc() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_insert + * Function: H5D__btree_idx_insert * - * Purpose: Insert chunk entry into the indexing structure. + * Purpose: Insert chunk entry into the indexing structure. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -922,13 +996,13 @@ H5D__btree_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, } /* H5D__btree_idx_insert() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_get_addr + * Function: H5D__btree_idx_get_addr * - * Purpose: Get the file address of a chunk if file space has been - * assigned. Save the retrieved information in the udata - * supplied. + * Purpose: Get the file address of a chunk if file space has been + * assigned. Save the retrieved information in the udata + * supplied. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -959,14 +1033,34 @@ H5D__btree_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udat } /* H5D__btree_idx_get_addr() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_iterate_cb + * Function: H5D__btree_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. Currently a no-op. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__btree_idx_load_metadata(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__btree_idx_load_metadata() */ + +/*------------------------------------------------------------------------- + * Function: H5D__btree_idx_iterate_cb * - * Purpose: Translate the B-tree specific chunk record into a generic + * Purpose: Translate the B-tree specific chunk record into a generic * form and make the callback to the generic chunk callback * routine. 
* - * Return: Success: Non-negative - * Failure: Negative + * Return: Success: Non-negative + * Failure: Negative * *------------------------------------------------------------------------- */ @@ -1001,12 +1095,12 @@ H5D__btree_idx_iterate_cb(H5F_t H5_ATTR_UNUSED *f, const void *_lt_key, haddr_t } /* H5D__btree_idx_iterate_cb() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_iterate + * Function: H5D__btree_idx_iterate * - * Purpose: Iterate over the chunks in an index, making a callback + * Purpose: Iterate over the chunks in an index, making a callback * for each one. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1043,11 +1137,11 @@ H5D__btree_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t c } /* end H5D__btree_idx_iterate() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_remove + * Function: H5D__btree_idx_remove * - * Purpose: Remove chunk from index. + * Purpose: Remove chunk from index. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1077,13 +1171,13 @@ H5D__btree_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t } /* H5D__btree_idx_remove() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_delete + * Function: H5D__btree_idx_delete * - * Purpose: Delete index and raw data storage for entire dataset + * Purpose: Delete index and raw data storage for entire dataset * (i.e. 
all chunks) * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -1134,11 +1228,11 @@ H5D__btree_idx_delete(const H5D_chk_idx_info_t *idx_info) } /* end H5D__btree_idx_delete() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_copy_setup + * Function: H5D__btree_idx_copy_setup * - * Purpose: Set up any necessary information for copying chunks + * Purpose: Set up any necessary information for copying chunks * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1178,11 +1272,11 @@ H5D__btree_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk_ } /* end H5D__btree_idx_copy_setup() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_copy_shutdown + * Function: H5D__btree_idx_copy_shutdown * - * Purpose: Shutdown any information from copying chunks + * Purpose: Shutdown any information from copying chunks * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1250,11 +1344,11 @@ H5D__btree_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) } /* end H5D__btree_idx_size() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_reset + * Function: H5D__btree_idx_reset * - * Purpose: Reset indexing information. + * Purpose: Reset indexing information. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1274,11 +1368,11 @@ H5D__btree_idx_reset(H5O_storage_chunk_t *storage, bool reset_addr) } /* end H5D__btree_idx_reset() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_dump + * Function: H5D__btree_idx_dump * - * Purpose: Dump indexing information to a stream. + * Purpose: Dump indexing information to a stream. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1296,11 +1390,11 @@ H5D__btree_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream) } /* end H5D__btree_idx_dump() */ /*------------------------------------------------------------------------- - * Function: H5D__btree_idx_dest + * Function: H5D__btree_idx_dest * - * Purpose: Release indexing information in memory. + * Purpose: Release indexing information in memory. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1328,11 +1422,11 @@ H5D__btree_idx_dest(const H5D_chk_idx_info_t *idx_info) } /* end H5D__btree_idx_dest() */ /*------------------------------------------------------------------------- - * Function: H5D_btree_debug + * Function: H5D_btree_debug * - * Purpose: Debugs a B-tree node for indexed raw data storage. + * Purpose: Debugs a B-tree node for indexed raw data storage. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c index 4da9555d0c0..7a26b6d016c 100644 --- a/src/H5Dbtree2.c +++ b/src/H5Dbtree2.c @@ -27,16 +27,18 @@ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ -#include "H5Dpkg.h" /* Datasets */ +#include "H5Dpkg.h" /* Datasets */ #include "H5FLprivate.h" /* Free Lists */ #include "H5MFprivate.h" /* File space management */ -#include "H5MMprivate.h" /* Memory management */ -#include "H5VMprivate.h" /* Vector and array functions */ +#include "H5MMprivate.h" /* Memory management */ +#include "H5VMprivate.h" /* Vector and array functions */ /****************/ /* Local Macros */ /****************/ +#define H5D_BT2_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.btree2.bt2) + /******************/ /* Local Typedefs */ /******************/ @@ -92,7 +94,6 @@ static herr_t H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth, const vo const void *u_ctx); /* Helper routine */ -static herr_t H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info); static herr_t H5D__btree2_idx_depend(const H5D_chk_idx_info_t *idx_info); /* Callback for H5B2_iterate() which is called in H5D__bt2_idx_iterate() */ @@ -114,10 +115,14 @@ static herr_t H5D__bt2_mod_cb(void *_record, void *_op_data, bool *changed); static herr_t H5D__bt2_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, haddr_t dset_ohdr_addr); static herr_t H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__bt2_idx_close(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__bt2_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open); static bool H5D__bt2_idx_is_space_alloc(const H5O_storage_chunk_t *storage); static herr_t H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, const H5D_t *dset); static herr_t H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +static herr_t H5D__bt2_idx_load_metadata(const H5D_chk_idx_info_t *idx_info); static int H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); static herr_t H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata); @@ -139,9 +144,13 @@ const H5D_chunk_ops_t H5D_COPS_BT2[1] = {{ true, /* Fixed array indices support SWMR access */ H5D__bt2_idx_init, /* init */ H5D__bt2_idx_create, /* create */ + H5D__bt2_idx_open, /* open */ + H5D__bt2_idx_close, /* close */ + H5D__bt2_idx_is_open, /* is_open */ H5D__bt2_idx_is_space_alloc, /* is_space_alloc */ H5D__bt2_idx_insert, /* insert */ H5D__bt2_idx_get_addr, /* get_addr */ + H5D__bt2_idx_load_metadata, /* load_metadata */ NULL, /* resize */ H5D__bt2_idx_iterate, /* iterate */ H5D__bt2_idx_remove, /* remove */ @@ -203,8 +212,8 @@ H5FL_ARR_DEFINE_STATIC(uint32_t, H5O_LAYOUT_NDIMS); * * Purpose: Create client callback context * - * Return: Success: non-NULL - * Failure: NULL + * Return: Success: non-NULL + * Failure: NULL * *------------------------------------------------------------------------- */ @@ -258,8 +267,8 @@ H5D__bt2_crt_context(void *_udata) * * Purpose: Destroy client callback context * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * 
*------------------------------------------------------------------------- */ @@ -286,10 +295,10 @@ H5D__bt2_dst_context(void *_ctx) * Function: H5D__bt2_store * * Purpose: Store native information into record for v2 B-tree - * (non-filtered) + * (non-filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -308,8 +317,8 @@ H5D__bt2_store(void *record, const void *_udata) /*------------------------------------------------------------------------- * Function: H5D__bt2_compare * - * Purpose: Compare two native information records, according to some key - * (non-filtered) + * Purpose: Compare two native information records, according to some + * key (non-filtered) * * Return: <0 if rec1 < rec2 * =0 if rec1 == rec2 @@ -341,10 +350,10 @@ H5D__bt2_compare(const void *_udata, const void *_rec2, int *result) * Function: H5D__bt2_unfilt_encode * * Purpose: Encode native information into raw form for storing on disk - * (non-filtered) + * (non-filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -373,10 +382,10 @@ H5D__bt2_unfilt_encode(uint8_t *raw, const void *_record, void *_ctx) * Function: H5D__bt2_unfilt_decode * * Purpose: Decode raw disk form of record into native form - * (non-filtered) + * (non-filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -403,12 +412,12 @@ H5D__bt2_unfilt_decode(const uint8_t *raw, void *_record, void *_ctx) } /* H5D__bt2_unfilt_decode() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_unfilt_debug + * Function: H5D__bt2_unfilt_debug * - * Purpose: Debug native form of record (non-filtered) + * Purpose: Debug native form of record (non-filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -440,10 +449,10 @@ H5D__bt2_unfilt_debug(FILE *stream, int indent, int fwidth, const void *_record, * Function: H5D__bt2_filt_encode * * Purpose: Encode native information into raw form for storing on disk - * (filtered) + * (filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -473,13 +482,13 @@ H5D__bt2_filt_encode(uint8_t *raw, const void *_record, void *_ctx) } /* H5D__bt2_filt_encode() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_filt_decode + * Function: H5D__bt2_filt_decode * - * Purpose: Decode raw disk form of record into native form - * (filtered) + * Purpose: Decode raw disk form of record into native form + * (filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -511,12 +520,12 @@ H5D__bt2_filt_decode(const uint8_t *raw, void *_record, void *_ctx) } /* H5D__bt2_filt_decode() */ /*------------------------------------------------------------------------- - * 
Function: H5D__bt2_filt_debug + * Function: H5D__bt2_filt_debug * - * Purpose: Debug native form of record (filtered) + * Purpose: Debug native form of record (filtered) * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -570,13 +579,13 @@ H5D__bt2_idx_init(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, const H5S_t } /* end H5D__bt2_idx_init() */ /*------------------------------------------------------------------------- - * Function: H5D__btree2_idx_depend + * Function: H5D__btree2_idx_depend * - * Purpose: Create flush dependency between v2 B-tree and dataset's + * Purpose: Create flush dependency between v2 B-tree and dataset's * object header. * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -629,63 +638,9 @@ H5D__btree2_idx_depend(const H5D_chk_idx_info_t *idx_info) } /* end H5D__btree2_idx_depend() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_open() - * - * Purpose: Opens an existing v2 B-tree. - * - * Note: This information is passively initialized from each index - * operation callback because those abstract chunk index operations - * are designed to work with the v2 B-tree chunk indices also, - * which don't require an 'open' for the data structure. + * Function: H5D__bt2_idx_create * - * Return: Success: non-negative - * Failure: negative - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info) -{ - H5D_bt2_ctx_ud_t u_ctx; /* user data for creating context */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check args */ - assert(idx_info); - assert(idx_info->f); - assert(idx_info->pline); - assert(idx_info->layout); - assert(H5D_CHUNK_IDX_BT2 == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(NULL == idx_info->storage->u.btree2.bt2); - - /* Set up the user data */ - u_ctx.f = idx_info->f; - u_ctx.ndims = idx_info->layout->ndims - 1; - u_ctx.chunk_size = idx_info->layout->size; - u_ctx.dim = idx_info->layout->dim; - - /* Open v2 B-tree for the chunk index */ - if (NULL == - (idx_info->storage->u.btree2.bt2 = H5B2_open(idx_info->f, idx_info->storage->idx_addr, &u_ctx))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open v2 B-tree for tracking chunked dataset"); - - /* Check for SWMR writes to the file */ - if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) - if (H5D__btree2_idx_depend(idx_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, - "unable to create flush dependency on object header"); - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__bt2_idx_open() */ - -/*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_create - * - * Purpose: Create the v2 B-tree for tracking dataset chunks + * Purpose: Create the v2 B-tree for tracking dataset chunks * * Return: SUCCEED/FAIL * @@ -758,11 +713,120 @@ H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info) } /* end H5D__bt2_idx_create() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_is_space_alloc + * Function: H5D__bt2_idx_open() + * + * Purpose: Opens an 
existing v2 B-tree. * - * Purpose: Query if space is allocated for index method + * Note: This information is passively initialized from each index + * operation callback because those abstract chunk index + * operations are designed to work with the v2 B-tree chunk + * indices also, which don't require an 'open' for the data + * structure. * - * Return: Non-negative on success/Negative on failure + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info) +{ + H5D_bt2_ctx_ud_t u_ctx; /* user data for creating context */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check args */ + assert(idx_info); + assert(idx_info->f); + assert(idx_info->pline); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_BT2 == idx_info->layout->idx_type); + assert(idx_info->storage); + assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(NULL == idx_info->storage->u.btree2.bt2); + + /* Set up the user data */ + u_ctx.f = idx_info->f; + u_ctx.ndims = idx_info->layout->ndims - 1; + u_ctx.chunk_size = idx_info->layout->size; + u_ctx.dim = idx_info->layout->dim; + + /* Open v2 B-tree for the chunk index */ + if (NULL == + (idx_info->storage->u.btree2.bt2 = H5B2_open(idx_info->f, idx_info->storage->idx_addr, &u_ctx))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open v2 B-tree for tracking chunked dataset"); + + /* Check for SWMR writes to the file */ + if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if (H5D__btree2_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, + "unable to create flush dependency on object header"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__bt2_idx_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_close() + * + * Purpose: Closes an existing v2 B-tree. 
+ * + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_close(const H5D_chk_idx_info_t *idx_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_BT2 == idx_info->storage->idx_type); + assert(idx_info->storage->u.btree2.bt2); + + if (H5B2_close(idx_info->storage->u.btree2.bt2) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close v2 B-tree"); + idx_info->storage->u.btree2.bt2 = NULL; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__bt2_idx_close() */ + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_is_open + * + * Purpose: Query if the index is opened or not + * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open) +{ + FUNC_ENTER_PACKAGE_NOERR + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_BT2 == idx_info->storage->idx_type); + assert(is_open); + + *is_open = H5D_BT2_IDX_IS_OPEN(idx_info); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__bt2_idx_is_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_is_space_alloc + * + * Purpose: Query if space is allocated for index method + * + * Return: true/false * *------------------------------------------------------------------------- */ @@ -778,14 +842,14 @@ H5D__bt2_idx_is_space_alloc(const H5O_storage_chunk_t *storage) } /* end H5D__bt2_idx_is_space_alloc() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_mod_cb + * Function: H5D__bt2_mod_cb * - * Purpose: Modify record for dataset chunk when it is found in a v2 B-tree. - * This is the callback for H5B2_update() which is called in - * H5D__bt2_idx_insert(). + * Purpose: Modify record for dataset chunk when it is found in a v2 + * B-tree. This is the callback for H5B2_update() which is + * called in H5D__bt2_idx_insert(). * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -817,18 +881,21 @@ H5D__bt2_mod_cb(void *_record, void *_op_data, bool *changed) } /* end H5D__bt2_mod_cb() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_insert + * Function: H5D__bt2_idx_insert + * + * Purpose: Insert chunk address into the indexing structure. + * A non-filtered chunk: + * Should not exist + * Allocate the chunk and pass chunk address back up + * A filtered chunk: + * If it was not found, create the chunk and pass chunk + * address back up + * If it was found but its size changed, reallocate the chunk + * and pass chunk address back up + * If it was found but its size was the same, pass chunk + * address back up * - * Purpose: Insert chunk address into the indexing structure. 
- * A non-filtered chunk: - * Should not exist - * Allocate the chunk and pass chunk address back up - * A filtered chunk: - * If it was not found, create the chunk and pass chunk address back up - * If it was found but its size changed, reallocate the chunk and pass chunk address back up - * If it was found but its size was the same, pass chunk address back up - * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -854,7 +921,7 @@ H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, assert(H5_addr_defined(udata->chunk_block.offset)); /* Check if the v2 B-tree is open yet */ - if (NULL == idx_info->storage->u.btree2.bt2) { + if (!H5D_BT2_IDX_IS_OPEN(idx_info)) { /* Open existing v2 B-tree */ if (H5D__bt2_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); @@ -889,14 +956,14 @@ H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, } /* H5D__bt2_idx_insert() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_found_cb + * Function: H5D__bt2_found_cb * - * Purpose: Retrieve record for dataset chunk when it is found in a v2 B-tree. - * This is the callback for H5B2_find() which is called in - * H5D__bt2_idx_get_addr() and H5D__bt2_idx_insert(). + * Purpose: Retrieve record for dataset chunk when it is found in a v2 + * B-tree. This is the callback for H5B2_find() which is called + * in H5D__bt2_idx_get_addr() and H5D__bt2_idx_insert(). * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -911,13 +978,13 @@ H5D__bt2_found_cb(const void *nrecord, void *op_data) } /* H5D__bt2_found_cb() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_get_addr + * Function: H5D__bt2_idx_get_addr * - * Purpose: Get the file address of a chunk if file space has been - * assigned. Save the retrieved information in the udata - * supplied. + * Purpose: Get the file address of a chunk if file space has been + * assigned. Save the retrieved information in the udata + * supplied. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -944,7 +1011,7 @@ H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) assert(udata); /* Check if the v2 B-tree is open yet */ - if (NULL == idx_info->storage->u.btree2.bt2) { + if (!H5D_BT2_IDX_IS_OPEN(idx_info)) { /* Open existing v2 B-tree */ if (H5D__bt2_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); @@ -1003,16 +1070,59 @@ H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) } /* H5D__bt2_idx_get_addr() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_iterate_cb + * Function: H5D__bt2_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. 
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_load_metadata(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + H5D_chunk_ud_t chunk_ud; + hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + /* + * After opening a dataset that uses a v2 Btree, the root + * node will generally not be read in until an element is + * looked up for the first time. Since there isn't currently + * a good way of controlling that explicitly, perform a fake + * lookup of a chunk to cause it to be read in. + */ + chunk_ud.common.layout = idx_info->layout; + chunk_ud.common.storage = idx_info->storage; + chunk_ud.common.scaled = scaled; + + chunk_ud.chunk_block.offset = HADDR_UNDEF; + chunk_ud.chunk_block.length = 0; + chunk_ud.filter_mask = 0; + chunk_ud.new_unfilt_chunk = false; + chunk_ud.idx_hint = UINT_MAX; + + if (H5D__bt2_idx_get_addr(idx_info, &chunk_ud) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't load v2 B-tree root node"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__bt2_idx_load_metadata() */ + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_iterate_cb * - * Purpose: Translate the B-tree specific chunk record into a generic + * Purpose: Translate the B-tree specific chunk record into a generic * form and make the callback to the generic chunk callback * routine. - * This is the callback for H5B2_iterate() which is called in - * H5D__bt2_idx_iterate(). + * This is the callback for H5B2_iterate() which is called in + * H5D__bt2_idx_iterate(). * - * Return: Success: Non-negative - * Failure: Negative + * Return: Success: Non-negative + * Failure: Negative * *------------------------------------------------------------------------- */ @@ -1033,12 +1143,12 @@ H5D__bt2_idx_iterate_cb(const void *_record, void *_udata) } /* H5D__bt2_idx_iterate_cb() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_iterate + * Function: H5D__bt2_idx_iterate * - * Purpose: Iterate over the chunks in an index, making a callback + * Purpose: Iterate over the chunks in an index, making a callback * for each one. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1062,7 +1172,7 @@ H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chu assert(chunk_udata); /* Check if the v2 B-tree is open yet */ - if (NULL == idx_info->storage->u.btree2.bt2) { + if (!H5D_BT2_IDX_IS_OPEN(idx_info)) { /* Open existing v2 B-tree */ if (H5D__bt2_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); @@ -1087,15 +1197,16 @@ H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chu } /* end H5D__bt2_idx_iterate() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_remove_cb() + * Function: H5D__bt2_remove_cb() * - * Purpose: Free space for 'dataset chunk' object as v2 B-tree - * is being deleted or v2 B-tree node is removed. - * This is the callback for H5B2_remove() and H5B2_delete() which - * which are called in H5D__bt2_idx_remove() and H5D__bt2_idx_delete(). 
+ * Purpose: Free space for 'dataset chunk' object as v2 B-tree + * is being deleted or v2 B-tree node is removed. + * This is the callback for H5B2_remove() and H5B2_delete() + * which are called in H5D__bt2_idx_remove() and + * H5D__bt2_idx_delete(). * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -1121,11 +1232,11 @@ H5D__bt2_remove_cb(const void *_record, void *_udata) } /* H5D__bt2_remove_cb() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_remove + * Function: H5D__bt2_idx_remove * - * Purpose: Remove chunk from index. + * Purpose: Remove chunk from index. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1149,7 +1260,7 @@ H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *u assert(udata); /* Check if the v2 B-tree is open yet */ - if (NULL == idx_info->storage->u.btree2.bt2) { + if (!H5D_BT2_IDX_IS_OPEN(idx_info)) { /* Open existing v2 B-tree */ if (H5D__bt2_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); @@ -1180,13 +1291,13 @@ H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *u } /* H5D__bt2_idx_remove() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_delete + * Function: H5D__bt2_idx_delete * - * Purpose: Delete index and raw data storage for entire dataset + * Purpose: Delete index and raw data storage for entire dataset * (i.e. 
all chunks) * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -1233,11 +1344,11 @@ H5D__bt2_idx_delete(const H5D_chk_idx_info_t *idx_info) } /* end H5D__bt2_idx_delete() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_copy_setup + * Function: H5D__bt2_idx_copy_setup * - * Purpose: Set up any necessary information for copying chunks + * Purpose: Set up any necessary information for copying chunks * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1264,7 +1375,7 @@ H5D__bt2_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk_id assert(!H5_addr_defined(idx_info_dst->storage->idx_addr)); /* Check if the source v2 B-tree is open yet */ - if (NULL == idx_info_src->storage->u.btree2.bt2) + if (!H5D_BT2_IDX_IS_OPEN(idx_info_src)) if (H5D__bt2_idx_open(idx_info_src) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree"); @@ -1284,11 +1395,11 @@ H5D__bt2_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk_id } /* end H5D__bt2_idx_copy_setup() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_copy_shutdown + * Function: H5D__bt2_idx_copy_shutdown * - * Purpose: Shutdown any information from copying chunks + * Purpose: Shutdown any information from copying chunks * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1324,8 +1435,8 @@ H5D__bt2_idx_copy_shutdown(H5O_storage_chunk_t *storage_src, H5O_storage_chunk_t * * Purpose: Retrieve the amount of index storage for chunked dataset * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -1355,23 +1466,23 @@ H5D__bt2_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) /* Get v2 B-tree size for indexing chunked dataset */ if (H5B2_size(bt2_cdset, index_size) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "can't retrieve v2 B-tree storage info for chunked dataset"); + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, + "can't retrieve v2 B-tree storage info for chunked dataset"); done: /* Close v2 B-tree index */ - if (bt2_cdset && H5B2_close(bt2_cdset) < 0) - HDONE_ERROR(H5E_SYM, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for tracking chunked dataset"); - idx_info->storage->u.btree2.bt2 = NULL; + if (H5D__bt2_idx_close(idx_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for tracking chunked dataset"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__bt2_idx_size() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_reset + * Function: H5D__bt2_idx_reset * - * Purpose: Reset indexing information. + * Purpose: Reset indexing information. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1392,11 +1503,11 @@ H5D__bt2_idx_reset(H5O_storage_chunk_t *storage, bool reset_addr) } /* end H5D__bt2_idx_reset() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_dump + * Function: H5D__bt2_idx_dump * - * Purpose: Dump indexing information to a stream. + * Purpose: Dump indexing information to a stream. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1415,11 +1526,11 @@ H5D__bt2_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream) } /* end H5D__bt2_idx_dump() */ /*------------------------------------------------------------------------- - * Function: H5D__bt2_idx_dest + * Function: H5D__bt2_idx_dest * - * Purpose: Release indexing information in memory. + * Purpose: Release indexing information in memory. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -1436,16 +1547,14 @@ H5D__bt2_idx_dest(const H5D_chk_idx_info_t *idx_info) assert(idx_info->storage); /* Check if the v2-btree is open */ - if (idx_info->storage->u.btree2.bt2) { - + if (H5D_BT2_IDX_IS_OPEN(idx_info)) { /* Patch the top level file pointer contained in bt2 if needed */ if (H5B2_patch_file(idx_info->storage->u.btree2.bt2, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch v2 B-tree file pointer"); /* Close v2 B-tree */ - if (H5B2_close(idx_info->storage->u.btree2.bt2) < 0) + if (H5D__bt2_idx_close(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "can't close v2 B-tree"); - idx_info->storage->u.btree2.bt2 = NULL; } /* end if */ done: diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 9f4bd90b68a..41d774d0d3e 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -1124,18 +1124,33 @@ H5D__chunk_io_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dinfo) if (H5F_SHARED_HAS_FEATURE(io_info->f_sh, H5FD_FEAT_HAS_MPI) && H5F_shared_get_coll_metadata_reads(io_info->f_sh) && H5D__chunk_is_space_alloc(&dataset->shared->layout.storage)) { - H5D_chunk_ud_t udata; - hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; + H5O_storage_chunk_t *sc = &(dataset->shared->layout.storage.u.chunk); + H5D_chk_idx_info_t idx_info; + bool index_is_open; + + idx_info.f = dataset->oloc.file; + idx_info.pline = &dataset->shared->dcpl_cache.pline; + idx_info.layout = &dataset->shared->layout.u.chunk; + idx_info.storage = sc; + + assert(sc && sc->ops && sc->ops->is_open); + if (sc->ops->is_open(&idx_info, &index_is_open) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to check if dataset chunk index is open"); + + if (!index_is_open) { + assert(sc->ops->open); + if (sc->ops->open(&idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to open dataset chunk index"); + } /* - * TODO: Until the dataset chunk index callback structure has - * callbacks for checking if an index is opened and also for - * directly opening the index, the following fake chunk lookup - * serves the purpose of forcing a chunk index open operation - * on all ranks + * Load any other chunk index metadata that we can, + * such as fixed array data blocks, while we know all + * MPI 
ranks will do so with collective metadata reads + * enabled */ - if (H5D__chunk_lookup(dataset, scaled, &udata) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to collectively open dataset chunk index"); + if (sc->ops->load_metadata && sc->ops->load_metadata(&idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to load additional chunk index metadata"); } #endif @@ -3827,15 +3842,29 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled, H5D_chunk_ud_t *udat idx_info.storage = sc; #ifdef H5_HAVE_PARALLEL - /* Disable collective metadata read for chunk indexes as it is - * highly unlikely that users would read the same chunks from all - * processes. - */ if (H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI)) { - md_reads_file_flag = H5P_FORCE_FALSE; - md_reads_context_flag = false; - H5F_set_coll_metadata_reads(idx_info.f, &md_reads_file_flag, &md_reads_context_flag); - restore_md_reads_state = true; + /* Disable collective metadata read for chunk indexes as it is + * highly unlikely that users would read the same chunks from all + * processes. + */ + if (H5F_get_coll_metadata_reads(idx_info.f)) { +#ifndef NDEBUG + bool index_is_open; + + /* + * The dataset's chunk index should be open at this point. + * Otherwise, we will end up reading it in independently, + * which may not be desired. + */ + sc->ops->is_open(&idx_info, &index_is_open); + assert(index_is_open); +#endif + + md_reads_file_flag = H5P_FORCE_FALSE; + md_reads_context_flag = false; + H5F_set_coll_metadata_reads(idx_info.f, &md_reads_file_flag, &md_reads_context_flag); + restore_md_reads_state = true; + } } #endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5Dearray.c b/src/H5Dearray.c index c713b6f18bb..965eaacaca3 100644 --- a/src/H5Dearray.c +++ b/src/H5Dearray.c @@ -26,19 +26,21 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5Dpkg.h" /* Datasets */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5EAprivate.h" /* Extensible arrays */ +#include "H5private.h" /* Generic Functions */ +#include "H5Dpkg.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5EAprivate.h" /* Extensible arrays */ #include "H5FLprivate.h" /* Free Lists */ -#include "H5MFprivate.h" /* File space management */ -#include "H5MMprivate.h" /* Memory management */ -#include "H5VMprivate.h" /* Vector functions */ +#include "H5MFprivate.h" /* File space management */ +#include "H5MMprivate.h" /* Memory management */ +#include "H5VMprivate.h" /* Vector functions */ /****************/ /* Local Macros */ /****************/ +#define H5D_EARRAY_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.earray.ea) + /* Value to fill unset array elements with */ #define H5D_EARRAY_FILL HADDR_UNDEF #define H5D_EARRAY_FILT_FILL \ @@ -106,10 +108,14 @@ static herr_t H5D__earray_filt_debug(FILE *stream, int indent, int fwidth, hsize static herr_t H5D__earray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, haddr_t dset_ohdr_addr); static herr_t H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__earray_idx_close(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__earray_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open); static bool H5D__earray_idx_is_space_alloc(const H5O_storage_chunk_t *storage); static herr_t H5D__earray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, const H5D_t *dset); static herr_t 
H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +static herr_t H5D__earray_idx_load_metadata(const H5D_chk_idx_info_t *idx_info); static herr_t H5D__earray_idx_resize(H5O_layout_chunk_t *layout); static int H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); @@ -125,7 +131,6 @@ static herr_t H5D__earray_idx_dump(const H5O_storage_chunk_t *storage, FILE *str static herr_t H5D__earray_idx_dest(const H5D_chk_idx_info_t *idx_info); /* Generic extensible array routines */ -static herr_t H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info); static herr_t H5D__earray_idx_depend(const H5D_chk_idx_info_t *idx_info); /*********************/ @@ -137,9 +142,13 @@ const H5D_chunk_ops_t H5D_COPS_EARRAY[1] = {{ true, /* Extensible array indices support SWMR access */ H5D__earray_idx_init, /* init */ H5D__earray_idx_create, /* create */ + H5D__earray_idx_open, /* open */ + H5D__earray_idx_close, /* close */ + H5D__earray_idx_is_open, /* is_open */ H5D__earray_idx_is_space_alloc, /* is_space_alloc */ H5D__earray_idx_insert, /* insert */ H5D__earray_idx_get_addr, /* get_addr */ + H5D__earray_idx_load_metadata, /* load_metadata */ H5D__earray_idx_resize, /* resize */ H5D__earray_idx_iterate, /* iterate */ H5D__earray_idx_remove, /* remove */ @@ -270,10 +279,10 @@ H5D__earray_dst_context(void *_ctx) /*------------------------------------------------------------------------- * Function: H5D__earray_fill * - * Purpose: Fill "missing elements" in block of elements + * Purpose: Fill "missing elements" in block of elements * - * Return: Success: non-negative - * Failure: negative + * Return: Success: non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -705,59 +714,6 @@ H5D__earray_idx_depend(const H5D_chk_idx_info_t *idx_info) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__earray_idx_depend() */ -/*------------------------------------------------------------------------- - * Function: H5D__earray_idx_open - * - * Purpose: Opens an existing extensible array. - * - * Note: This information is passively initialized from each index - * operation callback because those abstract chunk index operations - * are designed to work with the v1 B-tree chunk indices also, - * which don't require an 'open' for the data structure. 
- * - * Return: Success: non-negative - * Failure: negative - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info) -{ - H5D_earray_ctx_ud_t udata; /* User data for extensible array open call */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check args */ - assert(idx_info); - assert(idx_info->f); - assert(idx_info->pline); - assert(idx_info->layout); - assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); - assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(NULL == idx_info->storage->u.earray.ea); - - /* Set up the user data */ - udata.f = idx_info->f; - udata.chunk_size = idx_info->layout->size; - - /* Open the extensible array for the chunk index */ - if (NULL == - (idx_info->storage->u.earray.ea = H5EA_open(idx_info->f, idx_info->storage->idx_addr, &udata))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open extensible array"); - - /* Check for SWMR writes to the file */ - if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) - if (H5D__earray_idx_depend(idx_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, - "unable to create flush dependency on object header"); - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__earray_idx_open() */ - /*------------------------------------------------------------------------- * Function: H5D__earray_idx_init * @@ -905,12 +861,120 @@ H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__earray_idx_create() */ +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_open + * + * Purpose: Opens an existing extensible array. + * + * Note: This information is passively initialized from each index + * operation callback because those abstract chunk index + * operations are designed to work with the v1 B-tree chunk + * indices also, which don't require an 'open' for the data + * structure. 
+ * + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info) +{ + H5D_earray_ctx_ud_t udata; /* User data for extensible array open call */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check args */ + assert(idx_info); + assert(idx_info->f); + assert(idx_info->pline); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->idx_type); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); + assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(NULL == idx_info->storage->u.earray.ea); + + /* Set up the user data */ + udata.f = idx_info->f; + udata.chunk_size = idx_info->layout->size; + + /* Open the extensible array for the chunk index */ + if (NULL == + (idx_info->storage->u.earray.ea = H5EA_open(idx_info->f, idx_info->storage->idx_addr, &udata))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open extensible array"); + + /* Check for SWMR writes to the file */ + if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if (H5D__earray_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, + "unable to create flush dependency on object header"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_close + * + * Purpose: Closes an existing extensible array. + * + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_close(const H5D_chk_idx_info_t *idx_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); + assert(idx_info->storage->u.earray.ea); + + if (H5EA_close(idx_info->storage->u.earray.ea) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); + idx_info->storage->u.earray.ea = NULL; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_close() */ + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_is_open + * + * Purpose: Query if the index is opened or not + * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open) +{ + FUNC_ENTER_PACKAGE_NOERR + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); + assert(is_open); + + *is_open = H5D_EARRAY_IDX_IS_OPEN(idx_info); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_idx_is_open() */ + /*------------------------------------------------------------------------- * Function: H5D__earray_idx_is_space_alloc * * Purpose: Query if space is allocated for index method * - * Return: Non-negative on success/Negative on failure + * Return: true/false * *------------------------------------------------------------------------- */ @@ -953,7 +1017,7 @@ H5D__earray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata assert(udata); /* Check if the extensible array is open yet */ - if (NULL == idx_info->storage->u.earray.ea) { + if 
(!H5D_EARRAY_IDX_IS_OPEN(idx_info)) { /* Open the extensible array in file */ if (H5D__earray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); @@ -1021,7 +1085,7 @@ H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda assert(udata); /* Check if the extensible array is open yet */ - if (NULL == idx_info->storage->u.earray.ea) { + if (!H5D_EARRAY_IDX_IS_OPEN(idx_info)) { /* Open the extensible array in file */ if (H5D__earray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); @@ -1086,6 +1150,51 @@ H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda FUNC_LEAVE_NOAPI(ret_value) } /* H5D__earray_idx_get_addr() */ +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_load_metadata(const H5D_chk_idx_info_t *idx_info) +{ + H5D_chunk_ud_t chunk_ud; + hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + /* + * After opening a dataset that uses an extensible array, + * the extensible array header index block will generally + * not be read in until an element is looked up for the + * first time. Since there isn't currently a good way of + * controlling that explicitly, perform a fake lookup of + * a chunk to cause it to be read in or created if it + * doesn't exist yet. + */ + chunk_ud.common.layout = idx_info->layout; + chunk_ud.common.storage = idx_info->storage; + chunk_ud.common.scaled = scaled; + + chunk_ud.chunk_block.offset = HADDR_UNDEF; + chunk_ud.chunk_block.length = 0; + chunk_ud.filter_mask = 0; + chunk_ud.new_unfilt_chunk = false; + chunk_ud.idx_hint = UINT_MAX; + + if (H5D__earray_idx_get_addr(idx_info, &chunk_ud) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't load extensible array header index block"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__earray_idx_load_metadata() */ + /*------------------------------------------------------------------------- * Function: H5D__earray_idx_resize * @@ -1195,10 +1304,6 @@ H5D__earray_idx_iterate_cb(hsize_t H5_ATTR_UNUSED idx, const void *_elmt, void * * Purpose: Iterate over the chunks in an index, making a callback * for each one. * - * Note: This implementation is slow, particularly for sparse - * extensible arrays, replace it with call to H5EA_iterate() - * when that's available. 
- * * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- @@ -1223,10 +1328,10 @@ H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t assert(chunk_udata); /* Check if the extensible array is open yet */ - if (NULL == idx_info->storage->u.earray.ea) { + if (!H5D_EARRAY_IDX_IS_OPEN(idx_info)) { /* Open the extensible array in file */ if (H5D__earray_idx_open(idx_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, H5_ITER_ERROR, "can't open extensible array"); } else /* Patch the top level file pointer contained in ea if needed */ H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f); @@ -1236,7 +1341,7 @@ H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t /* Get the extensible array statistics */ if (H5EA_get_stats(ea, &ea_stat) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query extensible array statistics"); + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, H5_ITER_ERROR, "can't query extensible array statistics"); if (ea_stat.stored.max_idx_set > 0) { H5D_earray_it_ud_t udata; /* User data for iteration callback */ @@ -1291,7 +1396,7 @@ H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t assert(udata); /* Check if the extensible array is open yet */ - if (NULL == idx_info->storage->u.earray.ea) { + if (!H5D_EARRAY_IDX_IS_OPEN(idx_info)) { /* Open the extensible array in file */ if (H5D__earray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); @@ -1444,9 +1549,8 @@ H5D__earray_idx_delete(const H5D_chk_idx_info_t *idx_info) HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk addresses"); /* Close extensible array */ - if (H5EA_close(idx_info->storage->u.earray.ea) < 0) + if (H5D__earray_idx_close(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); - idx_info->storage->u.earray.ea = NULL; /* Set up the context user data */ ctx_udata.f = idx_info->f; @@ -1494,7 +1598,7 @@ H5D__earray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk assert(!H5_addr_defined(idx_info_dst->storage->idx_addr)); /* Check if the source extensible array is open yet */ - if (NULL == idx_info_src->storage->u.earray.ea) + if (!H5D_EARRAY_IDX_IS_OPEN(idx_info_src)) /* Open the extensible array in file */ if (H5D__earray_idx_open(idx_info_src) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array"); @@ -1593,9 +1697,8 @@ H5D__earray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) done: if (idx_info->storage->u.earray.ea) { - if (H5EA_close(idx_info->storage->u.earray.ea) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); - idx_info->storage->u.earray.ea = NULL; + if (H5D__earray_idx_close(idx_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -1673,16 +1776,14 @@ H5D__earray_idx_dest(const H5D_chk_idx_info_t *idx_info) assert(idx_info->storage); /* Check if the extensible array is open */ - if (idx_info->storage->u.earray.ea) { - + if (H5D_EARRAY_IDX_IS_OPEN(idx_info)) { /* Patch the top level file pointer contained in ea if needed */ if (H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, 
FAIL, "can't patch earray file pointer"); /* Close extensible array */ - if (H5EA_close(idx_info->storage->u.earray.ea) < 0) + if (H5D__earray_idx_close(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array"); - idx_info->storage->u.earray.ea = NULL; } /* end if */ done: diff --git a/src/H5Dfarray.c b/src/H5Dfarray.c index 450d466755c..8d06de47b02 100644 --- a/src/H5Dfarray.c +++ b/src/H5Dfarray.c @@ -37,6 +37,8 @@ /* Local Macros */ /****************/ +#define H5D_FARRAY_IDX_IS_OPEN(idx_info) (NULL != (idx_info)->storage->u.btree2.bt2) + /* Value to fill unset array elements with */ #define H5D_FARRAY_FILL HADDR_UNDEF #define H5D_FARRAY_FILT_FILL \ @@ -105,10 +107,14 @@ static herr_t H5D__farray_filt_debug(FILE *stream, int indent, int fwidth, hsize static herr_t H5D__farray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, haddr_t dset_ohdr_addr); static herr_t H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__farray_idx_close(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__farray_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open); static bool H5D__farray_idx_is_space_alloc(const H5O_storage_chunk_t *storage); static herr_t H5D__farray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, const H5D_t *dset); static herr_t H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +static herr_t H5D__farray_idx_load_metadata(const H5D_chk_idx_info_t *idx_info); static int H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); static herr_t H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata); @@ -123,7 +129,6 @@ static herr_t H5D__farray_idx_dump(const H5O_storage_chunk_t *storage, FILE *str static herr_t H5D__farray_idx_dest(const H5D_chk_idx_info_t *idx_info); /* Generic fixed array routines */ -static herr_t H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info); static herr_t H5D__farray_idx_depend(const H5D_chk_idx_info_t *idx_info); /*********************/ @@ -135,9 +140,13 @@ const H5D_chunk_ops_t H5D_COPS_FARRAY[1] = {{ true, /* Fixed array indices support SWMR access */ H5D__farray_idx_init, /* init */ H5D__farray_idx_create, /* create */ + H5D__farray_idx_open, /* open */ + H5D__farray_idx_close, /* close */ + H5D__farray_idx_is_open, /* is_open */ H5D__farray_idx_is_space_alloc, /* is_space_alloc */ H5D__farray_idx_insert, /* insert */ H5D__farray_idx_get_addr, /* get_addr */ + H5D__farray_idx_load_metadata, /* load_metadata */ NULL, /* resize */ H5D__farray_idx_iterate, /* iterate */ H5D__farray_idx_remove, /* remove */ @@ -726,55 +735,6 @@ H5D__farray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t H5_ATTR_UNU FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__farray_idx_init() */ -/*------------------------------------------------------------------------- - * Function: H5D__farray_idx_open - * - * Purpose: Opens an existing fixed array and initializes - * the layout struct with information about the storage. 
- * - * Return: Success: non-negative - * Failure: negative - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info) -{ - H5D_farray_ctx_ud_t udata; /* User data for fixed array open call */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check args */ - assert(idx_info); - assert(idx_info->f); - assert(idx_info->pline); - assert(idx_info->layout); - assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->idx_type); - assert(idx_info->storage); - assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); - assert(H5_addr_defined(idx_info->storage->idx_addr)); - assert(NULL == idx_info->storage->u.farray.fa); - - /* Set up the user data */ - udata.f = idx_info->f; - udata.chunk_size = idx_info->layout->size; - - /* Open the fixed array for the chunk index */ - if (NULL == - (idx_info->storage->u.farray.fa = H5FA_open(idx_info->f, idx_info->storage->idx_addr, &udata))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open fixed array"); - - /* Check for SWMR writes to the file */ - if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) - if (H5D__farray_idx_depend(idx_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, - "unable to create flush dependency on object header"); - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__farray_idx_open() */ - /*------------------------------------------------------------------------- * Function: H5D__farray_idx_create * @@ -853,12 +813,115 @@ H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__farray_idx_create() */ +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_open + * + * Purpose: Opens an existing fixed array and initializes + * the layout struct with information about the storage. + * + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info) +{ + H5D_farray_ctx_ud_t udata; /* User data for fixed array open call */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check args */ + assert(idx_info); + assert(idx_info->f); + assert(idx_info->pline); + assert(idx_info->layout); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->idx_type); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); + assert(H5_addr_defined(idx_info->storage->idx_addr)); + assert(NULL == idx_info->storage->u.farray.fa); + + /* Set up the user data */ + udata.f = idx_info->f; + udata.chunk_size = idx_info->layout->size; + + /* Open the fixed array for the chunk index */ + if (NULL == + (idx_info->storage->u.farray.fa = H5FA_open(idx_info->f, idx_info->storage->idx_addr, &udata))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open fixed array"); + + /* Check for SWMR writes to the file */ + if (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if (H5D__farray_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, + "unable to create flush dependency on object header"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_close + * + * Purpose: Closes an existing fixed array. 
+ * + * Return: Success: non-negative + * Failure: negative + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_close(const H5D_chk_idx_info_t *idx_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); + assert(idx_info->storage->u.farray.fa); + + if (H5FA_close(idx_info->storage->u.farray.fa) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); + idx_info->storage->u.farray.fa = NULL; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_close() */ + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_is_open + * + * Purpose: Query if the index is opened or not + * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open) +{ + FUNC_ENTER_PACKAGE_NOERR + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); + assert(is_open); + + *is_open = H5D_FARRAY_IDX_IS_OPEN(idx_info); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_idx_is_open() */ + /*------------------------------------------------------------------------- * Function: H5D__farray_idx_is_space_alloc * * Purpose: Query if space is allocated for index method * - * Return: Non-negative on success/Negative on failure + * Return: true/false * *------------------------------------------------------------------------- */ @@ -901,7 +964,7 @@ H5D__farray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata assert(udata); /* Check if the fixed array is open yet */ - if (NULL == idx_info->storage->u.farray.fa) { + if (!H5D_FARRAY_IDX_IS_OPEN(idx_info)) { /* Open the fixed array in file */ if (H5D__farray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); @@ -969,7 +1032,7 @@ H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda assert(udata); /* Check if the fixed array is open yet */ - if (NULL == idx_info->storage->u.farray.fa) { + if (!H5D_FARRAY_IDX_IS_OPEN(idx_info)) { /* Open the fixed array in file */ if (H5D__farray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); @@ -1016,6 +1079,50 @@ H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda FUNC_LEAVE_NOAPI(ret_value) } /* H5D__farray_idx_get_addr() */ +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_load_metadata(const H5D_chk_idx_info_t *idx_info) +{ + H5D_chunk_ud_t chunk_ud; + hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + /* + * After opening a dataset that uses a fixed array, the + * fixed array data block will generally not be read in + * until an element is looked up for the first time. 
Since + * there isn't currently a good way of controlling that + * explicitly, perform a fake lookup of a chunk to cause + * it to be read in. + */ + chunk_ud.common.layout = idx_info->layout; + chunk_ud.common.storage = idx_info->storage; + chunk_ud.common.scaled = scaled; + + chunk_ud.chunk_block.offset = HADDR_UNDEF; + chunk_ud.chunk_block.length = 0; + chunk_ud.filter_mask = 0; + chunk_ud.new_unfilt_chunk = false; + chunk_ud.idx_hint = UINT_MAX; + + if (H5D__farray_idx_get_addr(idx_info, &chunk_ud) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't load fixed array data block"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__farray_idx_load_metadata() */ + /*------------------------------------------------------------------------- * Function: H5D__farray_idx_iterate_cb * @@ -1102,7 +1209,7 @@ H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t assert(chunk_udata); /* Check if the fixed array is open yet */ - if (NULL == idx_info->storage->u.farray.fa) { + if (!H5D_FARRAY_IDX_IS_OPEN(idx_info)) { /* Open the fixed array in file */ if (H5D__farray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); @@ -1171,7 +1278,7 @@ H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t assert(udata); /* Check if the fixed array is open yet */ - if (NULL == idx_info->storage->u.farray.fa) { + if (!H5D_FARRAY_IDX_IS_OPEN(idx_info)) { /* Open the fixed array in file */ if (H5D__farray_idx_open(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); @@ -1302,9 +1409,8 @@ H5D__farray_idx_delete(const H5D_chk_idx_info_t *idx_info) HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk addresses"); /* Close fixed array */ - if (H5FA_close(idx_info->storage->u.farray.fa) < 0) + if (H5D__farray_idx_close(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); - idx_info->storage->u.farray.fa = NULL; /* Set up the user data */ ctx_udata.f = idx_info->f; @@ -1352,10 +1458,11 @@ H5D__farray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, const H5D_chk assert(!H5_addr_defined(idx_info_dst->storage->idx_addr)); /* Check if the source fixed array is open yet */ - if (NULL == idx_info_src->storage->u.farray.fa) + if (!H5D_FARRAY_IDX_IS_OPEN(idx_info_src)) { /* Open the fixed array in file */ if (H5D__farray_idx_open(idx_info_src) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array"); + } /* Set copied metadata tag */ H5_BEGIN_TAG(H5AC__COPIED_TAG) @@ -1450,9 +1557,8 @@ H5D__farray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) done: if (idx_info->storage->u.farray.fa) { - if (H5FA_close(idx_info->storage->u.farray.fa) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); - idx_info->storage->u.farray.fa = NULL; + if (H5D__farray_idx_close(idx_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -1528,16 +1634,14 @@ H5D__farray_idx_dest(const H5D_chk_idx_info_t *idx_info) assert(idx_info->storage); /* Check if the fixed array is open */ - if (idx_info->storage->u.farray.fa) { - + if (H5D_FARRAY_IDX_IS_OPEN(idx_info)) { /* Patch the top level file pointer contained in fa if needed */ if (H5FA_patch_file(idx_info->storage->u.farray.fa, idx_info->f) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch fixed array file pointer"); /* Close 
fixed array */ - if (H5FA_close(idx_info->storage->u.farray.fa) < 0) + if (H5D__farray_idx_close(idx_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array"); - idx_info->storage->u.farray.fa = NULL; } /* end if */ done: diff --git a/src/H5Dio.c b/src/H5Dio.c index 2134ce1c79a..611518d3fc0 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -143,17 +143,17 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) } /* end if */ #endif /*H5_HAVE_PARALLEL*/ - /* iterate over all dsets and construct I/O information necessary to do I/O */ + /* Iterate over all dsets and construct I/O information necessary to do I/O */ for (i = 0; i < count; i++) { haddr_t prev_tag = HADDR_UNDEF; - /* check args */ + /* Check args */ if (NULL == dset_info[i].dset) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset"); if (NULL == dset_info[i].dset->oloc.file) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file"); - /* set metadata tagging with dset oheader addr */ + /* Set metadata tagging with dset oheader addr */ H5AC_tag(dset_info[i].dset->oloc.addr, &prev_tag); /* Set up datatype info for operation */ @@ -173,10 +173,7 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) if (dset_info[i].nelmts > 0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no output buffer"); - /* If the buffer is nil, and 0 element is selected, make a fake buffer. - * This is for some MPI package like ChaMPIon on NCSA's tungsten which - * doesn't support this feature. - */ + /* If the buffer is nil, and 0 element is selected, make a fake buffer. */ dset_info[i].buf.vp = &fake_char; } /* end if */ @@ -191,8 +188,8 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) * rapidly changing coordinates match up), but the I/O code still has * difficulties with the notion. * - * To solve this, we check to see if H5S_select_shape_same() returns true, - * and if the ranks of the mem and file spaces are different. If they are, + * To solve this, check if H5S_select_shape_same() returns true + * and the ranks of the mem and file spaces are different. If so, * construct a new mem space that is equivalent to the old mem space, and * use that instead. 
* @@ -347,7 +344,7 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) if (dset_info[i].layout_ops.mdio_init) { haddr_t prev_tag = HADDR_UNDEF; - /* set metadata tagging with dset oheader addr */ + /* Set metadata tagging with dset oheader addr */ H5AC_tag(dset_info[i].dset->oloc.addr, &prev_tag); /* Make second phase IO init call */ @@ -396,7 +393,7 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) if (dset_info[i].skip_io) continue; - /* set metadata tagging with dset oheader addr */ + /* Set metadata tagging with dset object header addr */ H5AC_tag(dset_info[i].dset->oloc.addr, &prev_tag); /* Invoke correct "high level" I/O routine */ @@ -553,18 +550,18 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) if (NULL == (store = (H5D_storage_t *)H5MM_malloc(count * sizeof(H5D_storage_t)))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dset storage info array buffer"); - /* iterate over all dsets and construct I/O information */ + /* Iterate over all dsets and construct I/O information */ for (i = 0; i < count; i++) { bool should_alloc_space = false; /* Whether or not to initialize dataset's storage */ haddr_t prev_tag = HADDR_UNDEF; - /* check args */ + /* Check args */ if (NULL == dset_info[i].dset) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset"); if (NULL == dset_info[i].dset->oloc.file) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file"); - /* set metadata tagging with dset oheader addr */ + /* Set metadata tagging with dset oheader addr */ H5AC_tag(dset_info[i].dset->oloc.addr, &prev_tag); /* All filters in the DCPL must have encoding enabled. */ @@ -620,10 +617,7 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) if (dset_info[i].nelmts > 0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no input buffer"); - /* If the buffer is nil, and 0 element is selected, make a fake buffer. - * This is for some MPI package like ChaMPIon on NCSA's tungsten which - * doesn't support this feature. - */ + /* If the buffer is nil, and 0 element is selected, make a fake buffer. */ dset_info[i].buf.cvp = &fake_char; } /* end if */ @@ -633,18 +627,18 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) if (!(H5S_has_extent(dset_info[i].mem_space))) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set"); - /* H5S_select_shape_same() has been modified to accept topologically - * identical selections with different rank as having the same shape - * (if the most rapidly changing coordinates match up), but the I/O - * code still has difficulties with the notion. + /* H5S_select_shape_same() has been modified to accept topologically identical + * selections with different rank as having the same shape (if the most + * rapidly changing coordinates match up), but the I/O code still has + * difficulties with the notion. * - * To solve this, we check to see if H5S_select_shape_same() returns - * true, and if the ranks of the mem and file spaces are different. - * If they are, construct a new mem space that is equivalent to the - * old mem space, and use that instead. + * To solve this, check if H5S_select_shape_same() returns true + * and the ranks of the mem and file spaces are different. If so, + * construct a new mem space that is equivalent to the old mem space, and + * use that instead. * - * Note that in general, this requires us to touch up the memory buffer - * as well. + * Note that in general, this requires us to touch up the memory buffer as + * well. 
*/ if (dset_info[i].nelmts > 0 && true == H5S_SELECT_SHAPE_SAME(dset_info[i].mem_space, dset_info[i].file_space) && @@ -818,11 +812,11 @@ H5D__write(size_t count, H5D_dset_io_info_t *dset_info) "unable to allocate array of selected pieces"); } - /* loop with serial & single-dset write IO path */ + /* Loop with serial & single-dset write IO path */ for (i = 0; i < count; i++) { assert(!dset_info[i].skip_io); - /* set metadata tagging with dset oheader addr */ + /* Set metadata tagging with dset oheader addr */ H5AC_tag(dset_info->dset->oloc.addr, &prev_tag); /* Invoke correct "high level" I/O routine */ @@ -936,7 +930,7 @@ H5D__ioinfo_init(size_t count, H5D_io_op_type_t op_type, H5D_dset_io_info_t *dse FUNC_ENTER_PACKAGE_NOERR - /* check args */ + /* Check args */ assert(count > 0); assert(dset_info); assert(dset_info[0].dset->oloc.file); @@ -1057,7 +1051,7 @@ H5D__typeinfo_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, hid_t FUNC_ENTER_PACKAGE - /* check args */ + /* Check args */ assert(io_info); assert(dset_info); @@ -1151,7 +1145,7 @@ H5D__typeinfo_init(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_info, hid_t /*------------------------------------------------------------------------- * Function: H5D__typeinfo_init_phase2 * - * Purpose: Continue initializing type info for all datasets after + * Purpose: Continues initializing type info for all datasets after * calculating the max type size across all datasets, and * before final determination of collective/independent in * H5D__ioinfo_adjust(). Currently just checks to see if @@ -1169,7 +1163,7 @@ H5D__typeinfo_init_phase2(H5D_io_info_t *io_info) FUNC_ENTER_PACKAGE - /* check args */ + /* Check args */ assert(io_info); /* If selection I/O mode is default (auto), enable it here if the VFD supports it (it will be turned off @@ -1238,7 +1232,7 @@ H5D__typeinfo_init_phase2(H5D_io_info_t *io_info) /*------------------------------------------------------------------------- * Function: H5D__ioinfo_adjust * - * Purpose: Adjust operation's I/O info for any parallel I/O, also + * Purpose: Adjusts operation's I/O info for any parallel I/O, also * handle decision on selection I/O even in serial case * * Return: Non-negative on success/Negative on failure @@ -1253,10 +1247,10 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info) FUNC_ENTER_PACKAGE - /* check args */ + /* Check args */ assert(io_info); - /* check the first dset, should exist either single or multi dset cases */ + /* Check the first dset, should exist either single or multi dset cases */ assert(io_info->dsets_info[0].dset); dset0 = io_info->dsets_info[0].dset; assert(dset0->oloc.file); @@ -1317,7 +1311,7 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info) if (io_info->dsets_info[i].dset->shared->dcpl_cache.pline.nused > 0) break; - /* If the above loop didn't complete at least one dataset has a filter */ + /* If the above loop didn't complete, at least one dataset has a filter */ if (i < io_info->count) { int comm_size = 0; @@ -1363,9 +1357,9 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info) /*------------------------------------------------------------------------- * Function: H5D__typeinfo_init_phase3 * - * Purpose: Finish initializing type info for all datasets after - * calculating the max type size across all datasets. And - * after final collective/independent determination in + * Purpose: Finishes initializing type info for all datasets after + * calculating the max type size across all datasets and + * final collective/independent determination in * H5D__ioinfo_adjust(). 
* * Return: Non-negative on success/Negative on failure diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 0ef6542fcdb..b6976e6a067 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -911,7 +911,8 @@ H5D__mpio_get_no_coll_cause_strings(char *local_cause, size_t local_cause_len, c case H5D_MPIO_COLLECTIVE: case H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE: default: - assert(0 && "invalid no collective cause reason"); + cause_str = "invalid or unknown no collective cause reason"; + assert(0 && "invalid or unknown no collective cause reason"); break; } @@ -3024,6 +3025,26 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as * metadata reads are enabled. */ if (H5F_get_coll_metadata_reads(di->dset->oloc.file)) { +#ifndef NDEBUG + { + H5D_chk_idx_info_t idx_info; + bool index_is_open; + + idx_info.f = di->dset->oloc.file; + idx_info.pline = &di->dset->shared->dcpl_cache.pline; + idx_info.layout = &di->dset->shared->layout.u.chunk; + idx_info.storage = &di->dset->shared->layout.storage.u.chunk; + + /* + * The dataset's chunk index should be open at this point. + * Otherwise, we will end up reading it in independently, + * which may not be desired. + */ + idx_info.storage->ops->is_open(&idx_info, &index_is_open); + assert(index_is_open); + } +#endif + md_reads_file_flag = H5P_FORCE_FALSE; md_reads_context_flag = false; H5F_set_coll_metadata_reads(di->dset->oloc.file, &md_reads_file_flag, &md_reads_context_flag); @@ -3446,26 +3467,6 @@ H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const chunk_node = H5SL_next(chunk_node); } } - else if (H5F_get_coll_metadata_reads(di[dset_idx].dset->oloc.file)) { - hsize_t scaled[H5O_LAYOUT_NDIMS] = {0}; - - /* - * If this rank has no selection in the dataset and collective - * metadata reads are enabled, do a fake lookup of a chunk to - * ensure that this rank has the chunk index opened. Otherwise, - * only the ranks that had a selection will have opened the - * chunk index and they will have done so independently. Therefore, - * when ranks with no selection participate in later collective - * metadata reads, they will try to open the chunk index collectively - * and issues will occur since other ranks won't participate. - * - * In the future, we should consider having a chunk index "open" - * callback that can be used to ensure collectivity between ranks - * in a more natural way, but this hack should suffice for now. - */ - if (H5D__chunk_lookup(di[dset_idx].dset, scaled, &udata) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address"); - } /* Reset metadata tagging */ H5AC_tag(prev_tag, NULL); @@ -3782,6 +3783,10 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun counts_disps_array = H5MM_xfree(counts_disps_array); } + /* No useful work to do - exit */ + if (coll_chunk_list_num_entries == 0) + HGOTO_DONE(SUCCEED); + /* * Phase 2 - Involved ranks now redistribute any shared chunks to new * owners as necessary. diff --git a/src/H5Dnone.c b/src/H5Dnone.c index 472a2214dc7..d4eb9188840 100644 --- a/src/H5Dnone.c +++ b/src/H5Dnone.c @@ -14,9 +14,9 @@ * Purpose: Implicit (Non Index) chunked I/O functions. * * This is used when the dataset is: - * - extendible but with fixed max. dims - * - with early allocation - * - without filter + * - extendible but with fixed max. dims + * - with early allocation + * - without filter * * The chunk coordinate is mapped into the actual disk addresses * for the chunk without indexing. 
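/*
 * [Editor's illustrative sketch - not part of the patch above.] The "none"
 * (implicit) chunk index described in the file header comment computes a
 * chunk's file address arithmetically instead of consulting an on-disk index
 * structure. The standalone helper below only approximates that scheme under
 * the stated assumptions (early allocation, equally sized chunks stored
 * contiguously from a base address); implicit_chunk_addr() and all of its
 * parameters are hypothetical names, not HDF5 API.
 */
#include <stdint.h>

/* Map per-dimension (scaled) chunk coordinates to a linear, row-major chunk
 * index, then offset that index from the base address of the chunk storage. */
static uint64_t
implicit_chunk_addr(uint64_t base_addr, uint32_t chunk_size, unsigned ndims,
                    const uint64_t *scaled, const uint64_t *nchunks)
{
    uint64_t idx = 0;

    for (unsigned u = 0; u < ndims; u++)
        idx = idx * nchunks[u] + scaled[u]; /* Horner-style linearization */

    /* Every chunk was pre-allocated, so no index lookup is needed */
    return base_addr + (uint64_t)chunk_size * idx;
}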
@@ -31,12 +31,12 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5Dpkg.h" /* Datasets */ -#include "H5Eprivate.h" /* Error handling */ +#include "H5private.h" /* Generic Functions */ +#include "H5Dpkg.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ #include "H5FLprivate.h" /* Free Lists */ -#include "H5MFprivate.h" /* File space management */ -#include "H5VMprivate.h" /* Vector functions */ +#include "H5MFprivate.h" /* File space management */ +#include "H5VMprivate.h" /* Vector functions */ /****************/ /* Local Macros */ @@ -52,8 +52,12 @@ /* Non Index chunking I/O ops */ static herr_t H5D__none_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__none_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__none_idx_close(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__none_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open); static bool H5D__none_idx_is_space_alloc(const H5O_storage_chunk_t *storage); static herr_t H5D__none_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +static herr_t H5D__none_idx_load_metadata(const H5D_chk_idx_info_t *idx_info); static int H5D__none_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); static herr_t H5D__none_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata); @@ -73,9 +77,13 @@ const H5D_chunk_ops_t H5D_COPS_NONE[1] = {{ false, /* Non-indexed chunking don't current support SWMR access */ NULL, /* init */ H5D__none_idx_create, /* create */ + H5D__none_idx_open, /* open */ + H5D__none_idx_close, /* close */ + H5D__none_idx_is_open, /* is_open */ H5D__none_idx_is_space_alloc, /* is_space_alloc */ NULL, /* insert */ H5D__none_idx_get_addr, /* get_addr */ + H5D__none_idx_load_metadata, /* load_metadata */ NULL, /* resize */ H5D__none_idx_iterate, /* iterate */ H5D__none_idx_remove, /* remove */ @@ -97,12 +105,12 @@ const H5D_chunk_ops_t H5D_COPS_NONE[1] = {{ /*******************/ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_create + * Function: H5D__none_idx_create * - * Purpose: Allocate memory for the maximum # of chunks in the dataset. + * Purpose: Allocate memory for the maximum # of chunks in the dataset. * - * Return: Non-negative on success - * Negative on failure. + * Return: Non-negative on success + * Negative on failure. * *------------------------------------------------------------------------- */ @@ -141,11 +149,73 @@ H5D__none_idx_create(const H5D_chk_idx_info_t *idx_info) } /* end H5D__none_idx_create() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_is_space_alloc + * Function: H5D__none_idx_open * - * Purpose: Query if space for the dataset chunks is allocated + * Purpose: Opens an existing "none" index. Currently a no-op. * - * Return: Non-negative on success/Negative on failure + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__none_idx_open(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__none_idx_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__none_idx_close + * + * Purpose: Closes an existing "none" index. Currently a no-op. 
+ * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__none_idx_close(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__none_idx_close() */ + +/*------------------------------------------------------------------------- + * Function: H5D__none_idx_is_open + * + * Purpose: Query if the index is opened or not + * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__none_idx_is_open(const H5D_chk_idx_info_t H5_ATTR_NDEBUG_UNUSED *idx_info, bool *is_open) +{ + FUNC_ENTER_PACKAGE_NOERR + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_NONE == idx_info->storage->idx_type); + assert(is_open); + + *is_open = true; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__none_idx_is_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__none_idx_is_space_alloc + * + * Purpose: Query if space for the dataset chunks is allocated + * + * Return: true/false * *------------------------------------------------------------------------- */ @@ -161,12 +231,12 @@ H5D__none_idx_is_space_alloc(const H5O_storage_chunk_t *storage) } /* end H5D__none_idx_is_space_alloc() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_get_addr + * Function: H5D__none_idx_get_addr * - * Purpose: Get the file address of a chunk. - * Save the retrieved information in the udata supplied. + * Purpose: Get the file address of a chunk. + * Save the retrieved information in the udata supplied. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -200,12 +270,32 @@ H5D__none_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata } /* H5D__none_idx_get_addr() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_iterate + * Function: H5D__none_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. Currently a no-op. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__none_idx_load_metadata(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__none_idx_load_metadata() */ + +/*------------------------------------------------------------------------- + * Function: H5D__none_idx_iterate * - * Purpose: Iterate over the chunks in an index, making a callback + * Purpose: Iterate over the chunks in an index, making a callback * for each one. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -275,13 +365,13 @@ H5D__none_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t ch } /* end H5D__none_idx_iterate() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_remove + * Function: H5D__none_idx_remove * - * Purpose: Remove chunk from index. + * Purpose: Remove chunk from index. 
* - * Note: Chunks can't be removed (or added) to datasets with this - * form of index - all the space for all the chunks is always - * allocated in the file. + * Note: Chunks can't be removed (or added) to datasets with this + * form of index - all the space for all the chunks is always + * allocated in the file. * * Return: Non-negative on success/Negative on failure * @@ -299,12 +389,12 @@ H5D__none_idx_remove(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, } /* H5D__none_idx_remove() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_delete + * Function: H5D__none_idx_delete * - * Purpose: Delete raw data storage for entire dataset (i.e. all chunks) + * Purpose: Delete raw data storage for entire dataset (i.e. all chunks) * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -337,11 +427,11 @@ H5D__none_idx_delete(const H5D_chk_idx_info_t *idx_info) } /* end H5D__none_idx_delete() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_copy_setup + * Function: H5D__none_idx_copy_setup * - * Purpose: Set up any necessary information for copying chunks + * Purpose: Set up any necessary information for copying chunks * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -407,11 +497,11 @@ H5D__none_idx_size(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, hsize_t *i } /* end H5D__none_idx_size() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_reset + * Function: H5D__none_idx_reset * - * Purpose: Reset indexing information. + * Purpose: Reset indexing information. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -431,11 +521,11 @@ H5D__none_idx_reset(H5O_storage_chunk_t *storage, bool reset_addr) } /* end H5D__none_idx_reset() */ /*------------------------------------------------------------------------- - * Function: H5D__none_idx_dump + * Function: H5D__none_idx_dump * - * Purpose: Dump + * Purpose: Dump * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 82fec0ea1ff..a3695ae8544 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -393,10 +393,14 @@ typedef int (*H5D_chunk_cb_func_t)(const H5D_chunk_rec_t *chunk_rec, void *udata typedef herr_t (*H5D_chunk_init_func_t)(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, haddr_t dset_ohdr_addr); typedef herr_t (*H5D_chunk_create_func_t)(const H5D_chk_idx_info_t *idx_info); +typedef herr_t (*H5D_chunk_open_func_t)(const H5D_chk_idx_info_t *idx_info); +typedef herr_t (*H5D_chunk_close_func_t)(const H5D_chk_idx_info_t *idx_info); +typedef herr_t (*H5D_chunk_is_open_func_t)(const H5D_chk_idx_info_t *idx_info, bool *is_open); typedef bool (*H5D_chunk_is_space_alloc_func_t)(const H5O_storage_chunk_t *storage); typedef herr_t (*H5D_chunk_insert_func_t)(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, const H5D_t *dset); typedef herr_t (*H5D_chunk_get_addr_func_t)(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +typedef herr_t (*H5D_chunk_load_metadata_func_t)(const H5D_chk_idx_info_t *idx_info); typedef herr_t (*H5D_chunk_resize_func_t)(H5O_layout_chunk_t *layout); typedef int (*H5D_chunk_iterate_func_t)(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); @@ -413,13 +417,18 @@ typedef herr_t (*H5D_chunk_dest_func_t)(const H5D_chk_idx_info_t *idx_info); /* Typedef for grouping chunk I/O routines */ typedef struct H5D_chunk_ops_t { - bool can_swim; /* Flag to indicate that the index supports SWMR access */ - H5D_chunk_init_func_t init; /* Routine to initialize indexing information in memory */ - H5D_chunk_create_func_t create; /* Routine to create chunk index */ + bool can_swim; /* Flag to indicate that the index supports SWMR access */ + H5D_chunk_init_func_t init; /* Routine to initialize indexing information in memory */ + H5D_chunk_create_func_t create; /* Routine to create chunk index */ + H5D_chunk_open_func_t open; /* Routine to open chunk index */ + H5D_chunk_close_func_t close; /* Routine to close chunk index */ + H5D_chunk_is_open_func_t is_open; /* Query routine to determine if index is open or not */ H5D_chunk_is_space_alloc_func_t - is_space_alloc; /* Query routine to determine if storage/index is allocated */ - H5D_chunk_insert_func_t insert; /* Routine to insert a chunk into an index */ - H5D_chunk_get_addr_func_t get_addr; /* Routine to retrieve address of chunk in file */ + is_space_alloc; /* Query routine to determine if storage/index is allocated */ + H5D_chunk_insert_func_t insert; /* Routine to insert a chunk into an index */ + H5D_chunk_get_addr_func_t get_addr; /* Routine to retrieve address of chunk in file */ + H5D_chunk_load_metadata_func_t + load_metadata; /* Routine to load additional chunk index metadata, such as fixed array data blocks */ H5D_chunk_resize_func_t resize; /* Routine to update 
chunk index info after resizing dataset */ H5D_chunk_iterate_func_t iterate; /* Routine to iterate over chunks */ H5D_chunk_remove_func_t remove; /* Routine to remove a chunk from an index */ diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h index 118c6cd4224..fa8b0770359 100644 --- a/src/H5Dprivate.h +++ b/src/H5Dprivate.h @@ -78,15 +78,16 @@ #define H5D_MPIO_LOCAL_NO_COLLECTIVE_CAUSE_NAME \ "local_no_collective_cause" /* cause of broken collective I/O in each process */ #define H5D_MPIO_GLOBAL_NO_COLLECTIVE_CAUSE_NAME \ - "global_no_collective_cause" /* cause of broken collective I/O in all processes */ -#define H5D_XFER_EDC_NAME "err_detect" /* EDC */ -#define H5D_XFER_FILTER_CB_NAME "filter_cb" /* Filter callback function */ -#define H5D_XFER_CONV_CB_NAME "type_conv_cb" /* Type conversion callback function */ -#define H5D_XFER_XFORM_NAME "data_transform" /* Data transform */ -#define H5D_XFER_DSET_IO_SEL_NAME "dset_io_selection" /* Dataset I/O selection */ -#define H5D_XFER_SELECTION_IO_MODE_NAME "selection_io_mode" /* Selection I/O mode */ -#define H5D_XFER_NO_SELECTION_IO_CAUSE_NAME "no_selection_io_cause" /* Cause for no selection I/O */ -#define H5D_XFER_MODIFY_WRITE_BUF_NAME "modify_write_buf" /* Modify write buffers */ + "global_no_collective_cause" /* cause of broken collective I/O in all processes */ +#define H5D_XFER_EDC_NAME "err_detect" /* EDC */ +#define H5D_XFER_FILTER_CB_NAME "filter_cb" /* Filter callback function */ +#define H5D_XFER_CONV_CB_NAME "type_conv_cb" /* Type conversion callback function */ +#define H5D_XFER_XFORM_NAME "data_transform" /* Data transform */ +#define H5D_XFER_DSET_IO_SEL_NAME "dset_io_selection" /* Dataset I/O selection */ +#define H5D_XFER_SELECTION_IO_MODE_NAME "selection_io_mode" /* Selection I/O mode */ +#define H5D_XFER_NO_SELECTION_IO_CAUSE_NAME "no_selection_io_cause" /* Cause for no selection I/O */ +#define H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME "actual_selection_io_mode" /* Actual selection I/O mode */ +#define H5D_XFER_MODIFY_WRITE_BUF_NAME "modify_write_buf" /* Modify write buffers */ #ifdef H5_HAVE_INSTRUMENTED_LIBRARY /* Collective chunk instrumentation properties */ #define H5D_XFER_COLL_CHUNK_LINK_HARD_NAME "coll_chunk_link_hard" diff --git a/src/H5Dsingle.c b/src/H5Dsingle.c index 9cb18d35278..dd9f2353d7b 100644 --- a/src/H5Dsingle.c +++ b/src/H5Dsingle.c @@ -27,12 +27,12 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5Dpkg.h" /* Datasets */ -#include "H5Eprivate.h" /* Error handling */ +#include "H5private.h" /* Generic Functions */ +#include "H5Dpkg.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ #include "H5FLprivate.h" /* Free Lists */ -#include "H5MFprivate.h" /* File space management */ -#include "H5VMprivate.h" /* Vector functions */ +#include "H5MFprivate.h" /* File space management */ +#include "H5VMprivate.h" /* Vector functions */ /****************/ /* Local Macros */ @@ -50,10 +50,14 @@ static herr_t H5D__single_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, haddr_t dset_ohdr_addr); static herr_t H5D__single_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__single_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__single_idx_close(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__single_idx_is_open(const H5D_chk_idx_info_t *idx_info, bool *is_open); static bool H5D__single_idx_is_space_alloc(const H5O_storage_chunk_t *storage); static herr_t H5D__single_idx_insert(const H5D_chk_idx_info_t 
*idx_info, H5D_chunk_ud_t *udata, const H5D_t *dset); static herr_t H5D__single_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata); +static herr_t H5D__single_idx_load_metadata(const H5D_chk_idx_info_t *idx_info); static int H5D__single_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); static herr_t H5D__single_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata); @@ -73,9 +77,13 @@ const H5D_chunk_ops_t H5D_COPS_SINGLE[1] = {{ false, /* Single Chunk indexing doesn't current support SWMR access */ H5D__single_idx_init, /* init */ H5D__single_idx_create, /* create */ + H5D__single_idx_open, /* open */ + H5D__single_idx_close, /* close */ + H5D__single_idx_is_open, /* is_open */ H5D__single_idx_is_space_alloc, /* is_space_alloc */ H5D__single_idx_insert, /* insert */ H5D__single_idx_get_addr, /* get_addr */ + H5D__single_idx_load_metadata, /* load_metadata */ NULL, /* resize */ H5D__single_idx_iterate, /* iterate */ H5D__single_idx_remove, /* remove */ @@ -133,12 +141,12 @@ H5D__single_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t H5_ATTR_UNU } /* end H5D__single_idx_init() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_create + * Function: H5D__single_idx_create * - * Purpose: Set up Single Chunk Index: filtered or non-filtered + * Purpose: Set up Single Chunk Index: filtered or non-filtered * - * Return: Non-negative on success - * Negative on failure. + * Return: Non-negative on success + * Negative on failure. * *------------------------------------------------------------------------- */ @@ -166,11 +174,73 @@ H5D__single_idx_create(const H5D_chk_idx_info_t *idx_info) } /* end H5D__single_idx_create() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_is_space_alloc + * Function: H5D__single_idx_open * - * Purpose: Query if space is allocated for the single chunk + * Purpose: Opens an existing "single" index. Currently a no-op. * - * Return: Non-negative on success/Negative on failure + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__single_idx_open(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__single_idx_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__single_idx_close + * + * Purpose: Closes an existing "single" index. Currently a no-op. 
+ * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__single_idx_close(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__single_idx_close() */ + +/*------------------------------------------------------------------------- + * Function: H5D__single_idx_is_open + * + * Purpose: Query if the index is opened or not + * + * Return: SUCCEED (can't fail) + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__single_idx_is_open(const H5D_chk_idx_info_t H5_ATTR_NDEBUG_UNUSED *idx_info, bool *is_open) +{ + FUNC_ENTER_PACKAGE_NOERR + + assert(idx_info); + assert(idx_info->storage); + assert(H5D_CHUNK_IDX_SINGLE == idx_info->storage->idx_type); + assert(is_open); + + *is_open = true; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__single_idx_is_open() */ + +/*------------------------------------------------------------------------- + * Function: H5D__single_idx_is_space_alloc + * + * Purpose: Query if space is allocated for the single chunk + * + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -186,11 +256,11 @@ H5D__single_idx_is_space_alloc(const H5O_storage_chunk_t *storage) } /* end H5D__single_idx_is_space_alloc() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_insert + * Function: H5D__single_idx_insert * - * Purpose: Allocate space for the single chunk + * Purpose: Allocate space for the single chunk * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -231,12 +301,12 @@ H5D__single_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata } /* H5D__single_idx_insert() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_get_addr + * Function: H5D__single_idx_get_addr * - * Purpose: Get the file address of a chunk. - * Save the retrieved information in the udata supplied. + * Purpose: Get the file address of a chunk. + * Save the retrieved information in the udata supplied. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -271,11 +341,31 @@ H5D__single_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda } /* H5D__single_idx_get_addr() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_iterate + * Function: H5D__single_idx_load_metadata + * + * Purpose: Load additional chunk index metadata beyond the chunk index + * itself. Currently a no-op. 
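H5D_COPS_SINGLE gains the same open/close/is_open stubs as the "none" index. For context, the library is steered toward the Single Chunk index when a dataset's chunk dimensions match its fixed dataset dimensions and the newer file format is allowed; the sketch below illustrates that setup using only public API calls (the file and dataset names are placeholders).

#include "hdf5.h"

int main(void)
{
    hsize_t dims[2]  = {64, 64};   /* dataset extent */
    hsize_t chunk[2] = {64, 64};   /* one chunk covering the whole dataset */

    /* Allow the latest file format so version-2 chunk indexes can be selected */
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);

    hid_t file  = H5Fcreate("single_chunk.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    hid_t space = H5Screate_simple(2, dims, NULL);

    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk);

    hid_t dset = H5Dcreate2(file, "data", H5T_NATIVE_INT, space,
                            H5P_DEFAULT, dcpl, H5P_DEFAULT);

    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    H5Fclose(file);
    H5Pclose(fapl);
    return 0;
}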
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__single_idx_load_metadata(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info) +{ + FUNC_ENTER_PACKAGE_NOERR + + /* NO OP */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__single_idx_load_metadata() */ + +/*------------------------------------------------------------------------- + * Function: H5D__single_idx_iterate * - * Purpose: Make callback for the single chunk + * Purpose: Make callback for the single chunk * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -318,11 +408,11 @@ H5D__single_idx_iterate(const H5D_chk_idx_info_t *idx_info, H5D_chunk_cb_func_t } /* end H5D__single_idx_iterate() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_remove + * Function: H5D__single_idx_remove * - * Purpose: Remove the single chunk + * Purpose: Remove the single chunk * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -357,12 +447,13 @@ H5D__single_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t } /* H5D__single_idx_remove() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_delete + * Function: H5D__single_idx_delete * - * Purpose: Delete raw data storage for entire dataset (i.e. the only chunk) + * Purpose: Delete raw data storage for entire dataset (i.e. the only + * chunk) * - * Return: Success: Non-negative - * Failure: negative + * Return: Success: Non-negative + * Failure: negative * *------------------------------------------------------------------------- */ @@ -389,11 +480,12 @@ H5D__single_idx_delete(const H5D_chk_idx_info_t *idx_info) } /* end H5D__single_idx_delete() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_copy_setup + * Function: H5D__single_idx_copy_setup * - * Purpose: Set up any necessary information for copying the single chunk + * Purpose: Set up any necessary information for copying the single + * chunk * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -457,11 +549,11 @@ H5D__single_idx_size(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, hsize_t } /* end H5D__single_idx_size() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_reset + * Function: H5D__single_idx_reset * - * Purpose: Reset indexing information. + * Purpose: Reset indexing information. 
* - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ @@ -481,11 +573,11 @@ H5D__single_idx_reset(H5O_storage_chunk_t *storage, bool reset_addr) } /* end H5D__single_idx_reset() */ /*------------------------------------------------------------------------- - * Function: H5D__single_idx_dump + * Function: H5D__single_idx_dump * - * Purpose: Dump the address of the single chunk + * Purpose: Dump the address of the single chunk * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * *------------------------------------------------------------------------- */ diff --git a/src/H5FDcore.c b/src/H5FDcore.c index 08b714dfa34..1aa8d4bf19c 100644 --- a/src/H5FDcore.c +++ b/src/H5FDcore.c @@ -754,8 +754,10 @@ H5FD__core_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr ((file_image_info.buffer == NULL) && (file_image_info.size == 0))); memset(&sb, 0, sizeof(sb)); if ((file_image_info.buffer != NULL) && !(H5F_ACC_CREAT & flags)) { - if (HDopen(name, o_flags, H5_POSIX_CREATE_MODE_RW) >= 0) + if ((fd = HDopen(name, o_flags, H5_POSIX_CREATE_MODE_RW)) >= 0) { + HDclose(fd); HGOTO_ERROR(H5E_FILE, H5E_FILEEXISTS, NULL, "file already exists"); + } /* If backing store is requested, create and stat the file * Note: We are forcing the O_CREAT flag here, even though this is diff --git a/src/H5FDcore.h b/src/H5FDcore.h index 235d6fcaaf7..cd45c8d6061 100644 --- a/src/H5FDcore.h +++ b/src/H5FDcore.h @@ -11,17 +11,25 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the core driver. + * Purpose: The public header file for the core virtual file driver (VFD) */ #ifndef H5FDcore_H #define H5FDcore_H -#define H5FD_CORE (H5FDperform_init(H5FD_core_init)) +/** Initializer for the core VFD */ +#define H5FD_CORE (H5FDperform_init(H5FD_core_init)) + +/** Identifier for the core VFD */ #define H5FD_CORE_VALUE H5_VFD_CORE #ifdef __cplusplus extern "C" { #endif + +/** @private + * + * \brief Private initializer for the core VFD + */ H5_DLL hid_t H5FD_core_init(void); /** diff --git a/src/H5FDdirect.h b/src/H5FDdirect.h index e47ac37cdea..1e60bb08119 100644 --- a/src/H5FDdirect.h +++ b/src/H5FDdirect.h @@ -11,30 +11,47 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the direct driver. + * Purpose: The public header file for the direct virtual file driver (VFD) */ #ifndef H5FDdirect_H #define H5FDdirect_H #ifdef H5_HAVE_DIRECT -#define H5FD_DIRECT (H5FDperform_init(H5FD_direct_init)) + +/** Initializer for the direct VFD */ +#define H5FD_DIRECT (H5FDperform_init(H5FD_direct_init)) + +/** Identifier for the direct VFD */ #define H5FD_DIRECT_VALUE H5_VFD_DIRECT + #else + +/** Initializer for the direct VFD (disabled) */ #define H5FD_DIRECT (H5I_INVALID_HID) + +/** Identifier for the direct VFD (disabled) */ #define H5FD_DIRECT_VALUE H5_VFD_INVALID + #endif /* H5_HAVE_DIRECT */ +/** Default value for memory boundary */ +#define MBOUNDARY_DEF 4096 + +/** Default value for file block size */ +#define FBSIZE_DEF 4096 + +/** Default value for maximum copy buffer size */ +#define CBSIZE_DEF (16 * 1024 * 1024) + #ifdef H5_HAVE_DIRECT #ifdef __cplusplus extern "C" { #endif -/* Default values for memory boundary, file block size, and maximal copy buffer size. 
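The H5FDcore.c hunk above now captures the descriptor returned by HDopen() and closes it before raising the "file already exists" error, instead of leaking it. A minimal POSIX sketch of the same existence-check pattern (the helper name is hypothetical, not the driver code itself):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Returns 1 if 'name' exists (descriptor closed again), 0 if not, -1 on error */
static int file_exists(const char *name)
{
    int fd = open(name, O_RDWR);
    if (fd >= 0) {
        close(fd);   /* don't leak the descriptor used only for the check */
        return 1;
    }
    return (errno == ENOENT) ? 0 : -1;
}

int main(void)
{
    printf("exists: %d\n", file_exists("example.h5"));
    return 0;
}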
- * Application can set these values through the function H5Pset_fapl_direct. */ -#define MBOUNDARY_DEF 4096 -#define FBSIZE_DEF 4096 -#define CBSIZE_DEF 16 * 1024 * 1024 - +/** @private + * + * \brief Private initializer for the direct VFD + */ H5_DLL hid_t H5FD_direct_init(void); /** diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c index 94805a23a8b..3f43ae9cc5b 100644 --- a/src/H5FDfamily.c +++ b/src/H5FDfamily.c @@ -1427,7 +1427,8 @@ H5FD__family_delete(const char *filename, hid_t fapl_id) FUNC_ENTER_PACKAGE - assert(filename); + if (!filename) + HGOTO_ERROR(H5E_VFL, H5E_BADVALUE, FAIL, "invalid filename pointer"); /* Get the driver info (for the member fapl) * The family_open call accepts H5P_DEFAULT, so we'll accept that here, too. diff --git a/src/H5FDfamily.h b/src/H5FDfamily.h index 76020f0a268..32e885c422d 100644 --- a/src/H5FDfamily.h +++ b/src/H5FDfamily.h @@ -11,18 +11,25 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the family driver. + * Purpose: The public header file for the family virtual file driver (VFD) */ #ifndef H5FDfamily_H #define H5FDfamily_H -#define H5FD_FAMILY (H5FDperform_init(H5FD_family_init)) +/** Initializer for the family VFD */ +#define H5FD_FAMILY (H5FDperform_init(H5FD_family_init)) + +/** Identifier for the family VFD */ #define H5FD_FAMILY_VALUE H5_VFD_FAMILY #ifdef __cplusplus extern "C" { #endif +/** @private + * + * \brief Private initializer for the family VFD + */ H5_DLL hid_t H5FD_family_init(void); /** diff --git a/src/H5FDhdfs.h b/src/H5FDhdfs.h index c8c2c37f1b5..e5f7173fce2 100644 --- a/src/H5FDhdfs.h +++ b/src/H5FDhdfs.h @@ -11,18 +11,29 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the hdfs driver. 
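H5FD__family_delete() now reports an "invalid filename pointer" error instead of asserting on a NULL filename, so a bad argument fails cleanly in production builds. A hedged sketch of the public-API path that reaches this callback, deleting a family file through H5Fdelete() (the member size and printf-style name pattern are illustrative choices):

#include "hdf5.h"

int main(void)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    /* 1 MiB members; member files use the printf-style "%d" name pattern */
    H5Pset_fapl_family(fapl, (hsize_t)(1024 * 1024), H5P_DEFAULT);

    hid_t file = H5Fcreate("family-%d.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    H5Fclose(file);

    /* Routes to the family VFD's delete callback (H5FD__family_delete) */
    H5Fdelete("family-%d.h5", fapl);

    H5Pclose(fapl);
    return 0;
}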
+ * Purpose: The public header file for the Hadoop Distributed File System + * (hdfs) virtual file driver (VFD) */ #ifndef H5FDhdfs_H #define H5FDhdfs_H #ifdef H5_HAVE_LIBHDFS -#define H5FD_HDFS (H5FDperform_init(H5FD_hdfs_init)) + +/** Initializer for the hdfs VFD */ +#define H5FD_HDFS (H5FDperform_init(H5FD_hdfs_init)) + +/** Identifier for the hdfs VFD */ #define H5FD_HDFS_VALUE H5_VFD_HDFS -#else /* H5_HAVE_LIBHDFS */ + +#else + +/** Initializer for the hdfs VFD (disabled) */ #define H5FD_HDFS (H5I_INVALID_HID) + +/** Identifier for the hdfs VFD (disabled) */ #define H5FD_HDFS_VALUE H5_VFD_INVALID + #endif /* H5_HAVE_LIBHDFS */ #ifdef H5_HAVE_LIBHDFS @@ -104,6 +115,10 @@ typedef struct H5FD_hdfs_fapl_t { int32_t stream_buffer_size; } H5FD_hdfs_fapl_t; +/** @private + * + * \brief Private initializer for the hdfs VFD + */ H5_DLL hid_t H5FD_hdfs_init(void); /** diff --git a/src/H5FDint.c b/src/H5FDint.c index 082b6021332..5d3a80212ef 100644 --- a/src/H5FDint.c +++ b/src/H5FDint.c @@ -212,8 +212,9 @@ H5FD_locate_signature(H5FD_t *file, haddr_t *sig_addr) herr_t H5FD_read(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /*out*/) { - hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */ - herr_t ret_value = SUCCEED; /* Return value */ + hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */ + uint32_t actual_selection_io_mode; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -257,6 +258,13 @@ H5FD_read(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /* if ((file->cls->read)(file, type, dxpl_id, addr + file->base_addr, size, buf) < 0) HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read request failed"); + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5FD_read() */ @@ -273,9 +281,10 @@ H5FD_read(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /* herr_t H5FD_write(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, const void *buf) { - hid_t dxpl_id; /* DXPL for operation */ - haddr_t eoa = HADDR_UNDEF; /* EOA for file */ - herr_t ret_value = SUCCEED; /* Return value */ + hid_t dxpl_id; /* DXPL for operation */ + haddr_t eoa = HADDR_UNDEF; /* EOA for file */ + uint32_t actual_selection_io_mode; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -308,6 +317,13 @@ H5FD_write(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, const void if ((file->cls->write)(file, type, dxpl_id, addr + file->base_addr, size, buf) < 0) HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write request failed"); + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5FD_write() */ @@ -360,6 +376,7 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs size_t size = 0; H5FD_mem_t type = H5FD_MEM_DEFAULT; hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */ + hbool_t is_raw = FALSE; /* Does this include raw data */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -441,6 +458,10 @@ H5FD_read_vector(H5FD_t 
*file, uint32_t count, H5FD_mem_t types[], haddr_t addrs else { type = types[i]; + + /* Check for raw data operation */ + if (type == H5FD_MEM_DRAW) + is_raw = TRUE; } } @@ -455,13 +476,27 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs (unsigned long long)eoa); } } + else + /* We must still check if this is a raw data read */ + for (i = 0; i < count && types[i] != H5FD_MEM_NOLIST; i++) + if (types[i] == H5FD_MEM_DRAW) { + is_raw = true; + break; + } /* if the underlying VFD supports vector read, make the call */ if (file->cls->read_vector) { - if ((file->cls->read_vector)(file, dxpl_id, count, types, addrs, sizes, bufs) < 0) - HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read vector request failed"); + + /* Set actual selection I/O mode, if this is a raw data operation */ + if (is_raw) { + uint32_t actual_selection_io_mode; + + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_VECTOR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else { @@ -471,6 +506,7 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs extend_sizes = false; extend_types = false; uint32_t no_selection_io_cause; + uint32_t actual_selection_io_mode; for (i = 0; i < count; i++) { @@ -512,6 +548,13 @@ H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs H5CX_get_no_selection_io_cause(&no_selection_io_cause); no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; H5CX_set_no_selection_io_cause(no_selection_io_cause); + + /* Set actual selection I/O mode, if this is a raw data operation */ + if (is_raw) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } done: @@ -575,6 +618,7 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr H5FD_mem_t type = H5FD_MEM_DEFAULT; hid_t dxpl_id; /* DXPL for operation */ haddr_t eoa = HADDR_UNDEF; /* EOA for file */ + hbool_t is_raw = FALSE; /* Does this include raw data */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -646,6 +690,10 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr else { type = types[i]; + + /* Check for raw data operation */ + if (type == H5FD_MEM_DRAW) + is_raw = true; } } @@ -663,10 +711,17 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr /* if the underlying VFD supports vector write, make the call */ if (file->cls->write_vector) { - if ((file->cls->write_vector)(file, dxpl_id, count, types, addrs, sizes, bufs) < 0) - HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write vector request failed"); + + /* Set actual selection I/O mode, if this is a raw data operation */ + if (is_raw) { + uint32_t actual_selection_io_mode; + + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_VECTOR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else { /* otherwise, implement the vector write as a sequence of regular @@ -675,6 +730,7 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr extend_sizes = false; extend_types = false; uint32_t no_selection_io_cause; + uint32_t actual_selection_io_mode; for (i = 0; i < count; i++) { @@ -716,6 +772,13 @@ H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addr 
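These H5FDint.c hunks record which I/O path raw-data transfers actually took by OR-ing H5D_SCALAR_IO, H5D_VECTOR_IO, or H5D_SELECTION_IO into the API context's actual-selection-I/O-mode value. Assuming the matching public query H5Pget_actual_selection_io_mode(), which reads the "actual_selection_io_mode" transfer property named above, an application could inspect the result roughly like this (file and dataset names are placeholders):

#include "hdf5.h"
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    hid_t dset = H5Dopen2(file, "data", H5P_DEFAULT);
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    int   buf[100];   /* assumes the dataset holds at most 100 ints */

    if (H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf) >= 0) {
        uint32_t mode = 0;

        /* Filled in by the VFD layer during the read, per the hunks above */
        H5Pget_actual_selection_io_mode(dxpl, &mode);

        if (mode & H5D_SELECTION_IO)
            printf("selection I/O was used\n");
        if (mode & H5D_VECTOR_IO)
            printf("vector I/O was used\n");
        if (mode & H5D_SCALAR_IO)
            printf("scalar I/O was used\n");
    }

    H5Pclose(dxpl);
    H5Dclose(dset);
    H5Fclose(file);
    return 0;
}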
H5CX_get_no_selection_io_cause(&no_selection_io_cause); no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; H5CX_set_no_selection_io_cause(no_selection_io_cause); + + /* Set actual selection I/O mode, if this is a raw data operation */ + if (is_raw) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } done: @@ -996,18 +1059,35 @@ H5FD__read_selection_translate(uint32_t skip_vector_cb, H5FD_t *file, H5FD_mem_t /* Issue vector read call if appropriate */ if (use_vector) { + uint32_t actual_selection_io_mode; + H5_CHECK_OVERFLOW(vec_arr_nused, size_t, uint32_t); if ((file->cls->read_vector)(file, dxpl_id, (uint32_t)vec_arr_nused, types, addrs, sizes, vec_bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read vector request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW && count > 0) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_VECTOR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } - else { + else if (count > 0) { uint32_t no_selection_io_cause; + uint32_t actual_selection_io_mode; /* Add H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB to no selection I/O cause */ H5CX_get_no_selection_io_cause(&no_selection_io_cause); no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; H5CX_set_no_selection_io_cause(no_selection_io_cause); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } done: @@ -1161,6 +1241,8 @@ H5FD_read_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_s /* if the underlying VFD supports selection read, make the call */ if (file->cls->read_selection) { + uint32_t actual_selection_io_mode; + /* Allocate array of space IDs if necessary, otherwise use local * buffers */ if (count > sizeof(mem_space_ids_local) / sizeof(mem_space_ids_local[0])) { @@ -1186,6 +1268,13 @@ H5FD_read_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_s if ((file->cls->read_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, element_sizes, bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read selection request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SELECTION_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else /* Otherwise, implement the selection read as a sequence of regular @@ -1337,9 +1426,18 @@ H5FD_read_selection_id(uint32_t skip_cb, H5FD_t *file, H5FD_mem_t type, uint32_t /* if the underlying VFD supports selection read, make the call */ if (!skip_selection_cb && file->cls->read_selection) { + uint32_t actual_selection_io_mode; + if ((file->cls->read_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, element_sizes, bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read selection request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= 
H5D_SELECTION_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else { /* Otherwise, implement the selection read as a sequence of regular @@ -1653,18 +1751,35 @@ H5FD__write_selection_translate(uint32_t skip_vector_cb, H5FD_t *file, H5FD_mem_ /* Issue vector write call if appropriate */ if (use_vector) { + uint32_t actual_selection_io_mode; + H5_CHECK_OVERFLOW(vec_arr_nused, size_t, uint32_t); if ((file->cls->write_vector)(file, dxpl_id, (uint32_t)vec_arr_nused, types, addrs, sizes, vec_bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write vector request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW && count > 0) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_VECTOR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } - else { + else if (count > 0) { uint32_t no_selection_io_cause; + uint32_t actual_selection_io_mode; /* Add H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB to no selection I/O cause */ H5CX_get_no_selection_io_cause(&no_selection_io_cause); no_selection_io_cause |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; H5CX_set_no_selection_io_cause(no_selection_io_cause); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SCALAR_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } done: @@ -1810,6 +1925,8 @@ H5FD_write_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_ /* if the underlying VFD supports selection write, make the call */ if (file->cls->write_selection) { + uint32_t actual_selection_io_mode; + /* Allocate array of space IDs if necessary, otherwise use local * buffers */ if (count > sizeof(mem_space_ids_local) / sizeof(mem_space_ids_local[0])) { @@ -1835,6 +1952,13 @@ H5FD_write_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_ if ((file->cls->write_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, element_sizes, bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write selection request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SELECTION_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else /* Otherwise, implement the selection write as a sequence of regular @@ -1979,9 +2103,18 @@ H5FD_write_selection_id(uint32_t skip_cb, H5FD_t *file, H5FD_mem_t type, uint32_ /* if the underlying VFD supports selection write, make the call */ if (!skip_selection_cb && file->cls->write_selection) { + uint32_t actual_selection_io_mode; + if ((file->cls->write_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, element_sizes, bufs) < 0) HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write selection request failed"); + + /* Set actual selection I/O, if this is a raw data operation */ + if (type == H5FD_MEM_DRAW) { + H5CX_get_actual_selection_io_mode(&actual_selection_io_mode); + actual_selection_io_mode |= H5D_SELECTION_IO; + H5CX_set_actual_selection_io_mode(actual_selection_io_mode); + } } else { /* Otherwise, implement the selection write as a sequence of regular diff --git a/src/H5FDlog.c b/src/H5FDlog.c index 8d43dc836a6..e35a6a65c4a 100644 --- a/src/H5FDlog.c +++ 
b/src/H5FDlog.c @@ -545,7 +545,7 @@ H5FD__log_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr) #endif /* H5_HAVE_WIN32_API */ /* Retain a copy of the name used to open the file, for possible error reporting */ - strncpy(file->filename, name, sizeof(file->filename)); + strncpy(file->filename, name, sizeof(file->filename) - 1); file->filename[sizeof(file->filename) - 1] = '\0'; /* Get the flags for logging */ diff --git a/src/H5FDlog.h b/src/H5FDlog.h index ae4e2d05e72..b4af2050a62 100644 --- a/src/H5FDlog.h +++ b/src/H5FDlog.h @@ -11,12 +11,15 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the log driver. + * Purpose: The public header file for the log virtual file driver (VFD) */ #ifndef H5FDlog_H #define H5FDlog_H -#define H5FD_LOG (H5FDperform_init(H5FD_log_init)) +/** Initializer for the log VFD */ +#define H5FD_LOG (H5FDperform_init(H5FD_log_init)) + +/** Identifier for the log VFD */ #define H5FD_LOG_VALUE H5_VFD_LOG /* Flags for H5Pset_fapl_log() */ @@ -62,6 +65,10 @@ extern "C" { #endif +/** @private + * + * \brief Private initializer for the log VFD + */ H5_DLL hid_t H5FD_log_init(void); /** diff --git a/src/H5FDmirror.h b/src/H5FDmirror.h index b196b2b1adc..6c98e1a8a6f 100644 --- a/src/H5FDmirror.h +++ b/src/H5FDmirror.h @@ -11,7 +11,7 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: Public, shared definitions for Mirror VFD & remote Writer. + * Purpose: The public header file for the mirror virtual file driver (VFD) */ #ifndef H5FDmirror_H @@ -19,7 +19,10 @@ #ifdef H5_HAVE_MIRROR_VFD -#define H5FD_MIRROR (H5FDperform_init(H5FD_mirror_init)) +/** Initializer for the mirror VFD */ +#define H5FD_MIRROR (H5FDperform_init(H5FD_mirror_init)) + +/** Identifier for the mirror VFD */ #define H5FD_MIRROR_VALUE H5_VFD_MIRROR #ifdef __cplusplus @@ -62,6 +65,10 @@ typedef struct H5FD_mirror_fapl_t { char remote_ip[H5FD_MIRROR_MAX_IP_LEN + 1]; } H5FD_mirror_fapl_t; +/** @private + * + * \brief Private initializer for the mirror VFD + */ H5_DLL hid_t H5FD_mirror_init(void); /** diff --git a/src/H5FDmpi.c b/src/H5FDmpi.c index 127740efe3b..f247c3478f8 100644 --- a/src/H5FDmpi.c +++ b/src/H5FDmpi.c @@ -104,13 +104,12 @@ H5FD_mpi_get_size(H5FD_t *file) } /* end H5FD_mpi_get_size() */ /*------------------------------------------------------------------------- - * Function: H5FD_mpi_get_comm + * Function: H5FD_mpi_get_comm * - * Purpose: Retrieves the file's communicator + * Purpose: Retrieves the file's MPI_Comm communicator object * - * Return: Success: The communicator (non-negative) - * - * Failure: Negative + * Return: Success: The communicator object + * Failure: MPI_COMM_NULL * *------------------------------------------------------------------------- */ @@ -143,6 +142,45 @@ H5FD_mpi_get_comm(H5FD_t *file) FUNC_LEAVE_NOAPI(ret_value) } /* end H5FD_mpi_get_comm() */ +/*------------------------------------------------------------------------- + * Function: H5FD_mpi_get_info + * + * Purpose: Retrieves the file's MPI_Info info object + * + * Return: Success: The info object + * Failure: MPI_INFO_NULL + * + *------------------------------------------------------------------------- + */ +MPI_Info +H5FD_mpi_get_info(H5FD_t *file) +{ + const H5FD_class_t *cls; + uint64_t flags = H5FD_CTL_FAIL_IF_UNKNOWN_FLAG | H5FD_CTL_ROUTE_TO_TERMINAL_VFD_FLAG; + MPI_Info info = MPI_INFO_NULL; + void *info_ptr = (void *)(&info); + MPI_Info ret_value; + + 
FUNC_ENTER_NOAPI(MPI_INFO_NULL) + + assert(file); + cls = (const H5FD_class_t *)(file->cls); + assert(cls); + assert(cls->ctl); /* All MPI drivers must implement this */ + + /* Dispatch to driver */ + if ((cls->ctl)(file, H5FD_CTL_GET_MPI_INFO_OPCODE, flags, NULL, &info_ptr) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTGET, MPI_INFO_NULL, "driver get_info request failed"); + + if (info == MPI_INFO_NULL) + HGOTO_ERROR(H5E_VFL, H5E_CANTGET, MPI_INFO_NULL, "driver get_info request failed -- bad info object"); + + ret_value = info; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_mpi_get_info() */ + /*------------------------------------------------------------------------- * Function: H5FD_mpi_MPIOff_to_haddr * diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c index 7141550f40a..d5dd1261178 100644 --- a/src/H5FDmpio.c +++ b/src/H5FDmpio.c @@ -906,7 +906,7 @@ H5FD__mpio_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t H5_ATTR /* copy over each hint */ for (i = 0; i < nkeys; i++) { - char key[MPI_MAX_INFO_KEY], value[MPI_MAX_INFO_VAL]; + char key[MPI_MAX_INFO_KEY], value[MPI_MAX_INFO_VAL + 1]; int valuelen, flag; /* retrieve the nth hint */ @@ -916,7 +916,7 @@ H5FD__mpio_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t H5_ATTR if (MPI_SUCCESS != (mpi_code = MPI_Info_get_valuelen(info_used, key, &valuelen, &flag))) HMPI_GOTO_ERROR(NULL, "MPI_Info_get_valuelen failed", mpi_code) /* retrieve the value of nth hint */ - if (MPI_SUCCESS != (mpi_code = MPI_Info_get(info_used, key, valuelen + 1, value, &flag))) + if (MPI_SUCCESS != (mpi_code = MPI_Info_get(info_used, key, valuelen, value, &flag))) HMPI_GOTO_ERROR(NULL, "MPI_Info_get failed", mpi_code) /* copy the hint into info */ @@ -3795,6 +3795,7 @@ H5FD__mpio_delete(const char *filename, hid_t fapl_id) * At present, the supported op codes are: * * H5FD_CTL_GET_MPI_COMMUNICATOR_OPCODE + * H5FD_CTL_GET_MPI_INFO_OPCODE * H5FD_CTL_GET_MPI_RANK_OPCODE * H5FD_CTL_GET_MPI_SIZE_OPCODE * H5FD_CTL_GET_MPI_FILE_SYNC_OPCODE @@ -3827,6 +3828,12 @@ H5FD__mpio_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, const void H5_AT **((MPI_Comm **)output) = file->comm; break; + case H5FD_CTL_GET_MPI_INFO_OPCODE: + assert(output); + assert(*output); + **((MPI_Info **)output) = file->info; + break; + case H5FD_CTL_GET_MPI_RANK_OPCODE: assert(output); assert(*output); diff --git a/src/H5FDmpio.h b/src/H5FDmpio.h index 60deec2c07b..5e7ecf30353 100644 --- a/src/H5FDmpio.h +++ b/src/H5FDmpio.h @@ -11,35 +11,42 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the mpio driver. 
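H5FD_mpi_get_info() retrieves the file's MPI_Info through the generic ctl interface: the caller passes the address of a pointer to its local variable, and the driver writes through two levels of indirection (**(MPI_Info **)output = file->info). Below is a small self-contained model of that output convention, with a plain int standing in for MPI_Info so it compiles without MPI; all names are illustrative only.

#include <stdio.h>

/* Driver side: 'output' is the address of a pointer to the caller's variable */
static int fake_ctl(int op_code, void **output)
{
    (void)op_code;                 /* 9 would mirror H5FD_CTL_GET_MPI_INFO_OPCODE */
    **((int **)output) = 42;       /* write the value the caller asked for */
    return 0;
}

/* Caller side: mirrors H5FD_mpi_get_info()'s info_ptr = &info setup */
static int get_value(int *value_out)
{
    int   value     = -1;
    void *value_ptr = (void *)&value;

    if (fake_ctl(9, &value_ptr) < 0)
        return -1;

    *value_out = value;
    return 0;
}

int main(void)
{
    int v = 0;
    get_value(&v);
    printf("ctl returned %d\n", v);
    return 0;
}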
+ * Purpose: The public header file for the MPI-I/O (mpio) virtual file driver (VFD) */ #ifndef H5FDmpio_H #define H5FDmpio_H -/* Macros */ - #ifdef H5_HAVE_PARALLEL + +/** Initializer for the mpio VFD */ #define H5FD_MPIO (H5FDperform_init(H5FD_mpio_init)) + #else + +/** Initializer for the mpio VFD (disabled) */ #define H5FD_MPIO (H5I_INVALID_HID) -#endif /* H5_HAVE_PARALLEL */ + +#endif #ifdef H5_HAVE_PARALLEL -/*Turn on H5FDmpio_debug if H5F_DEBUG is on */ -#ifdef H5F_DEBUG -#ifndef H5FDmpio_DEBUG + +#if defined(H5F_DEBUG) && !defined(H5FDmpio_DEBUG) +/** Turn mpio VFD debugging on (requires H5F_DEBUG) */ #define H5FDmpio_DEBUG #endif -#endif /* Global var whose value comes from environment variable */ /* (Defined in H5FDmpio.c) */ H5_DLLVAR hbool_t H5FD_mpi_opt_types_g; -/* Function prototypes */ #ifdef __cplusplus extern "C" { #endif + +/** @private + * + * \brief Private initializer for the mpio VFD + */ H5_DLL hid_t H5FD_mpio_init(void); /** diff --git a/src/H5FDmulti.h b/src/H5FDmulti.h index a85f2dfed25..d89a3e27cce 100644 --- a/src/H5FDmulti.h +++ b/src/H5FDmulti.h @@ -11,16 +11,22 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the "multi" driver. + * Purpose: The public header file for the multi virtual file driver (VFD) */ #ifndef H5FDmulti_H #define H5FDmulti_H +/** Initializer for the multi VFD */ #define H5FD_MULTI (H5FDperform_init(H5FD_multi_init)) #ifdef __cplusplus extern "C" { #endif + +/** @private + * + * \brief Private initializer for the multi VFD + */ H5_DLL hid_t H5FD_multi_init(void); /** diff --git a/src/H5FDonion.h b/src/H5FDonion.h index 09b290e2a9e..4aaab6d3c3e 100644 --- a/src/H5FDonion.h +++ b/src/H5FDonion.h @@ -11,19 +11,18 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Onion Virtual File Driver (VFD) - * - * Purpose: The public header file for the Onion VFD. + * Purpose: The public header file for the onion virtual file driver (VFD) */ #ifndef H5FDonion_H #define H5FDonion_H -#define H5FD_ONION (H5FDperform_init(H5FD_onion_init)) +/** Initializer for the onion VFD */ +#define H5FD_ONION (H5FDperform_init(H5FD_onion_init)) + +/** Identifier for the onion VFD */ #define H5FD_ONION_VALUE H5_VFD_ONION -/** - * Current version of the onion VFD fapl info struct. 
- */ +/** Current version of the onion VFD fapl info struct */ #define H5FD_ONION_FAPL_INFO_VERSION_CURR 1 #define H5FD_ONION_FAPL_INFO_CREATE_FLAG_ENABLE_PAGE_ALIGNMENT \ @@ -114,6 +113,10 @@ typedef struct H5FD_onion_fapl_info_t { extern "C" { #endif +/** @private + * + * \brief Private initializer for the onion VFD + */ H5_DLL hid_t H5FD_onion_init(void); /** diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h index 5330077565b..2fe54a588a9 100644 --- a/src/H5FDprivate.h +++ b/src/H5FDprivate.h @@ -214,6 +214,7 @@ H5_DLL herr_t H5FD_get_mpio_atomicity(H5FD_t *file, bool *flag); H5_DLL int H5FD_mpi_get_rank(H5FD_t *file); H5_DLL int H5FD_mpi_get_size(H5FD_t *file); H5_DLL MPI_Comm H5FD_mpi_get_comm(H5FD_t *file); +H5_DLL MPI_Info H5FD_mpi_get_info(H5FD_t *file); H5_DLL herr_t H5FD_mpi_get_file_sync_required(H5FD_t *file, bool *file_sync_required); #endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h index 5f40bff6845..d8d77d6534b 100644 --- a/src/H5FDpublic.h +++ b/src/H5FDpublic.h @@ -179,6 +179,7 @@ #define H5FD_CTL_INVALID_OPCODE 0 #define H5FD_CTL_TEST_OPCODE 1 #define H5FD_CTL_GET_MPI_COMMUNICATOR_OPCODE 2 +#define H5FD_CTL_GET_MPI_INFO_OPCODE 9 #define H5FD_CTL_GET_MPI_RANK_OPCODE 3 #define H5FD_CTL_GET_MPI_SIZE_OPCODE 4 #define H5FD_CTL_MEM_ALLOC 5 diff --git a/src/H5FDros3.c b/src/H5FDros3.c index 3f3413c6d0e..c6aea0e327a 100644 --- a/src/H5FDros3.c +++ b/src/H5FDros3.c @@ -43,6 +43,9 @@ */ #define ROS3_STATS 0 +/* Max size of the cache, in bytes */ +#define ROS3_MAX_CACHE_SIZE 16777216 + /* The driver identification number, initialized at runtime */ static hid_t H5FD_ROS3_g = 0; @@ -189,6 +192,8 @@ typedef struct H5FD_ros3_t { H5FD_ros3_fapl_t fa; haddr_t eoa; s3r_t *s3r_handle; + uint8_t *cache; + size_t cache_size; #if ROS3_STATS ros3_statsbin meta[ROS3_STATS_BIN_COUNT + 1]; ros3_statsbin raw[ROS3_STATS_BIN_COUNT + 1]; @@ -1000,6 +1005,18 @@ H5FD__ros3_open(const char *url, unsigned flags, hid_t fapl_id, haddr_t maxaddr) HGOTO_ERROR(H5E_INTERNAL, H5E_UNINITIALIZED, NULL, "unable to reset file statistics"); #endif /* ROS3_STATS */ + /* Cache the initial bytes of the file */ + { + size_t filesize = H5FD_s3comms_s3r_get_filesize(file->s3r_handle); + + file->cache_size = (filesize < ROS3_MAX_CACHE_SIZE) ? 
filesize : ROS3_MAX_CACHE_SIZE; + + if (NULL == (file->cache = (uint8_t *)H5MM_calloc(file->cache_size))) + HGOTO_ERROR(H5E_VFL, H5E_NOSPACE, NULL, "unable to allocate cache memory"); + if (H5FD_s3comms_s3r_read(file->s3r_handle, 0, file->cache_size, file->cache) == FAIL) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, NULL, "unable to execute read"); + } + ret_value = (H5FD_t *)file; done: @@ -1007,8 +1024,10 @@ H5FD__ros3_open(const char *url, unsigned flags, hid_t fapl_id, haddr_t maxaddr) if (handle != NULL) if (FAIL == H5FD_s3comms_s3r_close(handle)) HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL, "unable to close s3 file handle"); - if (file != NULL) + if (file != NULL) { + H5MM_xfree(file->cache); file = H5FL_FREE(H5FD_ros3_t, file); + } curl_global_cleanup(); /* early cleanup because open failed */ } /* end if null return value (error) */ @@ -1335,6 +1354,7 @@ H5FD__ros3_close(H5FD_t H5_ATTR_UNUSED *_file) #endif /* ROS3_STATS */ /* Release the file info */ + H5MM_xfree(file->cache); file = H5FL_FREE(H5FD_ros3_t, file); done: @@ -1666,41 +1686,50 @@ H5FD__ros3_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU fprintf(stdout, "H5FD__ros3_read() called.\n"); #endif - assert(file != NULL); - assert(file->s3r_handle != NULL); - assert(buf != NULL); + assert(file); + assert(file->cache); + assert(file->s3r_handle); + assert(buf); filesize = H5FD_s3comms_s3r_get_filesize(file->s3r_handle); if ((addr > filesize) || ((addr + size) > filesize)) HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "range exceeds file address"); - if (H5FD_s3comms_s3r_read(file->s3r_handle, addr, size, buf) == FAIL) - HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "unable to execute read"); + /* Copy from the cache when accessing the first N bytes of the file. + * Saves network I/O operations when opening files. + */ + if (addr + size < file->cache_size) { + memcpy(buf, file->cache + addr, size); + } + else { + if (H5FD_s3comms_s3r_read(file->s3r_handle, addr, size, buf) == FAIL) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "unable to execute read"); #if ROS3_STATS - /* Find which "bin" this read fits in. Can be "overflow" bin. */ - for (bin_i = 0; bin_i < ROS3_STATS_BIN_COUNT; bin_i++) - if ((unsigned long long)size < ros3_stats_boundaries[bin_i]) - break; - bin = (type == H5FD_MEM_DRAW) ? &file->raw[bin_i] : &file->meta[bin_i]; + /* Find which "bin" this read fits in. Can be "overflow" bin. */ + for (bin_i = 0; bin_i < ROS3_STATS_BIN_COUNT; bin_i++) + if ((unsigned long long)size < ros3_stats_boundaries[bin_i]) + break; + bin = (type == H5FD_MEM_DRAW) ? &file->raw[bin_i] : &file->meta[bin_i]; - /* Store collected stats in appropriate bin */ - if (bin->count == 0) { - bin->min = size; - bin->max = size; - } - else { - if (size < bin->min) + /* Store collected stats in appropriate bin */ + if (bin->count == 0) { bin->min = size; - if (size > bin->max) bin->max = size; - } - bin->count++; - bin->bytes += (unsigned long long)size; + } + else { + if (size < bin->min) + bin->min = size; + if (size > bin->max) + bin->max = size; + } + bin->count++; + bin->bytes += (unsigned long long)size; #endif /* ROS3_STATS */ + } done: FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5FDros3.h b/src/H5FDros3.h index ecd26789cc8..217af2d01b6 100644 --- a/src/H5FDros3.h +++ b/src/H5FDros3.h @@ -11,20 +11,24 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Read-Only S3 Virtual File Driver (VFD) - * - * Purpose: The public header file for the ros3 driver. 
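The ros3 hunks above cache the first bytes of the object (up to ROS3_MAX_CACHE_SIZE) at open time and serve reads that fall inside that prefix from memory, saving S3 range requests for the metadata-heavy start of a file. A generic sketch of the same read-through prefix cache; the remote_read stand-in and signature bytes are for illustration only.

#include <stdio.h>
#include <string.h>

typedef struct prefix_cache_t {
    const unsigned char *buf;   /* first 'size' bytes of the remote object */
    size_t               size;
} prefix_cache_t;

/* Stand-in for a real ranged network read (e.g. an S3 range GET) */
static int remote_read(size_t addr, size_t size, void *out)
{
    (void)addr;
    memset(out, 0xFF, size);
    return 0;
}

static int cached_read(const prefix_cache_t *cache, size_t addr, size_t size, void *out)
{
    /* Request entirely inside the cached prefix: serve it from memory */
    if (addr + size <= cache->size) {
        memcpy(out, cache->buf + addr, size);
        return 0;
    }
    /* Otherwise fall back to the remote read path */
    return remote_read(addr, size, out);
}

int main(void)
{
    unsigned char  prefix[8] = {0x89, 'H', 'D', 'F', '\r', '\n', 0x1a, '\n'};
    prefix_cache_t cache     = {prefix, sizeof(prefix)};
    unsigned char  out[4];

    cached_read(&cache, 0, sizeof(out), out);   /* cache hit */
    cached_read(&cache, 6, sizeof(out), out);   /* falls through to remote_read */
    printf("done\n");
    return 0;
}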
+ * Purpose: The public header file for the read-only S3 (ros3) virtual file driver (VFD) */ #ifndef H5FDros3_H #define H5FDros3_H #ifdef H5_HAVE_ROS3_VFD -#define H5FD_ROS3 (H5FDperform_init(H5FD_ros3_init)) +/** Initializer for the ros3 VFD */ +#define H5FD_ROS3 (H5FDperform_init(H5FD_ros3_init)) + +/** Identifier for the ros3 VFD */ #define H5FD_ROS3_VALUE H5_VFD_ROS3 #else +/** Initializer for the ros3 VFD (disabled) */ #define H5FD_ROS3 (H5I_INVALID_HID) + +/** Identifier for the ros3 VFD (disabled) */ #define H5FD_ROS3_VALUE H5_VFD_INVALID -#endif /* H5_HAVE_ROS3_VFD */ +#endif #ifdef H5_HAVE_ROS3_VFD @@ -139,9 +143,9 @@ typedef struct H5FD_ros3_fapl_t { extern "C" { #endif -/** - * \brief Internal routine to initialize #H5FD_ROS3 driver. Not meant to be - * called directly by an HDF5 application. +/** @private + * + * \brief Private initializer for the ros3 VFD */ H5_DLL hid_t H5FD_ros3_init(void); diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c index 29616842c15..15accf76d33 100644 --- a/src/H5FDsec2.c +++ b/src/H5FDsec2.c @@ -368,7 +368,7 @@ H5FD__sec2_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr } /* Retain a copy of the name used to open the file, for possible error reporting */ - strncpy(file->filename, name, sizeof(file->filename)); + strncpy(file->filename, name, sizeof(file->filename) - 1); file->filename[sizeof(file->filename) - 1] = '\0'; /* Check for non-default FAPL */ diff --git a/src/H5FDsec2.h b/src/H5FDsec2.h index a2590aee968..dd0a4d8918d 100644 --- a/src/H5FDsec2.h +++ b/src/H5FDsec2.h @@ -11,18 +11,26 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the sec2 driver + * Purpose: The public header file for the POSOX I/O (sec2 - "POSIX section 2") + * virtual file driver (VFD) */ #ifndef H5FDsec2_H #define H5FDsec2_H -#define H5FD_SEC2 (H5FDperform_init(H5FD_sec2_init)) +/** Initializer for the sec2 VFD */ +#define H5FD_SEC2 (H5FDperform_init(H5FD_sec2_init)) + +/** Identifier for the sec2 VFD */ #define H5FD_SEC2_VALUE H5_VFD_SEC2 #ifdef __cplusplus extern "C" { #endif +/** @private + * + * \brief Private initializer for the sec2 VFD + */ H5_DLL hid_t H5FD_sec2_init(void); /** @@ -38,7 +46,6 @@ H5_DLL hid_t H5FD_sec2_init(void); * #H5FD_SEC2 driver. * * \since 1.4.0 - * */ H5_DLL herr_t H5Pset_fapl_sec2(hid_t fapl_id); diff --git a/src/H5FDsplitter.h b/src/H5FDsplitter.h index c8751c82349..99a471e5ce3 100644 --- a/src/H5FDsplitter.h +++ b/src/H5FDsplitter.h @@ -11,96 +11,103 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the "splitter" driver. + * Purpose: The public header file for the splitter virtual file driver (VFD) */ #ifndef H5FDsplitter_H #define H5FDsplitter_H -#define H5FD_SPLITTER (H5FDperform_init(H5FD_splitter_init)) +/** Initializer for the splitter VFD */ +#define H5FD_SPLITTER (H5FDperform_init(H5FD_splitter_init)) + +/** Identifier for the splitter VFD */ #define H5FD_SPLITTER_VALUE H5_VFD_SPLITTER -/* The version of the H5FD_splitter_vfd_config_t structure used */ +/** The version of the H5FD_splitter_vfd_config_t structure used */ #define H5FD_CURR_SPLITTER_VFD_CONFIG_VERSION 1 -/* Maximum length of a filename/path string in the Write-Only channel, +/** + * Maximum length of a filename/path string in the Write-Only channel, * including the NULL-terminator. 
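Both H5FDlog.c and H5FDsec2.c above now copy at most sizeof(buffer) - 1 bytes with strncpy() and then force a NUL terminator, so an over-long file name is truncated instead of leaving the buffer unterminated. The pattern in isolation:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char        filename[16];
    const char *name = "a_rather_long_example_name.h5";

    /* Copy at most sizeof(filename) - 1 bytes, then terminate explicitly */
    strncpy(filename, name, sizeof(filename) - 1);
    filename[sizeof(filename) - 1] = '\0';

    printf("%s\n", filename);
    return 0;
}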
*/ #define H5FD_SPLITTER_PATH_MAX 4096 -/* Semi-unique constant used to help identify structure pointers */ +/** Semi-unique constant used to help identify structure pointers */ #define H5FD_SPLITTER_MAGIC 0x2B916880 -/* ---------------------------------------------------------------------------- - * Structure: H5FD_spliiter_vfd_config_t - * - * One-stop shopping for configuring a Splitter VFD (rather than many - * parameters passed into H5Pset/get functions). - * - * magic (int32_t) - * Semi-unique number, used to sanity-check that a given pointer is - * likely (or not) to be this structure type. MUST be first. - * If magic is not H5FD_SPLITTER_MAGIC, the structure (and/or pointer to) - * must be considered invalid. - * - * version (unsigned int) - * Version number of this structure -- informs component membership. - * If not H5FD_CURR_SPLITTER_VFD_CONFIG_VERSION, the structure (and/or - * pointer to) must be considered invalid. - * - * rw_fapl_id (hid_t) - * Library-given identification number of the Read/Write channel driver - * File Access Property List. - * The driver must support read/write access. - * Must be set to H5P_DEFAULT or a valid FAPL ID. - * - * wo_fapl_id (hid_t) - * Library-given identification number of the Read/Write channel driver - * File Access Property List. - * The driver feature flags must include H5FD_FEAT_DEFAULT_VFD_COMPAITBLE. - * Must be set to H5P_DEFAULT or a valid FAPL ID. - * - * wo_file_path (char[H5FD_SPLITTER_PATH_MAX + 1]) - * String buffer for the Write-Only channel target file. - * Must be null-terminated, cannot be empty. - * - * log_file_path (char[H5FD_SPLITTER_PATH_MAX + 1]) - * String buffer for the Splitter VFD logging output. - * Must be null-terminated. - * If null, no logfile is created. - * - * ignore_wo_errors (hbool_t) - * Toggle flag for how judiciously to respond to errors on the Write-Only - * channel. - * - * ---------------------------------------------------------------------------- +//! +/** + * Configuration options for setting up the Splitter VFD */ typedef struct H5FD_splitter_vfd_config_t { - int32_t magic; - unsigned int version; - hid_t rw_fapl_id; - hid_t wo_fapl_id; - char wo_path[H5FD_SPLITTER_PATH_MAX + 1]; - char log_file_path[H5FD_SPLITTER_PATH_MAX + 1]; - hbool_t ignore_wo_errs; + int32_t magic; /**< Magic number to identify this struct. Must be \p H5FD_SPLITTER_MAGIC. */ + unsigned int version; /**< Version number of this struct. Currently must be \p + H5FD_CURR_SPLITTER_VFD_CONFIG_VERSION. */ + hid_t rw_fapl_id; /**< File-access property list for setting up the read/write channel. Can be \p + H5P_DEFAULT. */ + hid_t wo_fapl_id; /**< File-access property list for setting up the read-only channel. The selected VFD + must support the \p H5FD_FEAT_DEFAULT_VFD_COMPATIBLE flag. Can be \p H5P_DEFAULT. */ + char wo_path[H5FD_SPLITTER_PATH_MAX + 1]; /**< Path to the write-only file */ + char log_file_path[H5FD_SPLITTER_PATH_MAX + 1]; /**< Path to the log file, which will be created on HDF5 + file open (existing files will be clobbered). Can be + NULL, in which case no logging output is generated. */ + hbool_t ignore_wo_errs; /**< Whether to ignore errors on the write-only channel */ } H5FD_splitter_vfd_config_t; +//! 
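With the H5FD_splitter_vfd_config_t fields now documented above, a minimal setup for H5Pset_fapl_splitter() looks roughly like the following (the file and log paths are placeholders):

#include "hdf5.h"
#include <string.h>

int main(void)
{
    H5FD_splitter_vfd_config_t config;
    memset(&config, 0, sizeof(config));

    config.magic          = H5FD_SPLITTER_MAGIC;
    config.version        = H5FD_CURR_SPLITTER_VFD_CONFIG_VERSION;
    config.rw_fapl_id     = H5P_DEFAULT;   /* read/write channel */
    config.wo_fapl_id     = H5P_DEFAULT;   /* write-only channel */
    config.ignore_wo_errs = 0;
    strncpy(config.wo_path, "mirror_wo.h5", H5FD_SPLITTER_PATH_MAX);
    strncpy(config.log_file_path, "splitter.log", H5FD_SPLITTER_PATH_MAX);

    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_splitter(fapl, &config);

    hid_t file = H5Fcreate("main_rw.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    H5Fclose(file);
    H5Pclose(fapl);
    return 0;
}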
#ifdef __cplusplus extern "C" { #endif + +/** @private + * + * \brief Private initializer for the splitter VFD + */ H5_DLL hid_t H5FD_splitter_init(void); /** * \ingroup FAPL * - * \todo Add missing documentation + * \brief Sets the file access property list to use the splitter driver + * + * \fapl_id + * \param[in] config_ptr Configuration options for the VFD + * \returns \herr_t + * + * \details H5Pset_fapl_splitter() sets the file access property list identifier, + * \p fapl_id, to use the splitter driver. + * + * The splitter VFD echoes file manipulation (e.g. create, truncate) + * and write calls to a second, write-only file. + * + * \note The splitter VFD should not be confused with the split VFD, + * which is a simplification of the multi VFD and creates separate + * files for metadata and data. + * + * \since 1.10.7, 1.12.1 */ H5_DLL herr_t H5Pset_fapl_splitter(hid_t fapl_id, H5FD_splitter_vfd_config_t *config_ptr); /** * \ingroup FAPL * - * \todo Add missing documentation + * \brief Gets splitter driver properties from the file access property list + * + * \fapl_id + * \param[out] config_ptr Configuration options for the VFD + * \returns \herr_t + * + * \details H5Pget_fapl_splitter() retrieves the splitter driver configuration from + * the file access property list identifier \p fapl_id. + * + * The splitter VFD echoes file manipulation (e.g. create, truncate) + * and write calls to a second file. + * + * \note The splitter VFD should not be confused with the split VFD, + * which is a simplification of the multi VFD and creates separate + * files for metadata and data. + * + * \since 1.10.7, 1.12.1 */ H5_DLL herr_t H5Pget_fapl_splitter(hid_t fapl_id, H5FD_splitter_vfd_config_t *config_ptr); diff --git a/src/H5FDstdio.h b/src/H5FDstdio.h index e2e05a77d64..794fe31bf61 100644 --- a/src/H5FDstdio.h +++ b/src/H5FDstdio.h @@ -11,20 +11,26 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the C stdio driver + * Purpose: The public header file for the C stdio virtual file driver (VFD) */ #ifndef H5FDstdio_H #define H5FDstdio_H #include "H5Ipublic.h" +/** Initializer for the stdio VFD */ #define H5FD_STDIO (H5FDperform_init(H5FD_stdio_init)) #ifdef __cplusplus extern "C" { #endif +/** @private + * + * \brief Private initializer for the stdio VFD + */ H5_DLL hid_t H5FD_stdio_init(void); + /** * \ingroup FAPL * @@ -34,7 +40,11 @@ H5_DLL hid_t H5FD_stdio_init(void); * \returns \herr_t * * \details H5Pset_fapl_stdio() modifies the file access property list to use - * the standard I/O driver, H5FDstdio(). + * the stdio VFD, which uses I/O calls from stdio.h. + * + * \note This VFD was designed to be a "demo" VFD that shows how to write + * your own VFD. Most applications should not use this VFD and should instead + * use the POSIX I/O VFD (sec2).
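As the new note for H5Pset_fapl_stdio() says, the stdio VFD is a demonstration driver and sec2 is the usual choice. A short sketch of selecting the sec2 VFD explicitly on a FAPL; the output file name is a placeholder:

#include "hdf5.h"

int
main(void)
{
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file_id = H5I_INVALID_HID;

    if (fapl_id < 0)
        return 1;

    /* The default driver is already sec2 on POSIX systems; this makes it explicit.
     * For the demo stdio VFD one would call H5Pset_fapl_stdio(fapl_id) instead. */
    if (H5Pset_fapl_sec2(fapl_id) < 0)
        goto error;

    if ((file_id = H5Fcreate("sec2_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
        goto error;

    H5Fclose(file_id);
    H5Pclose(fapl_id);
    return 0;

error:
    H5Pclose(fapl_id);
    return 1;
}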
* * \since 1.4.0 * diff --git a/src/H5FDsubfiling/H5FDsubfiling.c b/src/H5FDsubfiling/H5FDsubfiling.c index a2daba0d01b..461fa16cfe7 100644 --- a/src/H5FDsubfiling/H5FDsubfiling.c +++ b/src/H5FDsubfiling/H5FDsubfiling.c @@ -2551,6 +2551,12 @@ H5FD__subfiling_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, const void **((MPI_Comm **)output) = file->ext_comm; break; + case H5FD_CTL_GET_MPI_INFO_OPCODE: + assert(output); + assert(*output); + **((MPI_Info **)output) = file->info; + break; + case H5FD_CTL_GET_MPI_RANK_OPCODE: assert(output); assert(*output); diff --git a/src/H5FDwindows.h b/src/H5FDwindows.h index 14f698580f8..673d1c93b59 100644 --- a/src/H5FDwindows.h +++ b/src/H5FDwindows.h @@ -11,11 +11,16 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: The public header file for the Windows driver + * Purpose: The public header file for the Windows virtual file driver (VFD) + * + * This VFD uses no Win32 API calls directly (though it may be + * rewritten to do so in the future). It is currently defined to + * be the sec2 VFD. */ #ifndef H5FDwindows_H #define H5FDwindows_H +/** Initializer for the Windows VFD */ #define H5FD_WINDOWS (H5FD_sec2_init()) #ifdef __cplusplus diff --git a/src/H5Fint.c b/src/H5Fint.c index 014f619d8a9..8738026d7c9 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -410,11 +410,11 @@ H5F_get_access_plist(H5F_t *f, bool app_ref) if (H5P_set(new_plist, H5F_ACS_MPI_PARAMS_COMM_NAME, &mpi_comm) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set MPI communicator"); - /* Retrieve and set MPI info object */ - if (H5P_get(old_plist, H5F_ACS_MPI_PARAMS_INFO_NAME, &mpi_info) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTGET, H5I_INVALID_HID, "can't get MPI info object"); + /* Retrieve and set MPI info */ + if (MPI_INFO_NULL == (mpi_info = H5F_mpi_get_info(f))) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, H5I_INVALID_HID, "can't get MPI info"); if (H5P_set(new_plist, H5F_ACS_MPI_PARAMS_INFO_NAME, &mpi_info) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set MPI info object"); + HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set MPI info"); } #endif /* H5_HAVE_PARALLEL */ if (H5P_set(new_plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, &(f->shared->mdc_initCacheImageCfg)) < @@ -1968,6 +1968,22 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id) HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "can't get minimum raw data fraction of page buffer"); } /* end if */ + /* Get the evict on close setting */ + if (H5P_get(a_plist, H5F_ACS_EVICT_ON_CLOSE_FLAG_NAME, &evict_on_close) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get evict on close value"); + +#ifdef H5_HAVE_PARALLEL + /* Check for evict on close in parallel (currently unsupported) */ + assert(file->shared); + if (H5F_SHARED_HAS_FEATURE(file->shared, H5FD_FEAT_HAS_MPI)) { + int mpi_size = H5F_shared_mpi_get_size(file->shared); + + if ((mpi_size > 1) && evict_on_close) + HGOTO_ERROR(H5E_FILE, H5E_UNSUPPORTED, NULL, + "evict on close is currently not supported in parallel HDF5"); + } +#endif + /* * Read or write the file superblock, depending on whether the file is * empty or not. @@ -2046,8 +2062,6 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id) * or later, verify that the access property list value matches the value * in shared file structure. 
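With the H5F_open() change above (and the matching H5Pfapl.c change later in this patch), setting evict-on-close no longer fails outright in parallel builds; the unsupported combination is instead rejected at file open time when more than one MPI rank is involved. A sketch of the application-side pattern; the helper name and file name are illustrative:

#include "hdf5.h"

/* Sketch: request evict-on-close on a FAPL and create a file with it */
static hid_t
create_with_evict_on_close(const char *filename)
{
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file_id = H5I_INVALID_HID;

    if (fapl_id < 0)
        return H5I_INVALID_HID;

    /* With this patch, the property set call succeeds even in parallel builds */
    if (H5Pset_evict_on_close(fapl_id, 1) < 0) {
        H5Pclose(fapl_id);
        return H5I_INVALID_HID;
    }

    /* In a parallel build with an MPI-IO FAPL and mpi_size > 1, this open now
     * fails with "evict on close is currently not supported in parallel HDF5" */
    file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);

    H5Pclose(fapl_id);
    return file_id;
}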
*/ - if (H5P_get(a_plist, H5F_ACS_EVICT_ON_CLOSE_FLAG_NAME, &evict_on_close) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get evict on close value"); if (shared->nrefs == 1) shared->evict_on_close = evict_on_close; else if (shared->nrefs > 1) { diff --git a/src/H5Fmpi.c b/src/H5Fmpi.c index 8a8fdc135c8..7a535e90d7d 100644 --- a/src/H5Fmpi.c +++ b/src/H5Fmpi.c @@ -97,11 +97,10 @@ H5F_mpi_get_rank(const H5F_t *f) /*------------------------------------------------------------------------- * Function: H5F_mpi_get_comm * - * Purpose: Retrieves the file's communicator + * Purpose: Retrieves the file's MPI_Comm communicator object * - * Return: Success: The communicator (non-negative) - * - * Failure: Negative + * Return: Success: The communicator object + * Failure: MPI_COMM_NULL * *------------------------------------------------------------------------- */ @@ -122,6 +121,33 @@ H5F_mpi_get_comm(const H5F_t *f) FUNC_LEAVE_NOAPI(ret_value) } /* end H5F_mpi_get_comm() */ +/*------------------------------------------------------------------------- + * Function: H5F_mpi_get_info + * + * Purpose: Retrieves the file's MPI_Info info object + * + * Return: Success: The info object + * Failure: MPI_INFO_NULL + * + *------------------------------------------------------------------------- + */ +MPI_Info +H5F_mpi_get_info(const H5F_t *f) +{ + MPI_Info ret_value = MPI_INFO_NULL; + + FUNC_ENTER_NOAPI(MPI_INFO_NULL) + + assert(f && f->shared); + + /* Dispatch to driver */ + if ((ret_value = H5FD_mpi_get_info(f->shared->lf)) == MPI_INFO_NULL) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, MPI_INFO_NULL, "driver get_info request failed"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5F_mpi_get_info() */ + /*------------------------------------------------------------------------- * Function: H5F_shared_mpi_get_size * diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index 9adbf3a0258..682e938120c 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -640,6 +640,7 @@ H5_DLL herr_t H5F_eoa_dirty(H5F_t *f); #ifdef H5_HAVE_PARALLEL H5_DLL int H5F_mpi_get_rank(const H5F_t *f); H5_DLL MPI_Comm H5F_mpi_get_comm(const H5F_t *f); +H5_DLL MPI_Info H5F_mpi_get_info(const H5F_t *f); H5_DLL int H5F_shared_mpi_get_size(const H5F_shared_t *f_sh); H5_DLL int H5F_mpi_get_size(const H5F_t *f); H5_DLL herr_t H5F_mpi_retrieve_comm(hid_t loc_id, hid_t acspl_id, MPI_Comm *mpi_comm); diff --git a/src/H5Fsfile.c b/src/H5Fsfile.c index ef80a799ca1..6cf2c809b8c 100644 --- a/src/H5Fsfile.c +++ b/src/H5Fsfile.c @@ -47,14 +47,17 @@ static H5F_sfile_node_t *H5F_sfile_head_s = NULL; *------------------------------------------------------------------------- */ void -H5F_sfile_assert_num(unsigned n) +H5F_sfile_assert_num(unsigned H5_ATTR_NDEBUG_UNUSED n) { FUNC_ENTER_NOAPI_NOINIT_NOERR + /* The only useful work this function does is asserting so when NDEBUG + * is defined it's a no-op. 
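The new H5F_mpi_get_info() lets H5F_get_access_plist() report the MPI info object actually held by the file driver. From the application side this surfaces through H5Fget_access_plist() and H5Pget_fapl_mpio(). A sketch, assuming a parallel (MPI-enabled) build and an already-open file:

#include <mpi.h>
#include "hdf5.h"

/* Sketch: read back the MPI parameters carried by an open file's FAPL */
static void
check_mpi_params(hid_t file_id)
{
    hid_t    fapl_id = H5I_INVALID_HID;
    MPI_Comm comm    = MPI_COMM_NULL;
    MPI_Info info    = MPI_INFO_NULL;

    if ((fapl_id = H5Fget_access_plist(file_id)) < 0)
        return;

    if (H5Pget_fapl_mpio(fapl_id, &comm, &info) >= 0) {
        /* ... inspect comm/info; both are duplicates owned by the caller ... */
        if (comm != MPI_COMM_NULL)
            MPI_Comm_free(&comm);
        if (info != MPI_INFO_NULL)
            MPI_Info_free(&info);
    }

    H5Pclose(fapl_id);
}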
+ */ +#ifndef NDEBUG if (n == 0) { - /* Sanity checking */ assert(H5F_sfile_head_s == NULL); - } /* end if */ + } else { unsigned count; /* Number of open shared files */ H5F_sfile_node_t *curr; /* Current shared file node */ @@ -68,11 +71,11 @@ H5F_sfile_assert_num(unsigned n) /* Advance to next shared file node */ curr = curr->next; - } /* end while */ + } - /* Sanity checking */ assert(count == n); - } /* end else */ + } +#endif FUNC_LEAVE_NOAPI_VOID } /* H5F_sfile_assert_num() */ diff --git a/src/H5M.c b/src/H5M.c index e2fd2025a9b..f59e02fa3ee 100644 --- a/src/H5M.c +++ b/src/H5M.c @@ -893,9 +893,6 @@ H5Mget_count(hid_t map_id, hsize_t *count /*out*/, hid_t dxpl_id) else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.get.get_type = H5VL_MAP_GET_COUNT; map_args.get.args.get_count.count = 0; @@ -952,9 +949,6 @@ H5M__put_api_common(hid_t map_id, hid_t key_mem_type_id, const void *key, hid_t else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.put.key_mem_type_id = key_mem_type_id; map_args.put.key = key; @@ -1087,9 +1081,6 @@ H5M__get_api_common(hid_t map_id, hid_t key_mem_type_id, const void *key, hid_t else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.get_val.key_mem_type_id = key_mem_type_id; map_args.get_val.key = key; @@ -1225,9 +1216,6 @@ H5Mexists(hid_t map_id, hid_t key_mem_type_id, const void *key, hbool_t *exists, else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.exists.key_mem_type_id = key_mem_type_id; map_args.exists.key = key; @@ -1305,9 +1293,6 @@ H5Miterate(hid_t map_id, hsize_t *idx, hid_t key_mem_type_id, H5M_iterate_t op, else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.specific.specific_type = H5VL_MAP_ITER; map_args.specific.args.iterate.loc_params.type = H5VL_OBJECT_BY_SELF; @@ -1394,9 +1379,6 @@ H5Miterate_by_name(hid_t loc_id, const char *map_name, hsize_t *idx, hid_t key_m else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.specific.specific_type = H5VL_MAP_ITER; map_args.specific.args.iterate.loc_params.type = H5VL_OBJECT_BY_NAME; @@ -1462,9 +1444,6 @@ H5Mdelete(hid_t map_id, hid_t key_mem_type_id, const void *key, hid_t dxpl_id) else if (true != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms"); - /* Set DXPL for operation */ - H5CX_set_dxpl(dxpl_id); - /* Set up VOL callback arguments */ map_args.specific.specific_type = H5VL_MAP_DELETE; map_args.specific.args.del.loc_params.type = H5VL_OBJECT_BY_SELF; diff --git a/src/H5Odeprec.c b/src/H5Odeprec.c index 6e8b34e789b..3de58185675 100644 --- 
a/src/H5Odeprec.c +++ b/src/H5Odeprec.c @@ -116,9 +116,10 @@ static herr_t H5O__iterate1_adapter(hid_t obj_id, const char *name, const H5O_info2_t *oinfo2, void *op_data) { H5O_visit1_adapter_t *shim_data = (H5O_visit1_adapter_t *)op_data; - H5O_info1_t oinfo; /* Deprecated object info struct */ - unsigned dm_fields; /* Fields for data model query */ - unsigned nat_fields; /* Fields for native query */ + H5O_info1_t oinfo; /* Deprecated object info struct */ + unsigned dm_fields; /* Fields for data model query */ + unsigned nat_fields; /* Fields for native query */ + H5VL_object_t *vol_obj; herr_t ret_value = H5_ITER_CONT; /* Return value */ FUNC_ENTER_PACKAGE @@ -158,7 +159,6 @@ H5O__iterate1_adapter(hid_t obj_id, const char *name, const H5O_info2_t *oinfo2, /* Check for retrieving native information */ nat_fields = shim_data->fields & (H5O_INFO_HDR | H5O_INFO_META_SIZE); if (nat_fields) { - H5VL_object_t *vol_obj; /* Object of obj_id */ H5VL_optional_args_t vol_cb_args; /* Arguments to VOL callback */ H5VL_native_object_optional_args_t obj_opt_args; /* Arguments for optional operation */ H5VL_loc_params_t loc_params; /* Location parameters for VOL callback */ @@ -401,7 +401,8 @@ H5Oget_info1(hid_t loc_id, H5O_info1_t *oinfo /*out*/) { H5VL_object_t *vol_obj = NULL; /* Object of loc_id */ H5VL_loc_params_t loc_params; - herr_t ret_value = SUCCEED; /* Return value */ + bool is_native_vol_obj = false; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE2("e", "ix", loc_id, oinfo); @@ -418,6 +419,15 @@ H5Oget_info1(hid_t loc_id, H5O_info1_t *oinfo /*out*/) if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_OHDR, H5E_VOL, FAIL, + "Deprecated H5Oget_info1 is only meant to be used with the native VOL connector"); + /* Retrieve the object's information */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, H5O_INFO_ALL) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't get deprecated info for object"); @@ -441,7 +451,8 @@ H5Oget_info_by_name1(hid_t loc_id, const char *name, H5O_info1_t *oinfo /*out*/, { H5VL_object_t *vol_obj = NULL; /* object of loc_id */ H5VL_loc_params_t loc_params; - herr_t ret_value = SUCCEED; /* Return value */ + bool is_native_vol_obj = false; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE4("e", "i*sxi", loc_id, name, oinfo, lapl_id); @@ -468,6 +479,15 @@ H5Oget_info_by_name1(hid_t loc_id, const char *name, H5O_info1_t *oinfo /*out*/, if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_OHDR, H5E_VOL, FAIL, + "Deprecated H5Oget_info_by_name1 is only meant to be used with the native VOL connector"); + /* Retrieve the object's information */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, H5O_INFO_ALL) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't get 
deprecated info for object"); @@ -493,7 +513,8 @@ H5Oget_info_by_idx1(hid_t loc_id, const char *group_name, H5_index_t idx_type, H { H5VL_object_t *vol_obj = NULL; /* object of loc_id */ H5VL_loc_params_t loc_params; - herr_t ret_value = SUCCEED; /* Return value */ + bool is_native_vol_obj = false; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE7("e", "i*sIiIohxi", loc_id, group_name, idx_type, order, n, oinfo, lapl_id); @@ -524,6 +545,15 @@ H5Oget_info_by_idx1(hid_t loc_id, const char *group_name, H5_index_t idx_type, H if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_OHDR, H5E_VOL, FAIL, + "Deprecated H5Oget_info_by_idx1 is only meant to be used with the native VOL connector"); + /* Retrieve the object's information */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, H5O_INFO_ALL) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't get deprecated info for object"); @@ -574,7 +604,7 @@ H5Oget_info2(hid_t loc_id, H5O_info1_t *oinfo /*out*/, unsigned fields) "can't determine if VOL object is native connector object"); if (!is_native_vol_obj) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, H5I_INVALID_HID, - "H5Oget_info2 is only meant to be used with the native VOL connector"); + "Deprecated H5Oget_info2 is only meant to be used with the native VOL connector"); /* Retrieve deprecated info struct */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, fields) < 0) @@ -637,7 +667,7 @@ H5Oget_info_by_name2(hid_t loc_id, const char *name, H5O_info1_t *oinfo /*out*/, "can't determine if VOL object is native connector object"); if (!is_native_vol_obj) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, H5I_INVALID_HID, - "H5Oget_info_by_name2 is only meant to be used with the native VOL connector"); + "Deprecated H5Oget_info_by_name2 is only meant to be used with the native VOL connector"); /* Retrieve deprecated info struct */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, fields) < 0) @@ -706,7 +736,7 @@ H5Oget_info_by_idx2(hid_t loc_id, const char *group_name, H5_index_t idx_type, H "can't determine if VOL object is native connector object"); if (!is_native_vol_obj) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, H5I_INVALID_HID, - "H5Oget_info_by_idx2 is only meant to be used with the native VOL connector"); + "Deprecated H5Oget_info_by_idx2 is only meant to be used with the native VOL connector"); /* Retrieve deprecated info struct */ if (H5O__get_info_old(vol_obj, &loc_params, oinfo, fields) < 0) @@ -753,6 +783,7 @@ H5Ovisit1(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, H5O_iterate1 H5VL_loc_params_t loc_params; /* Location parameters for object access */ H5O_visit1_adapter_t shim_data; /* Adapter for passing app callback & user data */ herr_t ret_value; /* Return value */ + bool is_native_vol_obj = false; FUNC_ENTER_API(FAIL) H5TRACE5("e", "iIiIoOi*x", obj_id, idx_type, order, op, op_data); @@ -769,6 +800,15 @@ H5Ovisit1(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, H5O_iterate1 if (NULL == (vol_obj = H5VL_vol_object(obj_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, 
&is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_OHDR, H5E_VOL, FAIL, + "Deprecated H5Ovisit1 is only meant to be used with the native VOL connector"); + /* Set location parameters */ loc_params.type = H5VL_OBJECT_BY_SELF; loc_params.obj_type = H5I_get_type(obj_id); @@ -833,6 +873,7 @@ H5Ovisit_by_name1(hid_t loc_id, const char *obj_name, H5_index_t idx_type, H5_it H5VL_loc_params_t loc_params; /* Location parameters for object access */ H5O_visit1_adapter_t shim_data; /* Adapter for passing app callback & user data */ herr_t ret_value; /* Return value */ + bool is_native_vol_obj = false; FUNC_ENTER_API(FAIL) H5TRACE7("e", "i*sIiIoOi*xi", loc_id, obj_name, idx_type, order, op, op_data, lapl_id); @@ -857,6 +898,15 @@ H5Ovisit_by_name1(hid_t loc_id, const char *obj_name, H5_index_t idx_type, H5_it if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_OHDR, H5E_VOL, FAIL, + "Deprecated H5Ovisit_by_name1 is only meant to be used with the native VOL connector"); + /* Set location parameters */ loc_params.type = H5VL_OBJECT_BY_NAME; loc_params.loc_data.loc_by_name.name = obj_name; @@ -949,9 +999,10 @@ H5Ovisit2(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, H5O_iterate1 if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, H5I_INVALID_HID, "can't determine if VOL object is native connector object"); + if (!is_native_vol_obj) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, H5I_INVALID_HID, - "H5Ovisit2 is only meant to be used with the native VOL connector"); + "Deprecated H5Ovisit2 is only meant to be used with the native VOL connector"); /* Set location parameters */ loc_params.type = H5VL_OBJECT_BY_SELF; @@ -1053,7 +1104,7 @@ H5Ovisit_by_name2(hid_t loc_id, const char *obj_name, H5_index_t idx_type, H5_it "can't determine if VOL object is native connector object"); if (!is_native_vol_obj) HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, H5I_INVALID_HID, - "H5Ovisit_by_name2 is only meant to be used with the native VOL connector"); + "Deprecated H5Ovisit_by_name2 is only meant to be used with the native VOL connector"); /* Set location parameters */ loc_params.type = H5VL_OBJECT_BY_NAME; diff --git a/src/H5Olayout.c b/src/H5Olayout.c index a686ce49e1c..1f2b6862b6e 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -591,6 +591,7 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU /* Avoid zero-size allocation */ mesg->storage.u.virt.list = NULL; } + mesg->storage.u.virt.list_nalloc = (size_t)tmp_hsize; mesg->storage.u.virt.list_nused = (size_t)tmp_hsize; diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c index b6130f5117d..9adb2d60d3f 100644 --- a/src/H5Pdxpl.c +++ b/src/H5Pdxpl.c @@ -175,6 +175,9 @@ /* Definitions for cause of no selection I/O property */ #define H5D_XFER_NO_SELECTION_IO_CAUSE_SIZE sizeof(uint32_t) #define H5D_XFER_NO_SELECTION_IO_CAUSE_DEF 0 +/* Definitions for actual selection I/O mode property */ +#define H5D_XFER_ACTUAL_SELECTION_IO_MODE_SIZE sizeof(uint32_t) +#define 
H5D_XFER_ACTUAL_SELECTION_IO_MODE_DEF 0 /* Definitions for modify write buffer property */ #define H5D_XFER_MODIFY_WRITE_BUF_SIZE sizeof(bool) #define H5D_XFER_MODIFY_WRITE_BUF_DEF false @@ -295,7 +298,8 @@ static const H5S_t *H5D_def_dset_io_sel_g = H5D_XFER_DSET_IO_SEL_DEF; /* Default value for dataset I/O selection */ static const H5D_selection_io_mode_t H5D_def_selection_io_mode_g = H5D_XFER_SELECTION_IO_MODE_DEF; static const uint32_t H5D_def_no_selection_io_cause_g = H5D_XFER_NO_SELECTION_IO_CAUSE_DEF; -static const bool H5D_def_modify_write_buf_g = H5D_XFER_MODIFY_WRITE_BUF_DEF; +static const uint32_t H5D_def_actual_selection_io_mode_g = H5D_XFER_ACTUAL_SELECTION_IO_MODE_DEF; +static const bool H5D_def_modify_write_buf_g = H5D_XFER_MODIFY_WRITE_BUF_DEF; /*------------------------------------------------------------------------- * Function: H5P__dxfr_reg_prop @@ -470,6 +474,13 @@ H5P__dxfr_reg_prop(H5P_genclass_t *pclass) NULL) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class"); + /* Register the actual selection I/O mode property */ + /* (Note: this property should not have an encode/decode callback) */ + if (H5P__register_real(pclass, H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME, + H5D_XFER_ACTUAL_SELECTION_IO_MODE_SIZE, &H5D_def_actual_selection_io_mode_g, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class"); + /* Register the modify write buffer property */ if (H5P__register_real(pclass, H5D_XFER_MODIFY_WRITE_BUF_NAME, H5D_XFER_MODIFY_WRITE_BUF_SIZE, &H5D_def_modify_write_buf_g, NULL, NULL, NULL, H5D_XFER_MODIFY_WRITE_BUF_ENC, @@ -2456,6 +2467,39 @@ H5Pget_no_selection_io_cause(hid_t plist_id, uint32_t *no_selection_io_cause /*o FUNC_LEAVE_API(ret_value) } /* end H5Pget_no_selection_io_cause() */ +/*------------------------------------------------------------------------- + * Function: H5Pget_actual_selection_io_mode + * + * Purpose: Retrieves actual selection I/O mode + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * April 27, 2023 + *------------------------------------------------------------------------- + */ +herr_t +H5Pget_actual_selection_io_mode(hid_t plist_id, uint32_t *actual_selection_io_mode /*out*/) +{ + H5P_genplist_t *plist; + herr_t ret_value = SUCCEED; /* return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "ix", plist_id, actual_selection_io_mode); + + /* Get the plist structure */ + if (NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_ID, H5E_BADID, FAIL, "can't find object for ID"); + + /* Return values */ + if (actual_selection_io_mode) + if (H5P_get(plist, H5D_XFER_ACTUAL_SELECTION_IO_MODE_NAME, actual_selection_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get actual_selection_io_mode value"); + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pget_actual_selection_io_mode() */ + /*------------------------------------------------------------------------- * Function: H5P__dxfr_modify_write_buf_enc * diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index 5f5782cae3b..e7c1fb3acb8 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -4848,7 +4848,7 @@ H5P__facc_mdc_log_location_close(const char H5_ATTR_UNUSED *name, size_t H5_ATTR *------------------------------------------------------------------------- */ herr_t -H5Pset_evict_on_close(hid_t fapl_id, hbool_t H5_ATTR_PARALLEL_UNUSED evict_on_close) +H5Pset_evict_on_close(hid_t fapl_id, hbool_t 
evict_on_close) { H5P_genplist_t *plist; /* property list pointer */ herr_t ret_value = SUCCEED; /* return value */ @@ -4864,14 +4864,9 @@ H5Pset_evict_on_close(hid_t fapl_id, hbool_t H5_ATTR_PARALLEL_UNUSED evict_on_cl if (NULL == (plist = (H5P_genplist_t *)H5I_object(fapl_id))) HGOTO_ERROR(H5E_ID, H5E_BADID, FAIL, "can't find object for ID"); -#ifndef H5_HAVE_PARALLEL /* Set value */ if (H5P_set(plist, H5F_ACS_EVICT_ON_CLOSE_FLAG_NAME, &evict_on_close) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set evict on close property"); -#else - HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, - "evict on close is currently not supported in parallel HDF5"); -#endif /* H5_HAVE_PARALLEL */ done: FUNC_LEAVE_API(ret_value) @@ -5174,7 +5169,7 @@ H5Pget_all_coll_metadata_ops(hid_t plist_id, hbool_t *is_collective /*out*/) * Function: H5Pset_coll_metadata_write * * Purpose: Tell the library whether the metadata write operations will - * be done collectively (1) or not (0). Default is collective. + * be done collectively (1) or not (0). Default is independent. * * Return: Non-negative on success/Negative on failure * diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index a58d97ffd22..3a059105a11 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -420,6 +420,13 @@ typedef enum H5D_selection_io_mode_t { } H5D_selection_io_mode_t; //! +/** + * Causes for H5Pget_actual_selection_io_mode() property + */ +#define H5D_SCALAR_IO (0x0001u) /**< Scalar (or legacy MPIO) I/O was performed */ +#define H5D_VECTOR_IO (0x0002u) /**< Vector I/O was performed */ +#define H5D_SELECTION_IO (0x0004u) /**< Selection I/O was performed */ + /********************/ /* Public Variables */ /********************/ @@ -4830,9 +4837,8 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * enumerated value in #H5F_libver_t, indicating that this is * currently the latest format available. * - * The library supports the following five pairs of - * (\p low, \p high) combinations as derived from the values - * in #H5F_libver_t: + * The library supports the following pairs of (\p low, \p high) + * combinations as derived from the values in #H5F_libver_t: * * * @@ -4843,14 +4849,13 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * * + * \li The library will create objects with the earliest + * possible format versions. + * \li The library will allow objects to be created with the + * latest format versions available to library release 1.8.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.8.x + * release will fail. * * * + * + * + * + * + * + * + * + * + * + * + * + * + * format compatibility. * * * * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * * * * + * \p high=#H5F_LIBVER_V112 * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * objects created with this setting. * *
    \p low=#H5F_LIBVER_EARLIEST
    * \p high=#H5F_LIBVER_V18
    - * \li The library will create objects with the earliest - * possible format versions. - * \li The library will allow objects to be created with the - * latest format versions available to library release 1.8.x. - * \li API calls that create objects or features that are - * available to versions of the library greater than 1.8.x - * release will fail. - *
    \p low=#H5F_LIBVER_EARLIEST
    @@ -4860,13 +4865,45 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * format versions. * \li The library will allow objects to be created with the latest * format versions available to library release 1.10.x. - * Since 1.10.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. For example, if a newer - * format version is required to support a feature e.g. virtual - * dataset, this setting will allow the object to be created. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.10.x + * release will fail.
    \p low=#H5F_LIBVER_EARLIEST
    + * \p high=#H5F_LIBVER_V112
    + * \li The library will create objects with the earliest possible + * format versions. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.12.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.12.x + * release will fail.
    \p low=#H5F_LIBVER_EARLIEST
    + * \p high=#H5F_LIBVER_V114
    + * \li The library will create objects with the earliest possible + * format versions. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.14.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.14.x + * release will fail.
    \p low=#H5F_LIBVER_EARLIEST
    + * \p high=#H5F_LIBVER_V116
    + * \li The library will create objects with the earliest possible + * format versions. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. * \li This is the library default setting and provides the greatest - * format compatibility. - *
    \p low=#H5F_LIBVER_V18
    @@ -4874,6 +4911,11 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); *
    * \li The library will create objects with the latest format * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.8.x. + * \li The objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. * \li API calls that create objects or features that are available * to versions of the library greater than 1.8.x release will * fail. @@ -4884,44 +4926,219 @@ H5_DLL herr_t H5Pset_gc_references(hid_t fapl_id, unsigned gc_ref); * \p low=#H5F_LIBVER_V18
    * \p high=#H5F_LIBVER_V110
    + * \li The library will create objects with the latest format + * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.10.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.10.x + * release will fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V18
    + * \p high=#H5F_LIBVER_V112
    + * \li The library will create objects with the latest format + * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.12.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.12.x + * release will fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V18
    + * \p high=#H5F_LIBVER_V114
    + * \li The library will create objects with the latest format + * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.14.x. + * \li API calls that create objects or features that are + * available to versions of the library greater than 1.14.x + * release will fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V18
    + * \p high=#H5F_LIBVER_V116
    + * \li The library will create objects with the latest format + * versions available to library release 1.8.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. + * \li This setting allows users to take advantage of the latest + * features and performance enhancements in the library. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V110
    + * \p high=#H5F_LIBVER_V110
    * \li The library will create objects with the latest format - * versions available to library release 1.8.x. + * versions available to library release 1.10.x. * \li The library will allow objects to be created with the latest * format versions available to library release 1.10.x. - * Since 1.10.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. For example, if a - * newer format version is required to support a feature e.g. - * virtual dataset, this setting will allow the object to be - * created. + * \li The objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.10.x release will + * fail. * \li Earlier versions of the library may not be able to access * objects created with this setting.
    \p low=#H5F_LIBVER_V110
    - * \p high=#H5F_LIBVER_V110 - *
    * \li The library will create objects with the latest format * versions available to library release 1.10.x. * \li The library will allow objects to be created with the latest - * format versions available to library release 1.10.x. - * Since 1.10.x is also #H5F_LIBVER_LATEST, there is no upper - * limit on the format versions to use. For example, if a - * newer format version is required to support a feature e.g. - * virtual dataset, this setting will allow the object to be - * created. + * format versions available to library release 1.12.x. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.12.x release will + * fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V110
    + * \p high=#H5F_LIBVER_V114
    + * \li The library will create objects with the latest format + * versions available to library release 1.10.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.14.x. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.14.x release will + * fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V110
    + * \p high=#H5F_LIBVER_V116
    + * \li The library will create objects with the latest format + * versions available to library release 1.10.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. + * \li This setting allows users to take advantage of the latest + * features and performance enhancements in the library. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V112
    + * \p high=#H5F_LIBVER_V112
    + * \li The library will create objects with the latest format + * versions available to library release 1.12.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.12.x. + * \li The objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.12.x release will + * fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V112
    + * \p high=#H5F_LIBVER_V114
    + * \li The library will create objects with the latest format + * versions available to library release 1.12.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.14.x. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.14.x release will + * fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V112
    + * \p high=#H5F_LIBVER_V116
    + * \li The library will create objects with the latest format + * versions available to library release 1.12.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. + * \li This setting allows users to take advantage of the latest + * features and performance enhancements in the library. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V114
    + * \p high=#H5F_LIBVER_V114
    + * \li The library will create objects with the latest format + * versions available to library release 1.14.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.14.x. + * \li The objects written with this setting may be + * accessible to a smaller range of library versions than + * would be the case if low is set to #H5F_LIBVER_EARLIEST. + * \li API calls that create objects or features that are available + * to versions of the library greater than 1.14.x release will + * fail. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V114
    + * \p high=#H5F_LIBVER_V116
    + * \li The library will create objects with the latest format + * versions available to library release 1.14.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. + * \li This setting allows users to take advantage of the latest + * features and performance enhancements in the library. + * \li Earlier versions of the library may not be able to access + * objects created with this setting.
    \p low=#H5F_LIBVER_V116
    + * \p high=#H5F_LIBVER_V116
    + * \li The library will create objects with the latest format + * versions available to library release 1.16.x. + * \li The library will allow objects to be created with the latest + * format versions available to library release 1.16.x. See + * note *H5F_LIBVER_LATEST* below the table. * \li This setting allows users to take advantage of the latest * features and performance enhancements in the library. * However, objects written with this setting may be * accessible to a smaller range of library versions than * would be the case if low is set to #H5F_LIBVER_EARLIEST. * \li Earlier versions of the library may not be able to access - * objects created with this - * setting. - *
    * + * \note *H5F_LIBVER_LATEST*:
+ * Since 1.16.x is also #H5F_LIBVER_LATEST, there is no upper + * limit on the format versions to use. That is, if a + * newer format version is required to support a feature + * in 1.16.x series, this setting will allow the object to be + * created. + * * \version 1.10.2 #H5F_LIBVER_V18 added to the enumerated defines in * #H5F_libver_t. * @@ -5811,7 +6028,7 @@ H5_DLL int H5Pget_external_count(hid_t plist_id); * \note H5Pget_fill_time() is designed to work in coordination with the * dataset fill value and dataset storage allocation time properties, * retrieved with the functions H5Pget_fill_value() and - * H5Pget_alloc_time(). + * H5Pget_alloc_time(). * * \since 1.6.0 * * @@ -8299,6 +8516,61 @@ H5_DLL herr_t H5Pget_selection_io(hid_t plist_id, H5D_selection_io_mode_t *selec */ H5_DLL herr_t H5Pget_no_selection_io_cause(hid_t plist_id, uint32_t *no_selection_io_cause); +/** + * \ingroup DXPL + * + * \brief Retrieves the type(s) of I/O that HDF5 actually performed on raw data + * during the last I/O call + * + * \dxpl_id{plist_id} + * \param[out] actual_selection_io_mode A bitwise set value indicating the + * type(s) of I/O performed + * \return \herr_t + * + * \par Motivation: + * A user can request selection I/O to be performed via a data transfer + * property list (DXPL). This can be used to enable collective I/O with + * type conversion, or with custom VFDs that support vector or selection + * I/O. However, there are conditions that can cause HDF5 to forgo + * selection or vector I/O and perform legacy (scalar) I/O instead. + * This function allows the user to determine which type or types of + * I/O were actually performed. + * + * \details H5Pget_actual_selection_io_mode() allows the user to determine which + * type(s) of I/O were actually performed on raw data during the last + * I/O operation which used \p plist_id. This property is set after + * all I/O is completed; if I/O fails, it will not be set. + * + * H5Pget_no_selection_io_cause() can be used to determine the reason + * why selection or vector I/O was not performed. + * + * Valid bitflags returned in \p actual_selection_io_mode are listed + * as follows. + * + * - #H5D_SCALAR_IO + * Scalar (or legacy MPIO) I/O was performed + * - #H5D_VECTOR_IO + * Vector I/O was performed + * - #H5D_SELECTION_IO + * Selection I/O was performed + * + * 0 or more of these can be present in \p actual_selection_io_mode in + * a bitwise fashion, since a single operation can trigger multiple + * instances of I/O, possibly with different types. A value of \p 0 + * indicates no raw data I/O was performed during the operation. + * + * Be aware that this function will only include raw data I/O performed + * to/from disk as part of the last I/O operation. Any metadata + * I/O, including attribute and compact dataset I/O, is disregarded. + * It is also possible that data was cached in the dataset chunk cache + * or sieve buffer, which may prevent I/O from hitting the disk, and + * thereby prevent it from being counted by this function.
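A sketch of how an application might use the new H5Pget_actual_selection_io_mode() call documented above, together with the existing H5Pset_selection_io() and H5Dwrite(). The helper name is illustrative, and the dataset, dataspaces, and DXPL are assumed to be set up by the caller:

#include <stdio.h>
#include "hdf5.h"

/* Sketch: write raw data, then report which kind(s) of I/O were performed */
static herr_t
write_and_report(hid_t dset_id, hid_t mspace_id, hid_t fspace_id, hid_t dxpl_id, const int *buf)
{
    uint32_t actual = 0;

    /* Ask for selection I/O when possible */
    if (H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON) < 0)
        return -1;

    if (H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, buf) < 0)
        return -1;

    /* Query the property set by the library after the I/O completed */
    if (H5Pget_actual_selection_io_mode(dxpl_id, &actual) < 0)
        return -1;

    if (actual & H5D_SELECTION_IO)
        printf("selection I/O was performed\n");
    if (actual & H5D_VECTOR_IO)
        printf("vector I/O was performed\n");
    if (actual & H5D_SCALAR_IO)
        printf("scalar (legacy) I/O was performed\n");
    if (actual == 0)
        printf("no raw data I/O hit the file (e.g. it was cached)\n");

    return 0;
}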
+ * + * \since 1.14.3 + * + */ +H5_DLL herr_t H5Pget_actual_selection_io_mode(hid_t plist_id, uint32_t *actual_selection_io_mode); + /** * * \ingroup DXPL diff --git a/src/H5Rdeprec.c b/src/H5Rdeprec.c index 773d8b0c706..1d12ebae25a 100644 --- a/src/H5Rdeprec.c +++ b/src/H5Rdeprec.c @@ -101,14 +101,14 @@ H5R__decode_token_compat(H5VL_object_t *vol_obj, H5I_type_t type, H5R_type_t ref #ifndef NDEBUG { - bool is_native = false; /* Whether the src file is using the native VOL connector */ + bool is_native_vol_obj = false; /* Whether the src file is using the native VOL connector */ /* Check if using native VOL connector */ - if (H5VL_object_is_native(vol_obj, &is_native) < 0) + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, "can't query if file uses native VOL connector"); /* Must use native VOL connector for this operation */ - assert(is_native); + assert(is_native_vol_obj); } #endif /* NDEBUG */ @@ -251,7 +251,8 @@ H5Rget_obj_type1(hid_t id, H5R_type_t ref_type, const void *ref) H5O_token_t obj_token = {0}; /* Object token */ const unsigned char *buf = (const unsigned char *)ref; /* Reference buffer */ H5O_type_t obj_type = H5O_TYPE_UNKNOWN; /* Type of the referenced object */ - H5G_obj_t ret_value; /* Return value */ + bool is_native_vol_obj; /* Whether the native VOL connector is in use */ + H5G_obj_t ret_value; /* Return value */ FUNC_ENTER_API(H5G_UNKNOWN) H5TRACE3("Go", "iRt*x", id, ref_type, ref); @@ -266,6 +267,16 @@ H5Rget_obj_type1(hid_t id, H5R_type_t ref_type, const void *ref) if (NULL == (vol_obj = H5VL_vol_object(id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5G_UNKNOWN, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, + "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + "H5Rget_obj_type1 is only meant to be used with the native VOL connector"); + /* Get object type */ if ((vol_obj_type = H5I_get_type(id)) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5G_UNKNOWN, "invalid location identifier"); @@ -315,7 +326,8 @@ H5Rdereference1(hid_t obj_id, H5R_type_t ref_type, const void *ref) H5I_type_t opened_type; /* Opened object type */ void *opened_obj = NULL; /* Opened object */ const unsigned char *buf = (const unsigned char *)ref; /* Reference buffer */ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + bool is_native_vol_obj; /* Whether the native VOL connector is in use */ + hid_t ret_value = H5I_INVALID_HID; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) H5TRACE3("i", "iRt*x", obj_id, ref_type, ref); @@ -330,6 +342,16 @@ H5Rdereference1(hid_t obj_id, H5R_type_t ref_type, const void *ref) if (NULL == (vol_obj = H5VL_vol_object(obj_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, + "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + "H5Rdereference1 is only meant to be used with the native VOL connector"); + /* Get object type */ if ((vol_obj_type = H5I_get_type(obj_id)) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 
H5I_INVALID_HID, "invalid location identifier"); @@ -382,8 +404,9 @@ H5Rcreate(void *ref, hid_t loc_id, const char *name, H5R_type_t ref_type, hid_t H5VL_file_get_args_t file_get_vol_cb_args; /* Arguments to VOL callback */ hid_t file_id = H5I_INVALID_HID; /* File ID for region reference */ void *vol_obj_file = NULL; - unsigned char *buf = (unsigned char *)ref; /* Return reference pointer */ - herr_t ret_value = SUCCEED; /* Return value */ + bool is_native_vol_obj = false; /* Whether the src file is using the native VOL connector */ + unsigned char *buf = (unsigned char *)ref; /* Return reference pointer */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE5("e", "*xi*sRti", ref, loc_id, name, ref_type, space_id); @@ -404,18 +427,13 @@ H5Rcreate(void *ref, hid_t loc_id, const char *name, H5R_type_t ref_type, hid_t if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); -#ifndef NDEBUG - { - bool is_native = false; /* Whether the src file is using the native VOL connector */ + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, "can't query if file uses native VOL connector"); - /* Check if using native VOL connector */ - if (H5VL_object_is_native(vol_obj, &is_native) < 0) - HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, "can't query if file uses native VOL connector"); - - /* Must use native VOL connector for this operation */ - assert(is_native); - } -#endif /* NDEBUG */ + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, "must use native VOL connector to create reference"); /* Get object type */ if ((vol_obj_type = H5I_get_type(loc_id)) < 0) @@ -500,13 +518,14 @@ H5Rcreate(void *ref, hid_t loc_id, const char *name, H5R_type_t ref_type, hid_t herr_t H5Rget_obj_type2(hid_t id, H5R_type_t ref_type, const void *ref, H5O_type_t *obj_type /*out*/) { - H5VL_object_t *vol_obj = NULL; /* Object of loc_id */ - H5I_type_t vol_obj_type = H5I_BADID; /* Object type of loc_id */ - H5VL_object_get_args_t vol_cb_args; /* Arguments to VOL callback */ - H5VL_loc_params_t loc_params; /* Location parameters */ - H5O_token_t obj_token = {0}; /* Object token */ - const unsigned char *buf = (const unsigned char *)ref; /* Reference pointer */ - herr_t ret_value = SUCCEED; /* Return value */ + H5VL_object_t *vol_obj = NULL; /* Object of loc_id */ + H5I_type_t vol_obj_type = H5I_BADID; /* Object type of loc_id */ + H5VL_object_get_args_t vol_cb_args; /* Arguments to VOL callback */ + H5VL_loc_params_t loc_params; /* Location parameters */ + H5O_token_t obj_token = {0}; /* Object token */ + const unsigned char *buf = (const unsigned char *)ref; /* Reference pointer */ + bool is_native_vol_obj = false; /* Whether the native VOL connector is in use */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE4("e", "iRt*xx", id, ref_type, ref, obj_type); @@ -521,6 +540,16 @@ H5Rget_obj_type2(hid_t id, H5R_type_t ref_type, const void *ref, H5O_type_t *obj if (NULL == (vol_obj = H5VL_vol_object(id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, + "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this 
operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + "H5Rget_obj_type2 is only meant to be used with the native VOL connector"); + /* Get object type */ if ((vol_obj_type = H5I_get_type(id)) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); @@ -560,14 +589,15 @@ H5Rget_obj_type2(hid_t id, H5R_type_t ref_type, const void *ref, H5O_type_t *obj hid_t H5Rdereference2(hid_t obj_id, hid_t oapl_id, H5R_type_t ref_type, const void *ref) { - H5VL_object_t *vol_obj = NULL; /* Object of loc_id */ - H5I_type_t vol_obj_type = H5I_BADID; /* Object type of loc_id */ - H5VL_loc_params_t loc_params; /* Location parameters */ - H5O_token_t obj_token = {0}; /* Object token */ - H5I_type_t opened_type; /* Opened object type */ - void *opened_obj = NULL; /* Opened object */ - const unsigned char *buf = (const unsigned char *)ref; /* Reference pointer */ - hid_t ret_value = H5I_INVALID_HID; /* Return value */ + H5VL_object_t *vol_obj = NULL; /* Object of loc_id */ + H5I_type_t vol_obj_type = H5I_BADID; /* Object type of loc_id */ + H5VL_loc_params_t loc_params; /* Location parameters */ + H5O_token_t obj_token = {0}; /* Object token */ + H5I_type_t opened_type; /* Opened object type */ + void *opened_obj = NULL; /* Opened object */ + const unsigned char *buf = (const unsigned char *)ref; /* Reference pointer */ + bool is_native_vol_obj = false; /* Whether the native VOL connector is in use */ + hid_t ret_value = H5I_INVALID_HID; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) H5TRACE4("i", "iiRt*x", obj_id, oapl_id, ref_type, ref); @@ -588,6 +618,16 @@ H5Rdereference2(hid_t obj_id, hid_t oapl_id, H5R_type_t ref_type, const void *re if (NULL == (vol_obj = H5VL_vol_object(obj_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid file identifier"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, + "can't determine if VOL object is native connector object"); + + /* Must use native VOL connector for this operation */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + "H5Rdereference2 is only meant to be used with the native VOL connector"); + /* Get object type */ if ((vol_obj_type = H5I_get_type(obj_id)) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); @@ -639,7 +679,8 @@ H5Rget_region(hid_t id, H5R_type_t ref_type, const void *ref) H5S_t *space = NULL; /* Dataspace object */ hid_t file_id = H5I_INVALID_HID; /* File ID for region reference */ const unsigned char *buf = (const unsigned char *)ref; /* Reference pointer */ - hid_t ret_value; /* Return value */ + bool is_native_vol_obj = false; /* Whether the src file is using the native VOL connector */ + hid_t ret_value; /* Return value */ FUNC_ENTER_API(H5I_INVALID_HID) H5TRACE3("i", "iRt*x", id, ref_type, ref); @@ -654,19 +695,14 @@ H5Rget_region(hid_t id, H5R_type_t ref_type, const void *ref) if (NULL == (vol_obj = H5VL_vol_object(id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid file identifier"); -#ifndef NDEBUG - { - bool is_native = false; /* Whether the src file is using the native VOL connector */ - - /* Check if using native VOL connector */ - if (H5VL_object_is_native(vol_obj, &is_native) < 0) - HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, H5I_INVALID_HID, - "can't query if file uses native VOL connector"); + /* Check if using native VOL connector */ + if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) 
< 0) + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, H5I_INVALID_HID, + "can't query if file uses native VOL connector"); - /* Must use native VOL connector for this operation */ - assert(is_native); - } -#endif /* NDEBUG */ + if (!is_native_vol_obj) + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + "H5Rget_region is only meant to be used with the native VOL connector"); /* Get object type */ if ((vol_obj_type = H5I_get_type(id)) < 0) diff --git a/src/H5T.c b/src/H5T.c index ef94925982f..a02abfc18d0 100644 --- a/src/H5T.c +++ b/src/H5T.c @@ -2501,7 +2501,7 @@ H5T__register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5T_con H5T_g.asoft = na; H5T_g.soft = x; } /* end if */ - strncpy(H5T_g.soft[H5T_g.nsoft].name, name, (size_t)H5T_NAMELEN); + strncpy(H5T_g.soft[H5T_g.nsoft].name, name, (size_t)H5T_NAMELEN - 1); H5T_g.soft[H5T_g.nsoft].name[H5T_NAMELEN - 1] = '\0'; H5T_g.soft[H5T_g.nsoft].src = src->shared->type; H5T_g.soft[H5T_g.nsoft].dst = dst->shared->type; @@ -2550,7 +2550,7 @@ H5T__register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5T_con /* Create a new conversion path */ if (NULL == (new_path = H5FL_CALLOC(H5T_path_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); - strncpy(new_path->name, name, (size_t)H5T_NAMELEN); + strncpy(new_path->name, name, (size_t)H5T_NAMELEN - 1); new_path->name[H5T_NAMELEN - 1] = '\0'; if (NULL == (new_path->src = H5T_copy(old_path->src, H5T_COPY_ALL)) || NULL == (new_path->dst = H5T_copy(old_path->dst, H5T_COPY_ALL))) @@ -4953,7 +4953,7 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co if (NULL == (path = H5FL_CALLOC(H5T_path_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for type conversion path"); if (name && *name) { - strncpy(path->name, name, (size_t)H5T_NAMELEN); + strncpy(path->name, name, (size_t)H5T_NAMELEN - 1); path->name[H5T_NAMELEN - 1] = '\0'; } /* end if */ else diff --git a/src/H5Tvlen.c b/src/H5Tvlen.c index 68087a8e134..651ff8f7267 100644 --- a/src/H5Tvlen.c +++ b/src/H5Tvlen.c @@ -503,7 +503,7 @@ H5T__vlen_mem_seq_write(H5VL_object_t H5_ATTR_UNUSED *file, const H5T_vlen_alloc if (seq_len) { size_t len = seq_len * base_size; /* Sequence size */ - /* Use the user's memory allocation routine is one is defined */ + /* Use the user's memory allocation routine if one is defined */ if (vl_alloc_info->alloc_func != NULL) { if (NULL == (vl.p = (vl_alloc_info->alloc_func)(len, vl_alloc_info->alloc_info))) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, FAIL, diff --git a/src/H5mpi.c b/src/H5mpi.c index 2725ec5bf19..cf7e33d46c9 100644 --- a/src/H5mpi.c +++ b/src/H5mpi.c @@ -380,9 +380,9 @@ H5_mpi_info_cmp(MPI_Info info1, MPI_Info info2, int *result) /* Allocate buffers for iteration */ if (NULL == (key = (char *)H5MM_malloc(MPI_MAX_INFO_KEY * sizeof(char)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); - if (NULL == (value1 = (char *)H5MM_malloc(MPI_MAX_INFO_VAL * sizeof(char)))) + if (NULL == (value1 = (char *)H5MM_malloc((MPI_MAX_INFO_VAL + 1) * sizeof(char)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); - if (NULL == (value2 = (char *)H5MM_malloc(MPI_MAX_INFO_VAL * sizeof(char)))) + if (NULL == (value2 = (char *)H5MM_malloc((MPI_MAX_INFO_VAL + 1) * sizeof(char)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); /* Iterate over the keys, comparing them */ diff --git a/src/H5system.c b/src/H5system.c index 30a89a174af..be886ae52f3 100644 --- a/src/H5system.c 
+++ b/src/H5system.c @@ -807,13 +807,12 @@ H5_nanosleep(uint64_t nanosec) #ifdef H5_HAVE_WIN32_API DWORD dwMilliseconds = (DWORD)ceil(nanosec / 1.0e6); - DWORD ignore; /* Windows can't sleep at a ns resolution. Best we can do is ~1 ms. We * don't care about the return value since the second parameter * (bAlertable) is false, so it will always be zero. */ - ignore = SleepEx(dwMilliseconds, false); + SleepEx(dwMilliseconds, false); #else diff --git a/test/CMakePassthroughVOLTests.cmake b/test/CMakePassthroughVOLTests.cmake index a5d9b598fb8..853e4b339d6 100644 --- a/test/CMakePassthroughVOLTests.cmake +++ b/test/CMakePassthroughVOLTests.cmake @@ -37,19 +37,19 @@ endforeach () foreach (voltest ${VOL_LIST}) foreach (h5_tfile ${HDF5_TEST_FILES}) - HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${h5_tfile}" "${PROJECT_BINARY_DIR}/${voltest}/${h5_tfile}" "HDF5_VOLTEST_LIB_files") + HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${h5_tfile}" "${PROJECT_BINARY_DIR}/${voltest}/testfiles/${h5_tfile}" "HDF5_VOLTEST_LIB_files") endforeach () endforeach () foreach (voltest ${VOL_LIST}) foreach (ref_file ${HDF5_REFERENCE_FILES}) - HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${ref_file}" "${PROJECT_BINARY_DIR}/${voltest}/${ref_file}" "HDF5_VOLTEST_LIB_files") + HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${ref_file}" "${PROJECT_BINARY_DIR}/${voltest}/testfiles/${ref_file}" "HDF5_VOLTEST_LIB_files") endforeach () endforeach () foreach (voltest ${VOL_LIST}) foreach (h5_file ${HDF5_REFERENCE_TEST_FILES}) - HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/${voltest}/${h5_file}" "HDF5_VOLTEST_LIB_files") + HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/${voltest}/testfiles/${h5_file}" "HDF5_VOLTEST_LIB_files") endforeach () endforeach () diff --git a/test/Makefile.am b/test/Makefile.am index 291907ca0f9..fdd83e5bdeb 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -192,7 +192,7 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 mdset.h5 compact_dataset.h5 dataset.h5 d storage_size.h5 dls_01_strings.h5 power2up.h5 version_bounds.h5 \ alloc_0sized.h5 h5s_block.h5 h5s_plist.h5 \ extend.h5 istore.h5 extlinks*.h5 frspace.h5 links*.h5 \ - sys_file1 tfile[1-7].h5 th5s[1-4].h5 lheap.h5 fheap.h5 ohdr.h5 \ + sys_file1 tfile[1-8].h5 th5s[1-4].h5 lheap.h5 fheap.h5 ohdr.h5 \ stab.h5 extern_[1-5].h5 extern_[1-4][rw].raw gheap[0-4].h5 \ ohdr_min_a.h5 ohdr_min_b.h5 min_dset_ohdr_testfile.h5 \ dt_arith[1-2] links.h5 links[0-6]*.h5 extlinks[0-15].h5 \ @@ -226,7 +226,10 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 mdset.h5 compact_dataset.h5 dataset.h5 d test_swmr*.h5 cache_logging.h5 cache_logging.out vds_swmr.h5 vds_swmr_src_*.h5 \ swmr[0-2].h5 swmr_writer.out swmr_writer.log.* swmr_reader.out.* swmr_reader.log.* \ tbogus.h5.copy cache_image_test.h5 direct_chunk.h5 native_vol_test.h5 \ - splitter*.h5 splitter.log mirror_rw mirror_ro event_set_[0-9].h5 + splitter*.h5 splitter.log mirror_rw mirror_ro event_set_[0-9].h5 \ + cmpd_dtransform.h5 single_latest.h5 source_file.h5 stdio_file.h5 \ + tfile_is_accessible.h5 tfile_is_accessible_non_hdf5.h5 tverbounds_dtype.h5 \ + virtual_file1.h5 tfile_double_open.h5 tfile_incr_filesize.h5 flushrefresh_test # Sources for testhdf5 executable testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \ diff --git a/test/cmpd_dset.c b/test/cmpd_dset.c index 02dbde3759a..460c8ae7564 100644 --- a/test/cmpd_dset.c +++ b/test/cmpd_dset.c @@ -401,8 +401,10 @@ compare_a_b_c_data(void 
*exp1_buf, void *exp2_buf, void *rbuf) if (s1_ptr->a != rbuf_ptr->a || s2_ptr->b != rbuf_ptr->b || s2_ptr->c != rbuf_ptr->c) { H5_FAILED(); printf(" i=%d\n", i); - printf(" expect_buf:a=%ld, b=%ld, c=%ld\n", s1_ptr->a, s2_ptr->b, s2_ptr->c); - printf(" rbuf: a=%ld, b=%ld, c=%ld", rbuf_ptr->a, rbuf_ptr->b, rbuf_ptr->c); + printf(" expect_buf:a=%" PRId64 ", b=%" PRId64 ", c=%" PRId64 "\n", s1_ptr->a, s2_ptr->b, + s2_ptr->c); + printf(" rbuf: a=%" PRId64 ", b=%" PRId64 ", c=%" PRId64 "\n", rbuf_ptr->a, rbuf_ptr->b, + rbuf_ptr->c); goto error; } } /* end for */ diff --git a/test/dt_arith.c b/test/dt_arith.c index ab89b689837..d3147cbd577 100644 --- a/test/dt_arith.c +++ b/test/dt_arith.c @@ -696,7 +696,7 @@ test_particular_fp_integer(void) /* Print errors */ if (dst_i != fill_value) { - float x = 0.0; + float x = 0.0F; int y; if (0 == fails_this_test++) @@ -2637,7 +2637,7 @@ my_isnan(dtype_t type, void *val) char s[256]; if (FLT_FLOAT == type) { - float x = 0.0; + float x = 0.0F; memcpy(&x, val, sizeof(float)); retval = isnan(x); } @@ -2663,7 +2663,7 @@ my_isnan(dtype_t type, void *val) */ if (!retval) { if (FLT_FLOAT == type) { - float x = 0.0; + float x = 0.0F; memcpy(&x, val, sizeof(float)); snprintf(s, sizeof(s), "%g", (double)x); @@ -3115,7 +3115,7 @@ test_conv_flt_1(const char *name, int run_test, hid_t src, hid_t dst) int check_expo[2]; if (FLT_FLOAT == dst_type) { - float x = 0.0; + float x = 0.0F; memcpy(&x, &buf[j * dst_size], sizeof(float)); if (underflow && fabsf(x) <= FLT_MIN && fabsf(hw_f) <= FLT_MIN) continue; /* all underflowed, no error */ @@ -3185,7 +3185,7 @@ test_conv_flt_1(const char *name, int run_test, hid_t src, hid_t dst) printf(" %02x", saved[j * src_size + ENDIAN(src_size, k, sendian)]); printf("%*s", (int)(3 * MAX(0, (ssize_t)dst_size - (ssize_t)src_size)), ""); if (FLT_FLOAT == src_type) { - float x = 0.0; + float x = 0.0F; memcpy(&x, &saved[j * src_size], sizeof(float)); printf(" %29.20e\n", (double)x); } @@ -3207,7 +3207,7 @@ test_conv_flt_1(const char *name, int run_test, hid_t src, hid_t dst) printf(" %02x", buf[j * dst_size + ENDIAN(dst_size, k, dendian)]); printf("%*s", (int)(3 * MAX(0, (ssize_t)src_size - (ssize_t)dst_size)), ""); if (FLT_FLOAT == dst_type) { - float x = 0.0; + float x = 0.0F; memcpy(&x, &buf[j * dst_size], sizeof(float)); printf(" %29.20e\n", (double)x); } diff --git a/test/evict_on_close.c b/test/evict_on_close.c index 9ca7f9f9cf3..db2a96282ef 100644 --- a/test/evict_on_close.c +++ b/test/evict_on_close.c @@ -32,12 +32,6 @@ #include "H5Ipkg.h" #include "H5VLprivate.h" /* Virtual Object Layer */ -/* Evict on close is not supported under parallel at this time. - * In the meantime, we just run a simple check that EoC can't be - * enabled in parallel HDF5. - */ -#ifndef H5_HAVE_PARALLEL - /* Uncomment to manually inspect cache states */ /* (Requires debug build of the library) */ /* #define EOC_MANUAL_INSPECTION */ @@ -974,89 +968,3 @@ main(void) exit(EXIT_FAILURE); } /* end main() */ - -#else - -/*------------------------------------------------------------------------- - * Function: check_evict_on_close_parallel_fail() - * - * Purpose: Verify that the H5Pset_evict_on_close() call fails in - * parallel HDF5. 
- * - * Return: SUCCEED/FAIL - * - *------------------------------------------------------------------------- - */ -static herr_t -check_evict_on_close_parallel_fail(void) -{ - hid_t fapl_id = H5I_INVALID_HID; - bool evict_on_close; - herr_t status; - - TESTING("evict on close fails in parallel"); - - /* Create a fapl */ - if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) - TEST_ERROR; - - /* Set the evict on close property (should fail)*/ - evict_on_close = true; - H5E_BEGIN_TRY - { - status = H5Pset_evict_on_close(fapl_id, evict_on_close); - } - H5E_END_TRY - if (status >= 0) - FAIL_PUTS_ERROR("H5Pset_evict_on_close() did not fail in parallel HDF5."); - - /* close fapl */ - if (H5Pclose(fapl_id) < 0) - TEST_ERROR; - - PASSED(); - return SUCCEED; - -error: - H5_FAILED(); - return FAIL; - -} /* check_evict_on_close_parallel_fail() */ - -/*------------------------------------------------------------------------- - * Function: main (parallel version) - * - * Return: EXIT_FAILURE/EXIT_SUCCESS - * - *------------------------------------------------------------------------- - */ -int -main(void) -{ - unsigned nerrors = 0; /* number of test errors */ - - printf("Testing evict-on-close cache behavior\n"); - - /* Initialize */ - h5_reset(); - - /* Test that EoC fails in parallel HDF5 */ - nerrors += check_evict_on_close_parallel_fail() < 0 ? 1 : 0; - - if (nerrors) - goto error; - - printf("All evict-on-close tests passed.\n"); - printf("Note that EoC is not supported under parallel so most tests are skipped.\n"); - - exit(EXIT_SUCCESS); - -error: - - printf("***** %u evict-on-close test%s FAILED! *****\n", nerrors, nerrors > 1 ? "S" : ""); - - exit(EXIT_FAILURE); - -} /* main() - parallel */ - -#endif /* H5_HAVE_PARALLEL */ diff --git a/test/h5test.c b/test/h5test.c index 5348e1b4c8d..ef580cf3072 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -457,7 +457,7 @@ h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fu const char *suffix = _suffix; size_t i, j; hid_t driver = H5I_INVALID_HID; - int isppdriver = 0; /* if the driver is MPI parallel */ + bool isppdriver = false; /* if the driver is MPI parallel */ if (!base_name || !fullname || size < 1) return NULL; @@ -516,10 +516,8 @@ h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fu } } - /* Must first check fapl is not H5P_DEFAULT (-1) because H5FD_XXX - * could be of value -1 if it is not defined. - */ - isppdriver = ((H5P_DEFAULT != fapl) || driver_env_var) && (H5FD_MPIO == driver); + if (h5_using_parallel_driver(fapl, &isppdriver) < 0) + return NULL; /* Check HDF5_NOCLEANUP environment setting. 
* (The #ifdef is needed to prevent compile failure in case MPI is not @@ -864,22 +862,23 @@ h5_show_hostname(void) WSADATA wsaData; int err; #endif +#ifdef H5_HAVE_PARALLEL + int mpi_rank, mpi_initialized, mpi_finalized; +#endif /* try show the process or thread id in multiple processes cases*/ #ifdef H5_HAVE_PARALLEL - { - int mpi_rank, mpi_initialized, mpi_finalized; - - MPI_Initialized(&mpi_initialized); - MPI_Finalized(&mpi_finalized); + MPI_Initialized(&mpi_initialized); + MPI_Finalized(&mpi_finalized); - if (mpi_initialized && !mpi_finalized) { - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - printf("MPI-process %d.", mpi_rank); - } - else - printf("thread 0."); + if (mpi_initialized && !mpi_finalized) { + /* Prevent output here from getting mixed with later output */ + MPI_Barrier(MPI_COMM_WORLD); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + printf("MPI-process %d.", mpi_rank); } + else + printf("thread 0."); #else printf("thread %" PRIu64 ".", H5TS_thread_id()); #endif @@ -915,6 +914,11 @@ h5_show_hostname(void) #ifdef H5_HAVE_WIN32_API WSACleanup(); #endif +#ifdef H5_HAVE_PARALLEL + /* Prevent output here from getting mixed with later output */ + if (mpi_initialized && !mpi_finalized) + MPI_Barrier(MPI_COMM_WORLD); +#endif } #ifdef H5_HAVE_PARALLEL diff --git a/test/links.c b/test/links.c index 6f07d32253e..99e011402c0 100644 --- a/test/links.c +++ b/test/links.c @@ -9880,6 +9880,7 @@ external_set_elink_cb(hid_t fapl, bool new_format) set_elink_cb_t op_data, *op_data_p; H5L_elink_traverse_t cb; char filename1[NAME_BUF_SIZE], filename2[NAME_BUF_SIZE]; + bool driver_is_parallel; unsigned flags; if (new_format) @@ -9890,16 +9891,21 @@ external_set_elink_cb(hid_t fapl, bool new_format) /* Build user data for callback */ op_data.parent_file = filename1; op_data.target_file = filename2; + + /* Check if using a parallel file driver */ + if (h5_using_parallel_driver(fapl, &driver_is_parallel) < 0) + TEST_ERROR; + + base_driver = H5Pget_driver(fapl); + /* Core file driver has issues when used as the member file driver for a family file */ /* Family file driver cannot be used with family or multi drivers for member files */ /* Also disable parallel member drivers, because H5F_HAS_FEATURE(H5FD_FEAT_HAS_MPI) would report false, causing problems */ - base_driver = H5Pget_driver(fapl); - op_data.base_fapl = - (base_driver == H5FD_FAMILY || base_driver == H5FD_MULTI || base_driver == H5FD_MPIO || - base_driver == H5FD_CORE || base_driver == H5FD_DIRECT || base_driver == H5FD_SUBFILING) - ? H5P_DEFAULT - : fapl; + op_data.base_fapl = fapl; + if (base_driver == H5FD_CORE || base_driver == H5FD_FAMILY || base_driver == H5FD_MULTI || + base_driver == H5FD_DIRECT || driver_is_parallel) + op_data.base_fapl = H5P_DEFAULT; op_data.fam_size = ELINK_CB_FAM_SIZE; op_data.code = 0; @@ -18434,14 +18440,12 @@ link_info_by_idx_old(hid_t fapl) { hid_t file_id = H5I_INVALID_HID; /* File ID */ hid_t group_id = H5I_INVALID_HID, group_id2 = H5I_INVALID_HID; /* Group IDs */ - H5F_t *f = NULL; - unsigned hard_link; /* Create hard or soft link? */ - H5L_info2_t linfo; /* Link info struct */ - char objname[NAME_BUF_SIZE]; /* Object name */ - char valname[NAME_BUF_SIZE]; /* Link value name */ - char filename[NAME_BUF_SIZE]; /* File name */ + unsigned hard_link; /* Create hard or soft link? 
*/ + H5L_info2_t linfo; /* Link info struct */ + char objname[NAME_BUF_SIZE]; /* Object name */ + char valname[NAME_BUF_SIZE]; /* Link value name */ + char filename[NAME_BUF_SIZE]; /* File name */ H5O_token_t objtoken[CORDER_NLINKS]; /* Tokens (Addresses) of the objects created */ - void *vol_obj_file = NULL; /* Object of file_id */ char tmpname[NAME_BUF_SIZE]; /* Temporary link name */ char tmpval[NAME_BUF_SIZE]; /* Temporary link value */ unsigned u; /* Local index variable */ @@ -18461,14 +18465,6 @@ link_info_by_idx_old(hid_t fapl) if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR; - /* Need the file struct to address encoding */ - /* Retrieve VOL object */ - if (NULL == (vol_obj_file = H5VL_vol_object(file_id))) - TEST_ERROR; - /* Retrieve file from VOL object */ - if (NULL == (f = (H5F_t *)H5VL_object_data((const H5VL_object_t *)vol_obj_file))) - TEST_ERROR; - /* Create group to operate on */ if ((group_id = H5Gcreate2(file_id, CORDER_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; @@ -19066,12 +19062,10 @@ delete_by_idx_old(hid_t fapl) { hid_t file_id = H5I_INVALID_HID; /* File ID */ hid_t group_id = H5I_INVALID_HID, group_id2 = H5I_INVALID_HID; /* Group IDs */ - H5F_t *f = NULL; - H5L_info2_t linfo; /* Link info struct */ - H5_iter_order_t order; /* Order within in the index */ - void *vol_obj_file = NULL; /* Object of file_id */ - char objname[NAME_BUF_SIZE]; /* Object name */ - char filename[NAME_BUF_SIZE]; /* File name */ + H5L_info2_t linfo; /* Link info struct */ + H5_iter_order_t order; /* Order within in the index */ + char objname[NAME_BUF_SIZE]; /* Object name */ + char filename[NAME_BUF_SIZE]; /* File name */ H5O_token_t objtoken[CORDER_NLINKS]; /* Tokens (Addresses) of the objects created */ char tmpname[NAME_BUF_SIZE]; /* Temporary link name */ unsigned u; /* Local index variable */ @@ -19091,14 +19085,6 @@ delete_by_idx_old(hid_t fapl) if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR; - /* Need the file struct to address encoding */ - /* Retrieve VOL object */ - if (NULL == (vol_obj_file = H5VL_vol_object(file_id))) - TEST_ERROR; - /* Retrieve file from VOL object */ - if (NULL == (f = (H5F_t *)H5VL_object_data((const H5VL_object_t *)vol_obj_file))) - TEST_ERROR; - /* Create group to operate on */ if ((group_id = H5Gcreate2(file_id, CORDER_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; diff --git a/test/select_io_dset.c b/test/select_io_dset.c index 79449aac070..33b1c843d09 100644 --- a/test/select_io_dset.c +++ b/test/select_io_dset.c @@ -104,13 +104,28 @@ typedef enum { #define TEST_TCONV_BUF_TOO_SMALL 0x100 #define TEST_IN_PLACE_TCONV 0x200 +static herr_t +check_actual_selection_io_mode(hid_t dxpl, uint32_t sel_io_mode_expected) +{ + uint32_t actual_sel_io_mode; + + if (H5Pget_actual_selection_io_mode(dxpl, &actual_sel_io_mode) < 0) + TEST_ERROR; + if (actual_sel_io_mode != sel_io_mode_expected) + TEST_ERROR; + + return SUCCEED; +error: + return FAIL; +} + /* * Case 1: single dataset read/write, no type conversion (null case) * --create dataset with H5T_NATIVE_INT * --write/read dataset with H5T_NATIVE_INT */ static herr_t -test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_no_type_conv(hid_t fid, unsigned set_cache, unsigned chunked, unsigned dtrans, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -130,14 +145,14 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Create 1d 
data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Generate dataset name */ @@ -146,7 +161,7 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Create dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -156,23 +171,23 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set data transform */ if (dtrans) if (H5Pset_data_transform(dxpl, expr) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Copy wbuf if the library will be modifying it */ if (mwbuf) @@ -180,7 +195,11 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Write data to the dataset with/without data transform */ if (H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -188,7 +207,11 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Read data from the dataset without data transform set in dxpl */ if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, ntrans_dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 
0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Verify data or transformed data read */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -203,7 +226,7 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) /* Read the data from the dataset with data transform set in dxpl */ if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read is transformed a second time */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -216,15 +239,15 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) } if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(ntrans_dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; PASSED(); @@ -252,7 +275,7 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) * --read again with H5T_STD_I32BE */ static herr_t -test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +test_no_size_change_no_bkg(hid_t fid, unsigned set_cache, unsigned chunked, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -268,39 +291,39 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) int fillvalue = (-1); if ((wbuf = (char *)malloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && (wbuf_bak = (char *)malloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) - FAIL_STACK_ERROR; + TEST_ERROR; if ((rbuf = (char *)malloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillvalue) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Generate dataset name */ @@ -309,7 +332,7 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create 1d dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_STD_I32BE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -325,7 +348,11 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Write the data to the dataset with little endian */ if (H5Dwrite(did, H5T_STD_I32LE, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 
0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -333,7 +360,11 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Read the data from the dataset with little endian */ if (H5Dread(did, H5T_STD_I32LE, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Verify data read little endian */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -347,7 +378,7 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Read the data from the dataset with big endian */ if (H5Dread(did, H5T_STD_I32BE, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read in big endian */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -360,13 +391,13 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) } if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; free(wbuf); free(wbuf_bak); @@ -405,7 +436,7 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * */ static herr_t -test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_larger_mem_type_no_bkg(hid_t fid, unsigned set_cache, unsigned chunked, unsigned dtrans, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -425,14 +456,14 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Generate dataset name */ @@ -441,7 +472,7 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Create dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -451,23 +482,23 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set data transform */ if (dtrans) if (H5Pset_data_transform(dxpl, expr) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Copy wbuf if the library will be modifying it */ if (mwbuf) @@ -475,7 +506,11 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Write data to the dataset with/without data transform set in dxpl */ if (H5Dwrite(did, H5T_NATIVE_LONG, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 
0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -483,7 +518,11 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Read the data from the dataset without data transform in dxpl */ if (H5Dread(did, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, ntrans_dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Verify data or transformed data read */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -498,7 +537,7 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign /* Read data from the dataset with data transform set in dxpl */ if (H5Dread(did, H5T_NATIVE_LLONG, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read is transformed a second time */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -511,15 +550,15 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign } if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(ntrans_dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; PASSED(); @@ -547,7 +586,7 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign * --read dataset with H5T_NATIVE_SHORT */ static herr_t -test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_smaller_mem_type_no_bkg(hid_t fid, unsigned set_cache, unsigned chunked, unsigned dtrans, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -567,14 +606,14 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Generate dataset name */ @@ -583,7 +622,7 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Create 1d chunked dataset with/without data transform */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -593,23 +632,23 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set data transform */ if (dtrans) { if (H5Pset_data_transform(dxpl, expr) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Copy wbuf if the library will be modifying it */ @@ -618,7 +657,11 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Write data to the dataset with/without data transform in dxpl */ if (H5Dwrite(did, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - 
FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -626,7 +669,11 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Read data from the dataset without data transform in dxpl */ if (H5Dread(did, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, ntrans_dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; + + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; /* Verify data or transformed data read */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -641,7 +688,7 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig /* Read data from the dataset with data transform set in dxpl */ if (H5Dread(did, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read is transformed a second time */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -654,15 +701,15 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig } if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(ntrans_dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; PASSED(); @@ -730,55 +777,55 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Allocate buffers for datasets */ if (NULL == (s1_wbuf = (s1_t *)malloc(sizeof(s1_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (s1_wbuf_bak = (s1_t *)malloc(sizeof(s1_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (s1_rbuf = (s1_t *)malloc(sizeof(s1_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (s2_wbuf = (s2_t *)malloc(sizeof(s2_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (s2_wbuf_bak = (s2_t *)malloc(sizeof(s2_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (s2_rbuf = (s2_t *)malloc(sizeof(s2_t) * DSET_SELECT_DIM))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create the memory data type */ if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_fill_value(dcpl, s1_tid, &fillvalue) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Case 5(a) */ @@ -789,7 +836,7 @@ test_cmpd_with_bkg(hid_t 
fid, unsigned chunked, unsigned mwbuf) /* Create 1d dataset */ if ((did = H5Dcreate2(fid, dset_name, s1_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -805,7 +852,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Write all the data to the dataset */ if (H5Dwrite(did, s1_tid, H5S_ALL, H5S_ALL, dxpl, s1_wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -813,7 +860,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Read all the data from the dataset */ if (H5Dread(did, s1_tid, H5S_ALL, H5S_ALL, dxpl, s1_rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -838,12 +885,12 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create a compound type same size as s1_t */ if ((ss_ac_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* but contains only subset members of s1_t */ if (H5Tinsert(ss_ac_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(ss_ac_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Copy wbuf if the library will be modifying it */ if (mwbuf) @@ -851,7 +898,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Write s1_wbuf to the dataset with only subset members in ss_tid */ if (H5Dwrite(did, ss_ac_tid, H5S_ALL, H5S_ALL, dxpl, s1_wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -859,7 +906,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Read the whole compound back */ if (H5Dread(did, ss_ac_tid, H5S_ALL, H5S_ALL, dxpl, s1_rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify the compound fields have the correct (old or new) values */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -884,16 +931,16 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create a compound type same size as s1_t */ if ((ss_bc_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* but contains only subset members of s1_t */ if (H5Tinsert(ss_bc_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || H5Tinsert(ss_bc_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Read the dataset: will read only what is set in */ if (H5Dread(did, ss_bc_tid, H5S_ALL, H5S_ALL, dxpl, s1_rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data read */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -915,13 +962,13 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * --1 smaller mem type */ if ((s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s2_tid, "a", HOFFSET(s2_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s2_tid, "b", HOFFSET(s2_t, b), H5T_NATIVE_LONG) < 0 || H5Tinsert(s2_tid, "c", HOFFSET(s2_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s2_tid, "d", HOFFSET(s2_t, d), H5T_NATIVE_SHORT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Update s2_wbuf with unique values */ for (i = 0; i < DSET_SELECT_DIM; i++) { @@ -936,7 +983,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) memcpy(s2_wbuf_bak, s2_wbuf, sizeof(s2_t) * DSET_SELECT_DIM); if (H5Dwrite(did, s2_tid, H5S_ALL, H5S_ALL, dxpl, s2_wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Restore wbuf from backup if the library modified it */ if (mwbuf) @@ -959,21 
+1006,21 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) } if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(s1_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(s2_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(ss_ac_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(ss_bc_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Release buffers */ free(s1_wbuf); @@ -1030,7 +1077,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * Datatype for all datasets: H5T_NATIVE_LONG */ static herr_t -test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_multi_dsets_no_bkg(hid_t fid, unsigned set_cache, unsigned chunked, unsigned dtrans, unsigned mwbuf) { size_t ndsets; int i, j; @@ -1074,41 +1121,41 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m dims[0] = DSET_SELECT_DIM; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set data transform */ if (dtrans) if (H5Pset_data_transform(dxpl, expr) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set up file space ids, mem space ids, and dataset ids */ for (i = 0; i < (int)ndsets; i++) { if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Generate dataset name */ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_dset%d_%s_%s_%s", i, @@ -1118,31 +1165,31 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], ((HDrandom() % 2) ? 
H5T_NATIVE_LONG : H5T_NATIVE_INT), file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } buf_size = ndsets * DSET_SELECT_DIM * sizeof(int); /* Allocate buffers for all datasets */ if (NULL == (total_wbuf = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (total_wbuf_bak = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_trans_wbuf = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_rbuf = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); if (NULL == (total_lwbuf = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (total_lwbuf_bak = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_trans_lwbuf = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_lrbuf = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize buffer indices */ for (i = 0; i < (int)ndsets; i++) { @@ -1175,6 +1222,10 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if (H5Dwrite_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, wbufs) < 0) TEST_ERROR; + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; + /* Restore wbuf from backup if the library modified it */ if (mwbuf) memcpy(total_wbuf, total_wbuf_bak, ndsets * DSET_SELECT_DIM * sizeof(int)); @@ -1183,6 +1234,10 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, ntrans_dxpl, rbufs) < 0) TEST_ERROR; + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; + /* Verify */ for (i = 0; i < (int)ndsets; i++) for (j = 0; j < DSET_SELECT_DIM; j++) @@ -1199,6 +1254,10 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) TEST_ERROR; + /* Verify selection I/O mode */ + if (check_actual_selection_io_mode(dxpl, chunked && !set_cache ? 
0 : H5D_SCALAR_IO) < 0) + TEST_ERROR; + /* Verify */ for (i = 0; i < (int)ndsets; i++) for (j = 0; j < DSET_SELECT_DIM; j++) @@ -1260,19 +1319,19 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m } if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(ntrans_dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { if (H5Sclose(file_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(mem_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dset_dids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } free(total_wbuf); @@ -1315,7 +1374,7 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if (total_lrbuf) free(total_lrbuf); if (total_trans_lwbuf) - free(total_lrbuf); + free(total_trans_lwbuf); return FAIL; @@ -1404,41 +1463,41 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; dims[0] = DSET_SELECT_DIM; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Create the memory data type */ if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Generate dataset name */ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_cmpd_dset%d_%s_%s", i, @@ -1447,7 +1506,7 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create ith dataset */ if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], s1_tid, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); @@ -1530,12 +1589,12 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create a compound type same size as s1_t */ if ((ss_ac_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* but contains only subset members of s1_t */ if (H5Tinsert(ss_ac_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(ss_ac_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Untouched memory and file spaces for other datasets */ for (i = 0; i < (int)ndsets; i++) { @@ -1603,18 +1662,18 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create a compound type same size as s1_t */ if ((ss_bc_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* but contains only subset members of s1_t */ if (H5Tinsert(ss_bc_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || 
H5Tinsert(ss_bc_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Reset memory and file space for dataset */ if (H5Sselect_all(mem_sids[mm]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sselect_all(file_sids[mm]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Untouched memory and file space for other datasets */ for (i = 0; i < (int)ndsets; i++) { @@ -1677,13 +1736,13 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * --1 smaller mem type */ if ((s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s2_tid, "a", HOFFSET(s2_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s2_tid, "b", HOFFSET(s2_t, b), H5T_NATIVE_LONG) < 0 || H5Tinsert(s2_tid, "c", HOFFSET(s2_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s2_tid, "d", HOFFSET(s2_t, d), H5T_NATIVE_SHORT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { s2_wbufi[i] = s2_total_wbuf + (i * DSET_SELECT_DIM); @@ -1735,17 +1794,17 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) } if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { if (H5Sclose(file_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(mem_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dset_dids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } free(total_wbuf); @@ -1845,34 +1904,34 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; dims[0] = DSET_SELECT_DIM; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Set up file space ids, mem space ids, and dataset ids */ for (i = 0; i < (int)ndsets; i++) { if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Generate dataset name */ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_size_dset%d_%s_%s", i, @@ -1881,7 +1940,7 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Create ith dataset */ if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_STD_I32BE, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Case a */ @@ -1891,11 +1950,11 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Allocate buffers for all datasets */ if (NULL == (total_wbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_wbuf_bak = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_rbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize buffer indices */ for (i = 0; i < (int)ndsets; i++) { @@ -1958,11 +2017,11 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Allocate buffers for all datasets */ if (NULL == (total_lwbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL 
== (total_lwbuf_bak = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_lrbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize buffer indices */ for (i = 0; i < (int)ndsets; i++) { @@ -2033,11 +2092,11 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Allocate buffers for all datasets */ if (NULL == (total_swbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_swbuf_bak = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_srbuf = (uint8_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize buffer indices */ for (i = 0; i < (int)ndsets; i++) { @@ -2088,17 +2147,17 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) } if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { if (H5Sclose(file_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(mem_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dset_dids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } free(total_wbuf); @@ -2278,66 +2337,66 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) /* Create dataset transfer property list */ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Enable selection I/O */ if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set modify write buffer if requested */ if (mwbuf) if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Set dataset layout: contiguous or chunked */ dims[0] = DSET_SELECT_DIM; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Create compound data type: s1_t */ if ((s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s1_tid, "d", HOFFSET(s1_t, d), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create compound data type: s3_t */ if ((s3_tid = H5Tcreate(H5T_COMPOUND, sizeof(s3_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s3_tid, "a", HOFFSET(s3_t, a), H5T_NATIVE_INT) < 0 || H5Tinsert(s3_tid, "b", HOFFSET(s3_t, b), H5T_NATIVE_INT) < 0 || H5Tinsert(s3_tid, "c", HOFFSET(s3_t, c), H5T_NATIVE_INT) < 0 || H5Tinsert(s3_tid, "d", HOFFSET(s3_t, d), H5T_NATIVE_INT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create compound data type: s4_t */ if ((s4_tid = H5Tcreate(H5T_COMPOUND, sizeof(s4_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(s4_tid, "b", HOFFSET(s4_t, b), H5T_NATIVE_UINT) < 0 || H5Tinsert(s4_tid, "d", HOFFSET(s4_t, d), H5T_NATIVE_UINT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create dataset for i ndsets */ for (i = 0; i < (int)ndsets; i++) { /* File space ids */ if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Memory space ids */ if ((mem_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; mm = HDrandom() % (int)ndsets; if (mm == 0) { @@ -2346,7 +2405,7 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) 
chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_INT, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } else if (mm == 1) { dset_types[i] = DSET_WITH_CONV_AND_NO_BKG; @@ -2354,7 +2413,7 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_LONG, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } else { dset_types[i] = DSET_WITH_CONV_AND_BKG; @@ -2362,7 +2421,7 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], s1_tid, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } } /* end for i ndsets */ @@ -2372,49 +2431,49 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) /* DSET_WITH_NO_CONV */ buf_size = ndsets * DSET_SELECT_DIM * sizeof(int); if (NULL == (total_wbuf1 = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (total_wbuf1_bak = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (NULL == (total_rbuf1 = (int *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* DSET_WITH_CONV_AND_NO_BKG */ buf_size = ndsets * DSET_SELECT_DIM * sizeof(unsigned long); if (NULL == (ul_total_wbuf2 = (unsigned long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (ul_total_wbuf2_bak = (unsigned long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); if (NULL == (l_total_rbuf2 = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(long); if (NULL == (l_total_wbuf2 = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (l_total_wbuf2_bak = (long *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(short); if (NULL == (s_total_rbuf2 = (short *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* DSET_WITH_CONV_AND_BKG */ buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); if (NULL == (s1_total_wbuf3 = (s1_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (s1_total_wbuf3_bak = (s1_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(s3_t); if (NULL == (s3_total_rbuf3 = (s3_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(s4_t); if (NULL == (s4_total_wbuf3 = (s4_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; if (mwbuf && NULL == (s4_total_wbuf3_bak = (s4_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; buf_size = ndsets * DSET_SELECT_DIM * sizeof(s1_t); if (NULL == (s1_total_rbuf3 = (s1_t *)malloc(buf_size))) - FAIL_STACK_ERROR; + TEST_ERROR; /* Test with s settings for ndsets */ for (s = SETTING_A; s <= SETTING_B; s++) { @@ -2622,26 +2681,26 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) /* Closing */ if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(s1_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(s3_tid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(s4_tid) < 0) - FAIL_STACK_ERROR; + 
TEST_ERROR; for (i = 0; i < (int)ndsets; i++) { if (H5Sclose(file_sids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dset_dids[i]) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Don't delete the last set of datasets */ if ((n + 1) != niter) if (H5Ldelete(fid, dset_names[i], H5P_DEFAULT) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } /* Freeing */ @@ -2756,7 +2815,7 @@ test_set_get_select_io_mode(const char *filename, hid_t fapl) TESTING("H5Pget/set_selection_io_mode()"); if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) TEST_ERROR; @@ -2788,16 +2847,16 @@ test_set_get_select_io_mode(const char *filename, hid_t fapl) /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((did = H5Dcreate2(fid, "test_chk_dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) @@ -2805,7 +2864,7 @@ test_set_get_select_io_mode(const char *filename, hid_t fapl) /* May change the selection io actually performed */ if (H5Dwrite(did, H5T_NATIVE_LONG, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pget_selection_io(dxpl, &selection_io_mode) < 0) TEST_ERROR; @@ -2815,15 +2874,15 @@ test_set_get_select_io_mode(const char *filename, hid_t fapl) TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Fclose(fid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; PASSED(); @@ -2882,34 +2941,31 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ } if ((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Enable page buffering to trigger H5D_PAGE_BUFFER */ if (test_mode & TEST_PAGE_BUFFER) { if (H5Pset_page_buffer_size(fapl, 4096, 0, 0) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } else { /* Not page buffer test, reset to default */ if (H5Pset_page_buffer_size(fapl, 0, 0, 0) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, 0, (hsize_t)1) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; - /* If default mode, 1st write will trigger cb, 2nd write will trigger sieve */ - /* If on mode, will trigger nothing because the on mode path is different */ - /* Need 2 writes */ if (test_mode & TEST_CONTIGUOUS_SIEVE_BUFFER) { no_selection_io_cause_write_expected |= H5D_SEL_IO_CONTIGUOUS_SIEVE_BUFFER; no_selection_io_cause_read_expected |= H5D_SEL_IO_CONTIGUOUS_SIEVE_BUFFER; @@ -2917,14 +2973,14 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ if (test_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET) { if (H5Pset_layout(dcpl, H5D_COMPACT) < 0) - FAIL_STACK_ERROR; + 
TEST_ERROR; no_selection_io_cause_write_expected |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; no_selection_io_cause_read_expected |= H5D_SEL_IO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; } if (test_mode == TEST_DATASET_FILTER) { if (H5Pset_deflate(dcpl, 9) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; is_chunked = true; no_selection_io_cause_write_expected |= H5D_SEL_IO_DATASET_FILTER; no_selection_io_cause_read_expected |= H5D_SEL_IO_DATASET_FILTER; @@ -2938,7 +2994,7 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ if (test_mode == TEST_DISABLE_BY_API) { if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_OFF) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; no_selection_io_cause_write_expected |= H5D_SEL_IO_DISABLE_BY_API; no_selection_io_cause_read_expected |= H5D_SEL_IO_DISABLE_BY_API; } @@ -2951,19 +3007,19 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ /* Datatype conversion */ if (test_mode & TEST_DATATYPE_CONVERSION) { if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; tid = H5T_NATIVE_UINT; /* If we're testing a too small tconv buffer, set the buffer to be too small */ if (test_mode & TEST_TCONV_BUF_TOO_SMALL) { if (H5Pset_buffer(dxpl, sizeof(int), NULL, NULL) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* If we're using in-place type conversion sel io will succeed and only switch to scalar at the * VFL */ if (test_mode & TEST_IN_PLACE_TCONV) { if (H5Pset_modify_write_buf(dxpl, true) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; no_selection_io_cause_write_expected |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; } else @@ -2987,28 +3043,28 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ /* Create 1d data space */ dims[0] = DSET_SELECT_DIM; if ((sid = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (is_chunked) { cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } if ((did = H5Dcreate2(fid, "no_selection_io_cause", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Initialize data */ for (i = 0; i < DSET_SELECT_DIM; i++) wbuf[i] = i; if (H5Dwrite(did, tid, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (test_mode & TEST_CONTIGUOUS_SIEVE_BUFFER) { if (H5Dwrite(did, tid, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } if (H5Pget_no_selection_io_cause(dxpl, &no_selection_io_cause_write) < 0) @@ -3023,11 +3079,11 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ test_mode & TEST_PAGE_BUFFER) { if (H5Dflush(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; } if (H5Dread(did, tid, H5S_ALL, H5S_ALL, dxpl, rbuf) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify causes of no selection I/O for write is as expected */ if (H5Pget_no_selection_io_cause(dxpl, &no_selection_io_cause_read) < 0) @@ -3038,20 +3094,20 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ TEST_ERROR; if (H5Dclose(did) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(sid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Fclose(fid) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pclose(fcpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; return SUCCEED; @@ -3085,13 +3141,13 @@ test_get_no_selection_io_cause(const char *filename, hid_t fapl) 
TESTING("H5Pget_no_selection_io_cause()"); if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pget_selection_io(dxpl, &selection_io_mode) < 0) TEST_ERROR; if (H5Pclose(dxpl) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* The following tests are based on H5D_SELECTION_IO_MODE_DEFAULT as the default setting in the library; skip the tests if that is not true */ @@ -3208,7 +3264,8 @@ main(void) case TEST_NO_TYPE_CONV: /* case 1 */ TESTING_2("No type conversion (null case)"); - nerrors += (test_no_type_conv(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + nerrors += + (test_no_type_conv(fid, set_cache, chunked, dtrans, mwbuf) < 0 ? 1 : 0); break; @@ -3219,7 +3276,9 @@ main(void) if (dtrans) SKIPPED(); else - nerrors += (test_no_size_change_no_bkg(fid, chunked, mwbuf) < 0 ? 1 : 0); + nerrors += + (test_no_size_change_no_bkg(fid, set_cache, chunked, mwbuf) < 0 ? 1 + : 0); break; @@ -3227,7 +3286,9 @@ main(void) TESTING_2("Larger memory type, no background buffer"); nerrors += - (test_larger_mem_type_no_bkg(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + (test_larger_mem_type_no_bkg(fid, set_cache, chunked, dtrans, mwbuf) < 0 + ? 1 + : 0); break; @@ -3235,7 +3296,9 @@ main(void) TESTING_2("Smaller memory type, no background buffer"); nerrors += - (test_smaller_mem_type_no_bkg(fid, chunked, dtrans, mwbuf) < 0 ? 1 : 0); + (test_smaller_mem_type_no_bkg(fid, set_cache, chunked, dtrans, mwbuf) < 0 + ? 1 + : 0); break; @@ -3253,7 +3316,7 @@ main(void) case TEST_MULTI_CONV_NO_BKG: /* case 6 */ TESTING_2("multi-datasets: type conv + no bkg buffer"); - nerrors += test_multi_dsets_no_bkg(fid, chunked, dtrans, mwbuf); + nerrors += test_multi_dsets_no_bkg(fid, set_cache, chunked, dtrans, mwbuf); break; diff --git a/test/testframe.c b/test/testframe.c index 2b650270f3a..5cb25ed8148 100644 --- a/test/testframe.c +++ b/test/testframe.c @@ -155,35 +155,37 @@ TestUsage(void) { unsigned i; - print_func("Usage: %s [-v[erbose] (l[ow]|m[edium]|h[igh]|0-9)] %s\n", TestProgName, - (TestPrivateUsage ? "" : "")); - print_func(" [-[e]x[clude] name]+ \n"); - print_func(" [-o[nly] name]+ \n"); - print_func(" [-b[egin] name] \n"); - print_func(" [-s[ummary]] \n"); - print_func(" [-c[leanoff]] \n"); - print_func(" [-h[elp]] \n"); - print_func("\n\n"); - print_func("verbose controls the amount of information displayed\n"); - print_func("exclude to exclude tests by name\n"); - print_func("only to name tests which should be run\n"); - print_func("begin start at the name of the test given\n"); - print_func("summary prints a summary of test results at the end\n"); - print_func("cleanoff does not delete *.hdf files after execution of tests\n"); - print_func("help print out this information\n"); - if (TestPrivateUsage) { - print_func("\nExtra options\n"); - TestPrivateUsage(); - } - print_func("\n\n"); - print_func("This program currently tests the following: \n\n"); - print_func("%16s %s\n", "Name", "Description"); - print_func("%16s %s\n", "----", "-----------"); + if (mpi_rank_framework_g == 0) { + print_func("Usage: %s [-v[erbose] (l[ow]|m[edium]|h[igh]|0-9)] %s\n", TestProgName, + (TestPrivateUsage ? 
"" : "")); + print_func(" [-[e]x[clude] name]+ \n"); + print_func(" [-o[nly] name]+ \n"); + print_func(" [-b[egin] name] \n"); + print_func(" [-s[ummary]] \n"); + print_func(" [-c[leanoff]] \n"); + print_func(" [-h[elp]] \n"); + print_func("\n\n"); + print_func("verbose controls the amount of information displayed\n"); + print_func("exclude to exclude tests by name\n"); + print_func("only to name tests which should be run\n"); + print_func("begin start at the name of the test given\n"); + print_func("summary prints a summary of test results at the end\n"); + print_func("cleanoff does not delete *.hdf files after execution of tests\n"); + print_func("help print out this information\n"); + if (TestPrivateUsage) { + print_func("\nExtra options\n"); + TestPrivateUsage(); + } + print_func("\n\n"); + print_func("This program currently tests the following: \n\n"); + print_func("%16s %s\n", "Name", "Description"); + print_func("%16s %s\n", "----", "-----------"); - for (i = 0; i < Index; i++) - print_func("%16s %s\n", Test[i].Name, Test[i].Description); + for (i = 0; i < Index; i++) + print_func("%16s %s\n", Test[i].Name, Test[i].Description); - print_func("\n\n"); + print_func("\n\n"); + } } /* @@ -192,12 +194,14 @@ TestUsage(void) void TestInfo(const char *ProgName) { - unsigned major, minor, release; + if (mpi_rank_framework_g == 0) { + unsigned major, minor, release; - H5get_libversion(&major, &minor, &release); + H5get_libversion(&major, &minor, &release); - print_func("\nFor help use: %s -help\n", ProgName); - print_func("Linked with hdf5 version %u.%u release %u\n", major, minor, release); + print_func("\nFor help use: %s -help\n", ProgName); + print_func("Linked with hdf5 version %u.%u release %u\n", major, minor, release); + } } /* @@ -301,20 +305,24 @@ PerformTests(void) for (Loop = 0; Loop < Index; Loop++) if (Test[Loop].SkipFlag) { - MESSAGE(2, ("Skipping -- %s (%s) \n", Test[Loop].Description, Test[Loop].Name)); + if (mpi_rank_framework_g == 0) + MESSAGE(2, ("Skipping -- %s (%s) \n", Test[Loop].Description, Test[Loop].Name)); } else { if (mpi_rank_framework_g == 0) MESSAGE(2, ("Testing -- %s (%s) \n", Test[Loop].Description, Test[Loop].Name)); - MESSAGE(5, ("===============================================\n")); + if (mpi_rank_framework_g == 0) + MESSAGE(5, ("===============================================\n")); Test[Loop].NumErrors = num_errs; Test_parameters = Test[Loop].Parameters; TestAlarmOn(); Test[Loop].Call(); TestAlarmOff(); Test[Loop].NumErrors = num_errs - Test[Loop].NumErrors; - MESSAGE(5, ("===============================================\n")); - MESSAGE(5, ("There were %d errors detected.\n\n", (int)Test[Loop].NumErrors)); + if (mpi_rank_framework_g == 0) { + MESSAGE(5, ("===============================================\n")); + MESSAGE(5, ("There were %d errors detected.\n\n", (int)Test[Loop].NumErrors)); + } } Test_parameters = NULL; /* clear it. 
*/ @@ -358,7 +366,8 @@ TestCleanup(void) { unsigned Loop; - MESSAGE(2, ("\nCleaning Up temp files...\n\n")); + if (mpi_rank_framework_g == 0) + MESSAGE(2, ("\nCleaning Up temp files...\n\n")); /* call individual cleanup routines in each source module */ for (Loop = 0; Loop < Index; Loop++) @@ -619,7 +628,8 @@ SetTest(const char *testname, int action) break; default: /* error */ - printf("*** ERROR: Unknown action (%d) for SetTest\n", action); + if (mpi_rank_framework_g == 0) + printf("*** ERROR: Unknown action (%d) for SetTest\n", action); break; } } diff --git a/test/tfile.c b/test/tfile.c index 1c5196acf60..24cc7ce000e 100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -138,9 +138,15 @@ #define NGROUPS 2 #define NDSETS 4 -/* Declaration for test_incr_filesize() */ +/* Declaration for libver bounds tests */ #define FILE8 "tfile8.h5" /* Test file */ +/* Declaration for test_file_double_file_dataset_open() */ +#define FILE_DOUBLE_OPEN "tfile_double_open" + +/* Declaration for test_incr_filesize() */ +#define FILE_INCR_FILESIZE "tfile_incr_filesize" + /* Files created under 1.6 branch and 1.8 branch--used in test_filespace_compatible() */ static const char *OLD_FILENAME[] = { "filespace_1_6.h5", /* 1.6 HDF5 file */ @@ -2623,8 +2629,8 @@ test_file_double_file_dataset_open(bool new_format) if (new_format) { ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); CHECK(ret, FAIL, "H5Pset_libver_bounds"); - } /* end if */ - h5_fixname(FILE1, fapl, filename, sizeof filename); + } + h5_fixname(FILE_DOUBLE_OPEN, fapl, filename, sizeof filename); /* Create the test file */ fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); @@ -2934,6 +2940,9 @@ test_file_double_file_dataset_open(bool new_format) ret = H5Tclose(tid1); CHECK(ret, FAIL, "H5Tclose"); + /* Delete the test file */ + h5_delete_test_file(filename, fapl); + /* Close FAPL */ ret = H5Pclose(fapl); CHECK(ret, FAIL, "H5Pclose"); @@ -7650,7 +7659,7 @@ test_incr_filesize(void) MESSAGE(5, ("Testing H5Fincrement_filesize() and H5Fget_eoa())\n")); fapl = h5_fileaccess(); - h5_fixname(FILE8, fapl, filename, sizeof filename); + h5_fixname(FILE_INCR_FILESIZE, fapl, filename, sizeof filename); /* Get the VFD feature flags */ driver_id = H5Pget_driver(fapl); @@ -7735,6 +7744,9 @@ test_incr_filesize(void) /* Verify the filesize is the previous stored_eoa + 512 */ VERIFY(filesize, stored_eoa + 512, "file size"); + /* Delete the test file */ + h5_delete_test_file(FILE_INCR_FILESIZE, fapl); + /* Close the file access property list */ ret = H5Pclose(fapl); CHECK(ret, FAIL, "H5Pclose"); @@ -8225,6 +8237,7 @@ cleanup_file(void) H5Fdelete(FILE5, H5P_DEFAULT); H5Fdelete(FILE6, H5P_DEFAULT); H5Fdelete(FILE7, H5P_DEFAULT); + H5Fdelete(FILE8, H5P_DEFAULT); H5Fdelete(DST_FILE, H5P_DEFAULT); } H5E_END_TRY diff --git a/test/tselect.c b/test/tselect.c index 55599b3324e..55430f24ad5 100644 --- a/test/tselect.c +++ b/test/tselect.c @@ -1875,8 +1875,8 @@ test_select_hyper_contig3(hid_t dset_type, hid_t xfer_plist) ** ****************************************************************/ static void -verify_select_hyper_contig_dr__run_test(const uint16_t *cube_buf, size_t H5_ATTR_NDEBUG_UNUSED cube_size, - unsigned edge_size, unsigned cube_rank) +verify_select_hyper_contig_dr__run_test(const uint16_t *cube_buf, size_t cube_size, unsigned edge_size, + unsigned cube_rank) { const uint16_t *cube_ptr; /* Pointer into the cube buffer */ uint16_t expected_value; /* Expected value in dataset */ @@ -1902,7 +1902,9 @@ verify_select_hyper_contig_dr__run_test(const 
uint16_t *cube_buf, size_t H5_ATTR m = 0; do { /* Sanity check */ - assert(s < cube_size); + if (s >= cube_size) + TestErrPrintf("s should not be >= cube_size! s = %zu, cube_size = %zu\n", s, + cube_size); /* Check for correct value */ if (*cube_ptr != expected_value) diff --git a/testpar/Makefile.am b/testpar/Makefile.am index 59d47e15ebf..4a8cb826f49 100644 --- a/testpar/Makefile.am +++ b/testpar/Makefile.am @@ -58,6 +58,7 @@ LDADD = $(LIBH5TEST) $(LIBHDF5) # after_mpi_fin.h5 is from t_init_term # go is used for debugging. See testphdf5.c. CHECK_CLEANFILES+=MPItest.h5 Para*.h5 bigio_test.h5 CacheTestDummy.h5 \ - ShapeSameTest.h5 shutdown.h5 pmulti_dset.h5 after_mpi_fin.h5 go + ShapeSameTest.h5 shutdown.h5 pmulti_dset.h5 after_mpi_fin.h5 go noflush.h5 \ + mpio_select_test_file.h5 *.btr include $(top_srcdir)/config/conclude.am diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c index c2aac771b29..48abf8ed9dd 100644 --- a/testpar/t_2Gio.c +++ b/testpar/t_2Gio.c @@ -4291,9 +4291,10 @@ main(int argc, char **argv) printf("2 GByte IO TESTS START\n"); printf("2 MPI ranks will run the tests...\n"); printf("===================================\n"); - h5_show_hostname(); } + h5_show_hostname(); + if (H5dont_atexit() < 0) { printf("Failed to turn off atexit processing. Continue.\n"); }; @@ -4345,8 +4346,7 @@ main(int argc, char **argv) #endif /* H5_HAVE_FILTER_DEFLATE */ /* Display testing information */ - if (MAINPROCESS) - TestInfo(argv[0]); + TestInfo(argv[0]); /* setup file access property list */ fapl = H5Pcreate(H5P_FILE_ACCESS); diff --git a/testpar/t_coll_md.c b/testpar/t_coll_md.c index 1220111a56d..9c6fc7120cf 100644 --- a/testpar/t_coll_md.c +++ b/testpar/t_coll_md.c @@ -43,6 +43,11 @@ #define COLL_GHEAP_WRITE_ATTR_NAME "coll_gheap_write_attr" #define COLL_GHEAP_WRITE_ATTR_DIMS 1 +#define COLL_IO_IND_MD_WRITE_NDIMS 2 +#define COLL_IO_IND_MD_WRITE_CHUNK0 4 +#define COLL_IO_IND_MD_WRITE_CHUNK1 256 +#define COLL_IO_IND_MD_WRITE_NCHUNK1 16384 + /* * A test for issue HDFFV-10501. A parallel hang was reported which occurred * in linked-chunk I/O when collective metadata reads are enabled and some ranks @@ -569,3 +574,101 @@ test_collective_global_heap_write(void) VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); } + +/* + * A test to ensure that hangs don't occur when collective I/O + * is requested at the interface level (by a call to + * H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)), while + * collective metadata writes are NOT requested. 
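+ *
+ * In sketch form, the property-list combination this test exercises is roughly the
+ * following (a rough illustration only, not the exact setup used below, which goes
+ * through create_faccess_plist()):
+ *
+ *     fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ *     H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);
+ *     H5Pset_coll_metadata_write(fapl_id, false);      <-- metadata writes stay independent
+ *     dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ *     H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); <-- raw data transfers are collective
+ *
+ * The H5Dwrite() calls below then push chunked, filtered datasets through this
+ * combination, which is the scenario the test guards against hanging.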
+ */ +void +test_coll_io_ind_md_write(void) +{ + const char *filename; + long long *data = NULL; + hsize_t dset_dims[COLL_IO_IND_MD_WRITE_NDIMS]; + hsize_t chunk_dims[COLL_IO_IND_MD_WRITE_NDIMS]; + hsize_t sel_dims[COLL_IO_IND_MD_WRITE_NDIMS]; + hsize_t offset[COLL_IO_IND_MD_WRITE_NDIMS]; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dset_id2 = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + int mpi_rank, mpi_size; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + filename = GetTestParameters(); + + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + + VRFY((H5Pset_all_coll_metadata_ops(fapl_id, false) >= 0), "Unset collective metadata reads succeeded"); + VRFY((H5Pset_coll_metadata_write(fapl_id, false) >= 0), "Unset collective metadata writes succeeded"); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + dset_dims[0] = (hsize_t)(mpi_size * COLL_IO_IND_MD_WRITE_CHUNK0); + dset_dims[1] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK1 * COLL_IO_IND_MD_WRITE_NCHUNK1); + + fspace_id = H5Screate_simple(COLL_IO_IND_MD_WRITE_NDIMS, dset_dims, NULL); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); + + chunk_dims[0] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK0); + chunk_dims[1] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK1); + + VRFY((H5Pset_chunk(dcpl_id, COLL_IO_IND_MD_WRITE_NDIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded"); + + VRFY((H5Pset_shuffle(dcpl_id) >= 0), "H5Pset_shuffle succeeded"); + + dset_id = H5Dcreate2(file_id, "dset1", H5T_NATIVE_LLONG, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); + + sel_dims[0] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK0); + sel_dims[1] = (hsize_t)(COLL_IO_IND_MD_WRITE_CHUNK1 * COLL_IO_IND_MD_WRITE_NCHUNK1); + + offset[0] = (hsize_t)mpi_rank * sel_dims[0]; + offset[1] = 0; + + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, offset, NULL, sel_dims, NULL) >= 0), + "H5Sselect_hyperslab succeeded"); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); + + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); + + data = malloc(sel_dims[0] * sel_dims[1] * sizeof(long long)); + for (size_t i = 0; i < sel_dims[0] * sel_dims[1]; i++) + data[i] = rand(); + + VRFY((H5Dwrite(dset_id, H5T_NATIVE_LLONG, H5S_BLOCK, fspace_id, dxpl_id, data) >= 0), + "H5Dwrite succeeded"); + + dset_id2 = H5Dcreate2(file_id, "dset2", H5T_NATIVE_LLONG, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + VRFY((dset_id2 >= 0), "H5Dcreate2 succeeded"); + + for (size_t i = 0; i < sel_dims[0] * sel_dims[1]; i++) + data[i] = rand(); + + VRFY((H5Dwrite(dset_id2, H5T_NATIVE_LLONG, H5S_BLOCK, fspace_id, dxpl_id, data) >= 0), + "H5Dwrite succeeded"); + + free(data); + + VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); + VRFY((H5Dclose(dset_id2) >= 0), "H5Dclose succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); +} diff 
--git a/testpar/t_file.c b/testpar/t_file.c index a6a541becf3..8f8b2914a70 100644 --- a/testpar/t_file.c +++ b/testpar/t_file.c @@ -1060,3 +1060,164 @@ test_invalid_libver_bounds_file_close_assert(void) ret = H5Pclose(fcpl_id); VRFY((SUCCEED == ret), "H5Pclose"); } + +/* + * Tests that H5Pevict_on_close properly succeeds in serial/one rank and fails when + * called by multiple ranks. + */ +void +test_evict_on_close_parallel_unsupp(void) +{ + const char *filename = NULL; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + hid_t fid = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + herr_t ret; + + filename = (const char *)GetTestParameters(); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* setup file access plist */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate"); + ret = H5Pset_libver_bounds(fapl_id, H5F_LIBVER_EARLIEST, H5F_LIBVER_V18); + VRFY((SUCCEED == ret), "H5Pset_libver_bounds"); + + ret = H5Pset_evict_on_close(fapl_id, true); + VRFY((SUCCEED == ret), "H5Pset_evict_on_close"); + + /* test on 1 rank */ + ret = H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, info); + VRFY((SUCCEED == ret), "H5Pset_fapl_mpio"); + + if (mpi_rank == 0) { + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((fid != H5I_INVALID_HID), "H5Fcreate"); + ret = H5Fclose(fid); + VRFY((SUCCEED == ret), "H5Fclose"); + } + + VRFY((MPI_SUCCESS == MPI_Barrier(MPI_COMM_WORLD)), "MPI_Barrier"); + + /* test on multiple ranks if we have them */ + if (mpi_size > 1) { + ret = H5Pset_fapl_mpio(fapl_id, comm, info); + VRFY((SUCCEED == ret), "H5Pset_fapl_mpio"); + + H5E_BEGIN_TRY + { + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + } + H5E_END_TRY + VRFY((fid == H5I_INVALID_HID), "H5Fcreate"); + } + + ret = H5Pclose(fapl_id); + VRFY((SUCCEED == ret), "H5Pclose"); +} + +/* + * Verify that MPI I/O hints are preserved after closing the file access property list, + * as described in issue #3025. + * This is a test program from the user.
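+ *
+ * In outline, the round trip verified below is:
+ *
+ *     MPI_Info_create(&info);
+ *     MPI_Info_set(info, "hdf_info_fapl", "xyz");
+ *     H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, info);
+ *     fid = H5Fcreate(...);
+ *     H5Pclose(fapl_id);                           <-- the hint must survive this close
+ *     fapl_id = H5Fget_access_plist(fid);
+ *     H5Pget_fapl_mpio(fapl_id, NULL, &info_used);
+ *
+ * after which iterating info_used with MPI_Info_get_nthkey()/MPI_Info_get() should
+ * still find the "hdf_info_fapl" = "xyz" pair.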
+ */ +void +test_fapl_preserve_hints(void) +{ + const char *filename; + const char *key = "hdf_info_fapl"; + const char *value = "xyz"; + MPI_Info info_used = MPI_INFO_NULL; + MPI_Info info = MPI_INFO_NULL; + hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ + hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ + char key_used[MPI_MAX_INFO_KEY + 1]; + char *value_used = NULL; + bool same = false; + int flag = -1; + int nkeys_used; + int i; + int mpi_ret; /* MPI return value */ + herr_t ret; /* Generic return value */ + + filename = (const char *)GetTestParameters(); + + value_used = malloc(MPI_MAX_INFO_VAL + 1); + VRFY(value_used, "malloc succeeded"); + + /* set up MPI parameters */ + mpi_ret = MPI_Info_create(&info); + VRFY((mpi_ret >= 0), "MPI_Info_create succeeded"); + + mpi_ret = MPI_Info_set(info, key, value); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set succeeded"); + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate"); + + ret = H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, info); + VRFY((ret >= 0), "H5Pset_fapl_mpio"); + + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded"); + + ret = H5Pclose(fapl_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + + fapl_id = H5Fget_access_plist(fid); + VRFY((fapl_id != H5I_INVALID_HID), "H5Fget_access_plist succeeded"); + + ret = H5Pget_fapl_mpio(fapl_id, NULL, &info_used); + VRFY((ret >= 0), "H5Pget_fapl_mpio succeeded"); + + VRFY((info_used != MPI_INFO_NULL), "H5Pget_fapl_mpio"); + + mpi_ret = MPI_Info_get_nkeys(info_used, &nkeys_used); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_get_nkeys succeeded"); + + /* Loop over the # of keys */ + for (i = 0; i < nkeys_used; i++) { + + /* Memset the buffers to zero */ + memset(key_used, 0, MPI_MAX_INFO_KEY + 1); + memset(value_used, 0, MPI_MAX_INFO_VAL + 1); + + /* Get the nth key */ + mpi_ret = MPI_Info_get_nthkey(info_used, i, key_used); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_get_nthkey succeeded"); + + if (!strcmp(key_used, key)) { + mpi_ret = MPI_Info_get(info_used, key_used, MPI_MAX_INFO_VAL, value_used, &flag); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_get succeeded"); + + if (!strcmp(value_used, value)) { + + /* Both key_used and value_used are the same */ + same = true; + break; + } + } + } /* end for */ + + VRFY((same == true), "key_used and value_used are the same"); + + ret = H5Pclose(fapl_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + + /* Free the MPI info object */ + mpi_ret = MPI_Info_free(&info); + VRFY((mpi_ret >= 0), "MPI_Info_free succeeded"); + + mpi_ret = MPI_Info_free(&info_used); + VRFY((mpi_ret >= 0), "MPI_Info_free succeeded"); + + free(value_used); + +} /* end test_fapl_preserve_hints() */ diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c index 0f08be9344d..6c054085ed7 100644 --- a/testpar/t_filters_parallel.c +++ b/testpar/t_filters_parallel.c @@ -69,10 +69,11 @@ typedef enum num_chunks_written_t { typedef void (*test_func)(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode); -static herr_t set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options); -static void verify_space_alloc_status(size_t num_dsets, hid_t *dset_ids, hid_t dcpl_id, - num_chunks_written_t chunks_written); -static void verify_chunk_opt_status(size_t num_dsets, hid_t dxpl_id); +static herr_t set_dcpl_filter(hid_t 
dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options); +static void verify_space_alloc_status(size_t num_dsets, hid_t *dset_ids, hid_t dcpl_id, + num_chunks_written_t chunks_written); +static void verify_chunk_opt_status(size_t num_dsets, test_mode_t test_mode, bool any_io, bool any_filters, + bool collective, bool unalloc_read, bool did_alloc, hid_t dxpl_id); static const char *test_mode_to_string(test_mode_t test_mode); static void create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t filespace_id, @@ -80,9 +81,11 @@ static void create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t ty static void open_datasets(hid_t parent_obj_id, const char *dset_name, size_t num_dsets, test_mode_t test_mode, hid_t *dset_ids); static void write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, - hid_t *fspace_ids, hid_t dxpl_id, const void **bufs, test_mode_t test_mode); + hid_t *fspace_ids, hid_t dcpl_id, hid_t dxpl_id, const void **bufs, + test_mode_t test_mode, bool any_io, bool collective, bool overwrite); static void read_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t fspace_id, - hid_t dxpl_id, void **bufs, test_mode_t test_mode); + hid_t dcpl_id, hid_t dxpl_id, void **bufs, test_mode_t test_mode, bool any_io, + bool collective, bool all_uninit_read); static void select_hyperslab(size_t num_dsets, hid_t *dset_ids, hsize_t *start, hsize_t *stride, hsize_t *count, hsize_t *block, hid_t *fspace_ids); @@ -471,11 +474,15 @@ verify_space_alloc_status(size_t num_dsets, hid_t *dset_ids, hid_t dcpl_id, * I/O was performed. */ static void -verify_chunk_opt_status(size_t num_dsets, hid_t dxpl_id) +verify_chunk_opt_status(size_t num_dsets, test_mode_t test_mode, bool any_io, bool any_filters, + bool collective, bool unalloc_read, bool did_alloc, hid_t dxpl_id) { H5D_mpio_actual_chunk_opt_mode_t chunk_opt_mode; H5D_selection_io_mode_t sel_io_mode; + uint32_t actual_sel_io_mode; + uint32_t actual_sel_io_mode_reduced; uint32_t no_sel_io_cause = 0; + int mpi_code; herr_t ret; if (H5P_DEFAULT != dxpl_id) { @@ -528,6 +535,95 @@ verify_chunk_opt_status(size_t num_dsets, hid_t dxpl_id) "verified I/O optimization was linked-chunk I/O"); } } + + /* Verify actual selection I/O mode */ + ret = H5Pget_actual_selection_io_mode(dxpl_id, &actual_sel_io_mode); + VRFY((ret >= 0), "H5Pget_actual_selection_io_mode succeeded"); + + /* Reduce results to process 0 (bitwise OR so we get all I/O types) */ + mpi_code = + MPI_Reduce(&actual_sel_io_mode, &actual_sel_io_mode_reduced, 1, MPI_UINT32_T, MPI_BOR, 0, comm); + VRFY((MPI_SUCCESS == mpi_code), "MPI_Reduce succeeded"); + + /* Verify selection I/O mode on rank 0 */ + if (mpi_rank == 0) { + /* No actual I/O performed, only reported I/O will be from allocation, even if "no" datasets were + * involved (num_dsets == 0 implies the call was expected to fail, but it fails after allocation). + * Also if the test mode is mixed filtered and unfiltered and the call did not fail, then there + * will always be an I/O callback made with raw data. This is because unfiltered datasets fall + * back to scalar I/O when mixed with filtered, and scalar I/O reports an I/O call was made even + * with a size of 0 bytes, while vector I/O does not report I/O was made if passed 0 vector + * elements (because no elements were raw data), which is what happens when performing I/O on a + * filtered dataset with no selection. 
Vector I/O does report an I/O call was made if passed a raw + data element of size 0, so this is consistent. */ + if (!any_io) { + if (did_alloc || (num_dsets > 0 && test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + VRFY(H5D_SCALAR_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar I/O"); + else + VRFY(0 == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was 0 (no I/O)"); + } + /* No filters, library should have used selection I/O if enabled, scalar I/O otherwise */ + else if (!any_filters) { + assert(!unalloc_read && !did_alloc); + if (sel_io_mode == H5D_SELECTION_IO_MODE_DEFAULT || sel_io_mode == H5D_SELECTION_IO_MODE_ON) + VRFY(H5D_SELECTION_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was selection I/O"); + else + VRFY(H5D_SCALAR_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar I/O"); + } + /* Independent I/O, library should have done no I/O if reading from unallocated datasets, scalar + * I/O otherwise, since filtered I/O is only supported with scalar I/O in independent/serial */ + else if (!collective) { + if (unalloc_read) + VRFY(0 == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was 0 (no I/O)"); + else + VRFY(H5D_SCALAR_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar I/O"); + } + else + switch (test_mode) { + case USE_SINGLE_DATASET: + case USE_MULTIPLE_DATASETS: + /* Collective case with only filtered datasets. If we performed allocation then there + * should be scalar I/O for allocation in addition to vector I/O for the actual data. + * If we're reading from an unallocated dataset then there should be no actual I/O. + * Otherwise there should only be vector I/O. */ + if (did_alloc) + VRFY((H5D_SCALAR_IO | H5D_VECTOR_IO) == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar and vector I/O"); + else if (unalloc_read) + VRFY(0 == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was 0 (no I/O)"); + else + VRFY(H5D_VECTOR_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was vector I/O"); + break; + + case USE_MULTIPLE_DATASETS_MIXED_FILTERED: + /* Collective case with mixed filtered and unfiltered datasets. If we're reading from + * unallocated datasets then there should be scalar I/O from reading the unfiltered + * datasets, since they are always allocated in parallel. Otherwise there should be + * vector I/O from the filtered datasets and scalar I/O from the unfiltered datasets.
+ */ + if (unalloc_read) + VRFY(H5D_SCALAR_IO == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar I/O"); + else + VRFY((H5D_SCALAR_IO | H5D_VECTOR_IO) == actual_sel_io_mode_reduced, + "verified actual selection I/O mode was scalar and vector I/O"); + break; + + case TEST_MODE_SENTINEL: + default: + printf("Invalid test mode\n"); + fflush(stdout); + MPI_Abort(MPI_COMM_WORLD, -1); + } + } } } @@ -576,11 +672,21 @@ create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t case USE_MULTIPLE_DATASETS: case USE_MULTIPLE_DATASETS_MIXED_FILTERED: dset_name_ptr = dset_name_multi_buf; - n_dsets = (rand() % (MAX_NUM_DSETS_MULTI - 1)) + 2; + + if (MAINPROCESS) + n_dsets = (rand() % (MAX_NUM_DSETS_MULTI - 1)) + 2; + + if (mpi_size > 1) + VRFY((MPI_SUCCESS == MPI_Bcast(&n_dsets, 1, MPI_INT, 0, comm)), "MPI_Bcast succeeded"); /* Select between 1 and (n_dsets - 1) datasets to be unfiltered */ if (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) { - n_unfiltered = (rand() % (n_dsets - 1)) + 1; + if (MAINPROCESS) + n_unfiltered = (rand() % (n_dsets - 1)) + 1; + + if (mpi_size > 1) + VRFY((MPI_SUCCESS == MPI_Bcast(&n_unfiltered, 1, MPI_INT, 0, comm)), + "MPI_Bcast succeeded"); unfiltered_dcpl = H5Pcopy(dcpl_id); VRFY((unfiltered_dcpl >= 0), "H5Pcopy succeeded"); @@ -621,7 +727,11 @@ create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t * remaining datasets as unfiltered datasets. Otherwise, * randomly determine if a dataset will be unfiltered. */ - unfiltered = ((size_t)n_unfiltered == dsets_left) || ((rand() % 2) == 0); + if (MAINPROCESS) + unfiltered = ((size_t)n_unfiltered == dsets_left) || ((rand() % 2) == 0); + + if (mpi_size > 1) + VRFY((MPI_SUCCESS == MPI_Bcast(&unfiltered, 1, MPI_C_BOOL, 0, comm)), "MPI_Bcast succeeded"); if (unfiltered) { curr_dcpl = unfiltered_dcpl; @@ -693,10 +803,12 @@ open_datasets(hid_t parent_obj_id, const char *dset_name, size_t num_dsets, test */ static void write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t *fspace_ids, - hid_t dxpl_id, const void **bufs, test_mode_t test_mode) + hid_t dcpl_id, hid_t dxpl_id, const void **bufs, test_mode_t test_mode, bool any_io, + bool collective, bool overwrite) { - hid_t mem_type_ids[MAX_NUM_DSETS_MULTI]; - hid_t mem_space_ids[MAX_NUM_DSETS_MULTI]; + hid_t mem_type_ids[MAX_NUM_DSETS_MULTI]; + hid_t mem_space_ids[MAX_NUM_DSETS_MULTI]; + H5D_alloc_time_t alloc_time = H5D_ALLOC_TIME_DEFAULT; for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { mem_type_ids[dset_idx] = type_id; @@ -724,7 +836,11 @@ write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id MPI_Abort(MPI_COMM_WORLD, -1); } - verify_chunk_opt_status(num_dsets, dxpl_id); + if (!overwrite) + VRFY(H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0, "H5Pget_alloc_time succeeded"); + + verify_chunk_opt_status(num_dsets, test_mode, any_io, true, collective, false, + !overwrite && (alloc_time == H5D_ALLOC_TIME_LATE), dxpl_id); } /* @@ -733,11 +849,13 @@ write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id */ static void read_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t fspace_id, - hid_t dxpl_id, void **bufs, test_mode_t test_mode) + hid_t dcpl_id, hid_t dxpl_id, void **bufs, test_mode_t test_mode, bool any_io, bool collective, + bool all_uninit_read) { - hid_t mem_type_ids[MAX_NUM_DSETS_MULTI]; - hid_t mem_space_ids[MAX_NUM_DSETS_MULTI]; - hid_t 
file_space_ids[MAX_NUM_DSETS_MULTI]; + hid_t mem_type_ids[MAX_NUM_DSETS_MULTI]; + hid_t mem_space_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_space_ids[MAX_NUM_DSETS_MULTI]; + H5D_alloc_time_t alloc_time = H5D_ALLOC_TIME_DEFAULT; for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { mem_type_ids[dset_idx] = type_id; @@ -766,7 +884,13 @@ read_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, MPI_Abort(MPI_COMM_WORLD, -1); } - verify_chunk_opt_status(num_dsets, dxpl_id); + if (all_uninit_read) + VRFY(H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0, "H5Pget_alloc_time succeeded"); + + verify_chunk_opt_status(num_dsets, test_mode, any_io, true, collective, + all_uninit_read && + (alloc_time == H5D_ALLOC_TIME_INCR || alloc_time == H5D_ALLOC_TIME_LATE), + false, dxpl_id); } static void @@ -940,8 +1064,8 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -973,7 +1097,8 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil (C_DATATYPE)dset_idx; } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -1094,8 +1219,8 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -1124,7 +1249,8 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -1201,6 +1327,12 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); + /* + * Since we're only doing a partial write to the dataset, make + * sure the fill time is set appropriately + */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); @@ -1245,8 +1377,8 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, 
dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -1283,7 +1415,8 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil } } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -1404,8 +1537,8 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -1436,7 +1569,8 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -1485,6 +1619,7 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group hsize_t block[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; size_t data_size; size_t num_dsets; + size_t num_loops; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; @@ -1551,7 +1686,12 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group read_bufs[dset_idx] = tmp_buf; } - for (size_t i = 0; i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; i++) { + /* Determine number of loops to run through */ + num_loops = WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; + if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + num_loops /= 2; + + for (size_t i = 0; i < num_loops; i++) { /* Each process defines the dataset selection in memory and writes * it to the hyperslab in the file */ @@ -1567,8 +1707,8 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, i > 0); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -1582,15 +1722,15 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, 
HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Verify the correct data was written */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), "Data verification succeeded"); - if (i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS - 1) { + if (i < num_loops - 1) { /* Extend the dataset(s) by count[1] chunks in the extensible dimension */ dataset_dims[1] += count[1] * block[1]; @@ -1646,6 +1786,7 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H hsize_t block[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; size_t data_size; size_t num_dsets; + size_t num_loops; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; @@ -1712,7 +1853,12 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H read_bufs[dset_idx] = tmp_buf; } - for (size_t i = 0; i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; i++) { + /* Determine number of loops to run through */ + num_loops = WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; + if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + num_loops /= 2; + + for (size_t i = 0; i < num_loops; i++) { /* Each process defines the dataset selection in memory and writes * it to the hyperslab in the file */ @@ -1727,8 +1873,8 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, i > 0); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -1742,15 +1888,15 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Verify the correct data was written */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), "Data verification succeeded"); - if (i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS - 1) { + if (i < num_loops - 1) { /* Extend the dataset(s) by count[1] chunks in the extensible dimension */ dataset_dims[1] += count[1] * block[1]; @@ -1808,6 +1954,7 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, hsize_t block[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; size_t data_size; size_t num_dsets; + size_t num_loops; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; @@ -1855,7 +2002,12 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - for (size_t i = 0; i < 
(size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; i++) { + /* Determine number of loops to run through */ + num_loops = WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; + if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + num_loops /= 2; + + for (size_t i = 0; i < num_loops; i++) { /* Set selected dimensions */ sel_dims[0] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS; sel_dims[1] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS; @@ -1893,8 +2045,8 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, i > 0); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -1908,15 +2060,15 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Verify the correct data was written */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), "Data verification succeeded"); - if (i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS - 1) { + if (i < num_loops - 1) { /* * Extend the dataset(s) by the size of one chunk per rank * in the first extensible dimension. 
Extend the dataset(s) @@ -1977,6 +2129,7 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 hsize_t block[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; size_t data_size; size_t num_dsets; + size_t num_loops; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; @@ -2024,7 +2177,12 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); - for (size_t i = 0; i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; i++) { + /* Determine number of loops to run through */ + num_loops = WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; + if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + num_loops /= 2; + + for (size_t i = 0; i < num_loops; i++) { /* Set selected dimensions */ sel_dims[0] = (i + 1); sel_dims[1] = (i + 1) * (size_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS; @@ -2062,8 +2220,8 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, i > 0); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -2077,15 +2235,15 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5 for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Verify the correct data was written */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), "Data verification succeeded"); - if (i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS - 1) { + if (i < num_loops - 1) { /* Extend the dataset(s) by the size of a chunk in each extensible dimension */ dataset_dims[0] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS; dataset_dims[1] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS; @@ -2131,28 +2289,31 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; - const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; - void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ - void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; - hsize_t dataset_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t chunk_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t sel_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t start[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t stride[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t count[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - size_t data_size, correct_buf_size; - size_t num_dsets; - hid_t 
dset_ids[MAX_NUM_DSETS_MULTI]; - hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; - hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID; + H5D_alloc_time_t alloc_time; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + hsize_t dataset_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to filtered chunks with a single process having no selection"); + VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded"); + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -2177,6 +2338,12 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); + /* + * Since we're only doing a partial write to the dataset, make + * sure the fill time is set appropriately + */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + VRFY((H5Pset_chunk(plist_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); @@ -2226,8 +2393,8 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi } } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, mpi_size > 1, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -2267,7 +2434,8 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi } } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, mpi_size == 1 && alloc_time == H5D_ALLOC_TIME_INCR); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -2305,23 +2473,26 @@ static void test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode) { - C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; - const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; - void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ - void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; - 
hsize_t dataset_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - size_t data_size, correct_buf_size; - size_t num_dsets; - hid_t dset_ids[MAX_NUM_DSETS_MULTI]; - hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; - hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID; + H5D_alloc_time_t alloc_time; + C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0}; + const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0}; + void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */ + void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; + hsize_t dataset_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + size_t data_size, correct_buf_size; + size_t num_dsets; + hid_t dset_ids[MAX_NUM_DSETS_MULTI]; + hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; + hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing write to filtered chunks with all processes having no selection"); + VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded"); + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); VRFY((file_id >= 0), "Test file open succeeded"); @@ -2341,6 +2512,12 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); + /* + * Since we're doing a no-op write to the dataset, + * make sure the fill time is set appropriately + */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + VRFY((H5Pset_chunk(plist_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); @@ -2372,8 +2549,8 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, false, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -2397,7 +2574,8 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, alloc_time == H5D_ALLOC_TIME_INCR); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -2514,8 +2692,8 @@ test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -2547,7 +2725,8 @@ test_write_filtered_dataset_point_selection(const char 
*parent_group, H5Z_filter dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -2673,8 +2852,8 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -2715,7 +2894,8 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -2852,8 +3032,8 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z /* Set data transform expression */ VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded"); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, plist_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, plist_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -2887,7 +3067,8 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3016,8 +3197,8 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -3046,7 +3227,8 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou (C_DATATYPE)((j % (hsize_t)mpi_size) + (j / (hsize_t)mpi_size) + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == 
memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3176,8 +3358,8 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -3206,7 +3388,8 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H (j / (dataset_dims[0] * dataset_dims[1])) + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3333,8 +3516,8 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -3380,7 +3563,8 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3524,7 +3708,8 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, test_mode); + write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, data_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -3557,7 +3742,8 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group } } - read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, test_mode, + true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3702,7 +3888,8 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, test_mode); + write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, data_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -3738,7 +3925,8 @@ 
test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, } } - read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, test_mode, + true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -3797,6 +3985,7 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro hid_t group_id = H5I_INVALID_HID; hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; + H5D_alloc_time_t alloc_time; if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype " @@ -3831,6 +4020,9 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); + /* Retrieve allocation time */ + VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded"); + /* Create chunked dataset */ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -3915,7 +4107,7 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro * of the H5Dwrite loop: */ /* write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, - dxpl_id, data_bufs, test_mode); */ + dcpl_id, dxpl_id, data_bufs, test_mode, true, true, false); */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { herr_t expected = FAIL; herr_t ret; @@ -3958,9 +4150,10 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro VRFY((ret == expected), "Dataset write"); if (expected == SUCCEED) - verify_chunk_opt_status(1, dxpl_id); + verify_chunk_opt_status(1, test_mode, true, false, true, false, false, dxpl_id); else - verify_chunk_opt_status(0, dxpl_id); + verify_chunk_opt_status(0, test_mode, false, true, true, false, alloc_time == H5D_ALLOC_TIME_LATE, + dxpl_id); } for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) @@ -3986,7 +4179,11 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + /* If some writes succeeded (due to mixed filtered mode) or if allocation time is late, then there is data + * on disk to be read */ + read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, test_mode, + true, false, + !(test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED || alloc_time == H5D_ALLOC_TIME_LATE)); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { hid_t dset_dcpl; @@ -4066,6 +4263,7 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group hid_t group_id = H5I_INVALID_HID; hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; hid_t filespace = H5I_INVALID_HID; + H5D_alloc_time_t alloc_time; if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion"); @@ -4099,6 +4297,9 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); + /* Retrieve allocation time */ + VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded"); + /* Create chunked dataset */ 
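/*
 * Illustrative sketch (not part of the patch): how a test helper might
 * query a dataset creation property list for its space allocation time,
 * as the compound-datatype tests above do with H5Pget_alloc_time(), and
 * turn it into a yes/no answer about whether raw chunk space could
 * already exist on disk before any successful H5Dwrite. The helper name
 * and its return value are hypothetical; only H5Pget_alloc_time() and
 * the H5D_ALLOC_TIME_* values come from the HDF5 API.
 */
#include <stdbool.h>
#include "hdf5.h"

static bool
chunks_may_exist_before_write(hid_t dcpl_id)
{
    H5D_alloc_time_t alloc_time = H5D_ALLOC_TIME_DEFAULT;

    /* Retrieve the allocation time recorded in the DCPL */
    if (H5Pget_alloc_time(dcpl_id, &alloc_time) < 0)
        return false; /* be conservative on error */

    /*
     * With early allocation, space for the dataset is allocated when the
     * dataset is created, so chunk storage can exist even if every later
     * write fails; with incremental or late allocation it generally does
     * not exist until something is written.
     */
    return (alloc_time == H5D_ALLOC_TIME_EARLY);
}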
plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); @@ -4183,7 +4384,7 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group * of the H5Dwrite loop: */ /* write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, - dxpl_id, data_bufs, test_mode); */ + dcpl_id, dxpl_id, data_bufs, test_mode, true, true, false); */ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { herr_t expected = FAIL; herr_t ret; @@ -4226,9 +4427,10 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group VRFY((ret == expected), "Dataset write"); if (expected == SUCCEED) - verify_chunk_opt_status(1, dxpl_id); + verify_chunk_opt_status(1, test_mode, true, false, true, false, false, dxpl_id); else - verify_chunk_opt_status(0, dxpl_id); + verify_chunk_opt_status(0, test_mode, false, true, true, false, alloc_time == H5D_ALLOC_TIME_LATE, + dxpl_id); } for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) @@ -4254,7 +4456,11 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + /* If some writes succeeded (due to mixed filtered mode) or if allocation time is late, then there is data + * on disk to be read */ + read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, test_mode, + true, false, + !(test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED || alloc_time == H5D_ALLOC_TIME_LATE)); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { hid_t dset_dcpl; @@ -4419,8 +4625,8 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -4475,8 +4681,8 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -4639,8 +4845,8 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -4695,8 +4901,8 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, 
fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -4861,8 +5067,8 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -4917,8 +5123,8 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -5107,8 +5313,8 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -5173,8 +5379,8 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil } } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, mpi_size > 1 ? 
true : false, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -5250,7 +5456,6 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter void *read_bufs[MAX_NUM_DSETS_MULTI] = {0}; hsize_t dataset_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; - hsize_t sel_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; size_t data_size, read_buf_size; size_t num_dsets; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; @@ -5339,8 +5544,8 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -5368,8 +5573,6 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter open_datasets(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); - sel_dims[0] = sel_dims[1] = 0; - select_none(num_dsets, dset_ids, fspace_ids); read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); @@ -5385,8 +5588,8 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(data_bufs_nc[dset_idx], 0, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, false, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -5529,8 +5732,8 @@ test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_ select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -5586,8 +5789,8 @@ test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -5784,8 +5987,8 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = 
H5Dget_create_plist(dset_ids[0]); @@ -5842,8 +6045,8 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -6025,8 +6228,8 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -6089,8 +6292,8 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -6282,8 +6485,8 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); @@ -6349,8 +6552,8 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -6518,8 +6721,8 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5 select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -6581,8 +6784,8 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5 VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each 
piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -6765,8 +6968,8 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -6826,8 +7029,8 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -7035,7 +7238,8 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); + write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, data_bufs, + test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -7091,7 +7295,8 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -7285,7 +7490,8 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); + write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, data_bufs, + test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -7341,7 +7547,8 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -7538,7 +7745,8 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); + write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, data_bufs, + test_mode, true, false, false); /* Verify space allocation status */ plist_id = 
H5Dget_create_plist(dset_ids[0]); @@ -7594,7 +7802,8 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, read_bufs, + test_mode, true, false, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -7797,7 +8006,8 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode); + write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, data_bufs, + test_mode, true, false, false); /* Verify space allocation status */ plist_id = H5Dget_create_plist(dset_ids[0]); @@ -7853,7 +8063,8 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); } - read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, read_bufs, + test_mode, true, false, false); /* Collect each piece of data from all ranks into a global buffer on all ranks */ global_buf = calloc(1, data_size); @@ -8006,8 +8217,8 @@ test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id select_all(num_dsets, dset_ids, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, dcpl_id, H5P_DEFAULT, + data_bufs, test_mode, true, false, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -8051,7 +8262,8 @@ test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id open_datasets(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -8177,8 +8389,8 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) free(data_bufs_nc[dset_idx]); @@ -8228,8 +8440,8 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id (j / (dataset_dims[0] * dataset_dims[1])) + dset_idx); } - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, H5P_DEFAULT, + read_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; 
dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)), @@ -8275,6 +8487,7 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; size_t data_size; size_t num_dsets; + size_t num_loops; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; @@ -8352,7 +8565,12 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, read_bufs[dset_idx] = tmp_buf; } - for (size_t i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) { + /* Determine number of loops to run through */ + num_loops = SHRINKING_GROWING_CHUNKS_NLOOPS; + if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) + num_loops /= 2; + + for (size_t i = 0; i < num_loops; i++) { for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { /* Continually write random float data, followed by zeroed-out data */ if (i % 2) @@ -8366,8 +8584,8 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, } } - write_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, i > 0); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -8381,8 +8599,8 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, } } - read_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -8511,8 +8729,8 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi read_bufs[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, @@ -8525,8 +8743,8 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi /* Verify the correct data was written */ open_datasets(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -8544,6 +8762,12 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); + /* + * Since we're only doing a partial write to the dataset, make + * sure the fill time is set 
appropriately + */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); /* Create datasets depending on the current test mode */ @@ -8570,8 +8794,8 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, @@ -8587,8 +8811,8 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -8717,8 +8941,8 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t read_bufs[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -8729,8 +8953,8 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t /* Verify the correct data was written */ open_datasets(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -8748,6 +8972,12 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t filespace = H5Screate_simple(WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL); VRFY((filespace >= 0), "File dataspace creation succeeded"); + /* + * Since we're only doing a partial write to the dataset, make + * sure the fill time is set appropriately + */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); /* Create datasets depending on the current test mode */ @@ -8775,8 +9005,8 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, 
true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -8790,8 +9020,8 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) memset(read_bufs[dset_idx], 255, data_size); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dcpl_id, dxpl_id, + read_bufs, test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)), @@ -8870,6 +9100,9 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); + /* Make sure the fill time is set appropriately */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + VRFY((H5Pset_chunk(plist_id, FILL_VALUES_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); /* Add test filter to the pipeline */ @@ -8899,7 +9132,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id } /* Read entire dataset and verify that the fill value is returned */ - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, true); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++) @@ -8941,8 +9175,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -8953,7 +9187,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id /* Verify correct data was written */ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); /* * Each MPI rank communicates their written piece of data @@ -9001,8 +9236,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, true); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -9013,7 +9248,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id /* Verify correct data was written */ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME, num_dsets, 
test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { C_DATATYPE *tmp_buf = read_bufs[dset_idx]; @@ -9046,7 +9282,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); /* Read entire dataset and verify that the fill value is returned */ - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, true); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++) @@ -9081,8 +9318,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx); } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -9093,7 +9330,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id /* Verify correct data was written */ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME2, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t i = 0; i < (size_t)mpi_size; i++) { recvcounts[i] = (int)(count[1] * block[1]); @@ -9135,8 +9373,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, true); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -9147,7 +9385,8 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id /* Verify correct data was written */ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME2, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { C_DATATYPE *tmp_buf = read_bufs[dset_idx]; @@ -9231,6 +9470,9 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ plist_id = H5Pcopy(dcpl_id); VRFY((plist_id >= 0), "DCPL copy succeeded"); + /* Make sure the fill time is set appropriately */ + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded"); + 
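/*
 * Illustrative sketch (not part of the patch): the general DCPL pattern
 * the fill-value tests rely on. With H5D_FILL_TIME_IFSET the library
 * writes fill values only when a fill value has actually been defined,
 * which is why both the defined- and undefined-fill-value tests can set
 * the same fill time on their copied DCPLs. The dimensions, chunk size
 * and fill value below are made up for the example.
 */
#include "hdf5.h"

static hid_t
make_fill_value_dcpl(void)
{
    const hsize_t chunk_dims[2] = {4, 4};
    const int     fill_value    = -1;
    hid_t         dcpl_id       = H5Pcreate(H5P_DATASET_CREATE);

    if (dcpl_id < 0)
        return H5I_INVALID_HID;

    /* Chunked layout is required for the filtered I/O paths */
    if (H5Pset_chunk(dcpl_id, 2, chunk_dims) < 0)
        goto error;

    /* Define a fill value and write it only because it is defined */
    if (H5Pset_fill_value(dcpl_id, H5T_NATIVE_INT, &fill_value) < 0)
        goto error;
    if (H5Pset_fill_time(dcpl_id, H5D_FILL_TIME_IFSET) < 0)
        goto error;

    return dcpl_id;

error:
    H5Pclose(dcpl_id);
    return H5I_INVALID_HID;
}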
VRFY((H5Pset_chunk(plist_id, FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); /* Add test filter to the pipeline */ @@ -9283,8 +9525,8 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ * allocation in parallel, so the read should succeed in that case. */ if (alloc_time == H5D_ALLOC_TIME_EARLY) { - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, - test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, true); } else { for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { @@ -9322,9 +9564,11 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ VRFY((ret == expected), "Dataset write"); if (expected == SUCCEED) - verify_chunk_opt_status(1, dxpl_id); + verify_chunk_opt_status(1, test_mode, true, false, true, false, false, dxpl_id); else - verify_chunk_opt_status(0, dxpl_id); + verify_chunk_opt_status( + 0, test_mode, false, true, true, + alloc_time == H5D_ALLOC_TIME_INCR || alloc_time == H5D_ALLOC_TIME_LATE, false, dxpl_id); } } @@ -9358,8 +9602,8 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -9369,7 +9613,8 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ open_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); @@ -9393,8 +9638,8 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, true); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -9405,7 +9650,8 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_ /* Verify correct data was written */ open_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { free(data_bufs_nc[dset_idx]); @@ -9449,12 +9695,10 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap size_t num_dsets; hid_t dset_ids[MAX_NUM_DSETS_MULTI]; 
hid_t fspace_ids[MAX_NUM_DSETS_MULTI]; - hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; - hid_t group_id = H5I_INVALID_HID; - hid_t filespace = H5I_INVALID_HID; - int *recvcounts = NULL; - int *displs = NULL; - int mpi_code; + hid_t file_id = H5I_INVALID_HID; + hid_t plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; if (MAINPROCESS) puts("Testing fill time H5D_FILL_TIME_NEVER"); @@ -9504,7 +9748,7 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); /* Set a fill value */ - fill_value = FILL_VALUES_TEST_FILL_VAL; + fill_value = FILL_TIME_NEVER_TEST_FILL_VAL; VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, &fill_value) >= 0), "Fill Value set"); /* Set fill time of 'never' */ @@ -9519,6 +9763,21 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + /* Allocate buffer for reading entire dataset */ + read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); + + for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { + read_bufs[dset_idx] = calloc(1, read_buf_size); + VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); + } + + /* Allocate buffer of fill values */ + fill_buf = calloc(1, read_buf_size); + VRFY((NULL != fill_buf), "calloc succeeded"); + + for (size_t i = 0; i < read_buf_size / sizeof(C_DATATYPE); i++) + fill_buf[i] = FILL_TIME_NEVER_TEST_FILL_VAL; + /* * Since we aren't writing fill values to the chunks of the * datasets we just created, close and re-open file to ensure @@ -9538,37 +9797,22 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - /* Allocate buffer for reading entire dataset */ - read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE); - - for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { - read_bufs[dset_idx] = calloc(1, read_buf_size); - VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded"); - } - - fill_buf = calloc(1, read_buf_size); - VRFY((NULL != fill_buf), "calloc succeeded"); - - /* Read entire dataset and verify that the fill value isn't returned */ - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - - for (size_t i = 0; i < read_buf_size / sizeof(C_DATATYPE); i++) - fill_buf[i] = FILL_TIME_NEVER_TEST_FILL_VAL; - /* - * It should be very unlikely for the dataset's random - * values to all be the fill value, so this should be - * a safe comparison in theory. + * Read entire dataset just to try to verify bad behavior doesn't + * occur. Don't attempt to verify the contents of the read buffer(s) + * yet, because there's no guarantee as to what may have been + * read from the dataset. */ - for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) - VRFY((0 != memcmp(read_bufs[dset_idx], fill_buf, read_buf_size)), "Data verification succeeded"); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, true); /* * Write to part of the first chunk in the dataset with - * all ranks, then read the whole dataset and ensure that - * the fill value isn't returned for the unwritten part of - * the chunk, as well as for the rest of the dataset that - * hasn't been written to yet. 
+ * all ranks, then read the whole dataset just to try to + * verify bad behavior doesn't occur. Don't attempt to + * verify the contents of the read buffer(s) yet, because + * there's no guarantee as to what may have been read from + * the dataset. */ count[0] = 1; count[1] = 1; @@ -9595,8 +9839,8 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap data_bufs_nc[dset_idx] = tmp_buf; } - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN); @@ -9607,35 +9851,8 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap /* Verify correct data was written */ open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); - - /* - * Each MPI rank communicates their written piece of data - * into each other rank's correctness-checking buffer - */ - recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts)); - VRFY((NULL != recvcounts), "calloc succeeded"); - - displs = calloc(1, (size_t)mpi_size * sizeof(*displs)); - VRFY((NULL != displs), "calloc succeeded"); - - for (size_t i = 0; i < (size_t)mpi_size; i++) { - recvcounts[i] = (int)(count[1] * block[1]); - displs[i] = (int)(i * dataset_dims[1]); - } - - for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { - mpi_code = MPI_Allgatherv(data_bufs[dset_idx], recvcounts[mpi_rank], C_DATATYPE_MPI, fill_buf, - recvcounts, displs, C_DATATYPE_MPI, comm); - VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded"); - - /* - * It should be very unlikely for the dataset's random - * values to all be the fill value, so this should be - * a safe comparison in theory. 
- */ - VRFY((0 != memcmp(read_bufs[dset_idx], fill_buf, read_buf_size)), "Data verification succeeded"); - } + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded"); @@ -9659,8 +9876,8 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids); - write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, - test_mode); + write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dcpl_id, dxpl_id, + data_bufs, test_mode, true, true, false); /* Verify space allocation status */ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN); @@ -9671,7 +9888,8 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap /* Verify correct data was written */ open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids); - read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode); + read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dcpl_id, dxpl_id, read_bufs, + test_mode, true, true, false); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { C_DATATYPE *tmp_buf = read_bufs[dset_idx]; @@ -9680,9 +9898,6 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap VRFY((tmp_buf[j] != FILL_TIME_NEVER_TEST_FILL_VAL), "Data verification succeeded"); } - free(displs); - free(recvcounts); - free(fill_buf); for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) { @@ -9707,6 +9922,7 @@ int main(int argc, char **argv) { unsigned seed; + double total_test_time = 0.0; size_t cur_filter_idx = 0; size_t num_filters = 0; hid_t file_id = H5I_INVALID_HID; @@ -9798,8 +10014,13 @@ main(int argc, char **argv) srand(seed); - if (MAINPROCESS) - printf("Using seed: %u\n\n", seed); + /* Print test settings */ + if (MAINPROCESS) { + printf("Test Info:\n"); + printf(" MPI size: %d\n", mpi_size); + printf(" Test express level: %d\n", test_express_level_g); + printf(" Using seed: %u\n\n", seed); + } num_filters = ARRAY_SIZE(filterIDs); @@ -9821,7 +10042,13 @@ main(int argc, char **argv) fcpl_id = H5Pcreate(H5P_FILE_CREATE); VRFY((fcpl_id >= 0), "FCPL creation succeeded"); - VRFY((H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, true, 1) >= 0), + /* + * TODO: Ideally, use persistent free space management. However, + * this occasionally runs into an infinite loop in the library's + * free space management code, so don't persist free space for now + * until that is fixed. 
+ */ + VRFY((H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, false, 1) >= 0), "H5Pset_file_space_strategy succeeded"); VRFY((h5_fixname(FILENAME[0], fapl_id, filenames[0], sizeof(filenames[0])) != NULL), @@ -9884,6 +10111,8 @@ main(int argc, char **argv) const char *alloc_time; const char *mode; unsigned filter_config; + double start_time = 0.0; + double end_time = 0.0; char group_name[512]; switch (sel_io_mode) { @@ -9948,7 +10177,20 @@ main(int argc, char **argv) continue; } - if (MAINPROCESS) + /* + * If TestExpress is > 1, only run the multi-chunk I/O + * configuration tests for the 'USE_SINGLE_DATASET' case, + * as the 'USE_MULTIPLE_DATASETS' and 'USE_MULTIPLE_DATASETS_MIXED_FILTERED' + * cases are more stressful on the file system. + */ + if (test_express_level_g > 1) { + if (((test_mode == USE_MULTIPLE_DATASETS) || + (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) && + (chunk_opt != H5FD_MPIO_CHUNK_ONE_IO)) + continue; + } + + if (MAINPROCESS) { printf("== Running tests in mode '%s' with filter '%s' using selection I/O mode " "'%s', '%s' and '%s' allocation time ==\n\n", test_mode_to_string(test_mode), filterNames[cur_filter_idx], sel_io_str, @@ -9956,6 +10198,9 @@ main(int argc, char **argv) : "Multi-Chunk I/O", alloc_time); + start_time = MPI_Wtime(); + } + /* Get the current filter's info */ VRFY((H5Zget_filter_info(cur_filter, &filter_config) >= 0), "H5Zget_filter_info succeeded"); @@ -9981,6 +10226,15 @@ main(int argc, char **argv) VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, chunk_opt) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded"); + /* + * Disable writing of fill values by default. Otherwise, a + * lot of time may be spent writing fill values to chunks + * when they're going to be fully overwritten anyway. + * Individual tests will alter this behavior as necessary. + */ + VRFY((H5Pset_fill_time(dcpl_id, H5D_FILL_TIME_NEVER) >= 0), + "H5Pset_fill_time succeeded"); + /* Create a group to hold all the datasets for this combination * of filter and chunk optimization mode. 
Then, close the file * again since some tests may need to open the file in a special @@ -10018,6 +10272,12 @@ main(int argc, char **argv) if (MAINPROCESS) puts(""); + + if (MAINPROCESS) { + end_time = MPI_Wtime(); + total_test_time += end_time - start_time; + printf("Tests took %f seconds\n\n", end_time - start_time); + } } } } @@ -10041,7 +10301,7 @@ main(int argc, char **argv) goto exit; if (MAINPROCESS) - puts("All Parallel Filters tests passed\n"); + printf("All Parallel Filters tests passed - total test time was %f seconds\n", total_test_time); exit: if (nerrors) diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h index c0b1db878f9..04d36395dbc 100644 --- a/testpar/t_filters_parallel.h +++ b/testpar/t_filters_parallel.h @@ -444,7 +444,7 @@ typedef struct { #define SHRINKING_GROWING_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR) #define SHRINKING_GROWING_CHUNKS_CH_NROWS (SHRINKING_GROWING_CHUNKS_NROWS / mpi_size) #define SHRINKING_GROWING_CHUNKS_CH_NCOLS (SHRINKING_GROWING_CHUNKS_NCOLS / mpi_size) -#define SHRINKING_GROWING_CHUNKS_NLOOPS 20 +#define SHRINKING_GROWING_CHUNKS_NLOOPS 8 /* Defines for the unshared filtered edge chunks write test */ #define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME "unshared_filtered_edge_chunks_write" diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c index eff39d057e0..0f1e27b506e 100644 --- a/testpar/t_mpi.c +++ b/testpar/t_mpi.c @@ -53,14 +53,14 @@ test_mpio_overlap_writes(char *filename) MPI_Offset mpi_off; MPI_Status mpi_stat; - if (VERBOSE_MED) - printf("MPIO independent overlapping writes test on file %s\n", filename); - nerrs = 0; /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + if (VERBOSE_MED && MAINPROCESS) + printf("MPIO independent overlapping writes test on file %s\n", filename); + /* Need at least 2 processes */ if (mpi_size < 2) { if (MAINPROCESS) @@ -211,7 +211,7 @@ test_mpio_gb_file(char *filename) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - if (VERBOSE_MED) + if (VERBOSE_MED && MAINPROCESS) printf("MPI_Offset range test\n"); /* figure out the signness and sizeof MPI_Offset */ @@ -274,12 +274,13 @@ test_mpio_gb_file(char *filename) /* * Verify if we can write to a file of multiple GB sizes. */ - if (VERBOSE_MED) + if (VERBOSE_MED && MAINPROCESS) printf("MPIO GB file test %s\n", filename); if (sizeof_mpi_offset <= 4) { - printf("Skipped GB file range test " - "because MPI_Offset cannot support it\n"); + if (MAINPROCESS) + printf("Skipped GB file range test " + "because MPI_Offset cannot support it\n"); } else { buf = (char *)malloc(MB); @@ -294,7 +295,8 @@ test_mpio_gb_file(char *filename) mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, info, &fh); VRFY((mrc == MPI_SUCCESS), "MPI_FILE_OPEN"); - printf("MPIO GB file write test %s\n", filename); + if (MAINPROCESS) + printf("MPIO GB file write test %s\n", filename); /* instead of writing every bytes of the file, we will just write * some data around the 2 and 4 GB boundaries. 
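/*
 * A small sketch of the per-configuration timing pattern added above: only
 * rank 0 samples MPI_Wtime() and reports the elapsed time, and the caller
 * accumulates a running total. It assumes MPI is already initialized; the
 * timed body is a placeholder and the function name is illustrative.
 */
#include <stdio.h>
#include <mpi.h>

static double
run_and_time_configuration(int mpi_rank)
{
    double start   = 0.0;
    double elapsed = 0.0;

    if (mpi_rank == 0)
        start = MPI_Wtime();

    /* ... run the tests for one filter / chunk-opt / allocation-time combination ... */

    if (mpi_rank == 0) {
        elapsed = MPI_Wtime() - start;
        printf("Tests took %f seconds\n\n", elapsed);
    }

    return elapsed; /* caller adds this to the running total test time */
}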
That should cover @@ -333,7 +335,8 @@ test_mpio_gb_file(char *filename) */ /* open it again to verify the data written */ /* but only if there was no write errors */ - printf("MPIO GB file read test %s\n", filename); + if (MAINPROCESS) + printf("MPIO GB file read test %s\n", filename); if (errors_sum(writerrs) > 0) { printf("proc %d: Skip read test due to previous write errors\n", mpi_rank); goto finish; @@ -377,7 +380,8 @@ test_mpio_gb_file(char *filename) mrc = MPI_Barrier(MPI_COMM_WORLD); VRFY((mrc == MPI_SUCCESS), "Sync before leaving test"); - printf("Test if MPI_File_get_size works correctly with %s\n", filename); + if (MAINPROCESS) + printf("Test if MPI_File_get_size works correctly with %s\n", filename); mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh); VRFY((mrc == MPI_SUCCESS), ""); @@ -432,7 +436,6 @@ test_mpio_gb_file(char *filename) static int test_mpio_1wMr(char *filename, int special_request) { - char hostname[128]; int mpi_size, mpi_rank; MPI_File fh; char mpi_err_str[MPI_MAX_ERROR_STRING]; @@ -456,19 +459,8 @@ test_mpio_1wMr(char *filename, int special_request) } /* show the hostname so that we can tell where the processes are running */ - if (VERBOSE_DEF) { -#ifdef H5_HAVE_GETHOSTNAME - if (gethostname(hostname, sizeof(hostname)) < 0) { - printf("gethostname failed\n"); - hostname[0] = '\0'; - } -#else - printf("gethostname unavailable\n"); - hostname[0] = '\0'; -#endif - PRINTID; - printf("hostname=%s\n", hostname); - } + if (VERBOSE_DEF) + h5_show_hostname(); /* Delete any old file in order to start anew. */ /* Must delete because MPI_File_open does not have a Truncate mode. */ @@ -1005,6 +997,10 @@ test_mpio_special_collective(char *filename) static int parse_options(int argc, char **argv) { + int mpi_rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + while (--argc) { if (**(++argv) != '-') { break; @@ -1053,7 +1049,7 @@ parse_options(int argc, char **argv) return (1); } H5Pclose(plist); - if (VERBOSE_MED) { + if (VERBOSE_MED && MAINPROCESS) { printf("Test filenames are:\n"); for (i = 0; i < n; i++) printf(" %s\n", filenames[i]); diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c index 95ad1257801..e1dce1bbfd7 100644 --- a/testpar/t_pflush2.c +++ b/testpar/t_pflush2.c @@ -20,7 +20,8 @@ #include "h5test.h" -static const char *FILENAME[] = {"flush", "noflush", NULL}; +static const char *FLUSH_FILENAME[] = {"flush", NULL}; +static const char *NOFLUSH_FILENAME[] = {"noflush", NULL}; static int *data_g = NULL; @@ -173,7 +174,7 @@ main(int argc, char *argv[]) goto error; /* Check the case where the file was flushed */ - h5_fixname(FILENAME[0], fapl_id1, name, sizeof(name)); + h5_fixname(FLUSH_FILENAME[0], fapl_id1, name, sizeof(name)); if (check_test_file(name, sizeof(name), fapl_id1)) { H5_FAILED(); goto error; @@ -190,7 +191,7 @@ main(int argc, char *argv[]) H5Eget_auto2(H5E_DEFAULT, &func, NULL); H5Eset_auto2(H5E_DEFAULT, NULL, NULL); - h5_fixname(FILENAME[1], fapl_id2, name, sizeof(name)); + h5_fixname(NOFLUSH_FILENAME[0], fapl_id2, name, sizeof(name)); if (check_test_file(name, sizeof(name), fapl_id2)) { if (mpi_rank == 0) PASSED(); @@ -202,8 +203,8 @@ main(int argc, char *argv[]) H5Eset_auto2(H5E_DEFAULT, func, NULL); - h5_clean_files(&FILENAME[0], fapl_id1); - h5_clean_files(&FILENAME[1], fapl_id2); + h5_clean_files(FLUSH_FILENAME, fapl_id1); + h5_clean_files(NOFLUSH_FILENAME, fapl_id2); if (data_g) { free(data_g); diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c index b627b7c40ef..7fdefeb3ee9 100644 --- a/testpar/t_ph5basic.c 
+++ b/testpar/t_ph5basic.c @@ -177,3 +177,139 @@ test_fapl_mpio_dup(void) VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); } } /* end test_fapl_mpio_dup() */ + +/*------------------------------------------------------------------------- + * Function: test_get_dxpl_mpio + * + * Purpose: Test that H5Pget_dxpl_mpio will properly return the data + * transfer mode of collective and independent I/O access + * after setting it and writing some data. + * + * Return: Success: None + * Failure: Abort + *------------------------------------------------------------------------- + */ +void +test_get_dxpl_mpio(void) +{ + hid_t fid = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t did = H5I_INVALID_HID; + hid_t fapl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + H5FD_mpio_xfer_t xfer_mode; + hsize_t dims[2] = {100, 100}; + hsize_t i, j; + int *data = NULL; + int mpi_rank, mpi_size; + const char *filename; + herr_t ret; + + if (VERBOSE_MED) + printf("Verify get_dxpl_mpio correctly gets the data transfer mode" + "set in the data transfer property list after a write\n"); + + /* Set up MPI for VRFY macro */ + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Initialize data array */ + data = malloc(100 * 100 * sizeof(*data)); + VRFY((data != NULL), "Data buffer initialized properly"); + + /* Create parallel fapl */ + fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO); + VRFY((fapl >= 0), "Fapl creation succeeded"); + + /* Create a file */ + filename = (const char *)GetTestParameters(); + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Create a dataset */ + sid = H5Screate_simple(2, dims, NULL); + VRFY((sid >= 0), "H5Screate succeeded"); + did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((did >= 0), "H5Dcreate2 succeeded"); + + /* Use collective I/O access */ + dxpl = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl >= 0), "H5Pcreate succeeded"); + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio set to collective succeeded"); + + /* Write some data */ + for (i = 0; i < dims[0]; i++) + for (j = 0; j < dims[1]; j++) + data[(i * 100) + j] = (int)(i + (i * j) + j); + + ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl, data); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* Check to make sure the property is still correct */ + ret = H5Pget_dxpl_mpio(dxpl, &xfer_mode); + VRFY((ret >= 0), "H5Pget_dxpl_mpio succeeded"); + VRFY((xfer_mode == H5FD_MPIO_COLLECTIVE), "Xfer_mode retrieved" + " successfully"); + + /* Read the data */ + ret = H5Dread(did, H5T_NATIVE_INT, sid, sid, dxpl, data); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* Check to make sure the property is still correct */ + ret = H5Pget_dxpl_mpio(dxpl, &xfer_mode); + VRFY((ret >= 0), "H5Pget_dxpl_mpio succeeded"); + VRFY((xfer_mode == H5FD_MPIO_COLLECTIVE), "Xfer_mode retrieved" + " successfully"); + + /* Check it does nothing on receiving NULL */ + ret = H5Pget_dxpl_mpio(dxpl, NULL); + VRFY((ret >= 0), "H5Pget_dxpl_mpio succeeded on NULL input"); + + /* Use independent I/O access */ + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT); + VRFY((ret >= 0), "H5Pset_dxpl_mpio set to independent succeeded"); + + /* Write some data */ + for (i = 0; i < dims[0]; i++) + for (j = 0; j < dims[1]; j++) + data[(i * 100) + j] = (int)(i + (j * j) + i); + + ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl, data); + VRFY((ret >= 0), "H5Dwrite 
succeeded"); + + /* Check to make sure the property is still correct */ + ret = H5Pget_dxpl_mpio(dxpl, &xfer_mode); + VRFY((ret >= 0), "H5Pget_dxpl_mpio succeeded"); + VRFY((xfer_mode == H5FD_MPIO_INDEPENDENT), "Xfer_mode retrieved" + " successfully"); + + /* Read the data */ + ret = H5Dread(did, H5T_NATIVE_INT, sid, sid, dxpl, data); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* Check to make sure the property is still correct */ + ret = H5Pget_dxpl_mpio(dxpl, &xfer_mode); + VRFY((ret >= 0), "H5Pget_dxpl_mpio succeeded"); + VRFY((xfer_mode == H5FD_MPIO_INDEPENDENT), "Xfer_mode retrieved" + " successfully"); + + /* Close everything */ + free(data); + + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + ret = H5Pclose(dxpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + ret = H5Dclose(did); + VRFY((ret >= 0), "H5Dclose succeeded"); + + ret = H5Sclose(sid); + VRFY((ret >= 0), "H5Sclose succeeded"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + +} /* end test_get_dxpl_mpio() */ diff --git a/testpar/t_select_io_dset.c b/testpar/t_select_io_dset.c index 2e6839efcec..9d3f1205051 100644 --- a/testpar/t_select_io_dset.c +++ b/testpar/t_select_io_dset.c @@ -159,7 +159,7 @@ set_dxpl(hid_t dxpl, H5D_selection_io_mode_t select_io_mode, H5FD_mpio_xfer_t mp } /* set_dxpl() */ /* - * Helper routine to check actual I/O mode on a dxpl + * Helper routine to check actual parallel I/O mode on a dxpl */ static void check_io_mode(hid_t dxpl, unsigned chunked) @@ -186,29 +186,85 @@ check_io_mode(hid_t dxpl, unsigned chunked) } /* check_io_mode() */ +static void +testing_check_io_mode(hid_t dxpl, H5D_mpio_actual_io_mode_t exp_io_mode) +{ + H5D_mpio_actual_io_mode_t actual_io_mode; + + if (H5Pget_mpio_actual_io_mode(dxpl, &actual_io_mode) < 0) + P_TEST_ERROR; + + if (actual_io_mode != exp_io_mode) { + nerrors++; + if (MAINPROCESS) + printf("\n Failed: Incorrect I/O mode (expected/actual) %u:%u", (unsigned)exp_io_mode, + (unsigned)actual_io_mode); + } + +} /* testing_check_io_mode() */ + +/* + * Helper routine to check actual selection I/O mode on a dxpl + */ +static void +check_actual_selection_io_mode(hid_t dxpl, uint32_t sel_io_mode_expected) +{ + uint32_t actual_sel_io_mode; + + if (H5Pget_actual_selection_io_mode(dxpl, &actual_sel_io_mode) < 0) + P_TEST_ERROR; + if (actual_sel_io_mode != sel_io_mode_expected) { + if (MAINPROCESS) + printf("\n Failed: Incorrect selection I/O mode (expected/actual) %u:%u", + (unsigned)sel_io_mode_expected, (unsigned)actual_sel_io_mode); + P_TEST_ERROR; + } +} + +/* + * Helper routine to check actual selection I/O mode on a dxpl + */ +static void +check_actual_selection_io_mode_either(hid_t dxpl, uint32_t sel_io_mode_expected1, + uint32_t sel_io_mode_expected2) +{ + uint32_t actual_sel_io_mode; + + if (H5Pget_actual_selection_io_mode(dxpl, &actual_sel_io_mode) < 0) + P_TEST_ERROR; + if (actual_sel_io_mode != sel_io_mode_expected1 && actual_sel_io_mode != sel_io_mode_expected2) { + if (MAINPROCESS) + printf("\n Failed: Incorrect selection I/O mode (expected/actual) %u or %u : %u", + (unsigned)sel_io_mode_expected1, (unsigned)sel_io_mode_expected2, + (unsigned)actual_sel_io_mode); + P_TEST_ERROR; + } +} + /* * Case 1: single dataset read/write, no type conversion (null case) */ static void -test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { - int i; - hid_t did = H5I_INVALID_HID; - hid_t sid = H5I_INVALID_HID; - 
hid_t dcpl = H5I_INVALID_HID; - hid_t dxpl = H5I_INVALID_HID; - hid_t ntrans_dxpl = H5I_INVALID_HID; - hid_t fspace_id = H5I_INVALID_HID; - hid_t mspace_id = H5I_INVALID_HID; - hsize_t dims[1]; - hsize_t cdims[1]; - hsize_t start[1], stride[1], count[1], block[1]; - int wbuf[DSET_SELECT_DIM]; - int wbuf_bak[DSET_SELECT_DIM]; - int trans_wbuf[DSET_SELECT_DIM]; - int rbuf[DSET_SELECT_DIM]; - char dset_name[DSET_NAME_LEN]; - const char *expr = "2*x"; + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + int wbuf[DSET_SELECT_DIM]; + int wbuf_bak[DSET_SELECT_DIM]; + int trans_wbuf[DSET_SELECT_DIM]; + int rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "2*x"; + H5D_mpio_actual_io_mode_t exp_io_mode = H5D_MPIO_NO_COLLECTIVE; curr_nerrors = nerrors; @@ -224,11 +280,13 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) cdims[0] = DSET_SELECT_CHUNK_DIM; if (H5Pset_chunk(dcpl, 1, cdims) < 0) P_TEST_ERROR; + if (!dtrans && H5Pset_deflate(dcpl, 2) < 0) + P_TEST_ERROR; } /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "no_tconv_%s_%s_%s", chunked ? "chunked" : "contig", - dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "no_tconv_%s_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", select ? "sel" : "nosel", mwbuf ? "mwbuf" : "nomwbuf"); /* Create dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -262,7 +320,8 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -284,7 +343,20 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) if (mwbuf) memcpy(wbuf, wbuf_bak, sizeof(wbuf)); - check_io_mode(dxpl, chunked); + if (!dtrans || select) + exp_io_mode = chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE; + testing_check_io_mode(dxpl, exp_io_mode); + + if (chunked && !dtrans) { + /* If there are more ranks than chunks, then some ranks will not perform vector I/O due to how the + * parallel compression code redistributes data */ + if ((hsize_t)mpi_size > (dims[0] / cdims[0])) + check_actual_selection_io_mode_either(dxpl, H5D_VECTOR_IO, 0); + else + check_actual_selection_io_mode(dxpl, H5D_VECTOR_IO); + } + else + check_actual_selection_io_mode(dxpl, select ? 
H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ if (H5Dread(did, H5T_NATIVE_INT, mspace_id, fspace_id, ntrans_dxpl, rbuf) < 0) @@ -327,6 +399,8 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) P_TEST_ERROR; if (H5Pclose(dxpl) < 0) P_TEST_ERROR; + if (H5Pclose(dcpl) < 0) + P_TEST_ERROR; if (H5Pclose(ntrans_dxpl) < 0) P_TEST_ERROR; @@ -339,7 +413,7 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) * Case 2: single dataset read/write, no size change, no background buffer */ static void -test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned select, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -356,6 +430,8 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) char *rbuf = NULL; char dset_name[DSET_NAME_LEN]; + H5D_mpio_actual_io_mode_t exp_io_mode = H5D_MPIO_NO_COLLECTIVE; + curr_nerrors = nerrors; if ((wbuf = (char *)malloc((size_t)(4 * DSET_SELECT_DIM))) == NULL) @@ -379,8 +455,8 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) } /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "no_size_change_%s_%s", chunked ? "chunked" : "contig", - mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "no_size_change_%s_%s_%s", chunked ? "chunked" : "contig", + select ? "sel" : "nosel", mwbuf ? "mwbuf" : "nomwbuf"); /* Create 1d dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_STD_I32BE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -416,7 +492,8 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); /* Copy wbuf if the library will be modifying it */ if (mwbuf) @@ -430,7 +507,11 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) if (mwbuf) memcpy(wbuf, wbuf_bak, (size_t)(4 * DSET_SELECT_DIM)); - check_io_mode(dxpl, chunked); + if (select) + exp_io_mode = chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE; + + testing_check_io_mode(dxpl, exp_io_mode); + check_actual_selection_io_mode(dxpl, select ? 
H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read the data from the dataset with little endian */ if (H5Dread(did, H5T_STD_I32LE, mspace_id, fspace_id, dxpl, rbuf) < 0) @@ -489,25 +570,26 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * Case 3: single dataset read/write, larger mem type, no background buffer */ static void -test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { - int i; - hid_t did = H5I_INVALID_HID; - hid_t sid = H5I_INVALID_HID; - hid_t dcpl = H5I_INVALID_HID; - hid_t dxpl = H5I_INVALID_HID; - hid_t ntrans_dxpl = H5I_INVALID_HID; - hid_t fspace_id = H5I_INVALID_HID; - hid_t mspace_id = H5I_INVALID_HID; - hsize_t dims[1]; - hsize_t cdims[1]; - hsize_t start[1], stride[1], count[1], block[1]; - long wbuf[DSET_SELECT_DIM]; - long wbuf_bak[DSET_SELECT_DIM]; - long trans_wbuf[DSET_SELECT_DIM]; - long long rbuf[DSET_SELECT_DIM]; - char dset_name[DSET_NAME_LEN]; - const char *expr = "100 - x"; + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + long wbuf[DSET_SELECT_DIM]; + long wbuf_bak[DSET_SELECT_DIM]; + long trans_wbuf[DSET_SELECT_DIM]; + long long rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "100 - x"; + H5D_mpio_actual_io_mode_t exp_io_mode = H5D_MPIO_NO_COLLECTIVE; curr_nerrors = nerrors; @@ -525,8 +607,8 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign } /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "larger_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig", - dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "larger_no_bkg_%s_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", select ? "sel" : "nosel", mwbuf ? "mwbuf" : "nomwbuf"); /* Create 1d chunked dataset with/without data transform */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -560,7 +642,8 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -582,7 +665,11 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign if (mwbuf) memcpy(wbuf, wbuf_bak, sizeof(wbuf)); - check_io_mode(dxpl, chunked); + if (select) + exp_io_mode = chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE; + + testing_check_io_mode(dxpl, exp_io_mode); + check_actual_selection_io_mode(dxpl, select ? 
H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ if (H5Dread(did, H5T_NATIVE_LLONG, mspace_id, fspace_id, ntrans_dxpl, rbuf) < 0) @@ -637,25 +724,26 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign * Case 4: single dataset reader/write, smaller mem type, no background buffer */ static void -test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { - int i; - hid_t did = H5I_INVALID_HID; - hid_t sid = H5I_INVALID_HID; - hid_t dcpl = H5I_INVALID_HID; - hid_t dxpl = H5I_INVALID_HID; - hid_t ntrans_dxpl = H5I_INVALID_HID; - hid_t fspace_id = H5I_INVALID_HID; - hid_t mspace_id = H5I_INVALID_HID; - hsize_t dims[1]; - hsize_t cdims[1]; - hsize_t start[1], stride[1], count[1], block[1]; - short wbuf[DSET_SELECT_DIM]; - int wbuf_bak[DSET_SELECT_DIM]; - short trans_wbuf[DSET_SELECT_DIM]; - short rbuf[DSET_SELECT_DIM]; - char dset_name[DSET_NAME_LEN]; - const char *expr = "2 * (10 + x)"; + int i; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl = H5I_INVALID_HID; + hid_t dxpl = H5I_INVALID_HID; + hid_t ntrans_dxpl = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + hsize_t dims[1]; + hsize_t cdims[1]; + hsize_t start[1], stride[1], count[1], block[1]; + short wbuf[DSET_SELECT_DIM]; + int wbuf_bak[DSET_SELECT_DIM]; + short trans_wbuf[DSET_SELECT_DIM]; + short rbuf[DSET_SELECT_DIM]; + char dset_name[DSET_NAME_LEN]; + const char *expr = "2 * (10 + x)"; + H5D_mpio_actual_io_mode_t exp_io_mode = H5D_MPIO_NO_COLLECTIVE; curr_nerrors = nerrors; @@ -673,8 +761,8 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig } /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "smaller_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig", - dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "smaller_no_bkg_%s_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", select ? "sel" : "nosel", mwbuf ? "mwbuf" : "nomwbuf"); /* Create 1d chunked dataset with/without data transform */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -708,7 +796,8 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -731,7 +820,11 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig if (mwbuf) memcpy(wbuf, wbuf_bak, sizeof(wbuf)); - check_io_mode(dxpl, chunked); + if (select) + exp_io_mode = chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE; + + testing_check_io_mode(dxpl, exp_io_mode); + check_actual_selection_io_mode(dxpl, select ? 
H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ if (H5Dread(did, H5T_NATIVE_SHORT, mspace_id, fspace_id, ntrans_dxpl, rbuf) < 0) @@ -804,7 +897,7 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig * Verify the values read */ static void -test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned select, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -870,8 +963,8 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) /* Case 5(a) */ /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "cmpd_with_bkg_%s_%s", chunked ? "chunked" : "contig", - mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "cmpd_with_bkg_%s_%s_%s", chunked ? "chunked" : "contig", + select ? "sel" : "nosel", mwbuf ? "mwbuf" : "nomwbuf"); /* Create 1d dataset */ if ((did = H5Dcreate2(fid, dset_name, s1_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -907,7 +1000,8 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); /* Copy wbuf if the library will be modifying it */ if (mwbuf) @@ -917,12 +1011,12 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) if (H5Dwrite(did, s1_tid, mspace_id, fspace_id, dxpl, s1_wbuf) < 0) P_TEST_ERROR; + check_io_mode(dxpl, chunked); + /* Restore wbuf from backup if the library modified it */ if (mwbuf) memcpy(s1_wbuf, s1_wbuf_bak, sizeof(s1_t) * DSET_SELECT_DIM); - check_io_mode(dxpl, chunked); - /* Read all the data from the dataset */ memset(s1_rbuf, 0, sizeof(s1_t) * DSET_SELECT_DIM); if (H5Dread(did, s1_tid, mspace_id, fspace_id, dxpl, s1_rbuf) < 0) @@ -1094,6 +1188,10 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; if (H5Tclose(ss_bc_tid) < 0) P_TEST_ERROR; + if (H5Pclose(dxpl) < 0) + P_TEST_ERROR; + if (H5Pclose(dcpl) < 0) + P_TEST_ERROR; if (H5Dclose(did) < 0) P_TEST_ERROR; @@ -1115,7 +1213,7 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * Case 6: Type conversions + some processes have null/empty selections in datasets */ static void -test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { int i; hid_t did = H5I_INVALID_HID; @@ -1158,8 +1256,8 @@ test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned } /* Generate dataset name */ - snprintf(dset_name, sizeof(dset_name), "tconv_sel_empty_%s_%s_%s", chunked ? "chunked" : "contig", - dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_name, sizeof(dset_name), "tconv_sel_empty_%s_%s_%s_%s", chunked ? "chunked" : "contig", + dtrans ? "xform" : "noxform", select ? "sel" : "nosel", mwbuf ? 
"mwbuf" : "nomwbuf"); /* Create dataset */ if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) @@ -1170,7 +1268,8 @@ test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -1210,7 +1309,7 @@ test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned /* Create a memory dataspace */ if ((mspace_id = H5Screate_simple(1, block, NULL)) < 0) P_TEST_ERROR; - if (mpi_rank) { + if (!MAINPROCESS) { if (H5Sselect_none(mspace_id) < 0) P_TEST_ERROR; } @@ -1227,7 +1326,13 @@ test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned if (mwbuf) memcpy(lwbuf, lwbuf_bak, sizeof(lwbuf)); - check_io_mode(dxpl, chunked); + /* If not using selection I/O there will be no collective I/O, since type conversion is unsupported by + * legacy collective I/O */ + testing_check_io_mode( + dxpl, select ? (chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE) : 0); + + /* If not using selection I/O then the main process will do scalar I/O and others will do none */ + check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : (MAINPROCESS ? H5D_SCALAR_IO : 0)); /* Read the data from the dataset: type conversion int-->long */ /* If dtrans, without data transform set in dxpl */ @@ -1395,7 +1500,7 @@ test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned * Datatype for all datasets: H5T_NATIVE_LONG */ static void -test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { size_t ndsets; int i, j; @@ -1410,6 +1515,8 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m hid_t mem_sids[MULTI_NUM_DSETS]; hid_t mem_tids[MULTI_NUM_DSETS]; + bool any_tconv = false; + char dset_names[MULTI_NUM_DSETS][DSET_NAME_LEN]; hid_t dset_dids[MULTI_NUM_DSETS]; @@ -1457,7 +1564,8 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -1469,17 +1577,24 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m /* Set up file space ids and dataset ids */ for (i = 0; i < (int)ndsets; i++) { + bool tconv; + if ((file_sids[i] = H5Screate_simple(1, dims, NULL)) < 0) P_TEST_ERROR; /* Generate dataset name */ - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_dset%d_%s_%s_%s", i, - chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_dset%d_%s_%s_%s_%s", i, + chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", select ? "select" : "noselect", + mwbuf ? 
"mwbuf" : "nomwbuf"); + + /* Flip a coin to see if we're doing type conversion */ + tconv = HDrandom() % 2; + if (tconv) + any_tconv = true; /* Create ith dataset */ - if ((dset_dids[i] = - H5Dcreate2(fid, dset_names[i], ((HDrandom() % 2) ? H5T_NATIVE_LONG : H5T_NATIVE_INT), - file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], (tconv ? H5T_NATIVE_LONG : H5T_NATIVE_INT), + file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) P_TEST_ERROR; } @@ -1555,7 +1670,12 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m if (mwbuf) memcpy(total_wbuf, total_wbuf_bak, ndsets * DSET_SELECT_DIM * sizeof(int)); - check_io_mode(dxpl, chunked); + /* If doing type conversion or transform and not using selection I/O there will be no collective I/O, + * since type conversion is unsupported by legacy collective I/O */ + testing_check_io_mode(dxpl, ((any_tconv || dtrans) && !select) + ? 0 + : (chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE)); + check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read data from the dataset (if dtrans, without data transform set in dxpl) */ if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, ntrans_dxpl, rbufs) < 0) @@ -1708,7 +1828,7 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m * --Verify values read */ static void -test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned select, unsigned mwbuf) { size_t ndsets; int i, j, mm; @@ -1769,7 +1889,8 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); /* Each process takes x number of elements */ block[0] = dims[0] / (hsize_t)mpi_size; @@ -1794,8 +1915,8 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Generate dataset name */ - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_cmpd_dset%d_%s_%s", i, - chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_cmpd_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", select ? "select" : "noselect", mwbuf ? "mwbuf" : "nomwbuf"); /* Create ith dataset */ if ((dset_dids[i] = @@ -1860,6 +1981,7 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) memcpy(total_wbuf, total_wbuf_bak, buf_size); check_io_mode(dxpl, chunked); + check_actual_selection_io_mode(dxpl, select ? 
H5D_SELECTION_IO : H5D_SCALAR_IO); if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) P_TEST_ERROR; @@ -2164,7 +2286,7 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * Datatype for all datasets: H5T_STD_I16BE */ static void -test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) +test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned select, unsigned mwbuf) { size_t ndsets; int i, j; @@ -2222,7 +2344,8 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); /* Set up file space ids, mem space ids, and dataset ids */ for (i = 0; i < (int)ndsets; i++) { @@ -2230,8 +2353,8 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Generate dataset name */ - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_size_dset%d_%s_%s", i, - chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_size_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", select ? "select" : "noselect", mwbuf ? "mwbuf" : "nomwbuf"); /* Create ith dataset */ if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_STD_I32BE, file_sids[i], H5P_DEFAULT, dcpl, @@ -2301,6 +2424,7 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) memcpy(total_wbuf, total_wbuf_bak, buf_size); check_io_mode(dxpl, chunked); + check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : H5D_SCALAR_IO); /* Read data from the dataset */ if (H5Dread_multi(ndsets, dset_dids, mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) @@ -2510,7 +2634,7 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf) * --this will trigger type conversion for (a), (b) & (c) */ static void -test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf) +test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned select, unsigned mwbuf) { size_t ndsets; int i, j; @@ -2568,7 +2692,8 @@ test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, un P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); if ((ntrans_dxpl = H5Pcopy(dxpl)) < 0) P_TEST_ERROR; @@ -2584,8 +2709,9 @@ test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, un P_TEST_ERROR; /* Generate dataset name */ - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_sel_dset%d_%s_%s_%s", i, - chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_sel_dset%d_%s_%s_%s_%s", i, + chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", select ? "select" : "noselect", + mwbuf ? 
"mwbuf" : "nomwbuf"); if (i == 0) { if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_INT, file_sids[i], H5P_DEFAULT, @@ -2769,7 +2895,11 @@ test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, un if (mwbuf) memcpy(total_wbuf, total_wbuf_bak, buf_size); - check_io_mode(dxpl, chunked); + /* If not using selection I/O there will be no collective I/O, since type conversion is unsupported by + * legacy collective I/O */ + testing_check_io_mode( + dxpl, select ? (chunked ? H5D_MPIO_CHUNK_COLLECTIVE : H5D_MPIO_CONTIGUOUS_COLLECTIVE) : 0); + check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : H5D_SCALAR_IO); /* Initialize buffer indices */ for (i = 0; i < (int)ndsets; i++) { @@ -2903,7 +3033,7 @@ test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, un * --fields 'b' and 'd' are (DSET_SELECT_DIM + j + start[0]) */ static void -test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) +test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned select, unsigned mwbuf) { size_t ndsets; int i, j, mm; @@ -2920,6 +3050,8 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) hid_t mem_tids[MULTI_NUM_DSETS]; hid_t r_mem_tids[MULTI_NUM_DSETS]; + bool any_tconv; + multi_dset_type_t dset_types[MULTI_NUM_DSETS]; hid_t s1_tid = H5I_INVALID_HID; @@ -2978,7 +3110,8 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) P_TEST_ERROR; /* Set selection I/O mode, type of I/O and type of collective I/O */ - set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, mwbuf); + set_dxpl(dxpl, select ? H5D_SELECTION_IO_MODE_ON : H5D_SELECTION_IO_MODE_OFF, H5FD_MPIO_COLLECTIVE, + H5FD_MPIO_COLLECTIVE_IO, mwbuf); /* Set dataset layout: contiguous or chunked */ dims[0] = DSET_SELECT_DIM; @@ -3039,24 +3172,27 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) mm = HDrandom() % (int)ndsets; if (mm == 0) { dset_types[i] = DSET_WITH_NO_CONV; - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_nconv_dset%d_%s_%s", i, - chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_nconv_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", select ? "select" : "noselect", + mwbuf ? "mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_INT, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) P_TEST_ERROR; } else if (mm == 1) { dset_types[i] = DSET_WITH_CONV_AND_NO_BKG; - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_nbkg_dset%d_%s_%s", i, - chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_nbkg_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", select ? "select" : "noselect", + mwbuf ? "mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_LONG, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) P_TEST_ERROR; } else { dset_types[i] = DSET_WITH_CONV_AND_BKG; - snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_bkg_dset%d_%s_%s", i, - chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf"); + snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_bkg_dset%d_%s_%s_%s", i, + chunked ? "chunked" : "contig", select ? "select" : "noselect", + mwbuf ? 
"mwbuf" : "nomwbuf"); if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], s1_tid, file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) P_TEST_ERROR; @@ -3119,6 +3255,8 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) /* Test with s settings for ndsets */ for (s = SETTING_A; s <= SETTING_B; s++) { + any_tconv = false; + /* for i ndsets */ for (i = 0; i < (int)ndsets; i++) { @@ -3171,6 +3309,9 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) mem_tids[i] = H5T_NATIVE_LONG; r_mem_tids[i] = H5T_NATIVE_SHORT; + + /* There is type conversion in the read op */ + any_tconv = true; } break; @@ -3194,6 +3335,9 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) } mem_tids[i] = s1_tid; r_mem_tids[i] = s3_tid; + + /* There is type conversion in the read op */ + any_tconv = true; } else if (s == SETTING_B) { /* Initialize buffer indices */ @@ -3246,7 +3390,12 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf) if (H5Dread_multi(ndsets, dset_dids, r_mem_tids, mem_sids, file_sids, dxpl, rbufs) < 0) P_TEST_ERROR; - check_io_mode(dxpl, chunked); + /* If doing type conversion and not using selection I/O there will be no collective I/O, since + * type conversion is unsupported by legacy collective I/O */ + testing_check_io_mode(dxpl, (any_tconv && !select) ? 0 + : (chunked ? H5D_MPIO_CHUNK_COLLECTIVE + : H5D_MPIO_CONTIGUOUS_COLLECTIVE)); + check_actual_selection_io_mode(dxpl, select ? H5D_SELECTION_IO : H5D_SCALAR_IO); /* Verify result read */ /* for i ndsets */ @@ -3422,6 +3571,8 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) P_TEST_ERROR; + set_dxpl(dxpl, H5D_SELECTION_IO_MODE_ON, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, false); + if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) P_TEST_ERROR; @@ -3442,20 +3593,12 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ /* Datatype conversion */ if (test_mode & TEST_DATATYPE_CONVERSION) { - /* With one exception, all will land at H5FD__mpio_read/write_selection(). - * As the xfer mode is H5FD_MPIO_INDEPENDENT, this will call - * H5FD__read/write_from_selection() triggering H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB. 
- */ - no_selection_io_cause_read_expected |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + /* With one exception, all will land at H5FD__mpio_read/write_selection() */ - /* Exception case: This will turn off selection I/O landing at H5FD__mpio_write() */ - if ((test_mode & TEST_TCONV_BUF_TOO_SMALL) && !(test_mode & TEST_IN_PLACE_TCONV)) - no_selection_io_cause_write_expected |= H5D_SEL_IO_TCONV_BUF_TOO_SMALL; - else - no_selection_io_cause_write_expected |= H5D_SEL_IO_NO_VECTOR_OR_SELECTION_IO_CB; + if (test_mode & TEST_IN_PLACE_TCONV) + if (H5Pset_modify_write_buf(dxpl, true) < 0) + P_TEST_ERROR; - if (H5Pset_selection_io(dxpl, H5D_SELECTION_IO_MODE_ON) < 0) - P_TEST_ERROR; tid = H5T_NATIVE_UINT; /* If we're testing a too small tconv buffer, set the buffer to be too small */ @@ -3463,11 +3606,13 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ if (H5Pset_buffer(dxpl, sizeof(int), NULL, NULL) < 0) P_TEST_ERROR; - if (test_mode & TEST_IN_PLACE_TCONV) { - if (H5Pset_modify_write_buf(dxpl, true) < 0) - P_TEST_ERROR; - } - /* In-place type conversion for read doesn't require modify_write_buf */ + /* Exception case: When the type conversion buffer is too small and we're not allowing the library + * to modify the write buffer, the library will fall back to scalar independent I/O since the + * selection I/O path with type conversion requires a full size conversion buffer */ + if (!(test_mode & TEST_IN_PLACE_TCONV)) + /* In-place type conversion for read doesn't require modify_write_buf, so the read will still + * use selection I/O */ + no_selection_io_cause_write_expected |= H5D_SEL_IO_TCONV_BUF_TOO_SMALL; } } @@ -3493,6 +3638,10 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ if (H5Dwrite(did, tid, H5S_ALL, H5S_ALL, dxpl, wbuf) < 0) P_TEST_ERROR; + if (!(test_mode & TEST_DISABLE_BY_API || test_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET || + ((test_mode & TEST_TCONV_BUF_TOO_SMALL) && !(test_mode & TEST_IN_PLACE_TCONV)))) + check_actual_selection_io_mode(dxpl, H5D_SELECTION_IO); + if (H5Pget_no_selection_io_cause(dxpl, &no_selection_io_cause_write) < 0) P_TEST_ERROR; @@ -3535,9 +3684,6 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_ static void test_get_no_selection_io_cause(const char *filename, hid_t fapl) { - hid_t dxpl = H5I_INVALID_HID; - H5D_selection_io_mode_t selection_io_mode; - if (MAINPROCESS) { printf("\n"); TESTING("for H5Pget_no_selection_io_cause()"); @@ -3545,21 +3691,6 @@ test_get_no_selection_io_cause(const char *filename, hid_t fapl) curr_nerrors = nerrors; - if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) - P_TEST_ERROR; - if (H5Pget_selection_io(dxpl, &selection_io_mode) < 0) - P_TEST_ERROR; - if (H5Pclose(dxpl) < 0) - P_TEST_ERROR; - - /* The following tests are based on H5D_SELECTION_IO_MODE_DEFAULT as the - * default setting in the library; skip the tests if that is not true */ - if (selection_io_mode != H5D_SELECTION_IO_MODE_DEFAULT) { - if (MAINPROCESS) - SKIPPED(); - return; - } - test_no_selection_io_cause_mode(filename, fapl, TEST_DISABLE_BY_API); test_no_selection_io_cause_mode(filename, fapl, TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET); test_no_selection_io_cause_mode(filename, fapl, TEST_DATATYPE_CONVERSION); @@ -3952,6 +4083,7 @@ main(int argc, char *argv[]) int test_select_config; unsigned chunked; unsigned dtrans; + unsigned select; unsigned mwbuf; h5_reset(); @@ -3978,163 +4110,170 @@ main(int argc, char *argv[]) /* therefore, not all tests are run with data 
transform */ for (dtrans = false; dtrans <= true; dtrans++) { - /* Test with and without modify_write_buf turned on */ - for (mwbuf = false; mwbuf <= true; mwbuf++) { - - if (MAINPROCESS) { - /* Print configuration message */ - printf("Testing for selection I/O "); - if (chunked) - printf("with chunked dataset, "); - else - printf("with contiguous dataset, "); - if (dtrans) - printf("data transform, "); - else - printf("without data transform, "); - if (mwbuf) - printf("and with modifying write buffers\n"); - else - printf("and without modifying write buffers\n"); - } + for (select = false; select <= true; select++) { + + /* Test with and without modify_write_buf turned on */ + for (mwbuf = false; mwbuf <= true; mwbuf++) { + + if (MAINPROCESS) { + /* Print configuration message */ + printf("Testing for selection I/O "); + if (chunked) + printf("with chunked dataset, "); + else + printf("with contiguous dataset, "); + if (dtrans) + printf("data transform, "); + else + printf("without data transform, "); + if (select) + printf("selection I/O ON, "); + else + printf("selection I/O OFF, "); + if (mwbuf) + printf("and with modifying write buffers\n"); + else + printf("and without modifying write buffers\n"); + } + + for (test_select_config = (int)TEST_NO_TYPE_CONV; + test_select_config < (int)TEST_SELECT_NTESTS; test_select_config++) { + + switch (test_select_config) { + + case TEST_NO_TYPE_CONV: /* case 1 */ + if (MAINPROCESS) + TESTING_2("No type conversion (null case)"); - for (test_select_config = (int)TEST_NO_TYPE_CONV; - test_select_config < (int)TEST_SELECT_NTESTS; test_select_config++) { + test_no_type_conv(fid, chunked, dtrans, select, mwbuf); - switch (test_select_config) { + break; - case TEST_NO_TYPE_CONV: /* case 1 */ - if (MAINPROCESS) - TESTING_2("No type conversion (null case)"); + case TEST_NO_SIZE_CHANGE_NO_BKG: /* case 2 */ + if (MAINPROCESS) + TESTING_2("No size change, no background buffer"); - test_no_type_conv(fid, chunked, dtrans, mwbuf); + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + continue; + } - break; + test_no_size_change_no_bkg(fid, chunked, select, mwbuf); - case TEST_NO_SIZE_CHANGE_NO_BKG: /* case 2 */ - if (MAINPROCESS) - TESTING_2("No size change, no background buffer"); + break; - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) { + case TEST_LARGER_MEM_NO_BKG: /* case 3 */ if (MAINPROCESS) - SKIPPED(); - continue; - } - - test_no_size_change_no_bkg(fid, chunked, mwbuf); - - break; + TESTING_2("Larger memory type, no background buffer"); - case TEST_LARGER_MEM_NO_BKG: /* case 3 */ - if (MAINPROCESS) - TESTING_2("Larger memory type, no background buffer"); + test_larger_mem_type_no_bkg(fid, chunked, dtrans, select, mwbuf); - test_larger_mem_type_no_bkg(fid, chunked, dtrans, mwbuf); - - break; + break; - case TEST_SMALLER_MEM_NO_BKG: /* case 4 */ - if (MAINPROCESS) - TESTING_2("Smaller memory type, no background buffer"); + case TEST_SMALLER_MEM_NO_BKG: /* case 4 */ + if (MAINPROCESS) + TESTING_2("Smaller memory type, no background buffer"); - test_smaller_mem_type_no_bkg(fid, chunked, dtrans, mwbuf); + test_smaller_mem_type_no_bkg(fid, chunked, dtrans, select, mwbuf); - break; + break; - case TEST_CMPD_WITH_BKG: /* case 5 */ - if (MAINPROCESS) - TESTING_2("Compound types with background buffer"); - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) { + case TEST_CMPD_WITH_BKG: /* case 5 */ if 
(MAINPROCESS) - SKIPPED(); - continue; - } - - test_cmpd_with_bkg(fid, chunked, mwbuf); + TESTING_2("Compound types with background buffer"); + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + continue; + } - break; + test_cmpd_with_bkg(fid, chunked, select, mwbuf); - case TEST_TYPE_CONV_SEL_EMPTY: /* case 6 */ - if (MAINPROCESS) - TESTING_2("Empty selections + Type conversion"); + break; - test_type_conv_sel_empty(fid, chunked, dtrans, mwbuf); + case TEST_TYPE_CONV_SEL_EMPTY: /* case 6 */ + if (MAINPROCESS) + TESTING_2("Empty selections + Type conversion"); - break; + test_type_conv_sel_empty(fid, chunked, dtrans, select, mwbuf); - case TEST_MULTI_CONV_NO_BKG: /* case 7 */ - if (MAINPROCESS) - TESTING_2("multi-datasets: type conv + no bkg buffer"); + break; - test_multi_dsets_no_bkg(fid, chunked, dtrans, mwbuf); + case TEST_MULTI_CONV_NO_BKG: /* case 7 */ + if (MAINPROCESS) + TESTING_2("multi-datasets: type conv + no bkg buffer"); - break; + test_multi_dsets_no_bkg(fid, chunked, dtrans, select, mwbuf); - case TEST_MULTI_CONV_BKG: /* case 8 */ - if (MAINPROCESS) - TESTING_2("multi-datasets: type conv + bkg buffer"); + break; - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) { + case TEST_MULTI_CONV_BKG: /* case 8 */ if (MAINPROCESS) - SKIPPED(); - } - else - test_multi_dsets_cmpd_with_bkg(fid, chunked, mwbuf); + TESTING_2("multi-datasets: type conv + bkg buffer"); - break; + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + } + else + test_multi_dsets_cmpd_with_bkg(fid, chunked, select, mwbuf); - case TEST_MULTI_CONV_SIZE_CHANGE: /* case 9 */ - if (MAINPROCESS) - TESTING_2("multi-datasets: type conv + size change + no bkg buffer"); + break; - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) { + case TEST_MULTI_CONV_SIZE_CHANGE: /* case 9 */ if (MAINPROCESS) - SKIPPED(); - } - else - test_multi_dsets_size_change_no_bkg(fid, chunked, mwbuf); + TESTING_2("multi-datasets: type conv + size change + no bkg buffer"); - break; + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + } + else + test_multi_dsets_size_change_no_bkg(fid, chunked, select, mwbuf); - case TEST_MULTI_CONV_SEL_EMPTY: /* case 10 */ - if (MAINPROCESS) - TESTING_2("multi-datasets: type conv + empty selections"); + break; - test_multi_dsets_conv_sel_empty(fid, chunked, dtrans, mwbuf); + case TEST_MULTI_CONV_SEL_EMPTY: /* case 10 */ + if (MAINPROCESS) + TESTING_2("multi-datasets: type conv + empty selections"); - break; + test_multi_dsets_conv_sel_empty(fid, chunked, dtrans, select, mwbuf); - case TEST_MULTI_ALL: /* case 11 */ - if (MAINPROCESS) - TESTING_2("multi-datasets: no conv + conv without bkg + conv with bkg"); + break; - /* Data transforms does not apply to the dataset datatype for this test */ - if (dtrans) { + case TEST_MULTI_ALL: /* case 11 */ if (MAINPROCESS) - SKIPPED(); - } - else - test_multi_dsets_all(2, fid, chunked, mwbuf); + TESTING_2("multi-datasets: no conv + conv without bkg + conv with bkg"); - break; + /* Data transforms does not apply to the dataset datatype for this test */ + if (dtrans) { + if (MAINPROCESS) + SKIPPED(); + } + else + test_multi_dsets_all(2, fid, chunked, select, mwbuf); - case TEST_SELECT_NTESTS: - default: - P_TEST_ERROR; - break; + break; + + case TEST_SELECT_NTESTS: + default: + 
P_TEST_ERROR; + break; - } /* end switch */ + } /* end switch */ - } /* end for test_select_config */ + } /* end for test_select_config */ - } /* end mwbuf */ + } /* end mwbuf */ - } /* end dtrans */ - } /* end chunked */ + } /* end select */ + } /* end dtrans */ + } /* end chunked */ if (H5Fclose(fid) < 0) P_TEST_ERROR; diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c index 98e307772a9..0a3d3d0a49e 100644 --- a/testpar/t_shapesame.c +++ b/testpar/t_shapesame.c @@ -4089,7 +4089,8 @@ parse_options(int argc, char **argv) case 'h': /* print help message--return with nerrors set */ return (1); default: - printf("Illegal option(%s)\n", *argv); + if (MAINPROCESS) + printf("Illegal option(%s)\n", *argv); nerrors++; return (1); } @@ -4098,12 +4099,14 @@ parse_options(int argc, char **argv) /* check validity of dimension and chunk sizes */ if (dim0 <= 0 || dim1 <= 0) { - printf("Illegal dim sizes (%d, %d)\n", dim0, dim1); + if (MAINPROCESS) + printf("Illegal dim sizes (%d, %d)\n", dim0, dim1); nerrors++; return (1); } if (chunkdim0 <= 0 || chunkdim1 <= 0) { - printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); + if (MAINPROCESS) + printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); nerrors++; return (1); } @@ -4128,9 +4131,11 @@ parse_options(int argc, char **argv) nerrors++; return (1); } - printf("Test filenames are:\n"); - for (i = 0; i < n; i++) - printf(" %s\n", filenames[i]); + if (MAINPROCESS) { + printf("Test filenames are:\n"); + for (i = 0; i < n; i++) + printf(" %s\n", filenames[i]); + } } return (0); diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c index ccece41b846..72613a3bef1 100644 --- a/testpar/t_subfiling_vfd.c +++ b/testpar/t_subfiling_vfd.c @@ -40,6 +40,8 @@ #define PATH_MAX 4096 #endif +#define DEFAULT_DEFLATE_LEVEL 9 + #define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0]) #define CHECK_PASSED() \ @@ -82,12 +84,15 @@ static char *config_dir = NULL; int nerrors = 0; int curr_nerrors = 0; +bool enable_compression = false; + /* Function pointer typedef for test functions */ typedef void (*test_func)(void); /* Utility functions */ static hid_t create_subfiling_ioc_fapl(MPI_Comm comm, MPI_Info info, bool custom_config, H5FD_subfiling_params_t *custom_cfg, int32_t thread_pool_size); +static hid_t create_dcpl_id(int rank, const hsize_t dims[], hid_t dxpl_id); /* Test functions */ static void test_create_and_close(void); @@ -182,7 +187,47 @@ create_subfiling_ioc_fapl(MPI_Comm comm, MPI_Info info, bool custom_config, return H5I_INVALID_HID; } +/* --------------------------------------------------------------------------- + * Function: create_dcpl_id + * + * Purpose: Creates dataset creation property list identifier with + * chunking and compression, and enforces the + * required collective IO. + * + * Return: Success: HID Dataset creation property list identifier, + * a non-negative value. + * Failure: H5I_INVALID_HID, a negative value. 
+ * --------------------------------------------------------------------------- + */ +static hid_t +create_dcpl_id(int rank, const hsize_t dset_dims[], hid_t dxpl_id) +{ + hsize_t chunk_dims[1]; + hid_t ret_value = H5I_INVALID_HID; + + if ((ret_value = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if (enable_compression) { + if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0) + TEST_ERROR; + chunk_dims[0] = dset_dims[0] / 2; + if (H5Pset_chunk(ret_value, rank, chunk_dims) < 0) + TEST_ERROR; + if (H5Pset_deflate(ret_value, DEFAULT_DEFLATE_LEVEL) < 0) + TEST_ERROR; + } + + return ret_value; +error: + if ((H5I_INVALID_HID != ret_value) && (H5Pclose(ret_value) < 0)) { + H5_FAILED(); + AT(); + } + + return H5I_INVALID_HID; +} /* * A simple test that creates and closes a file with the * subfiling VFD @@ -382,16 +427,20 @@ test_config_file(void) substr = strstr(config_buf, "hdf5_file"); VRFY(substr, "strstr succeeded"); + H5_GCC_CLANG_DIAG_OFF("format-nonliteral") snprintf(scan_format, sizeof(scan_format), "hdf5_file=%%%zus", (size_t)(PATH_MAX - 1)); VRFY((sscanf(substr, scan_format, tmp_buf) == 1), "sscanf succeeded"); + H5_GCC_CLANG_DIAG_ON("format-nonliteral") VRFY((strcmp(tmp_buf, resolved_path) == 0), "strcmp succeeded"); substr = strstr(config_buf, "subfile_dir"); VRFY(substr, "strstr succeeded"); + H5_GCC_CLANG_DIAG_OFF("format-nonliteral") snprintf(scan_format, sizeof(scan_format), "subfile_dir=%%%zus", (size_t)(PATH_MAX - 1)); VRFY((sscanf(substr, scan_format, tmp_buf) == 1), "sscanf succeeded"); + H5_GCC_CLANG_DIAG_ON("format-nonliteral") VRFY((strcmp(tmp_buf, subfile_dir) == 0), "strcmp succeeded"); @@ -1056,6 +1105,7 @@ test_read_different_stripe_size(void) hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; hid_t dxpl_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; char *tmp_filename = NULL; void *buf = NULL; @@ -1102,7 +1152,10 @@ test_read_different_stripe_size(void) fspace_id = H5Screate_simple(1, dset_dims, NULL); VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); - dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dcpl_id = create_dcpl_id(1, dset_dims, dxpl_id); + VRFY((dcpl_id >= 0), "DCPL creation succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); /* Select hyperslab */ @@ -1125,6 +1178,7 @@ test_read_different_stripe_size(void) VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "DCPL close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); /* Ensure all the subfiles are present */ @@ -1149,10 +1203,12 @@ test_read_different_stripe_size(void) VRFY((fclose(subfile_ptr) >= 0), "fclose on subfile succeeded"); /* Check file size */ - VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); - subfile_size = (h5_stat_size_t)subfile_info.st_size; + if (!enable_compression) { + VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); + subfile_size = (h5_stat_size_t)subfile_info.st_size; - VRFY((subfile_size >= cfg.stripe_size), "File size verification succeeded"); + VRFY((subfile_size >= cfg.stripe_size), "File size verification succeeded"); + } } } @@ -1372,10 +1428,12 @@ test_subfiling_precreate_rank_0(void) VRFY((fclose(subfile_ptr) >= 0), "fclose on subfile succeeded"); /* Check file 
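Editorial note: the new create_dcpl_id() helper above concentrates the chunk + deflate setup (and the collective-transfer requirement that parallel filtered writes impose) in one place. The pattern the rest of this patch applies at each H5Dcreate2() call site is roughly the following; file_id, dxpl_id, and SUBF_HDF5_TYPE come from the surrounding tests, the dataset extent is an arbitrary example, and the VRFY error checks are omitted for brevity.

    /* Assumed context: file_id, dxpl_id, and SUBF_HDF5_TYPE as in the tests */
    hsize_t dset_dims[1] = {1024};                 /* example extent only */
    hid_t   fspace_id    = H5Screate_simple(1, dset_dims, NULL);
    hid_t   dcpl_id      = create_dcpl_id(1, dset_dims, dxpl_id);
    hid_t   dset_id      = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id,
                                      H5P_DEFAULT, dcpl_id, H5P_DEFAULT);

    /* ... H5Dwrite with dxpl_id, which is collective when compression is on ... */

    H5Dclose(dset_id);
    H5Pclose(dcpl_id);
    H5Sclose(fspace_id);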
size */ - VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); - file_size = (h5_stat_size_t)subfile_info.st_size; + if (!enable_compression) { + VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); + file_size = (h5_stat_size_t)subfile_info.st_size; - VRFY((file_size >= cfg.stripe_size), "File size verification succeeded"); + VRFY((file_size >= cfg.stripe_size), "File size verification succeeded"); + } } /* Verify that there aren't too many subfiles */ @@ -1466,6 +1524,7 @@ test_subfiling_write_many_read_one(void) hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; hid_t dxpl_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; void *buf = NULL; @@ -1513,7 +1572,10 @@ test_subfiling_write_many_read_one(void) fspace_id = H5Screate_simple(1, dset_dims, NULL); VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); - dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dcpl_id = create_dcpl_id(1, dset_dims, dxpl_id); + VRFY((dcpl_id >= 0), "DCPL creation succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); /* Select hyperslab */ @@ -1535,6 +1597,7 @@ test_subfiling_write_many_read_one(void) buf = NULL; VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "DCPL close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); mpi_code_g = MPI_Barrier(comm_g); @@ -1612,6 +1675,7 @@ test_subfiling_write_many_read_few(void) hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; hid_t dxpl_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; void *buf = NULL; @@ -1669,7 +1733,10 @@ test_subfiling_write_many_read_few(void) fspace_id = H5Screate_simple(1, dset_dims, NULL); VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); - dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dcpl_id = create_dcpl_id(1, dset_dims, dxpl_id); + VRFY((dcpl_id >= 0), "DCPL creation succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); /* Select hyperslab */ @@ -1691,6 +1758,7 @@ test_subfiling_write_many_read_few(void) buf = NULL; VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "DCPL close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); /* @@ -1804,6 +1872,7 @@ test_subfiling_h5fuse(void) hid_t fapl_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; hid_t dxpl_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; void *buf = NULL; int skip_test = 0; @@ -1894,7 +1963,10 @@ test_subfiling_h5fuse(void) fspace_id = H5Screate_simple(1, dset_dims, NULL); VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); - dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dcpl_id = create_dcpl_id(1, dset_dims, dxpl_id); + VRFY((dcpl_id >= 0), "DCPL creation succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); VRFY((dset_id >= 0), "Dataset creation succeeded"); /* Select hyperslab */ @@ -1915,8 +1987,11 @@ test_subfiling_h5fuse(void) free(buf); buf = NULL; + VRFY((H5Pset_dxpl_mpio(dxpl_id, 
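Editorial note: for readers unfamiliar with the Subfiling VFD setup these tests rely on through create_subfiling_ioc_fapl(), the sketch below shows one conventional way to build such a FAPL: fetch the default H5FD_subfiling_config_t, override the stripe size, and apply it. This is an illustration under the assumption of an MPI-enabled build with the Subfiling VFD compiled in; it is not the test helper itself.

    #include <stdint.h>
    #include <mpi.h>
    #include "hdf5.h"
    #include "H5FDsubfiling.h"   /* assumes the Subfiling VFD is built in */

    /* Illustration: default subfiling configuration with a custom stripe size */
    static hid_t
    make_subfiling_fapl(MPI_Comm comm, MPI_Info info, int64_t stripe_size)
    {
        H5FD_subfiling_config_t cfg;
        hid_t                   fapl_id = H5I_INVALID_HID;

        if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
            return H5I_INVALID_HID;
        if (H5Pset_mpi_params(fapl_id, comm, info) < 0)
            goto error;
        /* Query the default parameters, then override what we care about */
        if (H5Pget_fapl_subfiling(fapl_id, &cfg) < 0)
            goto error;
        cfg.shared_cfg.stripe_size = stripe_size;
        if (H5Pset_fapl_subfiling(fapl_id, &cfg) < 0)
            goto error;

        return fapl_id;

    error:
        H5Pclose(fapl_id);
        return H5I_INVALID_HID;
    }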
H5FD_MPIO_INDEPENDENT) >= 0), "H5Pset_dxpl_mpio succeeded"); + VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "DCPL close succeeded"); VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); if (MAINPROCESS) { @@ -1969,8 +2044,10 @@ test_subfiling_h5fuse(void) } /* Verify the size of the fused file */ - VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); - VRFY(((size_t)file_info.st_size >= target_size), "File size verification succeeded"); + if (!enable_compression) { + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + VRFY(((size_t)file_info.st_size >= target_size), "File size verification succeeded"); + } /* Re-open file with sec2 driver and verify the data */ file_id = H5Fopen(SUBF_FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); @@ -2410,9 +2487,28 @@ main(int argc, char **argv) if (num_iocs_g > mpi_size) num_iocs_g = mpi_size; - if (MAINPROCESS) { - printf("Re-running tests with environment variables set\n"); + if (MAINPROCESS) + printf(" Re-running tests with compression enabled\n"); + +#ifdef H5_HAVE_FILTER_DEFLATE + enable_compression = true; + for (size_t i = 0; i < ARRAY_SIZE(tests); i++) { + if (MPI_SUCCESS == (mpi_code_g = MPI_Barrier(comm_g))) { + (*tests[i])(); + } + else { + if (MAINPROCESS) + MESG("MPI_Barrier failed"); + nerrors++; + } } + enable_compression = false; +#else + if (MAINPROCESS) + SKIPPED(); +#endif + if (MAINPROCESS) + printf("\nRe-running tests with environment variables set\n"); for (size_t i = 0; i < ARRAY_SIZE(tests); i++) { if (MPI_SUCCESS == (mpi_code_g = MPI_Barrier(comm_g))) { @@ -2426,13 +2522,29 @@ main(int argc, char **argv) } if (MAINPROCESS) - puts(""); - + printf("\n Re-running tests with compression enabled\n"); +#ifdef H5_HAVE_FILTER_DEFLATE + enable_compression = true; + for (size_t i = 0; i < ARRAY_SIZE(tests); i++) { + if (MPI_SUCCESS == (mpi_code_g = MPI_Barrier(comm_g))) { + (*tests[i])(); + } + else { + if (MAINPROCESS) + MESG("MPI_Barrier failed"); + nerrors++; + } + } + enable_compression = false; +#else + if (MAINPROCESS) + SKIPPED(); +#endif if (nerrors) goto exit; if (MAINPROCESS) - puts("All Subfiling VFD tests passed\n"); + puts("\nAll Subfiling VFD tests passed\n"); exit: if (must_unset_stripe_size_env) diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index 584ca1f6107..e094ad6dcd3 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -351,6 +351,7 @@ main(int argc, char **argv) /* Tests are generally arranged from least to most complexity... 
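Editorial note: the added blocks in main() re-run the whole tests[] table with the new enable_compression flag set, once per existing configuration, guarded by H5_HAVE_FILTER_DEFLATE and barrier-synchronized. The duplicated #ifdef blocks can be read as the hypothetical helper below, shown only to make the intent explicit; tests, comm_g, mpi_code_g, nerrors, enable_compression, ARRAY_SIZE, MAINPROCESS, MESG, and SKIPPED are the symbols already defined in t_subfiling_vfd.c, and run_test_pass is not a name used by the patch.

    /* Hypothetical condensation of the added driver loops in main() */
    static void
    run_test_pass(bool with_compression)
    {
    #ifndef H5_HAVE_FILTER_DEFLATE
        if (with_compression) {
            if (MAINPROCESS)
                SKIPPED();               /* deflate filter not available */
            return;
        }
    #endif
        enable_compression = with_compression;

        for (size_t i = 0; i < ARRAY_SIZE(tests); i++) {
            if (MPI_SUCCESS == (mpi_code_g = MPI_Barrier(comm_g)))
                (*tests[i])();
            else {
                if (MAINPROCESS)
                    MESG("MPI_Barrier failed");
                nerrors++;
            }
        }

        enable_compression = false;
    }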
*/ AddTest("mpiodup", test_fapl_mpio_dup, NULL, "fapl_mpio duplicate", NULL); + AddTest("getdxplmpio", test_get_dxpl_mpio, NULL, "dxpl_mpio get", PARATESTFILE); AddTest("split", test_split_comm_access, NULL, "dataset using split communicators", PARATESTFILE); AddTest("h5oflusherror", test_oflush, NULL, "H5Oflush failure", PARATESTFILE); @@ -366,6 +367,11 @@ main(int argc, char **argv) AddTest("invlibverassert", test_invalid_libver_bounds_file_close_assert, NULL, "Invalid libver bounds assertion failure", PARATESTFILE); + AddTest("evictparassert", test_evict_on_close_parallel_unsupp, NULL, "Evict on close in parallel failure", + PARATESTFILE); + AddTest("fapl_preserve", test_fapl_preserve_hints, NULL, "preserve MPI I/O hints after fapl closed", + PARATESTFILE); + AddTest("idsetw", dataset_writeInd, NULL, "dataset independent write", PARATESTFILE); AddTest("idsetr", dataset_readInd, NULL, "dataset independent read", PARATESTFILE); @@ -521,6 +527,8 @@ main(int argc, char **argv) "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE); AddTest("GH_coll_MD_wr", test_collective_global_heap_write, NULL, "Collective MD write of global heap data", PARATESTFILE); + AddTest("COLLIO_INDMDWR", test_coll_io_ind_md_write, NULL, + "Collective I/O with Independent metadata writes", PARATESTFILE); /* Display testing information */ TestInfo(argv[0]); diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h index 6ac8080c82a..31b7c6963d5 100644 --- a/testpar/testphdf5.h +++ b/testpar/testphdf5.h @@ -233,6 +233,8 @@ void zero_dim_dset(void); void test_file_properties(void); void test_delete(void); void test_invalid_libver_bounds_file_close_assert(void); +void test_evict_on_close_parallel_unsupp(void); +void test_fapl_preserve_hints(void); void multiple_dset_write(void); void multiple_group_write(void); void multiple_group_read(void); @@ -240,6 +242,7 @@ void collective_group_write_independent_group_read(void); void collective_group_write(void); void independent_group_read(void); void test_fapl_mpio_dup(void); +void test_get_dxpl_mpio(void); void test_split_comm_access(void); void test_page_buffer_access(void); void dataset_atomicity(void); @@ -296,6 +299,7 @@ void test_partial_no_selection_coll_md_read(void); void test_multi_chunk_io_addrmap_issue(void); void test_link_chunk_io_sort_chunk_issue(void); void test_collective_global_heap_write(void); +void test_coll_io_ind_md_write(void); void test_oflush(void); /* commonly used prototypes */ diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 55f2c2a576f..91c57c9cf08 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -7,7 +7,7 @@ add_subdirectory (lib) add_subdirectory (src) #-- Add the tests -if (BUILD_TESTING) +if (NOT HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING) add_subdirectory (test) # -------------------------------------------------------------------- diff --git a/tools/Makefile.am b/tools/Makefile.am index 7db4040d0eb..d0a6c5c5bc4 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -19,7 +19,7 @@ include $(top_srcdir)/config/commence.am if BUILD_TESTS_CONDITIONAL - TESTSERIAL_DIR =test + TESTSERIAL_DIR=libtest test else TESTSERIAL_DIR= endif diff --git a/tools/lib/h5diff.c b/tools/lib/h5diff.c index 924f9f35de1..15f2a1428bf 100644 --- a/tools/lib/h5diff.c +++ b/tools/lib/h5diff.c @@ -1485,9 +1485,6 @@ diff_match(hid_t file1_id, const char *grp1, trav_info_t *info1, hid_t file2_id, } /* end else */ } /* end while */ - for (i = 1; (int)i < g_nTasks; i++) - MPI_Send(NULL, 0, MPI_BYTE, (int)i, MPI_TAG_END, 
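Editorial note: among the tests registered above, test_get_dxpl_mpio presumably exercises the H5Pget_dxpl_mpio query. As an illustration of that API (not the body of the new test), a set/get round trip on a transfer property list looks like this; a parallel HDF5 build is assumed.

    #include "hdf5.h"

    /* Illustration: set a transfer mode, then read it back with H5Pget_dxpl_mpio */
    static int
    check_dxpl_mpio_roundtrip(void)
    {
        hid_t            dxpl_id = H5I_INVALID_HID;
        H5FD_mpio_xfer_t mode    = H5FD_MPIO_INDEPENDENT;
        int              ok      = 0;

        if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
            return 0;
        if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0 &&
            H5Pget_dxpl_mpio(dxpl_id, &mode) >= 0)
            ok = (mode == H5FD_MPIO_COLLECTIVE);
        H5Pclose(dxpl_id);
        return ok;
    }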
MPI_COMM_WORLD); - /* Print any final data waiting in our queue */ print_incoming_data(); } /* end if */ diff --git a/tools/libtest/Makefile.am b/tools/libtest/Makefile.am index 835667c74b0..8a503d033b2 100644 --- a/tools/libtest/Makefile.am +++ b/tools/libtest/Makefile.am @@ -19,15 +19,15 @@ include $(top_srcdir)/config/commence.am -# Include src and tools/lib directories -AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib +# Include src, test, and tools/lib directories +AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test -I$(top_srcdir)/tools/lib -# All programs depend on the hdf5 and h5tools libraries -LDADD=$(LIBH5TOOLS) $(LIBHDF5) +# All programs depend on the hdf5, hdf5 test, and h5tools libraries +LDADD=$(LIBH5TOOLS) $(LIBH5TEST) $(LIBHDF5) # main target -bin_PROGRAMS=h5tools_test_utils +noinst_PROGRAMS=h5tools_test_utils # check_PROGRAMS=$(TEST_PROG) diff --git a/tools/src/h5diff/ph5diff_main.c b/tools/src/h5diff/ph5diff_main.c index 0f432610788..f90bd484ac8 100644 --- a/tools/src/h5diff/ph5diff_main.c +++ b/tools/src/h5diff/ph5diff_main.c @@ -127,7 +127,7 @@ ph5diff_worker(int nID) char filenames[2][MAX_FILENAME]; /* Retrieve filenames */ - MPI_Recv(filenames, MAX_FILENAME * 2, MPI_CHAR, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &Status); + MPI_Recv(filenames, MAX_FILENAME * 2, MPI_CHAR, 0, MPI_TAG_PARALLEL, MPI_COMM_WORLD, &Status); /* disable error reporting */ H5E_BEGIN_TRY @@ -173,7 +173,7 @@ ph5diff_worker(int nID) /* When get token, send all of our output to the manager task and then return the token */ for (i = 0; i < outBuffOffset; i += PRINT_DATA_MAX_SIZE) - MPI_Send(outBuff + i, PRINT_DATA_MAX_SIZE, MPI_BYTE, 0, MPI_TAG_PRINT_DATA, + MPI_Send(outBuff + i, PRINT_DATA_MAX_SIZE, MPI_CHAR, 0, MPI_TAG_PRINT_DATA, MPI_COMM_WORLD); /* An overflow file exists, so we send it's output to the manager too and then delete it */ @@ -188,7 +188,7 @@ ph5diff_worker(int nID) while ((tmp = getc(overflow_file)) >= 0) { *(out_data + i++) = (char)tmp; if (i == PRINT_DATA_MAX_SIZE) { - MPI_Send(out_data, PRINT_DATA_MAX_SIZE, MPI_BYTE, 0, MPI_TAG_PRINT_DATA, + MPI_Send(out_data, PRINT_DATA_MAX_SIZE, MPI_CHAR, 0, MPI_TAG_PRINT_DATA, MPI_COMM_WORLD); i = 0; memset(out_data, 0, PRINT_DATA_MAX_SIZE); @@ -196,7 +196,7 @@ ph5diff_worker(int nID) } if (i > 0) - MPI_Send(out_data, PRINT_DATA_MAX_SIZE, MPI_BYTE, 0, MPI_TAG_PRINT_DATA, + MPI_Send(out_data, PRINT_DATA_MAX_SIZE, MPI_CHAR, 0, MPI_TAG_PRINT_DATA, MPI_COMM_WORLD); fclose(overflow_file); diff --git a/tools/src/misc/h5clear.c b/tools/src/misc/h5clear.c index 31f7c1744f8..ea3e07249b1 100644 --- a/tools/src/misc/h5clear.c +++ b/tools/src/misc/h5clear.c @@ -61,6 +61,13 @@ static struct h5_long_options l_opts[] = { static void usage(const char *prog) { + fprintf(stdout, "h5clear clears superblock status flag field, removes metadata cache image, prints\n"); + fprintf(stdout, "EOA and EOF, or sets EOA of a file. It is not a general repair tool and should not\n"); + fprintf(stdout, "be used to fix file corruption. If a process doesn't shut down cleanly, the\n"); + fprintf(stdout, "superblock mark can be left that prevents opening a file without SWMR. 
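Editorial note: the ph5diff worker change above replaces MPI_ANY_TAG with MPI_TAG_PARALLEL when receiving the two file names and standardizes on MPI_CHAR for the print-data sends. A plausible reading is that a tagged receive keeps this MPI_Recv from consuming an unrelated control message such as MPI_TAG_END. The fragment below simply restates the receive in isolation; MAX_FILENAME and the tag constant come from the ph5diff sources.

    /* Restated in isolation; MAX_FILENAME and MPI_TAG_PARALLEL come from ph5diff */
    char       filenames[2][MAX_FILENAME];
    MPI_Status Status;

    MPI_Recv(filenames, MAX_FILENAME * 2, MPI_CHAR, 0 /* manager rank */,
             MPI_TAG_PARALLEL, MPI_COMM_WORLD, &Status);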
Then,\n"); + fprintf(stdout, "h5clear can be used to remove this superblock mark so that the file can be inspected\n"); + fprintf(stdout, "and appropriate actions can be taken.\n"); + fprintf(stdout, "\n"); fprintf(stdout, "usage: %s [OPTIONS] file_name\n", prog); fprintf(stdout, " OPTIONS\n"); fprintf(stdout, " -h, --help Print a usage message and exit\n"); @@ -73,8 +80,8 @@ usage(const char *prog) fprintf(stdout, " C is >= 0; C is optional and will default to 1M when not set.\n"); fprintf(stdout, - " This option helps to repair a crashed file where the stored EOA\n"); - fprintf(stdout, " in the superblock is different from the actual EOF.\n"); + " This option helps to repair a crashed SWMR file when the stored\n"); + fprintf(stdout, " EOA in the superblock is different from the actual EOF.\n"); fprintf(stdout, " The file's EOA and EOF will be the same after applying\n"); fprintf(stdout, " this option to the file.\n"); fprintf(stdout, "\n"); diff --git a/tools/src/misc/h5repart.c b/tools/src/misc/h5repart.c index feb447f7e09..12b293273bc 100644 --- a/tools/src/misc/h5repart.c +++ b/tools/src/misc/h5repart.c @@ -227,6 +227,10 @@ main(int argc, char *argv[]) if (argno >= argc) usage(prog_name); src_gen_name = argv[argno++]; + if (!src_gen_name) { + fprintf(stderr, "invalid source file name pointer"); + exit(EXIT_FAILURE); + } snprintf(src_name, NAMELEN, src_gen_name, src_membno); src_is_family = strcmp(src_name, src_gen_name); @@ -249,6 +253,10 @@ main(int argc, char *argv[]) if (argno >= argc) usage(prog_name); dst_gen_name = argv[argno++]; + if (!dst_gen_name) { + fprintf(stderr, "invalid destination file name pointer"); + exit(EXIT_FAILURE); + } snprintf(dst_name, NAMELEN, dst_gen_name, dst_membno); dst_is_family = strcmp(dst_name, dst_gen_name); diff --git a/tools/test/h5copy/testh5copy.sh.in b/tools/test/h5copy/testh5copy.sh.in index f2c4a8e1237..916e2bec3e9 100644 --- a/tools/test/h5copy/testh5copy.sh.in +++ b/tools/test/h5copy/testh5copy.sh.in @@ -84,7 +84,7 @@ nerrors=0 verbose=yes h5haveexitcode=yes # default is yes -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR # RUNSERIAL is used. Check if it can return exit code from executalbe correctly. diff --git a/tools/test/h5diff/Makefile.am b/tools/test/h5diff/Makefile.am index b561d722275..f920afab74a 100644 --- a/tools/test/h5diff/Makefile.am +++ b/tools/test/h5diff/Makefile.am @@ -60,8 +60,7 @@ endif # Temporary files. *.h5 are generated by h5diff. They should # be copied to the testfiles/ directory if update is required -CHECK_CLEANFILES+=*.h5 expect_sorted actual_sorted - +CHECK_CLEANFILES+=*.h5 *.onion expect_sorted actual_sorted DISTCLEANFILES=testh5diff.sh h5diff_plugin.sh include $(top_srcdir)/config/conclude.am diff --git a/tools/test/h5diff/h5diff_plugin.sh.in b/tools/test/h5diff/h5diff_plugin.sh.in index 525503d68eb..f6783af8ff9 100644 --- a/tools/test/h5diff/h5diff_plugin.sh.in +++ b/tools/test/h5diff/h5diff_plugin.sh.in @@ -47,7 +47,7 @@ SRC_TOOLS="$srcdir/../.." 
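Editorial note: the h5repart guards added above protect the snprintf calls that expand a family member name from the user-supplied template. For readers unfamiliar with that mechanism, the template itself is used as the printf-style format, so a %d inside it receives the member number; the names and values below are examples, not taken from the patch.

    #include <stdio.h>

    int
    main(void)
    {
        char member_name[64];
        int  membno = 3;

        /* The user-supplied name acts as the format; %05d receives the member number */
        snprintf(member_name, sizeof(member_name), "family_file_%05d.h5", membno);
        printf("%s\n", member_name);   /* prints family_file_00003.h5 */

        /* A template without %d expands to itself, which is how h5repart's
         * strcmp(src_name, src_gen_name) detects a non-family file. */
        return 0;
    }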
SRC_H5DIFF_TESTFILES="$SRC_TOOLS/test/h5diff/testfiles" SRC_H5DIFF_OUTFILES="$SRC_TOOLS/test/h5diff/expected" -TESTDIR=./testplug +TESTDIR=./tmppl test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5diff/testh5diff.sh.in b/tools/test/h5diff/testh5diff.sh.in index efaac2e4082..1378f07cf34 100644 --- a/tools/test/h5diff/testh5diff.sh.in +++ b/tools/test/h5diff/testh5diff.sh.in @@ -49,7 +49,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5DIFF_OUTFILES="$SRC_TOOLS/test/h5diff/expected" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5dump/Makefile.am b/tools/test/h5dump/Makefile.am index a79b0fe8b73..619647c670e 100644 --- a/tools/test/h5dump/Makefile.am +++ b/tools/test/h5dump/Makefile.am @@ -45,7 +45,7 @@ endif # Temporary files. *.h5 are generated by h5dumpgentest. They should # copied to the testfiles/ directory if update is required. -CHECK_CLEANFILES+=*.h5 *.bin +CHECK_CLEANFILES+=*.h5 *.bin *.onion DISTCLEANFILES=testh5dump.sh testh5dumppbits.sh testh5dumpxml.sh h5dump_plugin.sh include $(top_srcdir)/config/conclude.am diff --git a/tools/test/h5dump/h5dump_plugin.sh.in b/tools/test/h5dump/h5dump_plugin.sh.in index d080c1da6a8..c9e485d565f 100644 --- a/tools/test/h5dump/h5dump_plugin.sh.in +++ b/tools/test/h5dump/h5dump_plugin.sh.in @@ -48,7 +48,7 @@ SRC_TOOLS="$srcdir/../.." SRC_H5DUMP_TESTFILES="$SRC_TOOLS/test/h5dump/testfiles" SRC_H5DUMP_OUTFILES="$SRC_TOOLS/test/h5dump/expected" -TESTDIR=./testplug +TESTDIR=./tmppl test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5dump/testh5dump.sh.in b/tools/test/h5dump/testh5dump.sh.in index 8796aa91fa7..fdeb17d29aa 100644 --- a/tools/test/h5dump/testh5dump.sh.in +++ b/tools/test/h5dump/testh5dump.sh.in @@ -66,7 +66,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" TEST_P_DIR=./testfiles -TESTDIR=./testfiles/std +TESTDIR=./tmp test -d $TEST_P_DIR || mkdir -p $TEST_P_DIR test -d $TESTDIR || mkdir -p $TESTDIR diff --git a/tools/test/h5dump/testh5dumppbits.sh.in b/tools/test/h5dump/testh5dumppbits.sh.in index 4094bfc8da7..e90cc86e2ee 100644 --- a/tools/test/h5dump/testh5dumppbits.sh.in +++ b/tools/test/h5dump/testh5dumppbits.sh.in @@ -58,7 +58,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" TEST_P_DIR=./testfiles -TESTDIR=./testfiles/pbits +TESTDIR=./tmpbits test -d $TEST_P_DIR || mkdir -p $TEST_P_DIR test -d $TESTDIR || mkdir -p $TESTDIR @@ -187,7 +187,7 @@ CLEAN_TESTFILES_AND_TESTDIR() # skip rm if srcdir is same as destdir # this occurs when build/test performed in source dir and # make cp fail - SDIR=$SRC_H5DUMP_TESTFILES/pbits + SDIR=$SRC_H5DUMP_TESTFILES INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'` INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'` if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then diff --git a/tools/test/h5dump/testh5dumpvds.sh.in b/tools/test/h5dump/testh5dumpvds.sh.in index e09e429eb39..2bd38dc8a32 100644 --- a/tools/test/h5dump/testh5dumpvds.sh.in +++ b/tools/test/h5dump/testh5dumpvds.sh.in @@ -58,7 +58,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" 
SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" TEST_P_DIR=./testfiles -TESTDIR=./testfiles/vds +TESTDIR=./tmpvds test -d $TEST_P_DIR || mkdir -p $TEST_P_DIR test -d $TESTDIR || mkdir -p $TESTDIR @@ -166,7 +166,7 @@ CLEAN_TESTFILES_AND_TESTDIR() # skip rm if srcdir is same as destdir # this occurs when build/test performed in source dir and # make cp fail - SDIR=$SRC_H5DUMP_TESTFILES/vds + SDIR=$SRC_H5DUMP_TESTFILES INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'` INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'` if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then diff --git a/tools/test/h5dump/testh5dumpxml.sh.in b/tools/test/h5dump/testh5dumpxml.sh.in index b433fa210e3..880fc6be340 100644 --- a/tools/test/h5dump/testh5dumpxml.sh.in +++ b/tools/test/h5dump/testh5dumpxml.sh.in @@ -49,7 +49,7 @@ SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5DUMP_OUTFILES="$SRC_TOOLS/test/h5dump/expected" TEST_P_DIR=./testfiles -TESTDIR=./testfiles/xml +TESTDIR=./tmpxml test -d $TEST_P_DIR || mkdir -p $TEST_P_DIR test -d $TESTDIR || mkdir -p $TESTDIR diff --git a/tools/test/h5format_convert/CMakeTests.cmake b/tools/test/h5format_convert/CMakeTests.cmake index 3046521a365..ab14a8c09a6 100644 --- a/tools/test/h5format_convert/CMakeTests.cmake +++ b/tools/test/h5format_convert/CMakeTests.cmake @@ -378,11 +378,11 @@ COMMAND "${CMAKE_COMMAND}" -D "TEST_EMULATOR=${CMAKE_CROSSCOMPILING_EMULATOR}" -D "TEST_PROGRAM=$" - -D "TEST_ARGS:STRING=-BH;./testfiles/${testname}-tmp.h5" - -D "TEST_FOLDER=${PROJECT_BINARY_DIR}" - -D "TEST_OUTPUT=testfiles/${testname}_chk.out" + -D "TEST_ARGS:STRING=-BH;${testname}-tmp.h5" + -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles" + -D "TEST_OUTPUT=${testname}_chk.out" -D "TEST_EXPECT=0" - -D "TEST_REFERENCE=testfiles/${testname}.ddl" + -D "TEST_REFERENCE=${testname}.ddl" -P "${HDF_RESOURCES_DIR}/runTest.cmake" ) set_tests_properties (H5FC_H5DUMP_CHECK-${testname}-dump PROPERTIES diff --git a/tools/test/h5format_convert/expected/h5fc_ext1_f.ddl b/tools/test/h5format_convert/expected/h5fc_ext1_f.ddl index f78891384db..98806a730c4 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext1_f.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext1_f.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext1_f-tmp.h5" { +HDF5 "h5fc_ext1_f-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext1_i.ddl b/tools/test/h5format_convert/expected/h5fc_ext1_i.ddl index 65640314077..03729100837 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext1_i.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext1_i.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext1_i-tmp.h5" { +HDF5 "h5fc_ext1_i-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext1_s.ddl b/tools/test/h5format_convert/expected/h5fc_ext1_s.ddl index 746de2b1d7d..f97ebf334f6 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext1_s.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext1_s.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext1_s-tmp.h5" { +HDF5 "h5fc_ext1_s-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext2_if.ddl b/tools/test/h5format_convert/expected/h5fc_ext2_if.ddl index 57781ecdada..003defcfdef 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext2_if.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext2_if.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext2_if-tmp.h5" { +HDF5 
"h5fc_ext2_if-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext2_is.ddl b/tools/test/h5format_convert/expected/h5fc_ext2_is.ddl index 8fd061d5c78..6b5d0d624c3 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext2_is.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext2_is.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext2_is-tmp.h5" { +HDF5 "h5fc_ext2_is-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext2_sf.ddl b/tools/test/h5format_convert/expected/h5fc_ext2_sf.ddl index 435ed464384..47e8c3bd0da 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext2_sf.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext2_sf.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext2_sf-tmp.h5" { +HDF5 "h5fc_ext2_sf-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/h5fc_ext3_isf.ddl b/tools/test/h5format_convert/expected/h5fc_ext3_isf.ddl index 57a78d3398e..bd7058ab2ea 100644 --- a/tools/test/h5format_convert/expected/h5fc_ext3_isf.ddl +++ b/tools/test/h5format_convert/expected/h5fc_ext3_isf.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/h5fc_ext3_isf-tmp.h5" { +HDF5 "h5fc_ext3_isf-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext1_f.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext1_f.ddl index 45fa3fbd438..420191bc301 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext1_f.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext1_f.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext1_f-tmp.h5" { +HDF5 "old_h5fc_ext1_f-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext1_i.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext1_i.ddl index 67a71164ac8..1ea166339ca 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext1_i.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext1_i.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext1_i-tmp.h5" { +HDF5 "old_h5fc_ext1_i-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 1 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext1_s.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext1_s.ddl index 7f67d9f72a9..eeaece7af78 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext1_s.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext1_s.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext1_s-tmp.h5" { +HDF5 "old_h5fc_ext1_s-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext2_if.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext2_if.ddl index 350d3ba4eb3..40a7f69f34e 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext2_if.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext2_if.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext2_if-tmp.h5" { +HDF5 "old_h5fc_ext2_if-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext2_is.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext2_is.ddl index 6b2b2c366fa..d83042ae451 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext2_is.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext2_is.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext2_is-tmp.h5" { +HDF5 "old_h5fc_ext2_is-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 
FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext2_sf.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext2_sf.ddl index 4a038e381d9..4cc7b2f5f58 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext2_sf.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext2_sf.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext2_sf-tmp.h5" { +HDF5 "old_h5fc_ext2_sf-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/expected/old_h5fc_ext3_isf.ddl b/tools/test/h5format_convert/expected/old_h5fc_ext3_isf.ddl index 602627f4614..e3bb99437e6 100644 --- a/tools/test/h5format_convert/expected/old_h5fc_ext3_isf.ddl +++ b/tools/test/h5format_convert/expected/old_h5fc_ext3_isf.ddl @@ -1,4 +1,4 @@ -HDF5 "./testfiles/old_h5fc_ext3_isf-tmp.h5" { +HDF5 "old_h5fc_ext3_isf-tmp.h5" { SUPER_BLOCK { SUPERBLOCK_VERSION 2 FREELIST_VERSION 0 diff --git a/tools/test/h5format_convert/testh5fc.sh.in b/tools/test/h5format_convert/testh5fc.sh.in index 4ba46cbe88c..756156ebb02 100644 --- a/tools/test/h5format_convert/testh5fc.sh.in +++ b/tools/test/h5format_convert/testh5fc.sh.in @@ -62,7 +62,7 @@ SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5FORMCONV_TESTFILES="$SRC_TOOLS/test/h5format_convert/testfiles" SRC_H5FORMCONV_OUTFILES="$SRC_TOOLS/test/h5format_convert/expected" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR # Copy the testfile to a temporary file for testing as h5format_convert is changing the file in place @@ -419,8 +419,15 @@ H5DUMP_CHECK() { actual="$TESTDIR/`basename $2 .ddl`.out" actual_err="$TESTDIR/`basename $2 .ddl`.err" testfile="`basename $2 .ddl`-tmp.h5" - $RUNSERIAL $H5DUMP_BIN -BH $TESTDIR/$testfile > $actual 2>$actual_err + # Run test. 
+ ( + cd $TESTDIR + $RUNSERIAL $H5DUMP_BIN -BH $testfile + ) >$actual 2>$actual_err cat $actual_err >> $actual + cp $actual $actual_sav + cp $actual_err $actual_err_sav + STDERR_FILTER $actual_err # Compare output COMPARE_OUT $expect $actual diff --git a/tools/test/h5import/h5importtestutil.sh.in b/tools/test/h5import/h5importtestutil.sh.in index 65b899fc9c4..04582ee87c2 100644 --- a/tools/test/h5import/h5importtestutil.sh.in +++ b/tools/test/h5import/h5importtestutil.sh.in @@ -52,7 +52,7 @@ SRC_H5JAM_TESTFILES="$SRC_TOOLS/test/h5jam/testfiles" SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5jam/testh5jam.sh.in b/tools/test/h5jam/testh5jam.sh.in index 49598da07d0..ee34377047d 100644 --- a/tools/test/h5jam/testh5jam.sh.in +++ b/tools/test/h5jam/testh5jam.sh.in @@ -55,7 +55,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5JAM_OUTFILES="$SRC_TOOLS/test/h5jam/expected" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5ls/h5ls_plugin.sh.in b/tools/test/h5ls/h5ls_plugin.sh.in index 3408876e0b0..8b606d6ff29 100644 --- a/tools/test/h5ls/h5ls_plugin.sh.in +++ b/tools/test/h5ls/h5ls_plugin.sh.in @@ -49,7 +49,7 @@ SRC_H5LS_TESTFILES="$SRC_TOOLS/test/h5ls/testfiles" SRC_H5DUMP_TESTFILES="$SRC_TOOLS/test/h5dump/testfiles" SRC_H5LS_OUTFILES="$SRC_TOOLS/test/h5ls/expected" -TESTDIR=./testplug +TESTDIR=./tmppl test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5ls/testh5ls.sh.in b/tools/test/h5ls/testh5ls.sh.in index fc6daab00ac..01f94024aa9 100644 --- a/tools/test/h5ls/testh5ls.sh.in +++ b/tools/test/h5ls/testh5ls.sh.in @@ -54,7 +54,7 @@ SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5LS_ERRFILES="$SRC_TOOLS/test/h5ls/errfiles" SRC_H5LS_OUTFILES="$SRC_TOOLS/test/h5ls/expected" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5ls/testh5lsvds.sh.in b/tools/test/h5ls/testh5lsvds.sh.in index 2408ee29466..9038cf18ddd 100644 --- a/tools/test/h5ls/testh5lsvds.sh.in +++ b/tools/test/h5ls/testh5lsvds.sh.in @@ -51,7 +51,7 @@ SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5LS_OUTFILES="$SRC_TOOLS/test/h5ls/expected" TEST_P_DIR=./testfiles -TESTDIR=./testfiles/vds +TESTDIR=./tmpvds test -d $TEST_P_DIR || mkdir -p $TEST_P_DIR test -d $TESTDIR || mkdir $TESTDIR @@ -145,7 +145,7 @@ CLEAN_TESTFILES_AND_TESTDIR() # skip rm if srcdir is same as destdir # this occurs when build/test performed in source dir and # make cp fail - SDIR=$SRC_H5LS_TESTFILES/vds + SDIR=$SRC_H5LS_TESTFILES INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'` INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'` if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then diff --git a/tools/test/h5repack/h5repack.sh.in b/tools/test/h5repack/h5repack.sh.in index f3b8d3eb9b9..ac213f1f4ba 100644 --- a/tools/test/h5repack/h5repack.sh.in +++ b/tools/test/h5repack/h5repack.sh.in @@ -68,7 +68,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" 
SRC_H5REPACK_OUTFILES="$SRC_TOOLS/test/h5repack/expected" -TESTDIR=./testpack +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5repack/h5repack_plugin.sh.in b/tools/test/h5repack/h5repack_plugin.sh.in index 43be1ee1c7b..a39cd7b967f 100644 --- a/tools/test/h5repack/h5repack_plugin.sh.in +++ b/tools/test/h5repack/h5repack_plugin.sh.in @@ -49,7 +49,7 @@ SRC_TOOLS="$srcdir/../.." SRC_H5REPACK_TESTFILES="$SRC_TOOLS/test/h5repack/testfiles" SRC_H5REPACK_OUTFILES="$SRC_TOOLS/test/h5repack/expected" -TESTDIR=testplug +TESTDIR=./tmppl test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/h5stat/testh5stat.sh.in b/tools/test/h5stat/testh5stat.sh.in index 7ce0ad495e3..1d732c600ae 100644 --- a/tools/test/h5stat/testh5stat.sh.in +++ b/tools/test/h5stat/testh5stat.sh.in @@ -51,7 +51,7 @@ SRC_H5STAT_TESTFILES="$SRC_TOOLS/test/h5stat/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/test/h5import/testfiles" SRC_H5STAT_OUTFILES="$SRC_TOOLS/test/h5stat/expected" -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR ###################################################################### diff --git a/tools/test/misc/expected/h5clear_missing_file.ddl b/tools/test/misc/expected/h5clear_missing_file.ddl index c7a21189ea7..fe659af9593 100644 --- a/tools/test/misc/expected/h5clear_missing_file.ddl +++ b/tools/test/misc/expected/h5clear_missing_file.ddl @@ -1,3 +1,10 @@ +h5clear clears superblock status flag field, removes metadata cache image, prints +EOA and EOF, or sets EOA of a file. It is not a general repair tool and should not +be used to fix file corruption. If a process doesn't shut down cleanly, the +superblock mark can be left that prevents opening a file without SWMR. Then, +h5clear can be used to remove this superblock mark so that the file can be inspected +and appropriate actions can be taken. + usage: h5clear [OPTIONS] file_name OPTIONS -h, --help Print a usage message and exit @@ -8,8 +15,8 @@ usage: h5clear [OPTIONS] file_name --increment=C Set the file's EOA to the maximum of (EOA, EOF) + C for the file . C is >= 0; C is optional and will default to 1M when not set. - This option helps to repair a crashed file where the stored EOA - in the superblock is different from the actual EOF. + This option helps to repair a crashed SWMR file when the stored + EOA in the superblock is different from the actual EOF. The file's EOA and EOF will be the same after applying this option to the file. diff --git a/tools/test/misc/expected/h5clear_usage.ddl b/tools/test/misc/expected/h5clear_usage.ddl index c7a21189ea7..fe659af9593 100644 --- a/tools/test/misc/expected/h5clear_usage.ddl +++ b/tools/test/misc/expected/h5clear_usage.ddl @@ -1,3 +1,10 @@ +h5clear clears superblock status flag field, removes metadata cache image, prints +EOA and EOF, or sets EOA of a file. It is not a general repair tool and should not +be used to fix file corruption. If a process doesn't shut down cleanly, the +superblock mark can be left that prevents opening a file without SWMR. Then, +h5clear can be used to remove this superblock mark so that the file can be inspected +and appropriate actions can be taken. + usage: h5clear [OPTIONS] file_name OPTIONS -h, --help Print a usage message and exit @@ -8,8 +15,8 @@ usage: h5clear [OPTIONS] file_name --increment=C Set the file's EOA to the maximum of (EOA, EOF) + C for the file . 
C is >= 0; C is optional and will default to 1M when not set. - This option helps to repair a crashed file where the stored EOA - in the superblock is different from the actual EOF. + This option helps to repair a crashed SWMR file when the stored + EOA in the superblock is different from the actual EOF. The file's EOA and EOF will be the same after applying this option to the file. diff --git a/tools/test/misc/talign.c b/tools/test/misc/talign.c index 2387be4b670..7de9d1afe5c 100644 --- a/tools/test/misc/talign.c +++ b/tools/test/misc/talign.c @@ -179,7 +179,7 @@ main(void) " %6f = %f\n", (double)fok[0], (double)fptr[0], (double)fok[1], (double)fptr[1], (double)fnok[0], (double)fptr[2], (double)fnok[1], (double)fptr[3]); - puts("*FAILED - compound type alignmnent problem*"); + puts("*FAILED - compound type alignment problem*"); } else { puts(" PASSED"); diff --git a/tools/test/misc/testh5clear.sh.in b/tools/test/misc/testh5clear.sh.in index b5bf5cc3d83..2306d589938 100644 --- a/tools/test/misc/testh5clear.sh.in +++ b/tools/test/misc/testh5clear.sh.in @@ -44,7 +44,7 @@ SRC_TOOLS="$srcdir/../.." SRC_H5CLEAR_TESTFILES="$SRC_TOOLS/test/misc/testfiles" SRC_H5CLEAR_OUTFILES="$SRC_TOOLS/test/misc/expected" -TESTDIR=./testh5clear +TESTDIR=./tmpclr test -d $TESTDIR || mkdir -p $TESTDIR ###################################################################### diff --git a/tools/test/misc/testh5mkgrp.sh.in b/tools/test/misc/testh5mkgrp.sh.in index 676f6b04e55..297f89f9f19 100644 --- a/tools/test/misc/testh5mkgrp.sh.in +++ b/tools/test/misc/testh5mkgrp.sh.in @@ -43,7 +43,7 @@ SRC_TOOLS="$srcdir/../.." SRC_H5MKGRP_TESTFILES="$SRC_TOOLS/test/misc/testfiles" SRC_H5MKGRP_OUTFILES="$SRC_TOOLS/test/misc/expected" -TESTDIR=./testgrp +TESTDIR=./tmpmkg test -d $TESTDIR || mkdir -p $TESTDIR ###################################################################### diff --git a/tools/test/misc/testh5repart.sh.in b/tools/test/misc/testh5repart.sh.in index addd0db9e22..e101b088067 100644 --- a/tools/test/misc/testh5repart.sh.in +++ b/tools/test/misc/testh5repart.sh.in @@ -40,7 +40,7 @@ SRC_TOOLS="$srcdir/../.." SRC_H5REPART_TESTFILES="$SRC_TOOLS/test/misc/testfiles" -TESTDIR=./testrepart +TESTDIR=./tmprp test -d $TESTDIR || mkdir -p $TESTDIR # diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index 718f88e1907..b7e4630c07e 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required (VERSION 3.18) project (HDF5_UTILS C) -if (BUILD_TESTING) +if (NOT HDF5_EXTERNALLY_CONFIGURED AND BUILD_TESTING) add_subdirectory (test) endif () diff --git a/utils/subfiling_vfd/h5fuse.sh.in b/utils/subfiling_vfd/h5fuse.sh.in index 360e2741b21..a4c6a053cc0 100755 --- a/utils/subfiling_vfd/h5fuse.sh.in +++ b/utils/subfiling_vfd/h5fuse.sh.in @@ -13,7 +13,6 @@ BLD='\033[1m' GRN='\033[0;32m' RED='\033[0;31m' -PUR='\033[0;35m' CYN='\033[0;36m' NC='\033[0m' # No Color @@ -27,11 +26,11 @@ function usage { configuration file either as a command-line argument or the script will search for the *.config file in the current directory." echo "" - echo "usage: h5fuse.sh [-f filename] [-h] [-p] [-q] [-r] [-v] " + echo "usage: h5fuse [-f filename] [-h] [-p] [-q] [-r] [-v] " echo "-f filename Subfile configuration file." echo "-h Print this help." echo "-q Quiet all output. [no]" - echo "-p h5fuse.sh is being run in parallel, with more than one rank. [no]" + echo "-p h5fuse is being run in parallel, with more than one rank. [no]" echo "-r Remove subfiles after being processed. [no]" echo "-v Verbose output. 
[no]" echo "" @@ -58,6 +57,106 @@ EOL } +function fuse { + +# function for fusing the files + +mpi_rank=0 +mpi_size=1 +nstart=1 +nend=$nsubfiles + +if [ "$parallel" == "true" ]; then + + hex=$(hexdump -n 16 -v -e '/1 "%02X"' /dev/urandom) + c_exec="h5fuse_"${hex} + c_src=${c_exec}.c + + # Generate and compile an MPI program to get MPI rank and size + if [ ! -f "${c_src}" ]; then + gen_mpi + CC=@CC@ + ${CC} "${c_src}" -o "${c_exec}" + fi + wait + rank_size=$(./"${c_exec}") + read -r mpi_rank mpi_size <<<"$rank_size" + + rm -f "${c_src}" "${c_exec}" + + # Divide the subfiles among the ranks + iwork1=$(( nsubfiles / mpi_size )) + iwork2=$(( nsubfiles % mpi_size )) + min=$(( mpi_rank < iwork2 ? mpi_rank : iwork2 )) + nstart=$(( mpi_rank * iwork1 + 1 + min )) + nend=$(( nstart + iwork1 - 1 )) + if [ $iwork2 -gt "$mpi_rank" ]; then + nend=$(( nend + 1 )) + fi +fi + +############################################################ +# COMBINE SUBFILES INTO AN HDF5 FILE # +############################################################ +icnt=1 +skip=0 +seek=0 +seek_cnt=0 +for i in "${subfiles[@]}"; do + + subfile="${subfile_dir}/${i}" + + # bs=BYTES read and write up to BYTES bytes at a time; overrides ibs and obs + # ibs=BYTES read up to BYTES bytes at a time + # obs=BYTES write BYTES bytes at a time + # seek=N skip N obs-sized blocks at start of output + # skip=N skip N ibs-sized blocks at start of input + + status=1 + fsize=${subfiles_size[icnt-1]} + if [ "$fsize" -eq "0" ]; then + seek_cnt=$((seek_cnt+1)) + seek=$seek_cnt + if [ "$rm_subf" == "true" ]; then + if [ -f "${subfile}" ]; then + \rm -f "$subfile" + fi + fi + else + if [ $icnt -ge "$nstart" ] && [ $icnt -le "$nend" ]; then + records_left=$fsize + while [ "$status" -gt 0 ]; do + if [ $((skip*stripe_size)) -le "$fsize" ] && [ "$records_left" -gt 0 ]; then + EXEC="dd count=1 bs=$stripe_size if=$subfile of=$hdf5_file skip=$skip seek=$seek conv=notrunc" + if [ "$verbose" == "true" ]; then + echo -e "$GRN $EXEC $NC" + fi + err=$( $EXEC 2>&1 1>/dev/null ) + if [ $? -ne 0 ]; then + echo -e "$CYN ERR: dd Utility Failed $NC" + echo -e "$CYN MSG: $err $NC" + exit $FAILED + fi + records_left=$((records_left-stripe_size)) + skip=$((skip+1)) + seek=$((seek_cnt+skip*nsubfiles)) + else + status=0 + skip=0 + fi + done; wait + if [ "$rm_subf" == "true" ]; then + \rm -f "$subfile" + fi + fi + seek_cnt=$((seek_cnt+1)) + seek=$seek_cnt + fi + icnt=$(( icnt +1 )) +done; wait + +} + ############################################################ ############################################################ # Main program # @@ -172,104 +271,9 @@ for i in "${subfiles[@]}"; do fi done -START="$(date +%s%N)" - -mpi_rank=0 -mpi_size=1 -nstart=1 -nend=$nsubfiles - -if [ "$parallel" == "true" ]; then - - hex=$(hexdump -n 16 -v -e '/1 "%02X"' /dev/urandom) - c_exec="h5fuse_"${hex} - c_src=${c_exec}.c - - # Generate and compile an MPI program to get MPI rank and size - if [ ! -f "${c_src}" ]; then - gen_mpi - CC=@CC@ - ${CC} "${c_src}" -o "${c_exec}" - fi - wait - rank_size=$(./"${c_exec}") - read -r mpi_rank mpi_size <<<"$rank_size" - - rm -f "${c_src}" "${c_exec}" - - # Divide the subfiles among the ranks - iwork1=$(( nsubfiles / mpi_size )) - iwork2=$(( nsubfiles % mpi_size )) - min=$(( mpi_rank < iwork2 ? 
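Editorial note: when run with -p, the script compiles a small throwaway MPI program (via gen_mpi and @CC@) just to learn its own rank and the communicator size, then parses the single "rank size" line it prints. The generated source is not shown in this hunk; the sketch below is the kind of program gen_mpi presumably emits.

    #include <mpi.h>
    #include <stdio.h>

    int
    main(int argc, char **argv)
    {
        int rank = 0, size = 1;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        printf("%d %d\n", rank, size);   /* the script reads "rank size" */
        MPI_Finalize();
        return 0;
    }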
mpi_rank : iwork2 )) - nstart=$(( mpi_rank * iwork1 + 1 + min )) - nend=$(( nstart + iwork1 - 1 )) - if [ $iwork2 -gt "$mpi_rank" ]; then - nend=$(( nend + 1 )) - fi -fi - -############################################################ -# COMBINE SUBFILES INTO AN HDF5 FILE # -############################################################ -icnt=1 -skip=0 -seek=0 -seek_cnt=0 -for i in "${subfiles[@]}"; do - - subfile="${subfile_dir}/${i}" - - # bs=BYTES read and write up to BYTES bytes at a time; overrides ibs and obs - # ibs=BYTES read up to BYTES bytes at a time - # obs=BYTES write BYTES bytes at a time - # seek=N skip N obs-sized blocks at start of output - # skip=N skip N ibs-sized blocks at start of input - - status=1 - fsize=${subfiles_size[icnt-1]} - if [ "$fsize" -eq "0" ]; then - seek_cnt=$((seek_cnt+1)) - seek=$seek_cnt - if [ "$rm_subf" == "true" ]; then - if [ -f "${subfile}" ]; then - \rm -f "$subfile" - fi - fi - else - if [ $icnt -ge "$nstart" ] && [ $icnt -le "$nend" ]; then - records_left=$fsize - while [ "$status" -gt 0 ]; do - if [ $((skip*stripe_size)) -le "$fsize" ] && [ "$records_left" -gt 0 ]; then - EXEC="dd count=1 bs=$stripe_size if=$subfile of=$hdf5_file skip=$skip seek=$seek conv=notrunc" - if [ "$verbose" == "true" ]; then - echo -e "$GRN $EXEC $NC" - fi - err=$( $EXEC 2>&1 1>/dev/null ) - if [ $? -ne 0 ]; then - echo -e "$CYN ERR: dd Utility Failed $NC" - echo -e "$CYN MSG: $err $NC" - exit $FAILED - fi - records_left=$((records_left-stripe_size)) - skip=$((skip+1)) - seek=$((seek_cnt+skip*nsubfiles)) - else - status=0 - skip=0 - fi - done; wait - if [ "$rm_subf" == "true" ]; then - \rm -f "$subfile" - fi - fi - seek_cnt=$((seek_cnt+1)) - seek=$seek_cnt - fi - icnt=$(( icnt +1 )) -done; wait - -END=$(( $(date +%s%N) - START )) -DURATION_SEC=$(awk -vp="$END" -vq=0.000000001 'BEGIN{printf "%.4f" ,p * q}') if [ "$quiet" == "false" ]; then - echo -e "$PUR COMPLETION TIME = $DURATION_SEC s $NC" -fi \ No newline at end of file + TIMEFORMAT="COMPLETION TIME = %R s" + time fuse +else + fuse +fi diff --git a/utils/tools/test/h5dwalk/copy_demo_files.sh.in b/utils/tools/test/h5dwalk/copy_demo_files.sh.in index 02df202ccf9..8ccc5e8b678 100644 --- a/utils/tools/test/h5dwalk/copy_demo_files.sh.in +++ b/utils/tools/test/h5dwalk/copy_demo_files.sh.in @@ -30,7 +30,7 @@ exit_code=$EXIT_SUCCESS # Add Testing files into the local testfiles directory:: -TESTDIR=./testfiles +TESTDIR=./tmp test -d $TESTDIR || mkdir $TESTDIR echo "HDF5 \"$THIS_DIR/testfiles/h5diff_basic1.h5\" {" > "$THIS_DIR"/testfiles/h5diff_basic1.h5_h5dump.txt diff --git a/utils/tools/test/h5dwalk/testh5dwalk.sh.in b/utils/tools/test/h5dwalk/testh5dwalk.sh.in index a123f8d98cb..694dad01e3b 100644 --- a/utils/tools/test/h5dwalk/testh5dwalk.sh.in +++ b/utils/tools/test/h5dwalk/testh5dwalk.sh.in @@ -68,7 +68,7 @@ SRC_H5JAM_TESTFILES="$SRC_TOOLS/h5jam/testfiles" SRC_H5DWALK_TESTFILES="$SRC_TOOLS/h5dwalk/testfiles" SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/h5import/testfiles" -TESTDIR=./testfiles +TESTDIR=./tmpdw test -d $TESTDIR || mkdir $TESTDIR echo "SRC_H5DIFF_TESTFILES = $SRC_H5DIFF_TESTFILES"
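Editorial note: the nstart/nend shell arithmetic in fuse() distributes subfiles as evenly as possible across ranks, giving the first (nsubfiles % mpi_size) ranks one extra subfile. The standalone C rendering below reproduces that arithmetic and prints an example split, purely to make the distribution explicit; subfile_range is an illustrative name.

    #include <stdio.h>

    /* C rendering of the shell arithmetic in fuse(): subfiles 1..nsubfiles are
     * split as evenly as possible among mpi_size ranks, with the first
     * (nsubfiles % mpi_size) ranks receiving one extra subfile. */
    static void
    subfile_range(int mpi_rank, int mpi_size, int nsubfiles, int *nstart, int *nend)
    {
        int iwork1 = nsubfiles / mpi_size;          /* base share per rank  */
        int iwork2 = nsubfiles % mpi_size;          /* ranks with one extra */
        int min    = (mpi_rank < iwork2) ? mpi_rank : iwork2;

        *nstart = mpi_rank * iwork1 + 1 + min;
        *nend   = *nstart + iwork1 - 1;
        if (iwork2 > mpi_rank)
            *nend += 1;
    }

    int
    main(void)
    {
        int nstart, nend;

        /* Example: 10 subfiles over 4 ranks -> 3, 3, 2, 2 subfiles per rank */
        for (int rank = 0; rank < 4; rank++) {
            subfile_range(rank, 4, 10, &nstart, &nend);
            printf("rank %d handles subfiles %d..%d\n", rank, nstart, nend);
        }
        return 0;
    }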