diff --git a/.github/workflows/autotools.yml b/.github/workflows/autotools.yml
index 1a2a49fc4bd..a7ded3235d1 100644
--- a/.github/workflows/autotools.yml
+++ b/.github/workflows/autotools.yml
@@ -83,4 +83,10 @@ jobs:
     with:
       build_mode: "production"
 
+  call-release-cmake-julia:
+    name: "Autotools Julia Workflows"
+    uses: ./.github/workflows/julia-auto.yml
+    with:
+      build_mode: "production"
+
\ No newline at end of file
diff --git a/.github/workflows/cmake.yml b/.github/workflows/cmake.yml
index 8769e86107c..82b185e4e1b 100644
--- a/.github/workflows/cmake.yml
+++ b/.github/workflows/cmake.yml
@@ -85,4 +85,10 @@ jobs:
     uses: ./.github/workflows/cygwin-cmake.yml
     with:
       build_mode: "Release"
+
+  call-release-cmake-julia:
+    name: "CMake Julia Workflows"
+    uses: ./.github/workflows/julia-cmake.yml
+    with:
+      build_mode: "Release"
\ No newline at end of file
diff --git a/.github/workflows/cygwin-auto.yml b/.github/workflows/cygwin-auto.yml
index 64a35e41be7..e027f296bee 100644
--- a/.github/workflows/cygwin-auto.yml
+++ b/.github/workflows/cygwin-auto.yml
@@ -15,6 +15,7 @@ jobs:
   cygwin_build_and_test:
     name: "cygwin ${{ inputs.build_mode }}"
     runs-on: windows-latest
+    timeout-minutes: 30
     steps:
       - name: Set git to use LF
         run: |
diff --git a/.github/workflows/cygwin-cmake.yml b/.github/workflows/cygwin-cmake.yml
index 8fe586ee12c..5b1416b07aa 100644
--- a/.github/workflows/cygwin-cmake.yml
+++ b/.github/workflows/cygwin-cmake.yml
@@ -15,6 +15,7 @@ jobs:
   cygwin_build_and_test:
     name: "cygwin-${{ inputs.build_mode }}"
     runs-on: windows-latest
+    timeout-minutes: 30
     steps:
       - name: Set git to use LF
         run: |
diff --git a/.github/workflows/julia-auto.yml b/.github/workflows/julia-auto.yml
new file mode 100644
index 00000000000..9c13990edf6
--- /dev/null
+++ b/.github/workflows/julia-auto.yml
@@ -0,0 +1,79 @@
+name: hdf5 1.14 autotools julia
+
+on:
+  workflow_call:
+    inputs:
+      build_mode:
+        description: "release vs. debug build"
+        required: true
+        type: string
+
+permissions:
+  contents: read
+
+jobs:
+  julia_build_and_test:
+    name: "julia ${{ inputs.build_mode }}"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Get Sources
+        uses: actions/checkout@v4.1.1
+
+      - name: Install Dependencies
+        shell: bash
+        run: |
+          sudo apt-get update
+          sudo apt-get install autoconf automake libtool libtool-bin libaec-dev
+          sudo apt-get install doxygen graphviz
+          sudo apt install -y zlib1g-dev libcurl4-openssl-dev libjpeg-dev wget curl bzip2
+          sudo apt install -y m4 flex bison cmake libzip-dev openssl build-essential
+
+      - name: Autotools Configure
+        shell: bash
+        run: |
+          sh ./autogen.sh
+          mkdir "${{ runner.workspace }}/build"
+          cd "${{ runner.workspace }}/build"
+          $GITHUB_WORKSPACE/configure \
+            --enable-build-mode=${{ inputs.build_mode }} \
+            --disable-fortran \
+            --enable-shared \
+            --disable-parallel \
+            --prefix=/tmp
+
+      - name: Autotools Build
+        shell: bash
+        run: |
+          make -j3
+        working-directory: ${{ runner.workspace }}/build
+
+      - name: Install HDF5
+        shell: bash
+        run: |
+          make install
+        working-directory: ${{ runner.workspace }}/build
+
+      - name: Install julia
+        uses: julia-actions/setup-julia@latest
+        with:
+          version: '1.6'
+          arch: 'x64'
+
+      - name: Get julia hdf5 source
+        uses: actions/checkout@v4.1.1
+        with:
+          repository: JuliaIO/HDF5.jl
+          path: .
+
+      - name: Generate LocalPreferences
+        run: |
+          echo '[HDF5]' >> LocalPreferences.toml
+          echo 'libhdf5 = "/tmp/lib/libhdf5.so"' >> LocalPreferences.toml
+          echo 'libhdf5_hl = "/tmp/lib/libhdf5_hl.so"' >> LocalPreferences.toml
+
+      - uses: julia-actions/julia-buildpkg@latest
+
+      - name: Julia Run Tests
+        uses: julia-actions/julia-runtest@latest
+        env:
+          JULIA_DEBUG: Main
diff --git a/.github/workflows/julia-cmake.yml b/.github/workflows/julia-cmake.yml
new file mode 100644
index 00000000000..96170b3aec6
--- /dev/null
+++ b/.github/workflows/julia-cmake.yml
@@ -0,0 +1,82 @@
+name: hdf5 1.14 CMake julia
+
+on:
+  workflow_call:
+    inputs:
+      build_mode:
+        description: "release vs. debug build"
+        required: true
+        type: string
+
+permissions:
+  contents: read
+
+jobs:
+  julia_build_and_test:
+    name: "julia ${{ inputs.build_mode }}"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Get Sources
+        uses: actions/checkout@v4.1.1
+
+      - name: Install Dependencies
+        shell: bash
+        run: |
+          sudo apt update
+          sudo apt-get install ninja-build doxygen graphviz
+          sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev
+          sudo apt install -y libaec-dev zlib1g-dev wget curl bzip2 flex bison cmake libzip-dev openssl build-essential
+
+      - name: CMake Configure
+        shell: bash
+        run: |
+          mkdir "${{ runner.workspace }}/build"
+          cd "${{ runner.workspace }}/build"
+          cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake -G Ninja \
+            -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} \
+            -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF \
+            -DHDF5_ENABLE_PARALLEL:BOOL=OFF \
+            -DHDF5_BUILD_CPP_LIB:BOOL=OFF \
+            -DLIBAEC_USE_LOCALCONTENT=OFF \
+            -DZLIB_USE_LOCALCONTENT=OFF \
+            -DHDF5_BUILD_FORTRAN:BOOL=OFF \
+            -DHDF5_BUILD_JAVA:BOOL=OFF \
+            -DCMAKE_INSTALL_PREFIX=/tmp \
+            $GITHUB_WORKSPACE
+
+      - name: CMake Build
+        shell: bash
+        run: |
+          cmake --build . --parallel 3 --config ${{ inputs.build_mode }}
+        working-directory: ${{ runner.workspace }}/build
+
+      - name: Install HDF5
+        shell: bash
+        run: |
+          cmake --install .
+        working-directory: ${{ runner.workspace }}/build
+
+      - name: Install julia
+        uses: julia-actions/setup-julia@latest
+        with:
+          version: '1.6'
+          arch: 'x64'
+
+      - name: Get julia hdf5 source
+        uses: actions/checkout@v4.1.1
+        with:
+          repository: JuliaIO/HDF5.jl
+          path: .
+
+      - name: Generate LocalPreferences
+        run: |
+          echo '[HDF5]' >> LocalPreferences.toml
+          echo 'libhdf5 = "/tmp/lib/libhdf5.so"' >> LocalPreferences.toml
+          echo 'libhdf5_hl = "/tmp/lib/libhdf5_hl.so"' >> LocalPreferences.toml
+
+      - uses: julia-actions/julia-buildpkg@latest
+
+      - name: Julia Run Tests
+        uses: julia-actions/julia-runtest@latest
+        env:
+          JULIA_DEBUG: Main
diff --git a/.github/workflows/lin-auto-jl.yml b/.github/workflows/lin-auto-jl.yml
deleted file mode 100644
index 4f4e3203f1a..00000000000
--- a/.github/workflows/lin-auto-jl.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-name: lin auto jl
-
-on:
-  workflow_dispatch:
-  push:
-  pull_request:
-    branches: [ hdf5_1_14 ]
-    paths-ignore:
-      - '.github/CODEOWNERS'
-      - '.github/FUNDING.yml'
-      - 'doc/**'
-      - 'release_docs/**'
-      - 'ACKNOWLEDGEMENTS'
-      - 'COPYING**'
-      - '**.md'
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
-  cancel-in-progress: true
-
-permissions:
-  contents: read
-
-jobs:
-  Julia:
-    name: Julia
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          path: hdf5
-      - name: Install HDF5
-        run: |
-          cd hdf5
-          ./autogen.sh
-          ./configure --prefix=/tmp
-          make -j
-          make install
-
-      - uses: julia-actions/setup-julia@latest
-        with:
-          version: '1.6'
-          arch: 'x64'
-      - uses: actions/checkout@v4
-        with:
-          repository: JuliaIO/HDF5.jl
-          path: .
-
-      - name: Generate LocalPreferences
-        run: |
-          echo '[HDF5]' >> LocalPreferences.toml
-          echo 'libhdf5 = "/tmp/lib/libhdf5.so"' >> LocalPreferences.toml
-          echo 'libhdf5_hl = "/tmp/lib/libhdf5_hl.so"' >> LocalPreferences.toml
-
-      - uses: julia-actions/julia-buildpkg@latest
-      - uses: julia-actions/julia-runtest@latest
-        env:
-          JULIA_DEBUG: Main
diff --git a/.github/workflows/lin-jl.yml b/.github/workflows/lin-jl.yml
deleted file mode 100644
index a34cfc0f25e..00000000000
--- a/.github/workflows/lin-jl.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: lin jl
-
-on:
-  workflow_dispatch:
-  push:
-  pull_request:
-    branches: [ hdf5_1_14 ]
-    paths-ignore:
-      - '.github/CODEOWNERS'
-      - '.github/FUNDING.yml'
-      - 'doc/**'
-      - 'release_docs/**'
-      - 'ACKNOWLEDGEMENTS'
-      - 'COPYING**'
-      - '**.md'
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
-  cancel-in-progress: true
-
-permissions:
-  contents: read
-
-jobs:
-  Julia:
-    name: Julia
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          path: hdf5
-      - name: Install HDF5
-        run: |
-          cd hdf5
-          mkdir build
-          cd build
-          cmake -DCMAKE_INSTALL_PREFIX=/tmp ..
-          make -j
-          make install
-          cd ..
-
-      - uses: julia-actions/setup-julia@latest
-        with:
-          version: '1.6'
-          arch: 'x64'
-      - uses: actions/checkout@v4
-        with:
-          repository: JuliaIO/HDF5.jl
-          path: .
- - - name: Generate LocalPreferences - run: | - echo '[HDF5]' >> LocalPreferences.toml - echo 'libhdf5 = "/tmp/lib/libhdf5.so"' >> LocalPreferences.toml - echo 'libhdf5_hl = "/tmp/lib/libhdf5_hl.so"' >> LocalPreferences.toml - - - uses: julia-actions/julia-buildpkg@latest - - uses: julia-actions/julia-runtest@latest - env: - JULIA_DEBUG: Main diff --git a/.github/workflows/main-auto-spc.yml b/.github/workflows/main-auto-spc.yml index 33cd0673f92..5a129be5ee4 100644 --- a/.github/workflows/main-auto-spc.yml +++ b/.github/workflows/main-auto-spc.yml @@ -417,8 +417,10 @@ jobs: sh ./autogen.sh mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" - CFLAGS=-Werror $GITHUB_WORKSPACE/configure \ + CFLAGS=-Werror JAVACFLAGS=-Werror JNIFLAGS=-Werror \ + $GITHUB_WORKSPACE/configure \ --enable-build-mode=debug \ + --enable-warnings-as-errors \ --enable-deprecated-symbols \ --with-default-api-version=v114 \ --enable-shared \ @@ -478,8 +480,10 @@ jobs: sh ./autogen.sh mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" - CFLAGS=-Werror $GITHUB_WORKSPACE/configure \ + CFLAGS=-Werror JAVACFLAGS=-Werror JNIFLAGS=-Werror \ + $GITHUB_WORKSPACE/configure \ --enable-build-mode=production \ + --enable-warnings-as-errors \ --enable-deprecated-symbols \ --with-default-api-version=v114 \ --enable-shared \ diff --git a/.github/workflows/main-cmake.yml b/.github/workflows/main-cmake.yml index f4978e81718..aac5bc4a240 100644 --- a/.github/workflows/main-cmake.yml +++ b/.github/workflows/main-cmake.yml @@ -135,10 +135,6 @@ jobs: # CMake gets libaec from fetchcontent - - name: Install Dependencies (Windows) - run: choco install ninja - if: matrix.os == 'windows-latest' - - name: Install Dependencies (macOS) run: brew install ninja if: matrix.os == 'macos-13' diff --git a/.github/workflows/mingw-cmake.yml b/.github/workflows/mingw-cmake.yml deleted file mode 100644 index e97c1f8e92c..00000000000 --- a/.github/workflows/mingw-cmake.yml +++ /dev/null @@ -1,91 +0,0 @@ -name: hdf5 1.14 CMake MinGW - -on: - workflow_call: - inputs: - build_mode: - description: "release vs. 
debug build" - required: true - type: string - shared: - description: "shared true/false" - required: true - type: string - netcdf: - description: "netcdf true/false" - required: true - type: string - -permissions: - contents: read - -jobs: - mingw_build_and_test: - name: "mingw-${{ inputs.build_mode }}-NC=${{ inputs.netcdf }}" - if: "!contains(github.event.head_commit.message, 'skip-ci')" - runs-on: ubuntu-latest - steps: - - name: Get Sources - uses: actions/checkout@v4.1.1 - - - name: Install Dependencies - shell: bash - run: | - sudo apt update - sudo apt-get install -y ninja-build libtirpc-dev graphviz - - - name: Install MinGW - uses: egor-tensin/setup-mingw@v2 - with: - platform: x64 - - - name: Install Doxygen - uses: ssciwr/doxygen-install@v1 - with: - version: "1.9.7" - - - name: CMake Configure - shell: bash - run: | - mkdir "${{ runner.workspace }}/build" - cd "${{ runner.workspace }}/build" - cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake \ - -G Ninja \ - --log-level=VERBOSE \ - -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} \ - -DCMAKE_TOOLCHAIN_FILE=$GITHUB_WORKSPACE/config/toolchain/mingw64.cmake \ - -DBUILD_SHARED_LIBS:BOOL=${{ inputs.shared }} \ - -DHDF4_BUILD_EXAMPLES:BOOL=ON \ - -DBUILD_JPEG_WITH_PIC:BOOL=ON \ - -DHDF4_ENABLE_NETCDF:BOOL=${{ inputs.netcdf }} \ - -DHDF4_BUILD_FORTRAN:BOOL=OFF \ - -DHDF4_BUILD_JAVA:BOOL=OFF \ - -DHDF4_BUILD_DOC:BOOL=ON \ - -DJPEG_USE_LOCALCONTENT:BOOL=OFF \ - -DLIBAEC_USE_LOCALCONTENT:BOOL=OFF \ - -DZLIB_USE_LOCALCONTENT:BOOL=OFF \ - -DHDF4_PACK_EXAMPLES:BOOL=ON \ - -DHDF4_PACKAGE_EXTLIBS:BOOL=ON \ - $GITHUB_WORKSPACE - - - name: CMake Build - shell: bash - run: | - cmake --build . --parallel 3 --config ${{ inputs.build_mode }} - working-directory: ${{ runner.workspace }}/build - - - name: CMake Run Tests - shell: bash - run: | - ctest . --parallel 2 -C ${{ inputs.build_mode }} -V - if: false - - - name: CMake Package - shell: bash - run: | - cpack -C ${{ inputs.build_mode }} -V - working-directory: ${{ runner.workspace }}/build - - - name: List files in the space - run: | - ls -l ${{ runner.workspace }}/build diff --git a/CMakeLists.txt b/CMakeLists.txt index f81eb594b37..3b5a556a7a1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1064,8 +1064,6 @@ set (H5_FC_FUNC_ "H5_FC_FUNC_(name,NAME) name ## _") if (EXISTS "${HDF5_SOURCE_DIR}/fortran" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/fortran") option (HDF5_BUILD_FORTRAN "Build FORTRAN support" OFF) if (HDF5_BUILD_FORTRAN) - include (${HDF_RESOURCES_DIR}/HDFUseFortran.cmake) - message (VERBOSE "Fortran compiler ID is ${CMAKE_Fortran_COMPILER_ID}") include (${HDF_RESOURCES_DIR}/HDFFortranCompilerFlags.cmake) include (${HDF_RESOURCES_DIR}/HDF5UseFortran.cmake) @@ -1139,16 +1137,6 @@ if (EXISTS "${HDF5_SOURCE_DIR}/c++" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/c++") endif () endif () -#----------------------------------------------------------------------------- -# Check if Fortran's default real is double precision. If it is and HL is -# being built then configure should fail due to bug HDFFV-889. 
-#----------------------------------------------------------------------------- -if (HDF5_BUILD_FORTRAN AND HDF5_BUILD_HL_LIB) - if (NOT H5_FORTRAN_DEFAULT_REAL_NOT_DOUBLE) - message (FATAL_ERROR " **** Fortran high-level routines are not supported when the default REAL is DOUBLE PRECISION, use HDF5_BUILD_HL_LIB:BOOL=OFF **** ") - endif () -endif () - #----------------------------------------------------------------------------- # Option to build HDF5 Java Library #----------------------------------------------------------------------------- diff --git a/COPYING b/COPYING index 9bd04025250..fd50633d9e7 100644 --- a/COPYING +++ b/COPYING @@ -49,7 +49,7 @@ works thereof, in binary and source code form. Limited portions of HDF5 were developed by Lawrence Berkeley National Laboratory (LBNL). LBNL's Copyright Notice and Licensing Terms can be found here: COPYING_LBNL_HDF5 file in this directory or at -https://raw.githubusercontent.com/hdfgroup/hdf5/develop/COPYING_LBNL_HDF5. +https://raw.githubusercontent.com/hdfgroup/hdf5/hdf5_1_14/COPYING_LBNL_HDF5. ----------------------------------------------------------------------------- ----------------------------------------------------------------------------- diff --git a/HDF5Examples/C/H5T/CMakeLists.txt b/HDF5Examples/C/H5T/CMakeLists.txt index 8c4084f1a1b..74dead06c53 100644 --- a/HDF5Examples/C/H5T/CMakeLists.txt +++ b/HDF5Examples/C/H5T/CMakeLists.txt @@ -115,7 +115,7 @@ if (HDF5_BUILD_TOOLS) if (NOT ${example_name} STREQUAL "h5ex_t_convert") if (${example_name} STREQUAL "h5ex_t_vlen" OR ${example_name} STREQUAL "h5ex_t_vlenatt") if (HDF5_VERSION_STRING VERSION_GREATER_EQUAL "1.14.3") - if (${H5_LIBVER_DIR} EQUAL 16 AND ${example_name} STREQUAL "h5ex_t_vlenatt") + if ((${EXAMPLE_VARNAME}_USE_16_API OR ${H5_LIBVER_DIR} EQUAL 16) AND ${example_name} STREQUAL "h5ex_t_vlenatt") add_custom_command ( TARGET ${EXAMPLE_VARNAME}_${example_name} POST_BUILD @@ -130,7 +130,7 @@ if (HDF5_BUILD_TOOLS) ARGS -E copy_if_different ${PROJECT_SOURCE_DIR}/tfiles/114/${example_name}.ddl ${example_name}.ddl ) endif () - elseif (${H5_LIBVER_DIR} EQUAL 16) + elseif (${EXAMPLE_VARNAME}_USE_16_API OR ${H5_LIBVER_DIR} EQUAL 16) add_custom_command ( TARGET ${EXAMPLE_VARNAME}_${example_name} POST_BUILD @@ -180,7 +180,7 @@ if (HDF5_BUILD_TOOLS) ) endif () elseif (HDF5_VERSION_MAJOR VERSION_GREATER_EQUAL "1.12") - if (${H5_LIBVER_DIR} EQUAL 16) + if (${EXAMPLE_VARNAME}_USE_16_API OR ${H5_LIBVER_DIR} EQUAL 16) add_custom_command ( TARGET ${EXAMPLE_VARNAME}_${example_name} POST_BUILD @@ -196,12 +196,21 @@ if (HDF5_BUILD_TOOLS) ) endif () else () - add_custom_command ( - TARGET ${EXAMPLE_VARNAME}_${example_name} - POST_BUILD - COMMAND ${CMAKE_COMMAND} - ARGS -E copy_if_different ${PROJECT_SOURCE_DIR}/tfiles/18/${example_name}.ddl ${example_name}.ddl - ) + if (${EXAMPLE_VARNAME}_USE_16_API OR ${H5_LIBVER_DIR} EQUAL 16) + add_custom_command ( + TARGET ${EXAMPLE_VARNAME}_${example_name} + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different ${PROJECT_SOURCE_DIR}/tfiles/16/${example_name}.ddl ${PROJECT_BINARY_DIR}/${example_name}.ddl + ) + else () + add_custom_command ( + TARGET ${EXAMPLE_VARNAME}_${example_name} + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different ${PROJECT_SOURCE_DIR}/tfiles/18/${example_name}.ddl ${PROJECT_BINARY_DIR}/${example_name}.ddl + ) + endif () endif () else () if (HDF5_VERSION_MAJOR VERSION_EQUAL "1.8") @@ -260,12 +269,21 @@ if (HDF5_BUILD_TOOLS) ARGS -E copy_if_different ${PROJECT_SOURCE_DIR}/tfiles/16/${example_name}.ddl 
${PROJECT_BINARY_DIR}/${example_name}.ddl ) else () - add_custom_command ( - TARGET ${EXAMPLE_VARNAME}_${example_name} - POST_BUILD - COMMAND ${CMAKE_COMMAND} - ARGS -E copy_if_different ${PROJECT_SOURCE_DIR}/tfiles/18/${example_name}.ddl ${example_name}.ddl - ) + if (${EXAMPLE_VARNAME}_USE_16_API) + add_custom_command ( + TARGET ${EXAMPLE_VARNAME}_${example_name} + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different ${PROJECT_SOURCE_DIR}/tfiles/16/${example_name}.ddl ${PROJECT_BINARY_DIR}/${example_name}.ddl + ) + else () + add_custom_command ( + TARGET ${EXAMPLE_VARNAME}_${example_name} + POST_BUILD + COMMAND ${CMAKE_COMMAND} + ARGS -E copy_if_different ${PROJECT_SOURCE_DIR}/tfiles/18/${example_name}.ddl ${PROJECT_BINARY_DIR}/${example_name}.ddl + ) + endif () endif () endif () endforeach () diff --git a/HDF5Examples/Using_CMake.txt b/HDF5Examples/Using_CMake.txt index 78751599b33..778fa7534b5 100644 --- a/HDF5Examples/Using_CMake.txt +++ b/HDF5Examples/Using_CMake.txt @@ -90,11 +90,11 @@ These steps are described in more detail below. * MinGW Makefiles * NMake Makefiles * Unix Makefiles - * Visual Studio 15 - * Visual Studio 15 Win64 - * Visual Studio 17 - * Visual Studio 17 Win64 - * Visual Studio 19 + * Visual Studio 15 2017 + * Visual Studio 15 2017 Win64 + * Visual Studio 16 2019 + * Visual Studio 17 2022 + is: * H5EX_BUILD_TESTING:BOOL=ON diff --git a/HDF5Examples/config/cmake/HDFExampleMacros.cmake b/HDF5Examples/config/cmake/HDFExampleMacros.cmake index 5c425dbbe0c..8173562de88 100644 --- a/HDF5Examples/config/cmake/HDFExampleMacros.cmake +++ b/HDF5Examples/config/cmake/HDFExampleMacros.cmake @@ -45,10 +45,10 @@ macro (BASIC_SETTINGS varname) #----------------------------------------------------------------------------- # Compiler specific flags : Shouldn't there be compiler tests for these #----------------------------------------------------------------------------- - if (CMAKE_COMPILER_IS_GNUCC) + if (CMAKE_C_COMPILER_ID STREQUAL "GNU") set (CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS}") endif () - if (CMAKE_CXX_COMPILER_LOADED AND CMAKE_COMPILER_IS_GNUCXX) + if (CMAKE_CXX_COMPILER_LOADED AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set (CMAKE_CXX_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_CXX_FLAGS}") endif () @@ -56,10 +56,10 @@ macro (BASIC_SETTINGS varname) # This is in here to help some of the GCC based IDES like Eclipse # and code blocks parse the compiler errors and warnings better. #----------------------------------------------------------------------------- - if (CMAKE_COMPILER_IS_GNUCC) + if (CMAKE_C_COMPILER_ID STREQUAL "GNU") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fmessage-length=0") endif () - if (CMAKE_CXX_COMPILER_LOADED AND CMAKE_COMPILER_IS_GNUCXX) + if (CMAKE_CXX_COMPILER_LOADED AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fmessage-length=0") endif () @@ -74,7 +74,7 @@ macro (BASIC_SETTINGS varname) set (HDF_WARNINGS_BLOCKED 1) string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /w") - if (CMAKE_CXX_COMPILER_LOADED AND CMAKE_COMPILER_IS_GNUCXX) + if (CMAKE_CXX_COMPILER_LOADED AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /w") endif () @@ -91,7 +91,7 @@ macro (BASIC_SETTINGS varname) # Most compilers use -w to suppress warnings. 
if (NOT HDF_WARNINGS_BLOCKED) set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w") - if (CMAKE_CXX_COMPILER_LOADED AND CMAKE_COMPILER_IS_GNUCXX) + if (CMAKE_CXX_COMPILER_LOADED AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w") endif () endif () diff --git a/autogen.sh b/autogen.sh index 142375d6a8b..33a82779465 100755 --- a/autogen.sh +++ b/autogen.sh @@ -11,12 +11,12 @@ # help@hdfgroup.org. # -# A script to reconfigure autotools for HDF5, and to recreate other +# A script to reconfigure the Autotools for HDF5, and to recreate other # generated files specific to HDF5. # # IMPORTANT OS X NOTE # -# If you are using OS X, you will probably not have the autotools +# If you are using OS X, you will probably not have the Autotools # installed, even if you have the Xcode command-line tools. # # The easiest way to fix this is to install everything via Homebrew: @@ -30,31 +30,7 @@ # brew install automake # brew install libtool # -# This only takes a few minutes. Note that libtool and libtoolize will -# be glibtool and glibtoolize so as not to conflict with Apple's non-gnu -# tools. This autogen.sh script handles this for you. -# # END IMPORTANT OS X NOTE -# -# If you want to use a particular version of the autotools, the paths -# to each tool can be overridden using the following environment -# variables: -# -# HDF5_ACLOCAL -# HDF5_AUTOHEADER -# HDF5_AUTOMAKE -# HDF5_AUTOCONF -# HDF5_LIBTOOLIZE -# HDF5_M4 -# -# Note that aclocal will attempt to include libtool's share/aclocal -# directory. -# -# Aside from -h for help, this script takes one potential option: -# -# -v -# -# This emits some extra information, mainly tool versions. echo echo "**************************" @@ -62,76 +38,9 @@ echo "* HDF5 autogen.sh script *" echo "**************************" echo -# Default is not verbose output -verbose=false - -optspec=":hpv-" -while getopts "$optspec" optchar; do - case "${optchar}" in - h) - echo "usage: $0 [OPTIONS]" - echo - echo " -h Print this help message." - echo - echo " -v Show more verbose output." - echo - echo " NOTE: Each tool can be set via an environment variable." - echo " These are documented inside this autogen.sh script." - echo - exit 0 - ;; - v) - echo "Setting verbosity: high" - echo - verbose=true - ;; - *) - if [ "$OPTERR" != 1 ] || case $optspec in :*) ;; *) false; esac; then - echo "ERROR: non-option argument: '-${OPTARG}'" >&2 - echo "Quitting" - exit 1 - fi - ;; - esac -done - -# If paths to autotools are not specified, use whatever the system -# has installed as the default. We use 'command -v ' to -# show exactly what's being used (shellcheck complains that 'which' -# is non-standard and deprecated). -if test -z "${HDF5_AUTOCONF}"; then - HDF5_AUTOCONF="$(command -v autoconf)" -fi -if test -z "${HDF5_AUTOMAKE}"; then - HDF5_AUTOMAKE="$(command -v automake)" -fi -if test -z "${HDF5_AUTOHEADER}"; then - HDF5_AUTOHEADER="$(command -v autoheader)" -fi -if test -z "${HDF5_ACLOCAL}"; then - HDF5_ACLOCAL="$(command -v aclocal)" -fi -if test -z "${HDF5_LIBTOOLIZE}"; then - # check for glibtoolize (likely found on MacOS). If not found - check for libtoolize - HDF5_LIBTOOLIZE="$(command -v glibtoolize)" - if [ ! 
-f "$HDF5_LIBTOOLIZE" ] ; then - HDF5_LIBTOOLIZE="$(command -v libtoolize)" - fi -fi -if test -z "${HDF5_M4}"; then - HDF5_M4="$(command -v m4)" -fi - - -# Make sure that these versions of the autotools are in the path -AUTOCONF_DIR=$(dirname "${HDF5_AUTOCONF}") -LIBTOOL_DIR=$(dirname "${HDF5_LIBTOOLIZE}") -M4_DIR=$(dirname "${HDF5_M4}") -PATH=${AUTOCONF_DIR}:${LIBTOOL_DIR}:${M4_DIR}:$PATH - # Run scripts that process source. # -# These should be run before the autotools so that failures here block +# These should be run before the Autotools so that failures here block # compilation. # Run trace script @@ -162,73 +71,20 @@ echo "Running overflow macro generation script:" bin/make_overflow src/H5overflow.txt || exit 1 echo -# Run autotools in order -# -# When available, we use the --force option to ensure all files are -# updated. This prevents the autotools from re-running to fix dependencies -# during the 'make' step, which can be a problem if environment variables -# were set on the command line during autogen invocation. - -# Some versions of libtoolize will suggest that we add ACLOCAL_AMFLAGS -# = '-I m4'. This is already done in commence.am, which is included -# in Makefile.am. You can ignore this suggestion. +# Run Autotools -# LIBTOOLIZE -libtoolize_cmd="${HDF5_LIBTOOLIZE} --copy --force" -echo "${libtoolize_cmd}" -if [ "$verbose" = true ] ; then - ${HDF5_LIBTOOLIZE} --version -fi -${libtoolize_cmd} || exit 1 +# The "obsolete" warnings category flags our Java macros as obsolete. +# Since there is no clear way to upgrade them (Java support in the Autotools +# is not great) and they work well enough for now, we suppress those warnings. +echo "Running Autotools" echo echo "NOTE: You can ignore the warning about adding -I m4." echo " We already do this in an included file." echo - -# ACLOCAL -if test -e "${LIBTOOL_DIR}/../share/aclocal" ; then - aclocal_include="-I ${LIBTOOL_DIR}/../share/aclocal" -fi -aclocal_cmd="${HDF5_ACLOCAL} --force -I m4 ${aclocal_include}" -echo "${aclocal_cmd}" -if [ "$verbose" = true ] ; then - ${HDF5_ACLOCAL} --version -fi -${aclocal_cmd} || exit 1 -echo - -# AUTOHEADER -autoheader_cmd="${HDF5_AUTOHEADER} --force" -echo "${autoheader_cmd}" -if [ "$verbose" = true ] ; then - ${HDF5_AUTOHEADER} --version -fi -${autoheader_cmd} || exit 1 -echo - -# AUTOMAKE -automake_cmd="${HDF5_AUTOMAKE} --copy --add-missing --force-missing" -echo "${automake_cmd}" -if [ "$verbose" = true ] ; then - ${HDF5_AUTOMAKE} --version -fi -${automake_cmd} || exit 1 -echo - -# AUTOCONF -# The "obsolete" warnings category flags our Java macros as obsolete. -# Since there is no clear way to upgrade them (Java support in the Autotools -# is not great) and they work well enough for now, we suppress those warnings. -autoconf_cmd="${HDF5_AUTOCONF} --force --warnings=no-obsolete" -echo "${autoconf_cmd}" -if [ "$verbose" = true ] ; then - ${HDF5_AUTOCONF} --version -fi -${autoconf_cmd} || exit 1 +autoreconf -vif --warnings=no-obsolete || exit 1 echo echo "*** SUCCESS ***" echo exit 0 - diff --git a/config/cmake/HDF5UseFortran.cmake b/config/cmake/HDF5UseFortran.cmake index f22d8d2c390..b4172eace4b 100644 --- a/config/cmake/HDF5UseFortran.cmake +++ b/config/cmake/HDF5UseFortran.cmake @@ -14,27 +14,15 @@ # This file provides functions for HDF5 specific Fortran support. 
# #------------------------------------------------------------------------------- -enable_language (Fortran) +include (${HDF_RESOURCES_DIR}/HDFUseFortran.cmake) -set (HDF_PREFIX "H5") +include (CheckFortranFunctionExists) # Force lowercase Fortran module file names if (CMAKE_Fortran_COMPILER_ID STREQUAL "Cray") set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -ef") endif () -include (CheckFortranFunctionExists) - -include (CheckFortranSourceRuns) -include (CheckFortranSourceCompiles) - -# Read source line beginning at the line matching Input:"START" and ending at the line matching Input:"END" -macro (READ_SOURCE SOURCE_START SOURCE_END RETURN_VAR) - file (READ "${HDF5_SOURCE_DIR}/m4/aclocal_fc.f90" SOURCE_MASTER) - string (REGEX MATCH "${SOURCE_START}[\\\t\\\n\\\r[].+]*${SOURCE_END}" SOURCE_CODE ${SOURCE_MASTER}) - set (RETURN_VAR "${SOURCE_CODE}") -endmacro () - set (RUN_OUTPUT_PATH_DEFAULT ${CMAKE_BINARY_DIR}) # The provided CMake Fortran macros don't provide a general compile/run function # so this one is used. @@ -152,8 +140,12 @@ endif () #----------------------------------------------------------------------------- # Determine the available KINDs for REALs and INTEGERs #----------------------------------------------------------------------------- +if (${HAVE_ISO_FORTRAN_ENV}) + READ_SOURCE ("PROGRAM FC08_AVAIL_KINDS" "END PROGRAM FC08_AVAIL_KINDS" SOURCE_CODE) +else () + READ_SOURCE ("PROGRAM FC_AVAIL_KINDS" "END PROGRAM FC_AVAIL_KINDS" SOURCE_CODE) +endif () -READ_SOURCE ("PROGRAM FC_AVAIL_KINDS" "END PROGRAM FC_AVAIL_KINDS" SOURCE_CODE) FORTRAN_RUN ("REAL and INTEGER KINDs" "${SOURCE_CODE}" XX @@ -167,6 +159,9 @@ FORTRAN_RUN ("REAL and INTEGER KINDs" # dnl -- LINE 3 -- max decimal precision for reals # dnl -- LINE 4 -- number of valid integer kinds # dnl -- LINE 5 -- number of valid real kinds +# dnl -- LINE 6 -- number of valid logical kinds +# dnl -- LINE 7 -- valid logical kinds (comma separated list) + # # Convert the string to a list of strings by replacing the carriage return with a semicolon string (REGEX REPLACE "[\r\n]+" ";" PROG_OUTPUT "${PROG_OUTPUT}") @@ -202,6 +197,61 @@ message (STATUS "....REAL KINDS FOUND ${PAC_FC_ALL_REAL_KINDS}") message (STATUS "....INTEGER KINDS FOUND ${PAC_FC_ALL_INTEGER_KINDS}") message (STATUS "....MAX DECIMAL PRECISION ${${HDF_PREFIX}_PAC_FC_MAX_REAL_PRECISION}") +if (${HAVE_ISO_FORTRAN_ENV}) + + list (GET PROG_OUTPUT 5 NUM_LKIND) + set (PAC_FORTRAN_NUM_LOGICAL_KINDS "${NUM_LKIND}") + + list (GET PROG_OUTPUT 6 pac_validLogicalKinds) + # If the list is empty then something went wrong. 
+ if (NOT pac_validLogicalKinds) + message (FATAL_ERROR "Failed to find available LOGICAL KINDs for Fortran") + endif () + + set (PAC_FC_ALL_LOGICAL_KINDS "\{${pac_validLogicalKinds}\}") + message (STATUS "....LOGICAL KINDS FOUND ${PAC_FC_ALL_LOGICAL_KINDS}") + +# ******************** +# LOGICAL KIND FOR MPI +# ******************** + if (HDF5_ENABLE_PARALLEL AND BUILD_TESTING) + string (REGEX REPLACE "," ";" VAR "${pac_validLogicalKinds}") + + set(CMAKE_REQUIRED_QUIET TRUE) + set(save_CMAKE_Fortran_FLAGS ${CMAKE_Fortran_FLAGS}) + if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel") + set(CMAKE_Fortran_FLAGS "-warn error") + endif () + + foreach (KIND ${VAR}) + unset(MPI_LOGICAL_KIND CACHE) + set (PROG_SRC + " + PROGRAM main + USE MPI + IMPLICIT NONE + LOGICAL(KIND=${KIND}) :: flag + INTEGER(KIND=MPI_INTEGER_KIND) :: info_ret, mpierror + CHARACTER(LEN=3) :: info_val + CALL mpi_info_get(info_ret,\"foo\", 3_MPI_INTEGER_KIND, info_val, flag, mpierror) + END + " + ) + check_fortran_source_compiles (${PROG_SRC} MPI_LOGICAL_KIND SRC_EXT f90) + + if (MPI_LOGICAL_KIND) + set (${HDF_PREFIX}_MPI_LOGICAL_KIND ${KIND}) + message (STATUS "....FORTRAN LOGICAL KIND for MPI is ${KIND}") + endif () + endforeach () + if (${HDF_PREFIX}_MPI_LOGICAL_KIND STREQUAL "") + message (FATAL_ERROR "Failed to determine LOGICAL KIND for MPI") + endif () + set(CMAKE_REQUIRED_QUIET FALSE) + set(CMAKE_Fortran_FLAGS ${save_CMAKE_Fortran_FLAGS}) + endif() +endif() + #----------------------------------------------------------------------------- # Determine the available KINDs for REALs and INTEGERs #----------------------------------------------------------------------------- @@ -350,7 +400,6 @@ if (NOT PAC_FORTRAN_NATIVE_DOUBLE_KIND) message (FATAL_ERROR "Failed to find KIND of NATIVE DOUBLE for Fortran") endif () - set (${HDF_PREFIX}_FORTRAN_SIZEOF_LONG_DOUBLE ${${HDF_PREFIX}_SIZEOF_LONG_DOUBLE}) # remove the invalid kind from the list diff --git a/config/cmake/HDFCXXCompilerFlags.cmake b/config/cmake/HDFCXXCompilerFlags.cmake index dd120c911cb..5f977f534f6 100644 --- a/config/cmake/HDFCXXCompilerFlags.cmake +++ b/config/cmake/HDFCXXCompilerFlags.cmake @@ -65,7 +65,7 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "NVHPC" AND CMAKE_CXX_COMPILER_LOADED) endif () endif () -if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_LOADED) +if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_LOADED) set (CMAKE_CXX_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_CXX_FLAGS}") if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0) @@ -143,7 +143,7 @@ else () endif() endif() elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_LOADED + if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_LOADED AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 4.8) # add the general CXX flags for g++ compiler versions 4.8 and above. ADD_H5_FLAGS (HDF5_CMAKE_CXX_FLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/cxx-general") @@ -311,7 +311,7 @@ endif () # This is in here to help some of the GCC based IDES like Eclipse # and code blocks parse the compiler errors and warnings better. 
#----------------------------------------------------------------------------- -if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_LOADED) +if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_LOADED) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fmessage-length=0") endif () diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake index 007747a6166..d8a444b84d2 100644 --- a/config/cmake/HDFCompilerFlags.cmake +++ b/config/cmake/HDFCompilerFlags.cmake @@ -96,7 +96,7 @@ if (CMAKE_C_COMPILER_ID STREQUAL "NVHPC" ) set (CMAKE_C_FLAGS_RELWITHDEBINFO "${cmake_c_flags_relwithdebinfo_edited}") endif () -if (CMAKE_COMPILER_IS_GNUCC) +if (CMAKE_C_COMPILER_ID STREQUAL "GNU") set (CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS}") if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer") if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 5.0) @@ -405,7 +405,7 @@ endif () # This is in here to help some of the GCC based IDES like Eclipse # and code blocks parse the compiler errors and warnings better. #----------------------------------------------------------------------------- -if (CMAKE_COMPILER_IS_GNUCC) +if (CMAKE_C_COMPILER_ID STREQUAL "GNU") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fmessage-length=0") endif () diff --git a/config/cmake/HDFUseFortran.cmake b/config/cmake/HDFUseFortran.cmake index dd35c07531d..44d3e7cfd2b 100644 --- a/config/cmake/HDFUseFortran.cmake +++ b/config/cmake/HDFUseFortran.cmake @@ -43,72 +43,26 @@ file (STRINGS ${CMAKE_BINARY_DIR}/FCMangle.h CONTENTS REGEX "H5_FC_GLOBAL_\\(.*, string (REGEX MATCH "H5_FC_GLOBAL_\\(.*,.*\\) +(.*)" RESULT ${CONTENTS}) set (H5_FC_FUNC_ "H5_FC_FUNC_(name,NAME) ${CMAKE_MATCH_1}") -#test code source -set (SIZEOF_CODE - " - PROGRAM main - i = sizeof(x) - END PROGRAM - " -) -set (C_SIZEOF_CODE - " - PROGRAM main - USE ISO_C_BINDING - INTEGER(C_INT) :: a - INTEGER(C_SIZE_T) :: result - result = c_sizeof(a) - END PROGRAM - " -) -set (STORAGE_SIZE_CODE - " - PROGRAM main - INTEGER :: a - INTEGER :: result - result = storage_size(a) - END PROGRAM - " -) -set (CHAR_ALLOC - " - PROGRAM main - CHARACTER(:), ALLOCATABLE :: str - END PROGRAM - " -) -set (ISO_FORTRAN_ENV_CODE - " - PROGRAM main - USE, INTRINSIC :: ISO_FORTRAN_ENV, ONLY : atomic_logical_kind - LOGICAL(KIND=atomic_logical_kind) :: state - END PROGRAM - " -) -set (REALISNOTDOUBLE_CODE - " - MODULE type_mod - INTERFACE h5t - MODULE PROCEDURE h5t_real - MODULE PROCEDURE h5t_dble - END INTERFACE - CONTAINS - SUBROUTINE h5t_real(r) - REAL :: r - END SUBROUTINE h5t_real - SUBROUTINE h5t_dble(d) - DOUBLE PRECISION :: d - END SUBROUTINE h5t_dble - END MODULE type_mod - PROGRAM main - USE type_mod - REAL :: r - DOUBLE PRECISION :: d - CALL h5t(r) - CALL h5t(d) - END PROGRAM main - " -) +# Read source line beginning at the line matching Input:"START" and ending at the line matching Input:"END" +macro (READ_SOURCE SOURCE_START SOURCE_END RETURN_VAR) + file (READ "${HDF5_SOURCE_DIR}/m4/aclocal_fc.f90" SOURCE_MASTER) + string (REGEX MATCH "${SOURCE_START}[\\\t\\\n\\\r[].+]*${SOURCE_END}" SOURCE_CODE ${SOURCE_MASTER}) + set (RETURN_VAR "${SOURCE_CODE}") +endmacro () + +if (HDF5_REQUIRED_LIBRARIES) + set (CMAKE_REQUIRED_LIBRARIES "${HDF5_REQUIRED_LIBRARIES}") +endif () + +READ_SOURCE("PROGRAM PROG_FC_SIZEOF" "END PROGRAM PROG_FC_SIZEOF" SOURCE_CODE) +check_fortran_source_compiles (${SOURCE_CODE} ${HDF_PREFIX}_FORTRAN_HAVE_SIZEOF SRC_EXT f90) + +READ_SOURCE("PROGRAM PROG_FC_C_SIZEOF" "END PROGRAM PROG_FC_C_SIZEOF" SOURCE_CODE) +check_fortran_source_compiles 
(${SOURCE_CODE} ${HDF_PREFIX}_FORTRAN_HAVE_C_SIZEOF SRC_EXT f90) + +READ_SOURCE("PROGRAM PROG_FC_STORAGE_SIZE" "END PROGRAM PROG_FC_STORAGE_SIZE" SOURCE_CODE) +check_fortran_source_compiles (${SOURCE_CODE} ${HDF_PREFIX}_FORTRAN_HAVE_STORAGE_SIZE SRC_EXT f90) + set (ISO_C_BINDING_CODE " PROGRAM main @@ -122,17 +76,7 @@ set (ISO_C_BINDING_CODE END PROGRAM " ) - -if (HDF5_REQUIRED_LIBRARIES) - set (CMAKE_REQUIRED_LIBRARIES "${HDF5_REQUIRED_LIBRARIES}") -endif () -check_fortran_source_compiles (${SIZEOF_CODE} ${HDF_PREFIX}_FORTRAN_HAVE_SIZEOF SRC_EXT f90) -check_fortran_source_compiles (${C_SIZEOF_CODE} ${HDF_PREFIX}_FORTRAN_HAVE_C_SIZEOF SRC_EXT f90) -check_fortran_source_compiles (${STORAGE_SIZE_CODE} ${HDF_PREFIX}_FORTRAN_HAVE_STORAGE_SIZE SRC_EXT f90) -check_fortran_source_compiles (${ISO_FORTRAN_ENV_CODE} ${HDF_PREFIX}_HAVE_ISO_FORTRAN_ENV SRC_EXT f90) -check_fortran_source_compiles (${REALISNOTDOUBLE_CODE} ${HDF_PREFIX}_FORTRAN_DEFAULT_REAL_NOT_DOUBLE SRC_EXT f90) check_fortran_source_compiles (${ISO_C_BINDING_CODE} ${HDF_PREFIX}_FORTRAN_HAVE_ISO_C_BINDING SRC_EXT f90) -check_fortran_source_compiles (${CHAR_ALLOC} ${HDF_PREFIX}_FORTRAN_HAVE_CHAR_ALLOC SRC_EXT f90) #----------------------------------------------------------------------------- # Add debug information (intel Fortran : JB) diff --git a/config/cmake/LIBAEC/CMakeLists.txt b/config/cmake/LIBAEC/CMakeLists.txt index 11f79414e2a..54482163c5e 100644 --- a/config/cmake/LIBAEC/CMakeLists.txt +++ b/config/cmake/LIBAEC/CMakeLists.txt @@ -134,7 +134,7 @@ endif () # This is in here to help some of the GCC based IDES like Eclipse # and code blocks parse the compiler errors and warnings better. #----------------------------------------------------------------------------- -if (CMAKE_COMPILER_IS_GNUCC) +if (CMAKE_C_COMPILER_ID STREQUAL "GNU") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fmessage-length=0") endif () diff --git a/config/cmake/ZLIB/CMakeLists.txt b/config/cmake/ZLIB/CMakeLists.txt index 529f1446a58..7b5d0cf62dd 100644 --- a/config/cmake/ZLIB/CMakeLists.txt +++ b/config/cmake/ZLIB/CMakeLists.txt @@ -128,7 +128,7 @@ endif () #----------------------------------------------------------------------------- # Compiler specific flags : Shouldn't there be compiler tests for these #----------------------------------------------------------------------------- -if (CMAKE_COMPILER_IS_GNUCC) +if (CMAKE_C_COMPILER_ID STREQUAL "GNU") set (CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS} -Wno-strict-prototypes") endif () if (CMAKE_C_COMPILER_ID MATCHES "IntelLLVM" OR CMAKE_C_COMPILER_ID MATCHES "[Cc]lang") @@ -139,7 +139,7 @@ endif () # This is in here to help some of the GCC based IDES like Eclipse # and code blocks parse the compiler errors and warnings better. 
#----------------------------------------------------------------------------- -if (CMAKE_COMPILER_IS_GNUCC) +if (CMAKE_C_COMPILER_ID STREQUAL "GNU") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fmessage-length=0") endif () @@ -196,7 +196,7 @@ if(NOT MINGW) ) endif() -if(CMAKE_COMPILER_IS_GNUCC) +if(CMAKE_C_COMPILER_ID STREQUAL "GNU") if(ASM686) set(ZLIB_ASMS contrib/asm686/match.S) elseif (AMD64) diff --git a/config/gnu-warnings/8 b/config/gnu-warnings/8 index 5e7519dd795..323110950b1 100644 --- a/config/gnu-warnings/8 +++ b/config/gnu-warnings/8 @@ -1,3 +1,2 @@ -Wattribute-alias --Wcast-align=strict -Wshift-overflow=2 diff --git a/config/gnu-warnings/error-8 b/config/gnu-warnings/error-8 index 36c14143228..25839a847b4 100644 --- a/config/gnu-warnings/error-8 +++ b/config/gnu-warnings/error-8 @@ -7,3 +7,14 @@ # that GCC 8 only performs that analysis at -O3, though. # -Werror=maybe-uninitialized +# Ask GCC to warn about cast-align problems, even on platforms where +# it normally wouldn't (because those platforms don't require alignment). +# While this flag doesn't follow the -Werror format like above, it's +# placed here to make sure that it comes after the -Werror=cast-align +# line from error-general in the list of flags. Otherwise, if the +# '--enable-warnings-as-errors' flag isn't passed at configure time, +# the logic in config/gnu-flags that demotes these errors to their +# normal warning form will cause -Wcast-align to come after +# -Wcast-align=strict in the list of flags, causing it to take +# precedence and mask cast-align warnings from GCC on certain platforms. +-Wcast-align=strict diff --git a/config/sanitizer/sanitizers.cmake b/config/sanitizer/sanitizers.cmake index bf2aad27d7c..72f101f4c25 100644 --- a/config/sanitizer/sanitizers.cmake +++ b/config/sanitizer/sanitizers.cmake @@ -57,9 +57,15 @@ if(USE_SANITIZER) if(UNIX) append("-fno-omit-frame-pointer" CMAKE_C_FLAGS) message(STATUS "Building with sanitize, base flags=${CMAKE_C_SANITIZER_FLAGS}") + if (CMAKE_CXX_COMPILER_LOADED) + append("-fno-omit-frame-pointer" CMAKE_CXX_FLAGS) + endif () if(uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG") append("-O1" CMAKE_C_FLAGS) + if (CMAKE_CXX_COMPILER_LOADED) + append("-O1" CMAKE_CXX_FLAGS) + endif () endif() if(USE_SANITIZER MATCHES "([Aa]ddress)") @@ -80,11 +86,10 @@ if(USE_SANITIZER) endif() if(USE_SANITIZER MATCHES "([Mm]emory([Ww]ith[Oo]rigins)?)") - # Optional: -fno-optimize-sibling-calls -fsanitize-memory-track-origins=2 set(SANITIZER_MEM_FLAG "-fsanitize=memory") if(USE_SANITIZER MATCHES "([Mm]emory[Ww]ith[Oo]rigins)") message(STATUS "Testing with MemoryWithOrigins sanitizer") - append("-fsanitize-memory-track-origins" SANITIZER_MEM_FLAG) + append("-fno-optimize-sibling-calls -fsanitize-memory-track-origins=2" SANITIZER_MEM_FLAG) else() message(STATUS "Testing with Memory sanitizer") endif() @@ -177,6 +182,9 @@ if(USE_SANITIZER) if(SANITIZER_SELECTED_COMPATIBLE) message(STATUS " Building with ${SANITIZER_SELECTED_FLAGS}") append("${SANITIZER_SELECTED_FLAGS}" CMAKE_C_FLAGS) + if (CMAKE_CXX_COMPILER_LOADED) + append("${SANITIZER_SELECTED_FLAGS}" CMAKE_CXX_FLAGS) + endif () else() message(FATAL_ERROR "Unsupported value of USE_SANITIZER: ${USE_SANITIZER}") endif() @@ -184,6 +192,9 @@ if(USE_SANITIZER) if(USE_SANITIZER MATCHES "([Aa]ddress)") message(STATUS "Building with Address sanitizer") append("-fsanitize=address" CMAKE_C_FLAGS) + if (CMAKE_CXX_COMPILER_LOADED) + append("-fsanitize=address" CMAKE_CXX_FLAGS) + endif () if(AFL) append_quoteless(AFL_USE_ASAN=1 CMAKE_C_COMPILER_LAUNCHER) @@ 
-198,6 +209,9 @@ if(USE_SANITIZER) if(USE_SANITIZER MATCHES "([Aa]ddress)") message(STATUS "Building with Address sanitizer") append("/fsanitize=address" CMAKE_C_FLAGS) + if (CMAKE_CXX_COMPILER_LOADED) + append("/fsanitize=address" CMAKE_CXX_FLAGS) + endif () else() message(FATAL_ERROR "This sanitizer not yet supported in the MSVC environment: ${USE_SANITIZER}") endif() diff --git a/configure.ac b/configure.ac index 84e5c3389a3..962405abcf3 100644 --- a/configure.ac +++ b/configure.ac @@ -830,8 +830,13 @@ if test "X$HDF_FORTRAN" = "Xyes"; then PAC_FC_NATIVE_INTEGER ## Find all available KINDs - PAC_FC_AVAIL_KINDS - ## Find all sizeofs for available KINDs + if test "X$HAVE_ISO_FORTRAN_ENV" = "X1";then + PAC_FC_AVAIL_KINDS_F08 + else + PAC_FC_AVAIL_KINDS + fi + + ## Find all SIZEOFs for available KINDs PAC_FC_SIZEOF_INT_KINDS PAC_FC_SIZEOF_REAL_KINDS @@ -3055,6 +3060,16 @@ if test -n "$PARALLEL"; then [AC_MSG_RESULT([no])] ) AC_LANG_POP(Fortran) + + if test "X$HDF5_TESTS" = "Xyes"; then + AC_SUBST([MPI_LOGICAL_KIND]) + PAC_FIND_MPI_LOGICAL_KIND + if test "X$" = "Xyes"; then + HAVE_ISO_FORTRAN_ENV="1" + AC_DEFINE([HAVE_ISO_FORTRAN_ENV], [1], [Define if Fortran supports ISO_FORTRAN_ENV (F08)]) + fi + fi + fi ## ---------------------------------------------------------------------- diff --git a/doxygen/dox/ExamplesAPI.dox b/doxygen/dox/ExamplesAPI.dox index 5ab34000772..12b585bdeb2 100644 --- a/doxygen/dox/ExamplesAPI.dox +++ b/doxygen/dox/ExamplesAPI.dox @@ -394,8 +394,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_arrayatt.h5 -h5ex_t_arrayatt.tst -h5ex_t_arrayatt.ddl +h5ex_t_arrayatt.tst +h5ex_t_arrayatt.ddl Read / Write Array (Dataset) @@ -407,8 +407,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_array.h5 -h5ex_t_array.tst -h5ex_t_array.ddl +h5ex_t_array.tst +h5ex_t_array.ddl Read / Write Bitfield (Attribute) @@ -420,8 +420,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_bitatt.h5 -h5ex_t_bitatt.tst -h5ex_t_bitatt.ddl +h5ex_t_bitatt.tst +h5ex_t_bitatt.ddl Read / Write Bitfield (Dataset) @@ -433,8 +433,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_bit.h5 -h5ex_t_bit.tst -h5ex_t_bit.ddl +h5ex_t_bit.tst +h5ex_t_bit.ddl Read / Write Compound (Attribute) @@ -446,8 +446,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_cmpdatt.h5 -h5ex_t_cmpdatt.tst -h5ex_t_cmpdatt.ddl +h5ex_t_cmpdatt.tst +h5ex_t_cmpdatt.ddl Read / Write Compound (Dataset) @@ -459,8 +459,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_cmpd.h5 -h5ex_t_cmpd.tst -h5ex_t_cmpd.ddl +h5ex_t_cmpd.tst +h5ex_t_cmpd.ddl Commit Named Datatype and Read Back @@ -472,8 +472,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_commit.h5 -h5ex_t_commit.tst -h5ex_t_commit.ddl +h5ex_t_commit.tst +h5ex_t_commit.ddl Convert Between Datatypes in Memory @@ -482,8 +482,8 @@ FORTRAN FORTRAN Java JavaObj MATLAB PyHigh PyLow h5ex_t_convert.h5 -h5ex_t_convert.tst -h5ex_t_convert.ddl +h5ex_t_convert.tst +h5ex_t_convert.ddl Read / Write Complex Compound (Attribute) @@ -492,8 +492,8 @@ FORTRAN FORTRAN Java JavaObj MATLAB PyHigh PyLow h5ex_t_cpxcmpdatt.h5 -h5ex_t_cpxcmpdatt.tst -h5ex_t_cpxcmpdatt.ddl +h5ex_t_cpxcmpdatt.tst +h5ex_t_cpxcmpdatt.ddl Read / Write Complex Compound (Dataset) @@ -502,8 +502,8 @@ FORTRAN FORTRAN Java JavaObj MATLAB PyHigh PyLow h5ex_t_cpxcmpd.h5 -h5ex_t_cpxcmpd.tst -h5ex_t_cpxcmpd.ddl +h5ex_t_cpxcmpd.tst +h5ex_t_cpxcmpd.ddl Read / Write Enumerated (Attribute) @@ -513,8 +513,8 @@ FORTRAN Java JavaObj MATLAB PyHigh PyLow h5ex_t_enumatt.h5 -h5ex_t_enumatt.tst -h5ex_t_enumatt.ddl +h5ex_t_enumatt.tst +h5ex_t_enumatt.ddl Read / Write Enumerated (Dataset) @@ -524,8 +524,8 @@ FORTRAN Java JavaObj MATLAB 
PyHigh PyLow h5ex_t_enum.h5 -h5ex_t_enum.tst -h5ex_t_enum.ddl +h5ex_t_enum.tst +h5ex_t_enum.ddl Read / Write Floating Point (Attribute) @@ -537,8 +537,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_floatatt.h5 -h5ex_t_floatatt.tst -h5ex_t_floatatt.ddl +h5ex_t_floatatt.tst +h5ex_t_floatatt.ddl Read / Write Floating Point (Dataset) @@ -550,8 +550,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_float.h5 -h5ex_t_float.tst -h5ex_t_float.ddl +h5ex_t_float.tst +h5ex_t_float.ddl Read / Write Integer Datatype (Attribute) @@ -563,8 +563,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_intatt.h5 -h5ex_t_intatt.tst -h5ex_t_intatt.ddl +h5ex_t_intatt.tst +h5ex_t_intatt.ddl Read / Write Integer Datatype (Dataset) @@ -576,8 +576,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_int.h5 -h5ex_t_int.tst -h5ex_t_int.ddl +h5ex_t_int.tst +h5ex_t_int.ddl Read / Write Object References (Attribute) @@ -589,7 +589,7 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_objrefatt.h5 -h5ex_t_objrefatt.tst +h5ex_t_objrefatt.tst h5ex_t_objrefatt.ddl @@ -602,7 +602,7 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_objref.h5 -h5ex_t_objref.tst +h5ex_t_objref.tst h5ex_t_objref.ddl @@ -615,8 +615,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_opaqueatt.h5 -h5ex_t_opaqueatt.tst -h5ex_t_opaqueatt.ddl +h5ex_t_opaqueatt.tst +h5ex_t_opaqueatt.ddl Read / Write Opaque (Dataset) @@ -628,8 +628,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_opaque.h5 -h5ex_t_opaque.tst -h5ex_t_opaque.ddl +h5ex_t_opaque.tst +h5ex_t_opaque.ddl Read / Write Region References (Attribute) @@ -641,7 +641,7 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_regrefatt.h5 -h5ex_t_regrefatt.tst +h5ex_t_regrefatt.tst h5ex_t_regrefatt.ddl @@ -654,7 +654,7 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_regref.h5 -h5ex_t_regref.tst +h5ex_t_regref.tst h5ex_t_regref.ddl @@ -667,8 +667,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_stringatt.h5 -h5ex_t_stringatt.tst -h5ex_t_stringatt.ddl +h5ex_t_stringatt.tst +h5ex_t_stringatt.ddl Read / Write String (Dataset) @@ -680,8 +680,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_string.h5 -h5ex_t_string.tst -h5ex_t_string.ddl +h5ex_t_string.tst +h5ex_t_string.ddl Read / Write Variable Length (Attribute) @@ -691,8 +691,8 @@ FORTRAN Java JavaObj MATLAB PyHigh PyLow h5ex_t_vlenatt.h5 -h5ex_t_vlenatt.tst -h5ex_t_vlenatt.ddl +h5ex_t_vlenatt.tst +h5ex_t_vlenatt.ddl Read / Write Variable Length (Dataset) @@ -702,8 +702,8 @@ FORTRAN Java JavaObj MATLAB PyHigh PyLow h5ex_t_vlen.h5 -h5ex_t_vlen.tst -h5ex_t_vlen.ddl +h5ex_t_vlen.tst +h5ex_t_vlen.ddl Read / Write Variable Length String (Attribute) @@ -713,8 +713,8 @@ FORTRAN Java JavaObj MATLAB PyHigh PyLow h5ex_t_vlstringatt.h5 -h5ex_t_vlstringatt.tst -h5ex_t_vlstringatt.ddl +h5ex_t_vlstringatt.tst +h5ex_t_vlstringatt.ddl Read / Write Variable Length String (Dataset) @@ -726,8 +726,8 @@ FORTRAN MATLAB PyHigh PyLow h5ex_t_vlstring.h5 -h5ex_t_vlstring.tst -h5ex_t_vlstring.ddl +h5ex_t_vlstring.tst +h5ex_t_vlstring.ddl diff --git a/fortran/src/CMakeLists.txt b/fortran/src/CMakeLists.txt index 80ee2889bc7..1b23c94a976 100644 --- a/fortran/src/CMakeLists.txt +++ b/fortran/src/CMakeLists.txt @@ -79,6 +79,11 @@ if (H5_FORTRAN_HAVE_CHAR_ALLOC) set (CMAKE_H5_FORTRAN_HAVE_CHAR_ALLOC 1) endif () +set (CMAKE_H5_MPI_LOGICAL_KIND 0) +if (H5_MPI_LOGICAL_KIND) + set (CMAKE_H5_MPI_LOGICAL_KIND 1) +endif () + configure_file (${HDF5_F90_SRC_SOURCE_DIR}/H5config_f.inc.cmake ${HDF5_F90_BINARY_DIR}/H5config_f.inc @ONLY) configure_file (${HDF5_F90_SRC_SOURCE_DIR}/H5fort_type_defines.h.cmake ${HDF5_F90_BINARY_DIR}/H5fort_type_defines.h @ONLY) diff --git a/fortran/src/H5Pf.c b/fortran/src/H5Pf.c index 
17045a25570..ce62673686c 100644 --- a/fortran/src/H5Pf.c +++ b/fortran/src/H5Pf.c @@ -4606,8 +4606,8 @@ h5pset_fapl_mpio_c(hid_t_f *prp_id, void *comm, void *info) herr_t ret; MPI_Comm c_comm; MPI_Info c_info; - c_comm = MPI_Comm_f2c(*((int *)comm)); - c_info = MPI_Info_f2c(*((int *)info)); + c_comm = MPI_Comm_f2c(*((MPI_Fint *)comm)); + c_info = MPI_Info_f2c(*((MPI_Fint *)info)); /* * Call H5Pset_mpi function. @@ -4677,8 +4677,8 @@ h5pset_mpi_params_c(hid_t_f *prp_id, void *comm, void *info) herr_t ret; MPI_Comm c_comm; MPI_Info c_info; - c_comm = MPI_Comm_f2c(*((int *)comm)); - c_info = MPI_Info_f2c(*((int *)info)); + c_comm = MPI_Comm_f2c(*((MPI_Fint *)comm)); + c_info = MPI_Info_f2c(*((MPI_Fint *)info)); /* * Call H5Pset_mpi_params. diff --git a/fortran/src/H5config_f.inc.cmake b/fortran/src/H5config_f.inc.cmake index 31c149883cf..bc9f036e020 100644 --- a/fortran/src/H5config_f.inc.cmake +++ b/fortran/src/H5config_f.inc.cmake @@ -79,6 +79,14 @@ ! Define if Fortran C_BOOL is different from default LOGICAL #define H5_FORTRAN_C_BOOL_IS_UNIQUE @H5_FORTRAN_C_BOOL_IS_UNIQUE@ +! Define MPI Fortran KIND of LOGICAL +#cmakedefine01 CMAKE_H5_MPI_LOGICAL_KIND +#if CMAKE_H5_MPI_LOGICAL_KIND == 0 +#undef H5_MPI_LOGICAL_KIND +#else +#define H5_MPI_LOGICAL_KIND @H5_MPI_LOGICAL_KIND@ +#endif + ! Define if Fortran supports ISO_FORTRAN_ENV (F08) #cmakedefine01 CMAKE_H5_HAVE_ISO_FORTRAN_ENV #if CMAKE_H5_HAVE_ISO_FORTRAN_ENV == 0 diff --git a/fortran/src/H5config_f.inc.in b/fortran/src/H5config_f.inc.in index 6e465ca014b..7f2d3cad8a9 100644 --- a/fortran/src/H5config_f.inc.in +++ b/fortran/src/H5config_f.inc.in @@ -50,6 +50,9 @@ ! Define if Fortran supports ISO_FORTRAN_ENV (F08) #undef HAVE_ISO_FORTRAN_ENV +! Define MPI Fortran KIND of LOGICAL +#undef MPI_LOGICAL_KIND + ! Define the size of C's double #undef SIZEOF_DOUBLE diff --git a/fortran/test/tH5P_F03.F90 b/fortran/test/tH5P_F03.F90 index c962d52821b..64dd1d2891c 100644 --- a/fortran/test/tH5P_F03.F90 +++ b/fortran/test/tH5P_F03.F90 @@ -47,7 +47,7 @@ MODULE test_genprop_cls_cb1_mod CONTAINS - INTEGER FUNCTION test_genprop_cls_cb1_f(list_id, create_data ) bind(C) + INTEGER(KIND=C_INT) FUNCTION test_genprop_cls_cb1_f(list_id, create_data ) bind(C) IMPLICIT NONE diff --git a/fortran/test/tH5T.F90 b/fortran/test/tH5T.F90 index a38cbeadf53..c4f6aa0ece7 100644 --- a/fortran/test/tH5T.F90 +++ b/fortran/test/tH5T.F90 @@ -941,7 +941,7 @@ END SUBROUTINE enumtest !------------------------------------------------------------------------- ! * Function: test_derived_flt ! * -! * Purpose: Tests user-define and query functions of floating-point types. +! * Purpose: Tests user-defined and query functions of floating-point types. ! * test h5tget/set_fields_f. ! * ! 
* Return: Success: 0 diff --git a/fortran/testpar/mpi_param.F90 b/fortran/testpar/mpi_param.F90 index 09a95d47177..1d7f8ff70f8 100644 --- a/fortran/testpar/mpi_param.F90 +++ b/fortran/testpar/mpi_param.F90 @@ -18,9 +18,6 @@ SUBROUTINE mpi_param_03(nerrors) -#ifdef H5_HAVE_ISO_FORTRAN_ENV - USE, INTRINSIC :: iso_fortran_env, ONLY : atomic_logical_kind -#endif USE MPI USE HDF5 USE TH5_MISC @@ -39,8 +36,8 @@ SUBROUTINE mpi_param_03(nerrors) INTEGER(KIND=MPI_INTEGER_KIND) :: info, info_ret INTEGER(KIND=MPI_INTEGER_KIND) :: comm, comm_ret INTEGER(KIND=MPI_INTEGER_KIND) :: nkeys -#ifdef H5_HAVE_ISO_FORTRAN_ENV - LOGICAL(KIND=atomic_logical_kind) :: flag +#ifdef H5_MPI_LOGICAL_KIND + LOGICAL(KIND=H5_MPI_LOGICAL_KIND) :: flag #else LOGICAL(KIND=MPI_INTEGER_KIND) :: flag #endif @@ -178,10 +175,6 @@ END SUBROUTINE mpi_param_03 SUBROUTINE mpi_param_08(nerrors) #ifdef H5_HAVE_MPI_F08 - -#ifdef H5_HAVE_ISO_FORTRAN_ENV - USE, INTRINSIC :: iso_fortran_env, ONLY : atomic_logical_kind -#endif USE MPI_F08 USE HDF5 USE TH5_MISC @@ -199,8 +192,8 @@ SUBROUTINE mpi_param_08(nerrors) TYPE(MPI_INFO) :: info, info_ret TYPE(MPI_COMM) :: comm, comm_ret INTEGER(KIND=MPI_INTEGER_KIND) :: nkeys -#ifdef H5_HAVE_ISO_FORTRAN_ENV - LOGICAL(KIND=atomic_logical_kind) :: flag +#ifdef H5_MPI_LOGICAL_KIND + LOGICAL(KIND=H5_MPI_LOGICAL_KIND) :: flag #else LOGICAL(KIND=MPI_INTEGER_KIND) :: flag #endif diff --git a/fortran/testpar/ptest.F90 b/fortran/testpar/ptest.F90 index 3d7280bbcf8..6e34ffdaaac 100644 --- a/fortran/testpar/ptest.F90 +++ b/fortran/testpar/ptest.F90 @@ -37,6 +37,52 @@ PROGRAM parallel_test CHARACTER(LEN=10), DIMENSION(1:2) :: chr_chunk =(/"contiguous", "chunk "/) INTEGER(KIND=MPI_INTEGER_KIND) :: mpi_int_type + INTERFACE + + SUBROUTINE mpi_param_03(ret_total_error) + IMPLICIT NONE + INTEGER, INTENT(inout) :: ret_total_error + END SUBROUTINE mpi_param_03 + + SUBROUTINE mpi_param_08(ret_total_error) + IMPLICIT NONE + INTEGER, INTENT(inout) :: ret_total_error + END SUBROUTINE mpi_param_08 + + SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors) + USE MPI + IMPLICIT NONE + INTEGER, INTENT(in) :: length + LOGICAL, INTENT(in) :: do_collective + LOGICAL, INTENT(in) :: do_chunk + INTEGER(KIND=MPI_INTEGER_KIND), INTENT(in) :: mpi_size + INTEGER(KIND=MPI_INTEGER_KIND), INTENT(in) :: mpi_rank + INTEGER, INTENT(inout) :: nerrors + END SUBROUTINE hyper + + SUBROUTINE pmultiple_dset_hyper_rw(do_collective, do_chunk, mpi_size, mpi_rank, nerrors) + USE MPI + IMPLICIT NONE + LOGICAL, INTENT(in) :: do_collective + LOGICAL, INTENT(in) :: do_chunk + INTEGER(KIND=MPI_INTEGER_KIND), INTENT(in) :: mpi_size + INTEGER(KIND=MPI_INTEGER_KIND), INTENT(in) :: mpi_rank + INTEGER, INTENT(inout) :: nerrors + END SUBROUTINE pmultiple_dset_hyper_rw + + SUBROUTINE multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors) + USE MPI + IMPLICIT NONE + INTEGER, INTENT(in) :: length + LOGICAL, INTENT(in) :: do_collective + LOGICAL, INTENT(in) :: do_chunk + INTEGER(KIND=MPI_INTEGER_KIND), INTENT(in) :: mpi_size + INTEGER(KIND=MPI_INTEGER_KIND), INTENT(in) :: mpi_rank + INTEGER, INTENT(inout) :: nerrors + END SUBROUTINE multiple_dset_write + + END INTERFACE + ! ! initialize MPI ! 
diff --git a/fortran/testpar/subfiling.F90 b/fortran/testpar/subfiling.F90 index 9bee38bcc6f..67f201e0ec7 100644 --- a/fortran/testpar/subfiling.F90 +++ b/fortran/testpar/subfiling.F90 @@ -18,9 +18,6 @@ PROGRAM subfiling_test USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_INT64_T -#ifdef H5_HAVE_ISO_FORTRAN_ENV - USE, INTRINSIC :: iso_fortran_env, ONLY : atomic_logical_kind -#endif USE HDF5 USE MPI USE TH5_MISC @@ -50,8 +47,8 @@ PROGRAM subfiling_test INTEGER(C_INT64_T) inode TYPE(H5FD_subfiling_config_t) :: vfd_config TYPE(H5FD_ioc_config_t) :: vfd_config_ioc -#ifdef H5_HAVE_ISO_FORTRAN_ENV - LOGICAL(KIND=atomic_logical_kind) :: flag +#ifdef H5_MPI_LOGICAL_KIND + LOGICAL(KIND=H5_MPI_LOGICAL_KIND) :: flag #else LOGICAL(KIND=MPI_INTEGER_KIND) :: flag #endif @@ -137,7 +134,7 @@ PROGRAM subfiling_test ENDIF CALL mpi_info_get(info_ret,"foo", 3_MPI_INTEGER_KIND, info_val, flag, mpierror) - IF(LOGICAL(flag) .EQV. .TRUE.)THEN + IF(LOGICAL(flag) .EQV. LOGICAL(.TRUE.))THEN IF(info_val.NE."bar")THEN IF(mpi_rank.EQ.0) & WRITE(*,*) "Failed H5Pset_mpi_params_f and H5Pget_mpi_params_f sequence" diff --git a/hl/src/H5TB.c b/hl/src/H5TB.c index e718605b5a3..82977b3253c 100644 --- a/hl/src/H5TB.c +++ b/hl/src/H5TB.c @@ -2025,7 +2025,7 @@ H5TBinsert_field(hid_t loc_id, const char *dset_name, const char *field_name, hi goto out; /* alloc fill value attribute buffer */ - if (NULL == (tmp_fill_buf = (unsigned char *)malloc(total_size))) + if (NULL == (tmp_fill_buf = (unsigned char *)calloc(1, total_size))) goto out; /* get the fill value attributes */ diff --git a/hl/test/test_table.c b/hl/test/test_table.c index c6614343037..8996fa46480 100644 --- a/hl/test/test_table.c +++ b/hl/test/test_table.c @@ -376,6 +376,8 @@ test_table(hid_t fid, int do_write) field_type[3] = H5T_NATIVE_DOUBLE; field_type[4] = H5T_NATIVE_INT; + memset(wbufd, 0, NRECORDS * sizeof(particle_t)); + /*------------------------------------------------------------------------- * * Functions tested: diff --git a/m4/aclocal_fc.f90 b/m4/aclocal_fc.f90 index 9e4bfde3c90..918fc6769dd 100644 --- a/m4/aclocal_fc.f90 +++ b/m4/aclocal_fc.f90 @@ -21,8 +21,7 @@ ! PROGRAM PROG_FC_ISO_FORTRAN_ENV - USE, INTRINSIC :: ISO_FORTRAN_ENV, ONLY : atomic_logical_kind - LOGICAL(KIND=atomic_logical_kind) :: state + USE, INTRINSIC :: ISO_FORTRAN_ENV, ONLY : logical_kinds END PROGRAM PROG_FC_ISO_FORTRAN_ENV PROGRAM PROG_FC_SIZEOF @@ -183,6 +182,70 @@ PROGRAM FC_AVAIL_KINDS END PROGRAM FC_AVAIL_KINDS !---- END ----- Determine the available KINDs for REALs and INTEGERs +!---- START ----- Determine the available KINDs for REALs, INTEGERs and LOGICALs -- ISO_FORTRAN_ENV (F08) +PROGRAM FC08_AVAIL_KINDS + USE, INTRINSIC :: ISO_FORTRAN_ENV, ONLY : stdout=>OUTPUT_UNIT, integer_kinds, real_kinds, logical_kinds + IMPLICIT NONE + INTEGER :: ik, jk, k, max_decimal_prec + INTEGER :: num_rkinds, num_ikinds, num_lkinds + + ! Find integer KINDs + + num_ikinds = SIZE(integer_kinds) + + DO k = 1, num_ikinds + WRITE(stdout,'(I0)', ADVANCE='NO') integer_kinds(k) + IF(k.NE.num_ikinds)THEN + WRITE(stdout,'(A)',ADVANCE='NO') ',' + ELSE + WRITE(stdout,'()') + ENDIF + ENDDO + + ! 
Find real KINDs + + num_rkinds = SIZE(real_kinds) + + max_decimal_prec = 1 + + prec: DO ik = 2, 36 + exp: DO jk = 1, 700 + k = SELECTED_REAL_KIND(ik,jk) + IF(k.LT.0) EXIT exp + max_decimal_prec = ik + ENDDO exp + ENDDO prec + + DO k = 1, num_rkinds + WRITE(stdout,'(I0)', ADVANCE='NO') real_kinds(k) + IF(k.NE.num_rkinds)THEN + WRITE(stdout,'(A)',ADVANCE='NO') ',' + ELSE + WRITE(stdout,'()') + ENDIF + ENDDO + + WRITE(stdout,'(I0)') max_decimal_prec + WRITE(stdout,'(I0)') num_ikinds + WRITE(stdout,'(I0)') num_rkinds + + ! Find logical KINDs + + num_lkinds = SIZE(logical_kinds) + WRITE(stdout,'(I0)') num_lkinds + + DO k = 1, num_lkinds + WRITE(stdout,'(I0)', ADVANCE='NO') logical_kinds(k) + IF(k.NE.num_lkinds)THEN + WRITE(stdout,'(A)',ADVANCE='NO') ',' + ELSE + WRITE(stdout,'()') + ENDIF + ENDDO + +END PROGRAM FC08_AVAIL_KINDS +!---- END ----- Determine the available KINDs for REALs, INTEGERs and LOGICALs -- ISO_FORTRAN_ENV (F08) + PROGRAM FC_MPI_CHECK USE mpi INTEGER :: comm, amode, info, fh, ierror diff --git a/m4/aclocal_fc.m4 b/m4/aclocal_fc.m4 index 6e5703e9132..cfcfbcf7ca2 100644 --- a/m4/aclocal_fc.m4 +++ b/m4/aclocal_fc.m4 @@ -323,6 +323,104 @@ AC_RUN_IFELSE([$TEST_SRC], AC_LANG_POP([Fortran]) ]) + +dnl -------------------------------------------------------------- +dnl Determine the available KINDs for REALs, INTEGERs and LOGICALS +dnl -------------------------------------------------------------- +dnl +dnl This is a runtime test. +dnl +AC_DEFUN([PAC_FC_AVAIL_KINDS_F08],[ +AC_LANG_PUSH([Fortran]) +TEST_SRC="`sed -n '/PROGRAM FC08_AVAIL_KINDS/,/END PROGRAM FC08_AVAIL_KINDS/p' $srcdir/m4/aclocal_fc.f90`" +AC_RUN_IFELSE([$TEST_SRC], + [ + dnl The output from the above program will be: + dnl -- LINE 1 -- valid integer kinds (comma separated list) + dnl -- LINE 2 -- valid real kinds (comma separated list) + dnl -- LINE 3 -- max decimal precision for reals + dnl -- LINE 4 -- number of valid integer kinds + dnl -- LINE 5 -- number of valid real kinds + dnl -- LINE 6 -- number of valid logical kinds + dnl -- LINE 7 -- valid logical kinds (comma separated list) + + pac_validIntKinds=$(./conftest$EXEEXT 2>&1 | sed -n '1p') + pac_validRealKinds=$(./conftest$EXEEXT 2>&1 | sed -n '2p') + PAC_FC_MAX_REAL_PRECISION=$(./conftest$EXEEXT 2>&1 | sed -n '3p') + AC_DEFINE_UNQUOTED([PAC_FC_MAX_REAL_PRECISION], $PAC_FC_MAX_REAL_PRECISION, [Define Fortran Maximum Real Decimal Precision]) + + PAC_FC_ALL_INTEGER_KINDS="{`echo $pac_validIntKinds`}" + PAC_FC_ALL_REAL_KINDS="{`echo $pac_validRealKinds`}" + + PAC_FORTRAN_NUM_INTEGER_KINDS=$(./conftest$EXEEXT 2>&1 | sed -n '4p') + H5CONFIG_F_NUM_IKIND="INTEGER, PARAMETER :: num_ikinds = `echo $PAC_FORTRAN_NUM_INTEGER_KINDS`" + H5CONFIG_F_IKIND="INTEGER, DIMENSION(1:num_ikinds) :: ikind = (/`echo $pac_validIntKinds`/)" + H5CONFIG_F_NUM_RKIND="INTEGER, PARAMETER :: num_rkinds = $(./conftest$EXEEXT 2>&1 | sed -n '5p')" + H5CONFIG_F_RKIND="INTEGER, DIMENSION(1:num_rkinds) :: rkind = (/`echo $pac_validRealKinds`/)" + + AC_DEFINE_UNQUOTED([H5CONFIG_F_NUM_RKIND], $H5CONFIG_F_NUM_RKIND, [Define number of valid Fortran REAL KINDs]) + AC_DEFINE_UNQUOTED([H5CONFIG_F_NUM_IKIND], $H5CONFIG_F_NUM_IKIND, [Define number of valid Fortran INTEGER KINDs]) + AC_DEFINE_UNQUOTED([H5CONFIG_F_RKIND], $H5CONFIG_F_RKIND, [Define valid Fortran REAL KINDs]) + AC_DEFINE_UNQUOTED([H5CONFIG_F_IKIND], $H5CONFIG_F_IKIND, [Define valid Fortran INTEGER KINDs]) + + PAC_FORTRAN_NUM_LOGICAL_KINDS=$(./conftest$EXEEXT 2>&1 | sed -n '6p') + pac_validLogicalKinds=$(./conftest$EXEEXT 2>&1 | sed -n '7p') + 
PAC_FC_ALL_LOGICAL_KINDS="{`echo $pac_validLogicalKinds`}" + + AC_MSG_CHECKING([for Number of Fortran INTEGER KINDs]) + AC_MSG_RESULT([$PAC_FORTRAN_NUM_INTEGER_KINDS]) + AC_MSG_CHECKING([for Fortran INTEGER KINDs]) + AC_MSG_RESULT([$PAC_FC_ALL_INTEGER_KINDS]) + AC_MSG_CHECKING([for Fortran REAL KINDs]) + AC_MSG_RESULT([$PAC_FC_ALL_REAL_KINDS]) + AC_MSG_CHECKING([for Fortran REALs maximum decimal precision]) + AC_MSG_RESULT([$PAC_FC_MAX_REAL_PRECISION]) + AC_MSG_CHECKING([for Number of Fortran LOGICAL KINDs]) + AC_MSG_RESULT([$PAC_FORTRAN_NUM_LOGICAL_KINDS]) + AC_MSG_CHECKING([for Fortran LOGICAL KINDs]) + AC_MSG_RESULT([$PAC_FC_ALL_LOGICAL_KINDS]) +],[ + AC_MSG_RESULT([Error]) + AC_MSG_ERROR([Failed to run Fortran program to determine available KINDs]) +],[]) +AC_LANG_POP([Fortran]) +]) + +AC_DEFUN([PAC_FIND_MPI_LOGICAL_KIND],[ +AC_REQUIRE([PAC_FC_AVAIL_KINDS]) +AC_MSG_CHECKING([default Fortran KIND of LOGICAL in MPI]) +AC_LANG_PUSH([Fortran]) +saved_FCFLAGS=$FCFLAGS +check_Intel="`$FC -V 2>&1 |grep '^Intel'`" +if test X != "X$check_Intel"; then + FCFLAGS="-warn error" +else + FCFLAGS="" +fi +for kind in `echo $pac_validLogicalKinds | sed -e 's/,/ /g'`; do + AC_COMPILE_IFELSE([ + PROGRAM main + USE MPI + IMPLICIT NONE + LOGICAL(KIND=$kind) :: flag + INTEGER(KIND=MPI_INTEGER_KIND) :: info_ret, mpierror + CHARACTER(LEN=3) :: info_val + CALL mpi_info_get(info_ret,"foo", 3_MPI_INTEGER_KIND, info_val, flag, mpierror) + END], + [AC_SUBST([PAC_MPI_LOGICAL_KIND]) PAC_MPI_LOGICAL_KIND=$kind], + [] + ) +done +if test "X$PAC_MPI_LOGICAL_KIND" = "X"; then + AC_MSG_ERROR([Failed to find Fortran KIND of LOGICAL in MPI]) +else + AC_DEFINE_UNQUOTED([MPI_LOGICAL_KIND], [$PAC_MPI_LOGICAL_KIND], [Define MPI Fortran KIND of LOGICAL]) + AC_MSG_RESULT([$PAC_MPI_LOGICAL_KIND]) +fi +FCFLAGS=$saved_FCFLAGS +AC_LANG_POP([Fortran]) +]) + AC_DEFUN([PAC_FC_SIZEOF_INT_KINDS],[ AC_REQUIRE([PAC_FC_AVAIL_KINDS]) AC_MSG_CHECKING([sizeof of available INTEGER KINDs]) diff --git a/release_docs/INSTALL_Cygwin.txt b/release_docs/INSTALL_Cygwin.txt index 34a57ee0a41..6061c38f833 100644 --- a/release_docs/INSTALL_Cygwin.txt +++ b/release_docs/INSTALL_Cygwin.txt @@ -2,20 +2,21 @@ HDF5 Build and Install Instructions for Cygwin ************************************************************************ -This document is an instruction on how to build, test and install HDF5 library on -Cygwin. See detailed information in hdf5/INSTALL. +This document is an instruction on how to build, test and install HDF5 library +on Cygwin. See detailed information in hdf5/INSTALL. -NOTE: hdf5 can be built with CMake, see the INSTALL_CMake.txt file for more guidance. +NOTE: hdf5 can be built with CMake, see the INSTALL_CMake.txt file for more +guidance. Preconditions: -------------- -1. Installed Cygwin 1.7.25 or higher +1. Cygwin 3.5.1 or higher Installed To install the Cygwin net release, go to http://www.cygwin.com and - click on "setup-x86.exe" (32-bit installation) under the heading + click on "setup-x86_64.exe" under the heading "Current Cygwin DLL version". This will download a GUI - installer called setup-x86.exe which can be run to download a complete + installer called setup-x86_64.exe which can be run to download a complete Cygwin installation via the internet. Then follow the instructions on each screen to install Cygwin. 
@@ -34,10 +35,10 @@ Preconditions: The following compilers are supported by HDF5 and included in the Cygwin package system: - gcc (4.7.3 and 4.9.2), which includes: - gcc4-core : C compiler - gcc4-g++ : C++ compiler - gcc4-fortran : fortran compiler + gcc, which includes: + gcc-core : C compiler + gcc-g++ : C++ compiler + gcc-fortran : Fortran compiler 2.1.1 Using Compilers Not Supported @@ -59,18 +60,13 @@ Preconditions: 2.2 HDF5 External Library Dependencies - 2.2.1 Zlib + 2.2.1 zlib - zlib-1.2.5 or later is supported and tested on Cygwin. + zlib-1.2.8 or later is supported and tested. 2.2.2 Szip - The HDF5 library has a predefined compression filter that uses - the extended-Rice lossless compression algorithm for chunked - datasets. For more information on Szip compression, license terms, - and obtaining the Szip source code, see: - - https://portal.hdfgroup.org/display/HDF5/Szip+Compression+in+HDF+Products + libaec-1.1.2 or later is supported and tested. 2.3 Additional Utilities @@ -91,8 +87,8 @@ Build, Test and Install HDF5 on Cygwin -------------------------------------- 1. Get HDF5 source code package - Users can download HDF5 source code package from HDF website - (http://hdfgroup.org). + Users can download the HDF5 source code from the official GitHub repository + (https://github.com/HDFGroup/hdf5). 2. Unpacking the distribution @@ -116,19 +112,17 @@ Build, Test and Install HDF5 on Cygwin 2. Setup Environment In Cygwin, most compilers and setting are automatically detected during - the configure script. However, if you are building Fortran we recommend + the configure script. However, if you are building Fortran, we recommend that you explicitly set the "FC" variable in your environment to use the - gfortran compiler. For example, issue the command: + gfortran compiler. For example, issue the command: - $ export FC=gfortran + $ export FC=gfortran 4. Configuring - Notes: See detailed information in hdf5/release_docs/INSTALL, - part 5. Full installation instructions for source - distributions + Notes: See detailed information in hdf5/release_docs/INSTALL_Auto.txt. - The host configuration file for cygwin i686-pc-cygwin is located + The host configuration file for Cygwin is located in the `config' directory and are based on architecture name, vendor name, and operating system which are displayed near the beginning of the `configure' output. 
The host config file influences @@ -138,34 +132,34 @@ Build, Test and Install HDF5 on Cygwin To configure HDF5 C Library, using - $ ./configure + $ ./configure To configure HDF5 C/C++ Library, using - $ ./configure --enable-cxx + $ ./configure --enable-cxx To configure HDF5 C/Fortran Library, using - $ ./configure --enable-fortran + $ ./configure --enable-fortran To configure HDF5 C with Szip library, using - $ ./configure --with-szlib="path to szlib" + $ ./configure --with-szlib="path to szlib" - For example, if szip library was installed in the directory + For example, if Szip library was installed in the directory /cygdrive/c/szip, which is parent directory of "include" and "lib", then the following command will configure HDF5 C library - with szip enabled: + with Szip enabled: - $ ./configure --with-szlib=/cygdrive/c/szip + $ ./configure --with-szlib=/cygdrive/c/szip - To configure HDF5 C without Zlib, + To configure HDF5 C without zlib, To disable zlib, using $ ./configure --without-zlib - Two ways to configure HDF5 C with specified Zlib + Two ways to configure HDF5 C with specified zlib Using @@ -175,7 +169,7 @@ Build, Test and Install HDF5 on Cygwin /cygdrive/c/usr, which is the parent directory of directories "include" and "lib", - $ ./configure --with-zlib=/cygdrive/c/usr/include,/cygdrive/c/usr/lib + $ ./configure --with-zlib=/cygdrive/c/usr/include,/cygdrive/c/usr/lib Through the CPPFLAGS and LDFLAGS Variables @@ -189,7 +183,7 @@ Build, Test and Install HDF5 on Cygwin To specify the installation directories, using - $ ./configure --prefix="path for installation" + $ ./configure --prefix="path for installation" By default, HDF5 library, header files, examples, and support programs will be installed in /usr/local/lib, @@ -202,7 +196,7 @@ Build, Test and Install HDF5 on Cygwin All of the above switches can be combined together. For example, if users want to configure HDF5 C/C++/Fortran - library with szip library enabled, with zlib library at + library with Szip library enabled, with zlib library at /cygdrive/c/usr/, and install HDF5 into directory /cygdrive/c/hdf5 using gcc/g++ as C/C++ compiler and gfortran as fortran compiler @@ -238,15 +232,15 @@ Build, Test and Install HDF5 on Cygwin After configuration is done successfully, run the following series of commands to build, test and install HDF5 - $ make > "output file name" - $ make check > "output file name" + $ make > "output file name" + $ make check > "output file name" Before run "make install", check output file for "make check", there should be no failures at all. 6. Make Install - $ make install > "output file name" + $ make install > "output file name" 7. Check installed HDF5 library @@ -256,8 +250,7 @@ Build, Test and Install HDF5 on Cygwin 8. Known Problems - dt_arith tests may fail due to the use of fork. This is a known issue - with cygwin on Windows. + cache_api tests may fail. This is a known issue with Cygwin. "make check" fails when building shared lib files is enabled. The default on Cygwin has been changed to disable shared. It can be enabled with diff --git a/release_docs/NEWSLETTER.txt b/release_docs/NEWSLETTER.txt index 19c11ac4b39..55b7dcd8203 100644 --- a/release_docs/NEWSLETTER.txt +++ b/release_docs/NEWSLETTER.txt @@ -17,3 +17,4 @@ This is a maintenance release with a few changes and updates: ---------------------------------------------------------------------------- Please see the full release notes for detailed information regarding this release, including a detailed list of changes. 
+ diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index ef3f1af4912..d10c6d7f0b3 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -461,17 +461,37 @@ Bug Fixes since HDF5-1.14.3 release Library ------- - - Fixed a cache assert with too-large metadata objects + - Fixed potential buffer read overflows in H5PB_read - If the library tries to load a metadata object that is above the - library's hard-coded limits, the size will trip an assert in debug - builds. In HDF5 1.14.4, this can happen if you create a very large - number of links in an old-style group that uses local heaps. + H5PB_read previously did not account for the fact that the size of the + read it's performing could overflow the page buffer pointer, depending + on the calculated offset for the read. This has been fixed by adjusting + the size of the read if it's determined that it would overflow the page. - The library will now emit a normal error when it tries to load a - metadata object that is too large. + - Fixed CVE-2017-17507 - Partially addresses GitHub #3762 + This CVE was previously declared fixed, but later testing with a static + build of HDF5 showed that it was not fixed. + + When parsing a malformed (fuzzed) compound type containing variable-length + string members, the library could produce a segmentation fault, crashing + the library. + + This was fixed after GitHub PR #4234 + + Fixes GitHub issue #3446 + + - Fixed a cache assert with very large metadata objects + + If the library tries to load a metadata object that is above a + certain size, this would trip an assert in debug builds. This could + happen if you create a very large number of links in an old-style + group that uses local heaps. + + There is no need for this assert. The library's metadata cache + can handle large objects. The assert has been removed. + + Fixes GitHub #3762 - Fixed an issue with the Subfiling VFD and multiple opens of a file @@ -552,6 +572,27 @@ Bug Fixes since HDF5-1.14.3 release overwriting data with a shorter (top level) variable length sequence, an error could occur. This has been fixed. + - Take user block into account in H5Dchunk_iter() and H5Dget_chunk_info() + + The address reported by the following functions did not correctly + take the user block into account: + + * H5Dchunk_iter() <-- addr passed to callback + * H5Dget_chunk_info() <-- addr parameter + * H5Dget_chunk_info_by_coord() <-- addr parameter + + This means that these functions reported logical HDF5 file addresses, + which would only be equal to the physical addresses when there is no + user block prepended to the HDF5 file. This is unfortunate, as the + primary use of these functions is to get physical addresses in order + to directly access the chunks. + + The listed functions now correctly take the user block into account, + so they will emit physical addresses that can be used to directly + access the chunks. + + Fixes #3003 + - Fixed asserts raised by large values of H5Pset_est_link_info() parameters If large values for est_num_entries and/or est_name_len were passed @@ -959,6 +1000,11 @@ Platforms Tested Known Problems ============== + When building with the NAG Fortran compiler using the Autotools and libtool + 2.4.2 or earlier, the -shared flag will be missing '-Wl,', which will cause + compilation to fail. This is due to a bug in libtool that was fixed in 2012 + and released in 2.4.4 in 2014. 
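    The "Take user block into account" fix noted above changes the addresses that
    H5Dchunk_iter() and H5Dget_chunk_info() report. The following sketch is not
    part of this patch; the file name, dataset name, and my_chunk_cb callback are
    illustrative, and it assumes the HDF5 1.14 H5D_chunk_iter_op_t callback
    signature (offset, filter_mask, addr, size, op_data). It shows the intended
    use case: obtaining physical chunk addresses that can be handed directly to
    raw file I/O.

        #include <stdio.h>
        #include "hdf5.h"

        /* Called once per allocated chunk; with this fix, `addr` already
         * includes the user block, so it is a physical file offset. */
        static int
        my_chunk_cb(const hsize_t *offset, unsigned filter_mask, haddr_t addr,
                    hsize_t size, void *op_data)
        {
            (void)offset;
            (void)op_data;
            printf("chunk: addr %llu, %llu bytes, filter mask 0x%x\n",
                   (unsigned long long)addr, (unsigned long long)size, filter_mask);
            return H5_ITER_CONT;
        }

        int
        main(void)
        {
            hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
            hid_t dset = H5Dopen2(file, "/dset", H5P_DEFAULT);

            if (H5Dchunk_iter(dset, H5P_DEFAULT, my_chunk_cb, NULL) < 0)
                fprintf(stderr, "chunk iteration failed\n");

            H5Dclose(dset);
            H5Fclose(file);
            return 0;
        }

    Before this change the same callback received logical HDF5 file addresses, and
    callers had to add the user block size themselves (e.g. via H5Pget_userblock()
    on the file creation property list).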
+ When HDF5 is compiled with NVHPC versions 23.5 - 23.9 (additional versions may also be applicable) and with -O2 (or higher) and -DNDEBUG, test failures occur in the following tests: @@ -1000,6 +1046,9 @@ Known Problems implemented: (1) derived type argument passed by value (H5VLff.F90), and (2) support for REAL with KIND = 2 in intrinsic SPACING used in testing. + Fortran tests HDF5_1_8.F90 and HDF5_F03.F90 will fail with Cray compilers greater than + version 16.0 due to a compiler bug. The latest version verified as failing was version 17.0. + Several tests currently fail on certain platforms: MPI_TEST-t_bigio fails with spectrum-mpi on ppc64le platforms. diff --git a/src/H5Adense.c b/src/H5Adense.c index 80c3c94f733..48004d2aa70 100644 --- a/src/H5Adense.c +++ b/src/H5Adense.c @@ -1104,12 +1104,12 @@ H5A__dense_iterate(H5F_t *f, hid_t loc_id, const H5O_ainfo_t *ainfo, H5_index_t H5_iter_order_t order, hsize_t skip, hsize_t *last_attr, const H5A_attr_iter_op_t *attr_op, void *op_data) { - H5HF_t *fheap = NULL; /* Fractal heap handle */ - H5HF_t *shared_fheap = NULL; /* Fractal heap handle for shared header messages */ - H5A_attr_table_t atable = {0, NULL}; /* Table of attributes */ - H5B2_t *bt2 = NULL; /* v2 B-tree handle for index */ - haddr_t bt2_addr; /* Address of v2 B-tree to use for lookup */ - herr_t ret_value = FAIL; /* Return value */ + H5HF_t *fheap = NULL; /* Fractal heap handle */ + H5HF_t *shared_fheap = NULL; /* Fractal heap handle for shared header messages */ + H5A_attr_table_t atable = {0, 0, NULL}; /* Table of attributes */ + H5B2_t *bt2 = NULL; /* v2 B-tree handle for index */ + haddr_t bt2_addr; /* Address of v2 B-tree to use for lookup */ + herr_t ret_value = FAIL; /* Return value */ FUNC_ENTER_PACKAGE @@ -1499,12 +1499,12 @@ herr_t H5A__dense_remove_by_idx(H5F_t *f, const H5O_ainfo_t *ainfo, H5_index_t idx_type, H5_iter_order_t order, hsize_t n) { - H5HF_t *fheap = NULL; /* Fractal heap handle */ - H5HF_t *shared_fheap = NULL; /* Fractal heap handle for shared header messages */ - H5A_attr_table_t atable = {0, NULL}; /* Table of attributes */ - H5B2_t *bt2 = NULL; /* v2 B-tree handle for index */ - haddr_t bt2_addr; /* Address of v2 B-tree to use for operation */ - herr_t ret_value = SUCCEED; /* Return value */ + H5HF_t *fheap = NULL; /* Fractal heap handle */ + H5HF_t *shared_fheap = NULL; /* Fractal heap handle for shared header messages */ + H5A_attr_table_t atable = {0, 0, NULL}; /* Table of attributes */ + H5B2_t *bt2 = NULL; /* v2 B-tree handle for index */ + haddr_t bt2_addr; /* Address of v2 B-tree to use for operation */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -1586,7 +1586,7 @@ H5A__dense_remove_by_idx(H5F_t *f, const H5O_ainfo_t *ainfo, H5_index_t idx_type HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, FAIL, "error building table of attributes"); /* Check for skipping too many attributes */ - if (n >= atable.nattrs) + if (n >= atable.num_attrs) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid index specified"); /* Delete appropriate attribute from dense storage */ diff --git a/src/H5Aint.c b/src/H5Aint.c index b5d2601ba7f..3e8fa343f33 100644 --- a/src/H5Aint.c +++ b/src/H5Aint.c @@ -51,16 +51,9 @@ typedef struct { H5F_t *f; /* Pointer to file that fractal heap is in */ H5A_attr_table_t *atable; /* Pointer to attribute table to build */ - size_t curr_attr; /* Current attribute to operate on */ bool bogus_crt_idx; /* Whether bogus creation index values need to be set */ } H5A_compact_bt_ud_t; -/* Data exchange structure to use when 
building table of dense attributes for an object */ -typedef struct { - H5A_attr_table_t *atable; /* Pointer to attribute table to build */ - size_t curr_attr; /* Current attribute to operate on */ -} H5A_dense_bt_ud_t; - /* Data exchange structure to use when copying an attribute from _SRC to _DST */ typedef struct { const H5O_ainfo_t *ainfo; /* dense information */ @@ -1454,30 +1447,31 @@ H5A__compact_build_table_cb(H5O_t H5_ATTR_UNUSED *oh, H5O_mesg_t *mesg /*in,out* assert(mesg); /* Re-allocate the table if necessary */ - if (udata->curr_attr == udata->atable->nattrs) { + if (udata->atable->num_attrs == udata->atable->max_attrs) { H5A_t **new_table; /* New table for attributes */ size_t new_table_size; /* Number of attributes in new table */ /* Allocate larger table */ - new_table_size = MAX(1, 2 * udata->atable->nattrs); + new_table_size = MAX(1, 2 * udata->atable->max_attrs); if (NULL == (new_table = (H5A_t **)H5FL_SEQ_REALLOC(H5A_t_ptr, udata->atable->attrs, new_table_size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, H5_ITER_ERROR, "unable to extend attribute table"); /* Update table information in user data */ - udata->atable->attrs = new_table; - udata->atable->nattrs = new_table_size; + udata->atable->attrs = new_table; + udata->atable->max_attrs = new_table_size; } /* end if */ /* Copy attribute into table */ - if (NULL == (udata->atable->attrs[udata->curr_attr] = H5A__copy(NULL, (const H5A_t *)mesg->native))) + if (NULL == + (udata->atable->attrs[udata->atable->num_attrs] = H5A__copy(NULL, (const H5A_t *)mesg->native))) HGOTO_ERROR(H5E_ATTR, H5E_CANTCOPY, H5_ITER_ERROR, "can't copy attribute"); /* Assign [somewhat arbitrary] creation order value, if requested */ if (udata->bogus_crt_idx) - ((udata->atable->attrs[udata->curr_attr])->shared)->crt_idx = sequence; + ((udata->atable->attrs[udata->atable->num_attrs])->shared)->crt_idx = sequence; - /* Increment current attribute */ - udata->curr_attr++; + /* Increment attribute count */ + udata->atable->num_attrs++; done: FUNC_LEAVE_NOAPI(ret_value) @@ -1500,9 +1494,10 @@ herr_t H5A__compact_build_table(H5F_t *f, H5O_t *oh, H5_index_t idx_type, H5_iter_order_t order, H5A_attr_table_t *atable) { - H5A_compact_bt_ud_t udata; /* User data for iteration callback */ - H5O_mesg_operator_t op; /* Wrapper for operator */ - herr_t ret_value = SUCCEED; /* Return value */ + H5A_compact_bt_ud_t udata; /* User data for iteration callback */ + H5O_mesg_operator_t op; /* Wrapper for operator */ + bool iter_set_up = false; /* Is everything set up for iteration */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -1512,13 +1507,13 @@ H5A__compact_build_table(H5F_t *f, H5O_t *oh, H5_index_t idx_type, H5_iter_order assert(atable); /* Initialize table */ - atable->attrs = NULL; - atable->nattrs = 0; + atable->attrs = NULL; + atable->num_attrs = 0; + atable->max_attrs = 0; /* Set up user data for iteration */ - udata.f = f; - udata.atable = atable; - udata.curr_attr = 0; + udata.f = f; + udata.atable = atable; udata.bogus_crt_idx = (bool)((oh->version == H5O_VERSION_1 || !(oh->flags & H5O_HDR_ATTR_CRT_ORDER_TRACKED)) ? 
true : false); @@ -1526,20 +1521,23 @@ H5A__compact_build_table(H5F_t *f, H5O_t *oh, H5_index_t idx_type, H5_iter_order /* Iterate over existing attributes, checking for attribute with same name */ op.op_type = H5O_MESG_OP_LIB; op.u.lib_op = H5A__compact_build_table_cb; + iter_set_up = true; if (H5O__msg_iterate_real(f, oh, H5O_MSG_ATTR, &op, &udata) < 0) HGOTO_ERROR(H5E_ATTR, H5E_BADITER, FAIL, "error building attribute table"); - /* Correct # of attributes in table */ - atable->nattrs = udata.curr_attr; - /* Don't sort an empty table. */ - if (atable->nattrs > 0) { + if (atable->num_attrs > 0) /* Sort attribute table in correct iteration order */ if (H5A__attr_sort_table(atable, idx_type, order) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTSORT, FAIL, "error sorting attribute table"); - } /* end if */ done: + if (ret_value < 0) + /* Clean up partially built table on error */ + if (iter_set_up) + if (atable->attrs && H5A__attr_release_table(atable) < 0) + HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, FAIL, "unable to release attribute table"); + FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__compact_build_table() */ @@ -1556,26 +1554,26 @@ H5A__compact_build_table(H5F_t *f, H5O_t *oh, H5_index_t idx_type, H5_iter_order static herr_t H5A__dense_build_table_cb(const H5A_t *attr, void *_udata) { - H5A_dense_bt_ud_t *udata = (H5A_dense_bt_ud_t *)_udata; /* 'User data' passed in */ - herr_t ret_value = H5_ITER_CONT; /* Return value */ + H5A_attr_table_t *atable = (H5A_attr_table_t *)_udata; /* 'User data' passed in */ + herr_t ret_value = H5_ITER_CONT; /* Return value */ FUNC_ENTER_PACKAGE /* check arguments */ assert(attr); - assert(udata); - assert(udata->curr_attr < udata->atable->nattrs); + assert(atable); + assert(atable->num_attrs < atable->max_attrs); /* Allocate attribute for entry in the table */ - if (NULL == (udata->atable->attrs[udata->curr_attr] = H5FL_CALLOC(H5A_t))) + if (NULL == (atable->attrs[atable->num_attrs] = H5FL_CALLOC(H5A_t))) HGOTO_ERROR(H5E_ATTR, H5E_CANTALLOC, H5_ITER_ERROR, "can't allocate attribute"); /* Copy attribute information. Share the attribute object in copying. 
*/ - if (NULL == H5A__copy(udata->atable->attrs[udata->curr_attr], attr)) + if (NULL == H5A__copy(atable->attrs[atable->num_attrs], attr)) HGOTO_ERROR(H5E_ATTR, H5E_CANTCOPY, H5_ITER_ERROR, "can't copy attribute"); /* Increment number of attributes stored */ - udata->curr_attr++; + atable->num_attrs++; done: FUNC_LEAVE_NOAPI(ret_value) @@ -1621,22 +1619,18 @@ H5A__dense_build_table(H5F_t *f, const H5O_ainfo_t *ainfo, H5_index_t idx_type, if (H5B2_get_nrec(bt2_name, &nrec) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, FAIL, "can't retrieve # of records in index"); - /* Set size of table */ - H5_CHECK_OVERFLOW(nrec, /* From: */ hsize_t, /* To: */ size_t); - atable->nattrs = (size_t)nrec; - /* Allocate space for the table entries */ - if (atable->nattrs > 0) { - H5A_dense_bt_ud_t udata; /* User data for iteration callback */ + if (nrec > 0) { H5A_attr_iter_op_t attr_op; /* Attribute operator */ - /* Allocate the table to store the attributes */ - if ((atable->attrs = (H5A_t **)H5FL_SEQ_CALLOC(H5A_t_ptr, atable->nattrs)) == NULL) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); + /* Check for overflow on the downcast */ + H5_CHECK_OVERFLOW(nrec, /* From: */ hsize_t, /* To: */ size_t); - /* Set up user data for iteration */ - udata.atable = atable; - udata.curr_attr = 0; + /* Allocate the table to store the attributes */ + if (NULL == (atable->attrs = (H5A_t **)H5FL_SEQ_CALLOC(H5A_t_ptr, (size_t)nrec))) + HGOTO_ERROR(H5E_ATTR, H5E_CANTALLOC, FAIL, "memory allocation failed"); + atable->num_attrs = 0; + atable->max_attrs = (size_t)nrec; /* Build iterator operator */ attr_op.op_type = H5A_ATTR_OP_LIB; @@ -1644,7 +1638,7 @@ H5A__dense_build_table(H5F_t *f, const H5O_ainfo_t *ainfo, H5_index_t idx_type, /* Iterate over the links in the group, building a table of the link messages */ if (H5A__dense_iterate(f, (hid_t)0, ainfo, H5_INDEX_NAME, H5_ITER_NATIVE, (hsize_t)0, NULL, &attr_op, - &udata) < 0) + atable) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, FAIL, "error building attribute table"); /* Sort attribute table in correct iteration order */ @@ -1790,18 +1784,18 @@ H5A__attr_sort_table(H5A_attr_table_t *atable, H5_index_t idx_type, H5_iter_orde /* Pick appropriate comparison routine */ if (idx_type == H5_INDEX_NAME) { if (order == H5_ITER_INC) - qsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_name_inc); + qsort(atable->attrs, atable->num_attrs, sizeof(H5A_t *), H5A__attr_cmp_name_inc); else if (order == H5_ITER_DEC) - qsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_name_dec); + qsort(atable->attrs, atable->num_attrs, sizeof(H5A_t *), H5A__attr_cmp_name_dec); else assert(order == H5_ITER_NATIVE); } /* end if */ else { assert(idx_type == H5_INDEX_CRT_ORDER); if (order == H5_ITER_INC) - qsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_corder_inc); + qsort(atable->attrs, atable->num_attrs, sizeof(H5A_t *), H5A__attr_cmp_corder_inc); else if (order == H5_ITER_DEC) - qsort(atable->attrs, atable->nattrs, sizeof(H5A_t *), H5A__attr_cmp_corder_dec); + qsort(atable->attrs, atable->num_attrs, sizeof(H5A_t *), H5A__attr_cmp_corder_dec); else assert(order == H5_ITER_NATIVE); } /* end else */ @@ -1838,7 +1832,7 @@ H5A__attr_iterate_table(const H5A_attr_table_t *atable, hsize_t skip, hsize_t *l /* Iterate over attribute messages */ H5_CHECKED_ASSIGN(u, size_t, skip, hsize_t); - for (; u < atable->nattrs && !ret_value; u++) { + for (; u < atable->num_attrs && !ret_value; u++) { /* Check which type of callback to make */ switch 
(attr_op->op_type) { case H5A_ATTR_OP_APP2: { @@ -1905,19 +1899,20 @@ H5A__attr_release_table(H5A_attr_table_t *atable) assert(atable); /* Release attribute info, if any. */ - if (atable->nattrs > 0) { + if (atable->num_attrs > 0) { size_t u; /* Local index variable */ /* Free attribute message information */ - for (u = 0; u < atable->nattrs; u++) + for (u = 0; u < atable->num_attrs; u++) if (atable->attrs[u] && H5A__close(atable->attrs[u]) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTFREE, FAIL, "unable to release attribute"); + + /* Release array */ + atable->attrs = (H5A_t **)H5FL_SEQ_FREE(H5A_t_ptr, atable->attrs); } /* end if */ else assert(atable->attrs == NULL); - atable->attrs = (H5A_t **)H5FL_SEQ_FREE(H5A_t_ptr, atable->attrs); - done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5A__attr_release_table() */ diff --git a/src/H5Apkg.h b/src/H5Apkg.h index 64549c818c0..239d7550c46 100644 --- a/src/H5Apkg.h +++ b/src/H5Apkg.h @@ -145,8 +145,9 @@ typedef struct H5A_bt2_ud_ins_t { /* Data structure to hold table of attributes for an object */ typedef struct { - size_t nattrs; /* # of attributes in table */ - H5A_t **attrs; /* Pointer to array of attribute pointers */ + size_t num_attrs; /* Curr. # of attributes in table */ + size_t max_attrs; /* Max. # of attributes in table */ + H5A_t **attrs; /* Pointer to array of attribute pointers */ } H5A_attr_table_t; /*****************************/ diff --git a/src/H5Centry.c b/src/H5Centry.c index a799c4bb97d..6883e897186 100644 --- a/src/H5Centry.c +++ b/src/H5Centry.c @@ -1288,14 +1288,6 @@ H5C__load_entry(H5F_t *f, H5C__RESET_CACHE_ENTRY_STATS(entry); - /* This is a temporary fix for a problem identified in GitHub #3762, where - * it looks like a local heap entry can grow to a size that is larger - * than the metadata cache will allow. This doesn't fix the underlying - * problem, but it at least prevents the library from crashing. - */ - if (entry->size >= H5C_MAX_ENTRY_SIZE) - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cache entry size is too large"); - ret_value = thing; done: diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index e5b690e95c1..310f774e8da 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -250,9 +250,10 @@ typedef struct H5D_chunk_coll_fill_info_t { #endif /* H5_HAVE_PARALLEL */ typedef struct H5D_chunk_iter_ud_t { - H5D_chunk_iter_op_t op; /* User defined callback */ - void *op_data; /* User data for user defined callback */ - H5O_layout_chunk_t *chunk; /* Chunk layout */ + H5D_chunk_iter_op_t op; /* User defined callback */ + void *op_data; /* User data for user defined callback */ + H5O_layout_chunk_t *chunk; /* Chunk layout */ + haddr_t base_addr; /* Base address of the file, taking user block into account */ } H5D_chunk_iter_ud_t; /********************/ @@ -7850,7 +7851,7 @@ H5D__get_num_chunks(const H5D_t *dset, const H5S_t H5_ATTR_UNUSED *space, hsize_ /*------------------------------------------------------------------------- * Function: H5D__get_chunk_info_cb * - * Purpose: Get the chunk info of the queried chunk, given by its index. + * Purpose: Get the chunk info of the queried chunk, given by its index * * Return: Success: H5_ITER_CONT or H5_ITER_STOP * H5_ITER_STOP indicates the queried chunk is found @@ -7901,21 +7902,18 @@ H5D__get_chunk_info_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) * Note: Currently, the domain of the index in this function is of all * the written chunks, regardless the dataspace. 
* - * Return: Success: SUCCEED - * Failure: FAIL - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ herr_t H5D__get_chunk_info(const H5D_t *dset, const H5S_t H5_ATTR_UNUSED *space, hsize_t chk_index, hsize_t *offset, unsigned *filter_mask, haddr_t *addr, hsize_t *size) { - H5D_chk_idx_info_t idx_info; /* Chunked index info */ - H5D_chunk_info_iter_ud_t udata; /* User data for callback */ - const H5D_rdcc_t *rdcc = NULL; /* Raw data chunk cache */ - H5D_rdcc_ent_t *ent; /* Cache entry index */ - hsize_t ii = 0; /* Dimension index */ - herr_t ret_value = SUCCEED; /* Return value */ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + const H5D_rdcc_t *rdcc = NULL; /* Raw data chunk cache */ + H5D_rdcc_ent_t *ent; /* Cache entry index */ + hsize_t ii = 0; /* Dimension index */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_TAG(dset->oloc.addr) @@ -7947,6 +7945,9 @@ H5D__get_chunk_info(const H5D_t *dset, const H5S_t H5_ATTR_UNUSED *space, hsize_ /* If the chunk is written, get its info, otherwise, return without error */ if (H5_addr_defined(idx_info.storage->idx_addr)) { + + H5D_chunk_info_iter_ud_t udata; + /* Initialize before iteration */ udata.chunk_idx = chk_index; udata.curr_idx = 0; @@ -7967,14 +7968,14 @@ H5D__get_chunk_info(const H5D_t *dset, const H5S_t H5_ATTR_UNUSED *space, hsize_ if (filter_mask) *filter_mask = udata.filter_mask; if (addr) - *addr = udata.chunk_addr; + *addr = udata.chunk_addr + H5F_BASE_ADDR(dset->oloc.file); if (size) *size = udata.nbytes; if (offset) for (ii = 0; ii < udata.ndims; ii++) offset[ii] = udata.scaled[ii] * dset->shared->layout.u.chunk.dim[ii]; - } /* end if */ - } /* end if H5_addr_defined */ + } + } done: FUNC_LEAVE_NOAPI_TAG(ret_value) @@ -8039,12 +8040,11 @@ herr_t H5D__get_chunk_info_by_coord(const H5D_t *dset, const hsize_t *offset, unsigned *filter_mask, haddr_t *addr, hsize_t *size) { - const H5O_layout_t *layout = NULL; /* Dataset layout */ - const H5D_rdcc_t *rdcc = NULL; /* Raw data chunk cache */ - H5D_rdcc_ent_t *ent; /* Cache entry index */ - H5D_chk_idx_info_t idx_info; /* Chunked index info */ - H5D_chunk_info_iter_ud_t udata; /* User data for callback */ - herr_t ret_value = SUCCEED; /* Return value */ + const H5O_layout_t *layout = NULL; /* Dataset layout */ + const H5D_rdcc_t *rdcc = NULL; /* Raw data chunk cache */ + H5D_rdcc_ent_t *ent; /* Cache entry index */ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_TAG(dset->oloc.addr) @@ -8080,6 +8080,9 @@ H5D__get_chunk_info_by_coord(const H5D_t *dset, const hsize_t *offset, unsigned /* If the dataset is not written, return without errors */ if (H5_addr_defined(idx_info.storage->idx_addr)) { + + H5D_chunk_info_iter_ud_t udata; + /* Calculate the scaled of this chunk */ H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, udata.scaled); udata.scaled[dset->shared->ndims] = 0; @@ -8102,11 +8105,11 @@ H5D__get_chunk_info_by_coord(const H5D_t *dset, const hsize_t *offset, unsigned if (filter_mask) *filter_mask = udata.filter_mask; if (addr) - *addr = udata.chunk_addr; + *addr = udata.chunk_addr + H5F_BASE_ADDR(dset->oloc.file); if (size) *size = udata.nbytes; - } /* end if */ - } /* end if H5_addr_defined */ + } + } done: FUNC_LEAVE_NOAPI_TAG(ret_value) @@ -8115,33 +8118,32 @@ H5D__get_chunk_info_by_coord(const H5D_t *dset, const hsize_t *offset, unsigned 
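/*
 * Illustrative companion to the H5D__get_chunk_info() / H5D__get_chunk_info_by_coord()
 * changes above (not part of this patch): with the H5F_BASE_ADDR() adjustment, the
 * address returned through the public H5Dget_chunk_info() API is a physical file
 * offset. The helper below is hypothetical and assumes an already-open dataset ID.
 */
#include <stdio.h>
#include "hdf5.h"

static void
print_physical_chunk_addrs(hid_t dset_id)
{
    hsize_t nchunks = 0;

    /* Number of written chunks in the dataset (dataspace selection is unused) */
    if (H5Dget_num_chunks(dset_id, H5S_ALL, &nchunks) < 0)
        return;

    for (hsize_t i = 0; i < nchunks; i++) {
        hsize_t  offset[H5S_MAX_RANK];
        unsigned filter_mask = 0;
        haddr_t  addr        = HADDR_UNDEF;
        hsize_t  size        = 0;

        /* addr now accounts for any user block prepended to the file */
        if (H5Dget_chunk_info(dset_id, H5S_ALL, i, offset, &filter_mask, &addr, &size) >= 0)
            printf("chunk %llu: physical addr %llu, %llu bytes\n", (unsigned long long)i,
                   (unsigned long long)addr, (unsigned long long)size);
    }
}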
/*------------------------------------------------------------------------- * Function: H5D__chunk_iter_cb * - * Purpose: Call the user-defined function with the chunk data. The iterator continues if - * the user-defined function returns H5_ITER_CONT, and stops if H5_ITER_STOP is - * returned. + * Purpose: Call the user-defined function with the chunk data. The + * iterator continues if the user-defined function returns + * H5_ITER_CONT, and stops if H5_ITER_STOP is returned. * * Return: Success: H5_ITER_CONT or H5_ITER_STOP * Failure: Negative (H5_ITER_ERROR) - * *------------------------------------------------------------------------- */ static int H5D__chunk_iter_cb(const H5D_chunk_rec_t *chunk_rec, void *udata) { - const H5D_chunk_iter_ud_t *data = (H5D_chunk_iter_ud_t *)udata; - const H5O_layout_chunk_t *chunk = data->chunk; - int ret_value = H5_ITER_CONT; + const H5D_chunk_iter_ud_t *data = (H5D_chunk_iter_ud_t *)udata; + const H5O_layout_chunk_t *chunk = data->chunk; hsize_t offset[H5O_LAYOUT_NDIMS]; - unsigned ii; /* Match H5O_layout_chunk_t.ndims */ + int ret_value = H5_ITER_CONT; /* Similar to H5D__get_chunk_info */ - for (ii = 0; ii < chunk->ndims; ii++) - offset[ii] = chunk_rec->scaled[ii] * chunk->dim[ii]; + for (unsigned i = 0; i < chunk->ndims; i++) + offset[i] = chunk_rec->scaled[i] * chunk->dim[i]; FUNC_ENTER_PACKAGE_NOERR /* Check for callback failure and pass along return value */ - if ((ret_value = (data->op)(offset, (unsigned)chunk_rec->filter_mask, chunk_rec->chunk_addr, - (hsize_t)chunk_rec->nbytes, data->op_data)) < 0) + if ((ret_value = + (data->op)(offset, (unsigned)chunk_rec->filter_mask, data->base_addr + chunk_rec->chunk_addr, + (hsize_t)chunk_rec->nbytes, data->op_data)) < 0) HERROR(H5E_DATASET, H5E_CANTNEXT, "iteration operator failed"); FUNC_LEAVE_NOAPI(ret_value) @@ -8150,11 +8152,9 @@ H5D__chunk_iter_cb(const H5D_chunk_rec_t *chunk_rec, void *udata) /*------------------------------------------------------------------------- * Function: H5D__chunk_iter * - * Purpose: Iterate over all the chunks in the dataset with given callback. 
- * - * Return: Success: Non-negative - * Failure: Negative + * Purpose: Iterate over all the chunks in the dataset with given callback * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ herr_t @@ -8196,14 +8196,15 @@ H5D__chunk_iter(H5D_t *dset, H5D_chunk_iter_op_t op, void *op_data) H5D_chunk_iter_ud_t ud; /* Set up info for iteration callback */ - ud.op = op; - ud.op_data = op_data; - ud.chunk = &dset->shared->layout.u.chunk; + ud.op = op; + ud.op_data = op_data; + ud.chunk = &dset->shared->layout.u.chunk; + ud.base_addr = H5F_BASE_ADDR(dset->oloc.file); /* Iterate over the allocated chunks calling the iterator callback */ if ((ret_value = (layout->storage.u.chunk.ops->iterate)(&idx_info, H5D__chunk_iter_cb, &ud)) < 0) HERROR(H5E_DATASET, H5E_CANTNEXT, "chunk iteration failed"); - } /* end if H5_addr_defined */ + } done: FUNC_LEAVE_NOAPI_TAG(ret_value) diff --git a/src/H5Dint.c b/src/H5Dint.c index 8f363ebadbe..3b9d000f523 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -946,6 +946,13 @@ H5D__update_oh_info(H5F_t *file, H5D_t *dset, hid_t dapl_id) if (NULL == (oh = H5O_pin(oloc))) HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header"); + /* Check for creating dataset with unusual datatype */ + if (!(H5O_has_chksum(oh) || (H5F_RFIC_FLAGS(file) & H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS)) && + H5T_is_numeric_with_unusual_unused_bits(type)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, + "creating dataset with unusual datatype, see documentation for " + "H5Pset_relax_file_integrity_checks for details."); + /* Write the dataspace header message */ if (H5S_append(file, oh, dset->shared->space) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update dataspace header message"); diff --git a/src/H5Dio.c b/src/H5Dio.c index 62bb48c2a26..312c7fd854b 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -395,8 +395,12 @@ H5D__read(size_t count, H5D_dset_io_info_t *dset_info) H5AC_tag(dset_info[i].dset->oloc.addr, &prev_tag); /* Invoke correct "high level" I/O routine */ - if ((*dset_info[i].io_ops.multi_read)(&io_info, &dset_info[i]) < 0) + if ((*dset_info[i].io_ops.multi_read)(&io_info, &dset_info[i]) < 0) { + /* Reset metadata tagging */ + H5AC_tag(prev_tag, NULL); + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data"); + } /* Reset metadata tagging */ H5AC_tag(prev_tag, NULL); diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index 35d0edfe211..eda38863d89 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -224,7 +224,7 @@ typedef herr_t (*H5D_gather_func_t)(const void *dst_buf, size_t dst_buf_bytes_us * * \param[in] offset Logical position of the chunk's first element in units of dataset elements * \param[in] filter_mask Bitmask indicating the filters used when the chunk was written - * \param[in] addr Chunk address in the file + * \param[in] addr Chunk address in the file, taking the user block (if any) into account * \param[in] size Chunk size in bytes, 0 if the chunk does not exist * \param[in,out] op_data Pointer to any user-defined data associated with * the operation. 
@@ -669,7 +669,7 @@ H5_DLL herr_t H5Dget_num_chunks(hid_t dset_id, hid_t fspace_id, hsize_t *nchunks * \dset_id * \param[in] offset Logical position of the chunk's first element in units of dataset elements * \param[out] filter_mask Bitmask indicating the filters used when the chunk was written - * \param[out] addr Chunk address in the file + * \param[out] addr Chunk address in the file, taking the user block (if any) into account * \param[out] size Chunk size in bytes, 0 if the chunk does not exist * * \return \herr_t @@ -686,6 +686,9 @@ H5_DLL herr_t H5Dget_num_chunks(hid_t dset_id, hid_t fspace_id, hsize_t *nchunks * equal to the dataset's rank. Each element is the logical * position of the chunk's first element in a dimension. * + * \note Prior to HDF5 1.14.4, the reported address did not take the + * user block into account. + * * \since 1.10.5 * */ @@ -709,6 +712,9 @@ H5_DLL herr_t H5Dget_chunk_info_by_coord(hid_t dset_id, const hsize_t *offset, u * user supplied callback with the details of the chunk and the supplied * context \p op_data. * + * \note Prior to HDF5 1.14.4, the address passed to the callback did not take + * the user block into account. + * * \par Example * For each chunk, print the allocated chunk size (0 for unallocated chunks). * \snippet H5D_examples.c H5Dchunk_iter_cb @@ -731,7 +737,7 @@ H5_DLL herr_t H5Dchunk_iter(hid_t dset_id, hid_t dxpl_id, H5D_chunk_iter_op_t cb * \param[in] chk_idx Index of the chunk * \param[out] offset Logical position of the chunk's first element in units of dataset elements * \param[out] filter_mask Bitmask indicating the filters used when the chunk was written - * \param[out] addr Chunk address in the file + * \param[out] addr Chunk address in the file, taking the user block (if any) into account * \param[out] size Chunk size in bytes, 0 if the chunk does not exist * * \return \herr_t @@ -745,6 +751,9 @@ H5_DLL herr_t H5Dchunk_iter(hid_t dset_id, hid_t dxpl_id, H5D_chunk_iter_op_t cb * address to #HADDR_UNDEF. The value pointed to by filter_mask will * not be modified. \c NULL can be passed in for any \p out parameters. * + * \note Prior to HDF5 1.14.4, the reported address did not take the + * user block into account. + * * \p chk_idx is the chunk index in the selection. 
The index value * may have a value of 0 up to the number of chunks stored in * the file that has a nonempty intersection with the file diff --git a/src/H5Eint.c b/src/H5Eint.c index a4ba5b2d4b9..70848ecd7c2 100644 --- a/src/H5Eint.c +++ b/src/H5Eint.c @@ -730,13 +730,13 @@ H5E__push_stack(H5E_t *estack, const char *file, const char *func, unsigned line if (estack->nused < H5E_NSLOTS) { /* Increment the IDs to indicate that they are used in this stack */ - if (H5I_inc_ref(cls_id, false) < 0) + if (H5I_inc_ref_noherr(cls_id, false) < 0) HGOTO_DONE(FAIL); estack->slot[estack->nused].cls_id = cls_id; - if (H5I_inc_ref(maj_id, false) < 0) + if (H5I_inc_ref_noherr(maj_id, false) < 0) HGOTO_DONE(FAIL); estack->slot[estack->nused].maj_num = maj_id; - if (H5I_inc_ref(min_id, false) < 0) + if (H5I_inc_ref_noherr(min_id, false) < 0) HGOTO_DONE(FAIL); estack->slot[estack->nused].min_num = min_id; /* The 'func' & 'file' strings are statically allocated (by the compiler) diff --git a/src/H5Fint.c b/src/H5Fint.c index 325947656bf..3f5a1379834 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -438,6 +438,8 @@ H5F_get_access_plist(H5F_t *f, bool app_ref) 0) HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set initial metadata cache resize config."); + if (H5P_set(new_plist, H5F_ACS_RFIC_FLAGS_NAME, &(f->shared->rfic_flags)) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set RFIC flags value"); /* Prepare the driver property */ driver_prop.driver_id = f->shared->lf->driver_id; @@ -1230,6 +1232,8 @@ H5F__new(H5F_shared_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5F if (H5P_get(plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, &(f->shared->mdc_initCacheImageCfg)) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get initial metadata cache resize config"); + if (H5P_get(plist, H5F_ACS_RFIC_FLAGS_NAME, &(f->shared->rfic_flags)) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get RFIC flags value"); /* Get the VFD values to cache */ f->shared->maxaddr = H5FD_get_maxaddr(lf); diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index 60de31ebdfd..7e12ff111d0 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -304,6 +304,7 @@ struct H5F_shared_t { bool use_file_locking; /* Whether or not to use file locking */ bool ignore_disabled_locks; /* Whether or not to ignore disabled file locking */ bool closing; /* File is in the process of being closed */ + uint64_t rfic_flags; /* Relaxed file integrity check (RFIC) flags */ /* Cached VOL connector ID & info */ hid_t vol_id; /* ID of VOL connector for the container */ diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index 682e938120c..d2b1b887a7f 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -101,6 +101,7 @@ typedef struct H5F_t H5F_t; #define H5F_VOL_CLS(F) ((F)->shared->vol_cls) #define H5F_VOL_OBJ(F) ((F)->vol_obj) #define H5F_USE_FILE_LOCKING(F) ((F)->shared->use_file_locking) +#define H5F_RFIC_FLAGS(F) ((F)->shared->rfic_flags) #else /* H5F_MODULE */ #define H5F_LOW_BOUND(F) (H5F_get_low_bound(F)) #define H5F_HIGH_BOUND(F) (H5F_get_high_bound(F)) @@ -165,6 +166,7 @@ typedef struct H5F_t H5F_t; #define H5F_VOL_CLS(F) (H5F_get_vol_cls(F)) #define H5F_VOL_OBJ(F) (H5F_get_vol_obj(F)) #define H5F_USE_FILE_LOCKING(F) (H5F_get_use_file_locking(F)) +#define H5F_RFIC_FLAGS(F) (H5F_get_rfic_flags(F)) #endif /* H5F_MODULE */ /* Macros to encode/decode offset/length's for storing in the file */ @@ -282,6 +284,7 @@ typedef struct H5F_t H5F_t; #define H5F_ACS_MPI_PARAMS_COMM_NAME "mpi_params_comm" /* the MPI communicator */ #define 
H5F_ACS_MPI_PARAMS_INFO_NAME "mpi_params_info" /* the MPI info struct */ #endif /* H5_HAVE_PARALLEL */ +#define H5F_ACS_RFIC_FLAGS_NAME "rfic_flags" /* Relaxed file integrity check (RFIC) flags */ /* ======================== File Mount properties ====================*/ #define H5F_MNT_SYM_LOCAL_NAME "local" /* Whether absolute symlinks local to file. */ @@ -525,7 +528,8 @@ H5_DLL bool H5F_get_min_dset_ohdr(const H5F_t *f); H5_DLL herr_t H5F_set_min_dset_ohdr(H5F_t *f, bool minimize); H5_DLL const H5VL_class_t *H5F_get_vol_cls(const H5F_t *f); H5_DLL H5VL_object_t *H5F_get_vol_obj(const H5F_t *f); -H5_DLL bool H5F_get_file_locking(const H5F_t *f); +H5_DLL bool H5F_get_use_file_locking(const H5F_t *f); +H5_DLL uint64_t H5F_get_rfic_flags(const H5F_t *f); /* Functions than retrieve values set/cached from the superblock/FCPL */ H5_DLL haddr_t H5F_get_base_addr(const H5F_t *f); diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h index 551a345d85a..39f13929060 100644 --- a/src/H5Fpublic.h +++ b/src/H5Fpublic.h @@ -241,6 +241,20 @@ typedef struct H5F_retry_info_t { */ typedef herr_t (*H5F_flush_cb_t)(hid_t object_id, void *udata); +/* + * These are the bits that can be passed to the `flags' argument of + * H5Pset_relax_file_integrity_checks(). Use the bit-wise OR operator (|) to + * combine them as needed. + */ +#define H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS \ + (0x0001u) /**< Suppress errors for numeric datatypes with an unusually \ + * high number of unused bits. See documentation for \ + * H5Pset_relax_file_integrity_checks for details. */ +#define H5F_RFIC_ALL \ + (H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS) /**< Suppress all format integrity check errors. See \ + * documentation for H5Pset_relax_file_integrity_checks \ + * for details. */ + /*********************/ /* Public Prototypes */ /*********************/ diff --git a/src/H5Fquery.c b/src/H5Fquery.c index 44a52c8dbfc..89181dad479 100644 --- a/src/H5Fquery.c +++ b/src/H5Fquery.c @@ -1356,16 +1356,16 @@ H5F__get_cont_info(const H5F_t *f, H5VL_file_cont_info_t *info) } /* end H5F_get_cont_info */ /*------------------------------------------------------------------------- - * Function: H5F_get_file_locking + * Function: H5F_get_use_file_locking * - * Purpose: Get the file locking flag for the file + * Purpose: Get the 'use file locking' flag for the file * * Return: true/false * *------------------------------------------------------------------------- */ bool -H5F_get_file_locking(const H5F_t *f) +H5F_get_use_file_locking(const H5F_t *f) { FUNC_ENTER_NOAPI_NOINIT_NOERR @@ -1373,7 +1373,7 @@ H5F_get_file_locking(const H5F_t *f) assert(f->shared); FUNC_LEAVE_NOAPI(f->shared->use_file_locking) -} /* end H5F_get_file_locking */ +} /* end H5F_get_use_file_locking */ /*------------------------------------------------------------------------- * Function: H5F_has_vector_select_io @@ -1401,3 +1401,23 @@ H5F_has_vector_select_io(const H5F_t *f, bool is_write) FUNC_LEAVE_NOAPI(ret_value) } /* end H5F_has_vector_select_io */ + +/*------------------------------------------------------------------------- + * Function: H5F_get_rfic_flags + * + * Purpose: Get the relaxed file integrity checks (RFIC) flags for the file + * + * Return: RFIC flags for a file on success (which can be 0), can't fail + * + *------------------------------------------------------------------------- + */ +uint64_t +H5F_get_rfic_flags(const H5F_t *f) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + assert(f); + assert(f->shared); + + FUNC_LEAVE_NOAPI(f->shared->rfic_flags) +} /* end 
H5F_get_rfic_flags */ diff --git a/src/H5HG.c b/src/H5HG.c index 7037376118d..3709c705566 100644 --- a/src/H5HG.c +++ b/src/H5HG.c @@ -559,9 +559,13 @@ H5HG_read(H5F_t *f, H5HG_t *hobj, void *object /*out*/, size_t *buf_size) /* Load the heap */ if (NULL == (heap = H5HG__protect(f, hobj->addr, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect global heap"); + if (hobj->idx >= heap->nused) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "bad heap index, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); + if (NULL == heap->obj[hobj->idx].begin) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "bad heap pointer, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); - assert(hobj->idx < heap->nused); - assert(heap->obj[hobj->idx].begin); size = heap->obj[hobj->idx].size; p = heap->obj[hobj->idx].begin + H5HG_SIZEOF_OBJHDR(f); @@ -631,8 +635,12 @@ H5HG_link(H5F_t *f, const H5HG_t *hobj, int adjust) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap"); if (adjust != 0) { - assert(hobj->idx < heap->nused); - assert(heap->obj[hobj->idx].begin); + if (hobj->idx >= heap->nused) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap index, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); + if (NULL == heap->obj[hobj->idx].begin) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap pointer, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); if ((heap->obj[hobj->idx].nrefs + adjust) < 0) HGOTO_ERROR(H5E_HEAP, H5E_BADRANGE, FAIL, "new link count would be out of range"); if ((heap->obj[hobj->idx].nrefs + adjust) > H5HG_MAXLINK) @@ -678,8 +686,13 @@ H5HG_get_obj_size(H5F_t *f, H5HG_t *hobj, size_t *obj_size) if (NULL == (heap = H5HG__protect(f, hobj->addr, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap"); - assert(hobj->idx < heap->nused); - assert(heap->obj[hobj->idx].begin); + /* Sanity check the heap object */ + if (hobj->idx >= heap->nused) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap index, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); + if (NULL == heap->obj[hobj->idx].begin) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap pointer, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); /* Set object size */ *obj_size = heap->obj[hobj->idx].size; @@ -722,14 +735,22 @@ H5HG_remove(H5F_t *f, H5HG_t *hobj) if (NULL == (heap = H5HG__protect(f, hobj->addr, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap"); - assert(hobj->idx < heap->nused); + /* Sanity check the heap object (split around bugfix below) */ + if (hobj->idx >= heap->nused) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap index, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); /* When the application selects the same location to rewrite the VL element by using H5Sselect_elements, * it can happen that the entry has been removed by first rewrite. Here we simply skip the removal of * the entry and let the second rewrite happen (see HDFFV-10635). In the future, it'd be nice to handle * this situation in H5T_conv_vlen in H5Tconv.c instead of this level (HDFFV-10648). 
*/ if (heap->obj[hobj->idx].nrefs == 0 && heap->obj[hobj->idx].size == 0 && !heap->obj[hobj->idx].begin) - HGOTO_DONE(ret_value); + HGOTO_DONE(SUCCEED); + + /* Finish sanity checking the heap object */ + if (NULL == heap->obj[hobj->idx].begin) + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap pointer, heap object = {%" PRIxHADDR ", %zu}", + hobj->addr, hobj->idx); obj_start = heap->obj[hobj->idx].begin; /* Include object header size */ diff --git a/src/H5Iint.c b/src/H5Iint.c index fe3b90c2454..1df3ae907a8 100644 --- a/src/H5Iint.c +++ b/src/H5Iint.c @@ -1230,6 +1230,27 @@ H5I_dec_app_ref_always_close_async(hid_t id, void **token) FUNC_LEAVE_NOAPI(ret_value) } /* end H5I_dec_app_ref_always_close_async() */ +/*------------------------------------------------------------------------- + * Function: H5I_do_inc_ref + * + * Purpose: Helper function for H5I_inc_ref/H5I_inc_ref_noherr to + * actually increment the reference count for an object. + * + * Return: The new reference count (can't fail) + * + *------------------------------------------------------------------------- + */ +static inline int +H5I_do_inc_ref(H5I_id_info_t *info, bool app_ref) +{ + /* Adjust reference counts */ + ++(info->count); + if (app_ref) + ++(info->app_count); + + return (int)(app_ref ? info->app_count : info->count); +} + /*------------------------------------------------------------------------- * Function: H5I_inc_ref * @@ -1255,18 +1276,59 @@ H5I_inc_ref(hid_t id, bool app_ref) if (NULL == (info = H5I__find_id(id))) HGOTO_ERROR(H5E_ID, H5E_BADID, (-1), "can't locate ID"); - /* Adjust reference counts */ - ++(info->count); - if (app_ref) - ++(info->app_count); - /* Set return value */ - ret_value = (int)(app_ref ? info->app_count : info->count); + ret_value = H5I_do_inc_ref(info, app_ref); done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5I_inc_ref() */ +/*------------------------------------------------------------------------- + * Function: H5I_inc_ref_noherr + * + * Purpose: Increment the reference count for an object. Exactly like + * H5I_inc_ref, except that it makes use of HGOTO_DONE on + * failure instead of HGOTO_ERROR. This function is + * specifically meant to be used in the H5E package, where we + * have to avoid calling any function or macro that may call + * HGOTO_ERROR and similar. Otherwise, we can cause a stack + * overflow that looks like (for example): + * + * H5E_printf_stack() + * H5E__push_stack() + * H5I_inc_ref() + * H5I__find_id() (FAIL) + * HGOTO_ERROR() + * H5E_printf_stack() + * ... 
+ * + * Return: Success: The new reference count + * Failure: -1 + * + *------------------------------------------------------------------------- + */ +int +H5I_inc_ref_noherr(hid_t id, bool app_ref) +{ + H5I_id_info_t *info = NULL; /* Pointer to the ID info */ + int ret_value = 0; /* Return value */ + + FUNC_ENTER_NOAPI_NOERR + + /* Sanity check */ + assert(id >= 0); + + /* General lookup of the ID */ + if (NULL == (info = H5I__find_id(id))) + HGOTO_DONE((-1)); + + /* Set return value */ + ret_value = H5I_do_inc_ref(info, app_ref); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5I_inc_ref_noherr() */ + /*------------------------------------------------------------------------- * Function: H5I_get_ref * diff --git a/src/H5Iprivate.h b/src/H5Iprivate.h index 75a5787b616..83fdacc686f 100644 --- a/src/H5Iprivate.h +++ b/src/H5Iprivate.h @@ -68,6 +68,7 @@ H5_DLL H5I_type_t H5I_get_type(hid_t id); H5_DLL herr_t H5I_iterate(H5I_type_t type, H5I_search_func_t func, void *udata, bool app_ref); H5_DLL int H5I_get_ref(hid_t id, bool app_ref); H5_DLL int H5I_inc_ref(hid_t id, bool app_ref); +H5_DLL int H5I_inc_ref_noherr(hid_t id, bool app_ref); H5_DLL int H5I_dec_ref(hid_t id); H5_DLL int H5I_dec_app_ref(hid_t id); H5_DLL int H5I_dec_app_ref_async(hid_t id, void **token); diff --git a/src/H5Mpublic.h b/src/H5Mpublic.h index 48625a55382..e108a5c25d7 100644 --- a/src/H5Mpublic.h +++ b/src/H5Mpublic.h @@ -32,33 +32,37 @@ /* Macros defining operation IDs for map VOL callbacks (implemented using the * "optional" VOL callback) */ -#define H5VL_MAP_CREATE 1 -#define H5VL_MAP_OPEN 2 -#define H5VL_MAP_GET_VAL 3 -#define H5VL_MAP_EXISTS 4 -#define H5VL_MAP_PUT 5 -#define H5VL_MAP_GET 6 -#define H5VL_MAP_SPECIFIC 7 -#define H5VL_MAP_OPTIONAL 8 -#define H5VL_MAP_CLOSE 9 +#define H5VL_MAP_CREATE 1 /**< Callback operation ID for map create */ +#define H5VL_MAP_OPEN 2 /**< Callback operation ID for map open */ +#define H5VL_MAP_GET_VAL 3 /**< Callback operation ID for getting an associated value from a map */ +#define H5VL_MAP_EXISTS 4 /**< Callback operation ID for checking if a value exists in a map */ +#define H5VL_MAP_PUT 5 /**< Callback operation ID for putting a key-value pair to a map */ +#define H5VL_MAP_GET 6 /**< Callback operation ID for map get callback */ +#define H5VL_MAP_SPECIFIC 7 /**< Callback operation ID for map specific operation */ +#define H5VL_MAP_OPTIONAL 8 /**< Currently unused */ +#define H5VL_MAP_CLOSE 9 /**< Callback operation ID for terminating access to a map */ /*******************/ /* Public Typedefs */ /*******************/ -/* types for map GET callback */ +/** + * Types for map GET callback + */ typedef enum H5VL_map_get_t { - H5VL_MAP_GET_MAPL, /* map access property list */ - H5VL_MAP_GET_MCPL, /* map creation property list */ - H5VL_MAP_GET_KEY_TYPE, /* key type */ - H5VL_MAP_GET_VAL_TYPE, /* value type */ - H5VL_MAP_GET_COUNT /* key count */ + H5VL_MAP_GET_MAPL, /**< Callback operation ID for getting map access property list */ + H5VL_MAP_GET_MCPL, /**< Callback operation ID for getting map creation property list */ + H5VL_MAP_GET_KEY_TYPE, /**< Callback operation ID for getting the key datatype for a map */ + H5VL_MAP_GET_VAL_TYPE, /**< Callback operation ID for getting the value datatype for a map */ + H5VL_MAP_GET_COUNT /**< Callback operation ID for getting the number of key-value pairs stored in a map */ } H5VL_map_get_t; -/* types for map SPECIFIC callback */ +/** + * Types for map SPECIFIC callback + */ typedef enum H5VL_map_specific_t { - H5VL_MAP_ITER, /* 
H5Miterate */ - H5VL_MAP_DELETE /* H5Mdelete */ + H5VL_MAP_ITER, /**< Callback operation ID for iterating over all key-value pairs stored in the map */ + H5VL_MAP_DELETE /**< Callback operation ID for deleting a key-value pair stored in the map */ } H5VL_map_specific_t; //! @@ -68,112 +72,116 @@ typedef enum H5VL_map_specific_t { typedef herr_t (*H5M_iterate_t)(hid_t map_id, const void *key, void *op_data); //! -/* Parameters for map operations */ +/** + * Parameters for map operations + */ typedef union H5VL_map_args_t { - /* H5VL_MAP_CREATE */ + + /** H5VL_MAP_CREATE */ struct { - H5VL_loc_params_t loc_params; /* Location parameters for object */ - const char *name; /* Name of new map object */ - hid_t lcpl_id; /* Link creation property list for map */ - hid_t key_type_id; /* Datatype for map keys */ - hid_t val_type_id; /* Datatype for map values */ - hid_t mcpl_id; /* Map creation property list */ - hid_t mapl_id; /* Map access property list */ - void *map; /* Pointer to newly created map object (OUT) */ + H5VL_loc_params_t loc_params; /**< Location parameters for object */ + const char *name; /**< Name of new map object */ + hid_t lcpl_id; /**< Link creation property list for map */ + hid_t key_type_id; /**< Datatype for map keys */ + hid_t val_type_id; /**< Datatype for map values */ + hid_t mcpl_id; /**< Map creation property list */ + hid_t mapl_id; /**< Map access property list */ + void *map; /**< Pointer to newly created map object (OUT) */ } create; - /* H5VL_MAP_OPEN */ + /** H5VL_MAP_OPEN */ struct { - H5VL_loc_params_t loc_params; /* Location parameters for object */ - const char *name; /* Name of new map object */ - hid_t mapl_id; /* Map access property list */ - void *map; /* Pointer to newly created map object (OUT) */ + H5VL_loc_params_t loc_params; /**< Location parameters for object */ + const char *name; /**< Name of new map object */ + hid_t mapl_id; /**< Map access property list */ + void *map; /**< Pointer to newly created map object (OUT) */ } open; - /* H5VL_MAP_GET_VAL */ + /** H5VL_MAP_GET_VAL */ struct { - hid_t key_mem_type_id; /* Memory datatype for key */ - const void *key; /* Pointer to key */ - hid_t value_mem_type_id; /* Memory datatype for value */ - void *value; /* Buffer for value (OUT) */ + hid_t key_mem_type_id; /**< Memory datatype for key */ + const void *key; /**< Pointer to key */ + hid_t value_mem_type_id; /**< Memory datatype for value */ + void *value; /**< Buffer for value (OUT) */ } get_val; - /* H5VL_MAP_EXISTS */ + /** H5VL_MAP_EXISTS */ struct { - hid_t key_mem_type_id; /* Memory datatype for key */ - const void *key; /* Pointer to key */ - hbool_t exists; /* Flag indicating whether key exists in map (OUT) */ + hid_t key_mem_type_id; /**< Memory datatype for key */ + const void *key; /**< Pointer to key */ + hbool_t exists; /**< Flag indicating whether key exists in map (OUT) */ } exists; - /* H5VL_MAP_PUT */ + /** H5VL_MAP_PUT */ struct { - hid_t key_mem_type_id; /* Memory datatype for key */ - const void *key; /* Pointer to key */ - hid_t value_mem_type_id; /* Memory datatype for value */ - const void *value; /* Pointer to value */ + hid_t key_mem_type_id; /**< Memory datatype for key */ + const void *key; /**< Pointer to key */ + hid_t value_mem_type_id; /**< Memory datatype for value */ + const void *value; /**< Pointer to value */ } put; - /* H5VL_MAP_GET */ + /** H5VL_MAP_GET */ struct { - H5VL_map_get_t get_type; /* 'get' operation to perform */ + H5VL_map_get_t get_type; /**< 'get' operation to perform */ - /* Parameters for each 
operation */ + /** Parameters for each operation */ union { - /* H5VL_MAP_GET_MAPL */ + /** H5VL_MAP_GET_MAPL */ struct { - hid_t mapl_id; /* Map access property list ID (OUT) */ + hid_t mapl_id; /**< Get map access property list ID (OUT) */ } get_mapl; - /* H5VL_MAP_GET_MCPL */ + /** H5VL_MAP_GET_MCPL */ struct { - hid_t mcpl_id; /* Map creation property list ID (OUT) */ + hid_t mcpl_id; /**< Get map creation property list ID (OUT) */ } get_mcpl; - /* H5VL_MAP_GET_KEY_TYPE */ + /** H5VL_MAP_GET_KEY_TYPE */ struct { - hid_t type_id; /* Datatype ID for map's keys (OUT) */ + hid_t type_id; /**< Get datatype ID for map's keys (OUT) */ } get_key_type; - /* H5VL_MAP_GET_VAL_TYPE */ + /** H5VL_MAP_GET_VAL_TYPE */ struct { - hid_t type_id; /* Datatype ID for map's values (OUT) */ + hid_t type_id; /**< Get datatype ID for map's values (OUT) */ } get_val_type; - /* H5VL_MAP_GET_COUNT */ + /** H5VL_MAP_GET_COUNT */ struct { - hsize_t count; /* # of KV pairs in map (OUT) */ + hsize_t count; /**< Get number of key-value pairs in the map (OUT) */ } get_count; } args; } get; - /* H5VL_MAP_SPECIFIC */ + /** H5VL_MAP_SPECIFIC */ struct { - H5VL_map_specific_t specific_type; /* 'specific' operation to perform */ + H5VL_map_specific_t specific_type; + /**< 'specific' operation to perform */ - /* Parameters for each operation */ + /** Parameters for each operation */ union { - /* H5VL_MAP_ITER */ + /* H5VL_MAP_ITER specific operation */ struct { - H5VL_loc_params_t loc_params; /* Location parameters for object */ - hsize_t idx; /* Start/end iteration index (IN/OUT) */ - hid_t key_mem_type_id; /* Memory datatype for key */ - H5M_iterate_t op; /* Iteration callback routine */ - void *op_data; /* Pointer to callback context */ + H5VL_loc_params_t loc_params; /**< Location parameters for object */ + hsize_t idx; /**< Start/end iteration index (IN/OUT) */ + hid_t key_mem_type_id; /**< Memory datatype for key */ + H5M_iterate_t op; /**< Iteration callback routine */ + void *op_data; /**< Pointer to callback context */ } iterate; - /* H5VL_MAP_DELETE */ + /* H5VL_MAP_DELETE specific operation */ struct { - H5VL_loc_params_t loc_params; /* Location parameters for object */ - hid_t key_mem_type_id; /* Memory datatype for key */ - const void *key; /* Pointer to key */ + H5VL_loc_params_t loc_params; /**< Location parameters for object */ + hid_t key_mem_type_id; /**< Memory datatype for key */ + const void *key; /**< Pointer to key */ } del; } args; } specific; - /* H5VL_MAP_OPTIONAL */ + /** H5VL_MAP_OPTIONAL */ /* Unused */ - /* H5VL_MAP_CLOSE */ + /** H5VL_MAP_CLOSE */ /* No args */ } H5VL_map_args_t; diff --git a/src/H5Oainfo.c b/src/H5Oainfo.c index 8b82e39e2a6..8b4340d2392 100644 --- a/src/H5Oainfo.c +++ b/src/H5Oainfo.c @@ -138,19 +138,25 @@ H5O__ainfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUS ainfo->max_crt_idx = H5O_MAX_CRT_ORDER_IDX; /* Address of fractal heap to store "dense" attributes */ + H5_GCC_CLANG_DIAG_OFF("type-limits") if (H5_IS_BUFFER_OVERFLOW(p, sizeof_addr, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5_GCC_CLANG_DIAG_ON("type-limits") H5F_addr_decode(f, &p, &(ainfo->fheap_addr)); /* Address of v2 B-tree to index names of attributes (names are always indexed) */ + H5_GCC_CLANG_DIAG_OFF("type-limits") if (H5_IS_BUFFER_OVERFLOW(p, sizeof_addr, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5_GCC_CLANG_DIAG_ON("type-limits") H5F_addr_decode(f, &p, 
&(ainfo->name_bt2_addr)); /* Address of v2 B-tree to index creation order of links, if there is one */ if (ainfo->index_corder) { + H5_GCC_CLANG_DIAG_OFF("type-limits") if (H5_IS_BUFFER_OVERFLOW(p, sizeof_addr, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5_GCC_CLANG_DIAG_ON("type-limits") H5F_addr_decode(f, &p, &(ainfo->corder_bt2_addr)); } else diff --git a/src/H5Oattribute.c b/src/H5Oattribute.c index 88c595ea213..4929be56717 100644 --- a/src/H5Oattribute.c +++ b/src/H5Oattribute.c @@ -211,6 +211,13 @@ H5O__attr_create(const H5O_loc_t *loc, H5A_t *attr) if (NULL == (oh = H5O_pin(loc))) HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin object header"); + /* Check for creating attribute with unusual datatype */ + if (!(H5O_has_chksum(oh) || (H5F_RFIC_FLAGS(loc->file) & H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS)) && + H5T_is_numeric_with_unusual_unused_bits(attr->shared->dt)) + HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, FAIL, + "creating attribute with unusual datatype, see documentation for " + "H5Pset_relax_file_integrity_checks for details."); + /* Check if this object already has attribute information */ if (oh->version > H5O_VERSION_1) { bool new_ainfo = false; /* Flag to indicate that the attribute information is new */ @@ -1171,10 +1178,10 @@ herr_t H5O_attr_iterate_real(hid_t loc_id, const H5O_loc_t *loc, H5_index_t idx_type, H5_iter_order_t order, hsize_t skip, hsize_t *last_attr, const H5A_attr_iter_op_t *attr_op, void *op_data) { - H5O_t *oh = NULL; /* Pointer to actual object header */ - H5O_ainfo_t ainfo; /* Attribute information for object */ - H5A_attr_table_t atable = {0, NULL}; /* Table of attributes */ - herr_t ret_value = FAIL; /* Return value */ + H5O_t *oh = NULL; /* Pointer to actual object header */ + H5O_ainfo_t ainfo; /* Attribute information for object */ + H5A_attr_table_t atable = {0, 0, NULL}; /* Table of attributes */ + herr_t ret_value = FAIL; /* Return value */ FUNC_ENTER_NOAPI_NOINIT_TAG(loc->addr) @@ -1223,7 +1230,7 @@ H5O_attr_iterate_real(hid_t loc_id, const H5O_loc_t *loc, H5_index_t idx_type, H oh = NULL; /* Check for skipping too many attributes */ - if (skip > 0 && skip >= atable.nattrs) + if (skip > 0 && skip >= atable.num_attrs) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid index specified"); /* Iterate over attributes in table */ @@ -1293,8 +1300,8 @@ H5O__attr_iterate(hid_t loc_id, H5_index_t idx_type, H5_iter_order_t order, hsiz static herr_t H5O__attr_remove_update(const H5O_loc_t *loc, H5O_t *oh, H5O_ainfo_t *ainfo) { - H5A_attr_table_t atable = {0, NULL}; /* Table of attributes */ - herr_t ret_value = SUCCEED; /* Return value */ + H5A_attr_table_t atable = {0, 0, NULL}; /* Table of attributes */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -1530,11 +1537,11 @@ H5O__attr_remove(const H5O_loc_t *loc, const char *name) herr_t H5O__attr_remove_by_idx(const H5O_loc_t *loc, H5_index_t idx_type, H5_iter_order_t order, hsize_t n) { - H5O_t *oh = NULL; /* Pointer to actual object header */ - H5O_ainfo_t ainfo; /* Attribute information for object */ - htri_t ainfo_exists = false; /* Whether the attribute info exists in the file */ - H5A_attr_table_t atable = {0, NULL}; /* Table of attributes */ - herr_t ret_value = SUCCEED; /* Return value */ + H5O_t *oh = NULL; /* Pointer to actual object header */ + H5O_ainfo_t ainfo; /* Attribute information for object */ + htri_t ainfo_exists = false; /* Whether the attribute info exists in the file */ + H5A_attr_table_t atable = 
{0, 0, NULL}; /* Table of attributes */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_TAG(loc->addr) @@ -1568,7 +1575,7 @@ H5O__attr_remove_by_idx(const H5O_loc_t *loc, H5_index_t idx_type, H5_iter_order HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, FAIL, "error building attribute table"); /* Check for skipping too many attributes */ - if (n >= atable.nattrs) + if (n >= atable.num_attrs) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid index specified"); /* Set up user data for callback, to remove the attribute by name */ diff --git a/src/H5Odtype.c b/src/H5Odtype.c index 05652df0fe2..674d8d4ea1c 100644 --- a/src/H5Odtype.c +++ b/src/H5Odtype.c @@ -176,6 +176,14 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT16DECODE(*pp, dt->shared->u.atomic.offset); UINT16DECODE(*pp, dt->shared->u.atomic.prec); + + /* Sanity checks */ + if (dt->shared->u.atomic.offset >= (dt->shared->size * 8)) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADRANGE, FAIL, "integer offset out of bounds"); + if (0 == dt->shared->u.atomic.prec) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "precision is zero"); + if (((dt->shared->u.atomic.offset + dt->shared->u.atomic.prec) - 1) >= (dt->shared->size * 8)) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADRANGE, FAIL, "integer offset+precision out of bounds"); break; case H5T_FLOAT: @@ -212,6 +220,8 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "unknown floating-point normalization"); } dt->shared->u.atomic.u.f.sign = (flags >> 8) & 0xff; + if (dt->shared->u.atomic.u.f.sign >= (dt->shared->size * 8)) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADRANGE, FAIL, "sign bit position out of bounds"); if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 2 + 2, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); @@ -224,6 +234,11 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t dt->shared->u.atomic.u.f.esize = *(*pp)++; if (dt->shared->u.atomic.u.f.esize == 0) HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "exponent size can't be zero"); + if (dt->shared->u.atomic.u.f.epos >= (dt->shared->size * 8)) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADRANGE, FAIL, "exponent starting position out of bounds"); + if (((dt->shared->u.atomic.u.f.epos + dt->shared->u.atomic.u.f.esize) - 1) >= + (dt->shared->size * 8)) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADRANGE, FAIL, "exponent range out of bounds"); if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 1 + 1, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); @@ -231,10 +246,30 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t dt->shared->u.atomic.u.f.msize = *(*pp)++; if (dt->shared->u.atomic.u.f.msize == 0) HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "mantissa size can't be zero"); + if (dt->shared->u.atomic.u.f.mpos >= (dt->shared->size * 8)) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADRANGE, FAIL, "mantissa starting position out of bounds"); + if (((dt->shared->u.atomic.u.f.mpos + dt->shared->u.atomic.u.f.msize) - 1) >= + (dt->shared->size * 8)) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADRANGE, FAIL, "mantissa range out of bounds"); if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); UINT32DECODE(*pp, dt->shared->u.atomic.u.f.ebias); + + /* Sanity check 
bits don't overlap */ + if (H5_RANGE_OVERLAP(dt->shared->u.atomic.u.f.sign, dt->shared->u.atomic.u.f.sign, + dt->shared->u.atomic.u.f.epos, + ((dt->shared->u.atomic.u.f.epos + dt->shared->u.atomic.u.f.esize) - 1))) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "exponent and sign positions overlap"); + if (H5_RANGE_OVERLAP(dt->shared->u.atomic.u.f.sign, dt->shared->u.atomic.u.f.sign, + dt->shared->u.atomic.u.f.mpos, + ((dt->shared->u.atomic.u.f.mpos + dt->shared->u.atomic.u.f.msize) - 1))) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "mantissa and sign positions overlap"); + if (H5_RANGE_OVERLAP(dt->shared->u.atomic.u.f.epos, + ((dt->shared->u.atomic.u.f.epos + dt->shared->u.atomic.u.f.esize) - 1), + dt->shared->u.atomic.u.f.mpos, + ((dt->shared->u.atomic.u.f.mpos + dt->shared->u.atomic.u.f.msize) - 1))) + HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "mantissa and exponent positions overlap"); break; case H5T_TIME: @@ -378,9 +413,11 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t /* Decode the field offset */ /* (starting with version 3 of the datatype message, use the minimum # of bytes required) */ if (version >= H5O_DTYPE_VERSION_3) { + H5_GCC_CLANG_DIAG_OFF("type-limits") if (H5_IS_KNOWN_BUFFER_OVERFLOW(skip, *pp, offset_nbytes, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, FAIL, "ran off end of input buffer while decoding"); + H5_GCC_CLANG_DIAG_ON("type-limits") UINT32DECODE_VAR(*pp, dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset, offset_nbytes); } @@ -452,6 +489,13 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t } if (temp_type->shared->size == 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "type size can't be zero"); + if ((dt->shared->u.compnd.memb[dt->shared->u.compnd.nmembs].offset + + temp_type->shared->size) > dt->shared->size) { + if (H5T_close_real(temp_type) < 0) + HDONE_ERROR(H5E_DATATYPE, H5E_CANTRELEASE, FAIL, "can't release datatype info"); + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, + "member type extends outside its parent compound type"); + } /* Upgrade the version if we can and it is necessary */ if (can_upgrade && temp_type->shared->version > version) { @@ -770,6 +814,19 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "unknown datatype class found"); } + /* Check for numeric type w/unusual # of unused bits */ + if (H5T_is_numeric_with_unusual_unused_bits(dt)) + /* Throw an error if the object header is not checksummed, unless the + * H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS flag is set with + * H5Pset_relax_file_integrity_checks() to suppress it. + */ + if (!(*ioflags & H5O_DECODEIO_RFIC_UNUBNT)) + HGOTO_ERROR( + H5E_DATATYPE, H5E_BADVALUE, FAIL, + "datatype has unusually large # of unused bits (prec = %zu bits, size = %zu bytes), possibly " + "corrupted file. See documentation for H5Pset_relax_file_integrity_checks for details.", + dt->shared->u.atomic.prec, dt->shared->size); + done: /* Cleanup on error */ if (ret_value < 0) @@ -1307,8 +1364,8 @@ H5O__dtype_encode_helper(uint8_t **pp, const H5T_t *dt) function using malloc() and is returned to the caller. 
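All of the new overlap checks reduce to one inclusive-range test: two ranges [L1,H1] and [L2,H2] are disjoint only if one begins after the other ends. A self-contained sketch of that test, mirroring the H5S_RANGE_OVERLAP definition that the H5_RANGE_OVERLAP macro generalizes (the bit positions below are example values only):

#include <assert.h>
#include <stdbool.h>

/* Two inclusive ranges [l1,h1] and [l2,h2] overlap unless one starts past the
 * other's end -- the same predicate as (!((L1) > (H2) || (L2) > (H1))). */
static bool range_overlap(unsigned l1, unsigned h1, unsigned l2, unsigned h2)
{
    return !(l1 > h2 || l2 > h1);
}

int main(void)
{
    /* A sign bit at position 31 does not overlap an exponent field in bits 23..30 */
    assert(!range_overlap(31, 31, 23, 30));
    /* A mantissa spanning bits 0..23 would overlap that exponent field */
    assert(range_overlap(0, 23, 23, 30));
    return 0;
}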
--------------------------------------------------------------------------*/ static void * -H5O__dtype_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, - unsigned *ioflags /*in,out*/, size_t p_size, const uint8_t *p) +H5O__dtype_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, unsigned *ioflags /*in,out*/, + size_t p_size, const uint8_t *p) { bool skip; H5T_t *dt = NULL; @@ -1331,6 +1388,17 @@ H5O__dtype_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsign */ skip = (p_size == SIZE_MAX ? true : false); + /* Indicate if the object header has a checksum, or if the + * H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS flag is set */ + if (open_oh) { + if (H5O_SIZEOF_CHKSUM_OH(open_oh) > 0 || + (f && (H5F_RFIC_FLAGS(f) & H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS))) + *ioflags |= H5O_DECODEIO_RFIC_UNUBNT; + } + else + /* Decode operations from non-object headers are assumed to be checksummed */ + *ioflags |= H5O_DECODEIO_RFIC_UNUBNT; + /* Perform actual decode of message */ if (H5O__dtype_decode_helper(ioflags, &p, dt, skip, p_end) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, NULL, "can't decode type"); diff --git a/src/H5Oefl.c b/src/H5Oefl.c index ebd92a733ba..57e5e6991df 100644 --- a/src/H5Oefl.c +++ b/src/H5Oefl.c @@ -140,6 +140,9 @@ H5O__efl_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED #endif for (size_t u = 0; u < mesg->nused; u++) { + + hsize_t offset = 0; + /* Name */ if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); @@ -156,7 +159,8 @@ H5O__efl_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED /* File offset */ if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); - H5F_DECODE_LENGTH(f, p, mesg->slot[u].offset); + H5F_DECODE_LENGTH(f, p, offset); /* Decode into an hsize_t to avoid sign warnings */ + mesg->slot[u].offset = (HDoff_t)offset; /* Size */ if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f), p_end)) diff --git a/src/H5Oint.c b/src/H5Oint.c index 16fea4b3cd7..022ee439947 100644 --- a/src/H5Oint.c +++ b/src/H5Oint.c @@ -2918,3 +2918,23 @@ H5O__reset_info2(H5O_info2_t *oinfo) FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5O__reset_info2() */ + +/*------------------------------------------------------------------------- + * Function: H5O_has_chksum + * + * Purpose: Returns true if object header is checksummed + * + * Return: true/false on success, can't fail + * + *------------------------------------------------------------------------- + */ +bool +H5O_has_chksum(const H5O_t *oh) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + /* Check args */ + assert(oh); + + FUNC_LEAVE_NOAPI(H5O_SIZEOF_CHKSUM_OH(oh) > 0) +} /* end H5O_has_chksum() */ diff --git a/src/H5Olayout.c b/src/H5Olayout.c index fc0f59ee60d..d14e0009953 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -392,10 +392,16 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNU case H5D_CHUNK_IDX_SINGLE: /* Single Chunk Index */ if (mesg->u.chunk.flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) { + uint64_t nbytes = 0; + if (H5_IS_BUFFER_OVERFLOW(p, H5F_sizeof_size(f) + 4, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); - H5F_DECODE_LENGTH(f, p, mesg->storage.u.chunk.u.single.nbytes); + + H5F_DECODE_LENGTH(f, p, nbytes); + 
H5_CHECKED_ASSIGN(mesg->storage.u.chunk.u.single.nbytes, uint32_t, nbytes, + uint64_t); + UINT32DECODE(p, mesg->storage.u.chunk.u.single.filter_mask); } diff --git a/src/H5Olinfo.c b/src/H5Olinfo.c index 830e4e3113c..6b5dc0b89b5 100644 --- a/src/H5Olinfo.c +++ b/src/H5Olinfo.c @@ -140,6 +140,9 @@ H5O__linfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUS if (H5_IS_BUFFER_OVERFLOW(p, 8, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); INT64DECODE(p, linfo->max_corder); + if (linfo->max_corder < 0) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, + "invalid max creation order value for message: %" PRId64, linfo->max_corder); } else linfo->max_corder = 0; @@ -156,8 +159,10 @@ H5O__linfo_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUS /* Address of v2 B-tree to index creation order of links, if there is one */ if (linfo->index_corder) { + H5_GCC_CLANG_DIAG_OFF("type-limits") if (H5_IS_BUFFER_OVERFLOW(p, addr_size, p_end)) HGOTO_ERROR(H5E_OHDR, H5E_OVERFLOW, NULL, "ran off end of input buffer while decoding"); + H5_GCC_CLANG_DIAG_ON("type-limits") H5F_addr_decode(f, &p, &(linfo->corder_bt2_addr)); } else diff --git a/src/H5Opkg.h b/src/H5Opkg.h index 4c719bf340e..8e32f3ae13a 100644 --- a/src/H5Opkg.h +++ b/src/H5Opkg.h @@ -150,6 +150,8 @@ /* Input/output flags for decode functions */ #define H5O_DECODEIO_NOCHANGE 0x01u /* IN: do not modify values */ #define H5O_DECODEIO_DIRTY 0x02u /* OUT: message has been changed */ +#define H5O_DECODEIO_RFIC_UNUBNT \ + 0x04u /* IN: Relax file integrity checks for unusual numbers of unused bits in numeric datatypes */ /* Macro to incremend ndecode_dirtied (only if we are debugging) */ #ifndef NDEBUG diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h index 6f3d39d1e83..968a23caada 100644 --- a/src/H5Oprivate.h +++ b/src/H5Oprivate.h @@ -932,6 +932,7 @@ H5_DLL time_t H5O_get_oh_mtime(const H5O_t *oh); H5_DLL uint8_t H5O_get_oh_version(const H5O_t *oh); H5_DLL herr_t H5O_get_rc_and_type(const H5O_loc_t *oloc, unsigned *rc, H5O_type_t *otype); H5_DLL H5AC_proxy_entry_t *H5O_get_proxy(const H5O_t *oh); +H5_DLL bool H5O_has_chksum(const H5O_t *oh); /* Object header message routines */ H5_DLL herr_t H5O_msg_create(const H5O_loc_t *loc, unsigned type_id, unsigned mesg_flags, diff --git a/src/H5PB.c b/src/H5PB.c index fc09cd56e96..69707d14cba 100644 --- a/src/H5PB.c +++ b/src/H5PB.c @@ -726,7 +726,7 @@ H5PB_read(H5F_shared_t *f_sh, H5FD_mem_t type, haddr_t addr, size_t size, void * if (H5FD_MEM_DRAW == type) { last_page_addr = ((addr + size - 1) / page_buf->page_size) * page_buf->page_size; - /* How many pages does this write span */ + /* How many pages does this read span */ num_touched_pages = (last_page_addr / page_buf->page_size + 1) - (first_page_addr / page_buf->page_size); if (first_page_addr == last_page_addr) { @@ -835,6 +835,10 @@ H5PB_read(H5F_shared_t *f_sh, H5FD_mem_t type, haddr_t addr, size_t size, void * offset = (0 == i ? addr - page_entry->addr : 0); buf_offset = (0 == i ? 
0 : size - access_size); + /* Account for reads that would overflow a page */ + if (offset + access_size > page_buf->page_size) + access_size = page_buf->page_size - offset; + /* copy the requested data from the page into the input buffer */ H5MM_memcpy((uint8_t *)buf + buf_offset, (uint8_t *)page_entry->page_buf_ptr + offset, access_size); @@ -905,6 +909,11 @@ H5PB_read(H5F_shared_t *f_sh, H5FD_mem_t type, haddr_t addr, size_t size, void * /* Copy the requested data from the page into the input buffer */ offset = (0 == i ? addr - search_addr : 0); buf_offset = (0 == i ? 0 : size - access_size); + + /* Account for reads that would overflow a page */ + if (offset + access_size > page_buf->page_size) + access_size = page_buf->page_size - offset; + H5MM_memcpy((uint8_t *)buf + buf_offset, (uint8_t *)new_page_buf + offset, access_size); /* Create the new PB entry */ diff --git a/src/H5Pencdec.c b/src/H5Pencdec.c index 4d1d8187591..19d453279ee 100644 --- a/src/H5Pencdec.c +++ b/src/H5Pencdec.c @@ -272,6 +272,41 @@ H5P__encode_double(const void *value, void **_pp, size_t *size) FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5P__encode_double() */ +/*------------------------------------------------------------------------- + * Function: H5P__encode_uint64_t + * + * Purpose: Generic encoding callback routine for 'uint64_t' properties. + * + * Return: Success: Non-negative + * Failure: Negative + * + *------------------------------------------------------------------------- + */ +herr_t +H5P__encode_uint64_t(const void *value, void **_pp, size_t *size) +{ + uint8_t **pp = (uint8_t **)_pp; + + FUNC_ENTER_PACKAGE_NOERR + + /* Sanity checks */ + assert(value); + assert(size); + + if (NULL != *pp) { + /* Encode the size */ + *(*pp)++ = (uint8_t)sizeof(uint64_t); + + /* Encode the value */ + UINT64ENCODE(*pp, *(const unsigned *)value); + } /* end if */ + + /* Set size needed for encoding */ + *size += (1 + sizeof(uint64_t)); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5P__encode_uint64_t() */ + /*-------------------------------------------------------------------------- NAME H5P__encode_cb @@ -611,6 +646,42 @@ H5P__decode_double(const void **_pp, void *_value) FUNC_LEAVE_NOAPI(ret_value) } /* end H5P__decode_double() */ +/*------------------------------------------------------------------------- + * Function: H5P__decode_uint64_t + * + * Purpose: Generic decoding callback routine for 'uint64_t' properties. 
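H5P__encode_uint64_t above writes a one-byte size followed by the value, and the matching decoder rejects any other advertised width. A standalone round trip of that size-prefixed layout; the byte order here is illustrative, since the library's UINT64ENCODE/UINT64DECODE macros define the real encoding:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Encode: one length byte, then the value a byte at a time (LSB first here;
 * the order is only for illustration). */
static size_t encode_u64(uint8_t *buf, uint64_t v)
{
    buf[0] = (uint8_t)sizeof(uint64_t);
    for (size_t i = 0; i < sizeof(uint64_t); i++)
        buf[1 + i] = (uint8_t)(v >> (8 * i));
    return 1 + sizeof(uint64_t);
}

/* Decode: verify the advertised width before reading, as the patch does. */
static int decode_u64(const uint8_t *buf, uint64_t *v)
{
    if (buf[0] != sizeof(uint64_t))
        return -1; /* analogous to "uint64_t value can't be decoded" */
    *v = 0;
    for (size_t i = 0; i < sizeof(uint64_t); i++)
        *v |= (uint64_t)buf[1 + i] << (8 * i);
    return 0;
}

int main(void)
{
    uint8_t  buf[1 + sizeof(uint64_t)];
    uint64_t out = 0;

    encode_u64(buf, UINT64_C(0x1122334455667788));
    assert(decode_u64(buf, &out) == 0 && out == UINT64_C(0x1122334455667788));
    printf("round trip ok: 0x%llx\n", (unsigned long long)out);
    return 0;
}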
+ * + * Return: Success: Non-negative + * Failure: Negative + * + *------------------------------------------------------------------------- + */ +herr_t +H5P__decode_uint64_t(const void **_pp, void *_value) +{ + uint64_t *value = (uint64_t *)_value; /* Property value to return */ + const uint8_t **pp = (const uint8_t **)_pp; + unsigned enc_size; /* Size of encoded property */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity checks */ + assert(pp); + assert(*pp); + assert(value); + + /* Decode the size */ + enc_size = *(*pp)++; + if (enc_size != sizeof(uint64_t)) + HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "uint64_t value can't be decoded"); + + UINT64DECODE(*pp, *value); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5P__decode_uint64_t() */ + /*------------------------------------------------------------------------- NAME H5P__decode diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index 65c21408a3f..80922bd9d1d 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -333,6 +333,11 @@ #endif #define H5F_ACS_IGNORE_DISABLED_FILE_LOCKS_ENC H5P__encode_bool #define H5F_ACS_IGNORE_DISABLED_FILE_LOCKS_DEC H5P__decode_bool +/* Definition for 'rfic' flags */ +#define H5F_ACS_RFIC_FLAGS_SIZE sizeof(uint64_t) +#define H5F_ACS_RFIC_FLAGS_DEF 0 +#define H5F_ACS_RFIC_FLAGS_ENC H5P__encode_uint64_t +#define H5F_ACS_RFIC_FLAGS_DEC H5P__decode_uint64_t /******************/ /* Local Typedefs */ @@ -529,6 +534,7 @@ static const bool H5F_def_use_file_locking_g = H5F_ACS_USE_FILE_LOCKING_DEF; /* Default use file locking flag */ static const bool H5F_def_ignore_disabled_file_locks_g = H5F_ACS_IGNORE_DISABLED_FILE_LOCKS_DEF; /* Default ignore disabled file locks flag */ +static const uint64_t H5F_def_rfic_flags_g = H5F_ACS_RFIC_FLAGS_DEF; /* Default 'rfic' flags */ /*------------------------------------------------------------------------- * Function: H5P__facc_reg_prop @@ -826,6 +832,12 @@ H5P__facc_reg_prop(H5P_genclass_t *pclass) H5F_ACS_IGNORE_DISABLED_FILE_LOCKS_DEC, NULL, NULL, NULL, NULL) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class"); + /* Register the 'rfic' flags. */ + if (H5P__register_real(pclass, H5F_ACS_RFIC_FLAGS_NAME, H5F_ACS_RFIC_FLAGS_SIZE, &H5F_def_rfic_flags_g, + NULL, NULL, NULL, H5F_ACS_RFIC_FLAGS_ENC, H5F_ACS_RFIC_FLAGS_DEC, NULL, NULL, NULL, + NULL) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class"); + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5P__facc_reg_prop() */ @@ -6264,3 +6276,75 @@ H5P__facc_vol_close(const char H5_ATTR_UNUSED *name, size_t H5_ATTR_UNUSED size, done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5P__facc_vol_close() */ + +/*------------------------------------------------------------------------- + * Function: H5Pset_relax_file_integrity_checks + * + * Purpose: Relax certain file integrity checks that may issue errors + * for valid files that have the potential for incorrect library + * behavior when data is incorrect or corrupted. 
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pset_relax_file_integrity_checks(hid_t plist_id, uint64_t flags) +{ + H5P_genplist_t *plist; /* Property list pointer */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "iUL", plist_id, flags); + + /* Check arguments */ + if (H5P_DEFAULT == plist_id) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "can't modify default property list"); + if (flags & (uint64_t)~H5F_RFIC_ALL) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid flags"); + + /* Get the property list structure */ + if (NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_ACCESS))) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "plist_id is not a file access property list"); + + /* Set value */ + if (H5P_set(plist, H5F_ACS_RFIC_FLAGS_NAME, &flags) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set relaxed file integrity check flags"); + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pset_relax_file_integrity_checks() */ + +/*------------------------------------------------------------------------- + * Function: H5Pget_relax_file_integrity_checks + * + * Purpose: Retrieve relaxed file integrity check flags + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pget_relax_file_integrity_checks(hid_t plist_id, uint64_t *flags /*out*/) +{ + H5P_genplist_t *plist; /* Property list pointer */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "i*UL", plist_id, flags); + + if (H5P_DEFAULT == plist_id) + plist_id = H5P_FILE_ACCESS_DEFAULT; + + /* Get the property list structure */ + if (NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_ACCESS))) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "plist_id is not a file access property list"); + + /* Get value */ + if (flags) + if (H5P_get(plist, H5F_ACS_RFIC_FLAGS_NAME, flags) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get relaxed file integrity check flags"); + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pget_relax_file_integrity_checks() */ diff --git a/src/H5Ppkg.h b/src/H5Ppkg.h index e249c7f9b2a..63baced637a 100644 --- a/src/H5Ppkg.h +++ b/src/H5Ppkg.h @@ -175,12 +175,14 @@ H5_DLL herr_t H5P__encode_unsigned(const void *value, void **_pp, size_t *size); H5_DLL herr_t H5P__encode_uint8_t(const void *value, void **_pp, size_t *size); H5_DLL herr_t H5P__encode_bool(const void *value, void **_pp, size_t *size); H5_DLL herr_t H5P__encode_double(const void *value, void **_pp, size_t *size); +H5_DLL herr_t H5P__encode_uint64_t(const void *value, void **_pp, size_t *size); H5_DLL herr_t H5P__decode_hsize_t(const void **_pp, void *value); H5_DLL herr_t H5P__decode_size_t(const void **_pp, void *value); H5_DLL herr_t H5P__decode_unsigned(const void **_pp, void *value); H5_DLL herr_t H5P__decode_uint8_t(const void **_pp, void *value); H5_DLL herr_t H5P__decode_bool(const void **_pp, void *value); H5_DLL herr_t H5P__decode_double(const void **_pp, void *value); +H5_DLL herr_t H5P__decode_uint64_t(const void **_pp, void *value); H5_DLL herr_t H5P__encode_coll_md_read_flag_t(const void *value, void **_pp, size_t *size); H5_DLL herr_t H5P__decode_coll_md_read_flag_t(const void **_pp, void *value); diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index ab809a58654..97f7ad13f7a 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -46,56 +46,182 @@ * 
The library's property list classes */ -#define H5P_ROOT (H5OPEN H5P_CLS_ROOT_ID_g) -#define H5P_OBJECT_CREATE (H5OPEN H5P_CLS_OBJECT_CREATE_ID_g) -#define H5P_FILE_CREATE (H5OPEN H5P_CLS_FILE_CREATE_ID_g) -#define H5P_FILE_ACCESS (H5OPEN H5P_CLS_FILE_ACCESS_ID_g) -#define H5P_DATASET_CREATE (H5OPEN H5P_CLS_DATASET_CREATE_ID_g) -#define H5P_DATASET_ACCESS (H5OPEN H5P_CLS_DATASET_ACCESS_ID_g) -#define H5P_DATASET_XFER (H5OPEN H5P_CLS_DATASET_XFER_ID_g) -#define H5P_FILE_MOUNT (H5OPEN H5P_CLS_FILE_MOUNT_ID_g) -#define H5P_GROUP_CREATE (H5OPEN H5P_CLS_GROUP_CREATE_ID_g) -#define H5P_GROUP_ACCESS (H5OPEN H5P_CLS_GROUP_ACCESS_ID_g) -#define H5P_DATATYPE_CREATE (H5OPEN H5P_CLS_DATATYPE_CREATE_ID_g) -#define H5P_DATATYPE_ACCESS (H5OPEN H5P_CLS_DATATYPE_ACCESS_ID_g) -#define H5P_MAP_CREATE (H5OPEN H5P_CLS_MAP_CREATE_ID_g) -#define H5P_MAP_ACCESS (H5OPEN H5P_CLS_MAP_ACCESS_ID_g) -#define H5P_STRING_CREATE (H5OPEN H5P_CLS_STRING_CREATE_ID_g) +/** + * Property list class root, is not user-accessible + */ +#define H5P_ROOT (H5OPEN H5P_CLS_ROOT_ID_g) +/** + * Object creation property list class, is not user-accessible + */ +#define H5P_OBJECT_CREATE (H5OPEN H5P_CLS_OBJECT_CREATE_ID_g) +/** + * File creation property list class + */ +#define H5P_FILE_CREATE (H5OPEN H5P_CLS_FILE_CREATE_ID_g) +/** + * File access property list class + */ +#define H5P_FILE_ACCESS (H5OPEN H5P_CLS_FILE_ACCESS_ID_g) +/** + * Dataset creation property list class + */ +#define H5P_DATASET_CREATE (H5OPEN H5P_CLS_DATASET_CREATE_ID_g) +/** + * Dataset access property list class + */ +#define H5P_DATASET_ACCESS (H5OPEN H5P_CLS_DATASET_ACCESS_ID_g) +/** + * Dataset transfer property list class + */ +#define H5P_DATASET_XFER (H5OPEN H5P_CLS_DATASET_XFER_ID_g) +/** + * File mount property list class + */ +#define H5P_FILE_MOUNT (H5OPEN H5P_CLS_FILE_MOUNT_ID_g) +/** + * Group creation property list class + */ +#define H5P_GROUP_CREATE (H5OPEN H5P_CLS_GROUP_CREATE_ID_g) +/** + * Group access property list class + */ +#define H5P_GROUP_ACCESS (H5OPEN H5P_CLS_GROUP_ACCESS_ID_g) +/** + * Datatype creation property list class + */ +#define H5P_DATATYPE_CREATE (H5OPEN H5P_CLS_DATATYPE_CREATE_ID_g) +/** + * Datatype access property list class + */ +#define H5P_DATATYPE_ACCESS (H5OPEN H5P_CLS_DATATYPE_ACCESS_ID_g) +/** + * Map creation property list class + */ +#define H5P_MAP_CREATE (H5OPEN H5P_CLS_MAP_CREATE_ID_g) +/** + * Map access property list class + */ +#define H5P_MAP_ACCESS (H5OPEN H5P_CLS_MAP_ACCESS_ID_g) +/** + * String creation property list class, is not user-accessible + */ +#define H5P_STRING_CREATE (H5OPEN H5P_CLS_STRING_CREATE_ID_g) +/** + * Attribute creation property list class + */ #define H5P_ATTRIBUTE_CREATE (H5OPEN H5P_CLS_ATTRIBUTE_CREATE_ID_g) +/** + * Attribute access property list class + */ #define H5P_ATTRIBUTE_ACCESS (H5OPEN H5P_CLS_ATTRIBUTE_ACCESS_ID_g) -#define H5P_OBJECT_COPY (H5OPEN H5P_CLS_OBJECT_COPY_ID_g) -#define H5P_LINK_CREATE (H5OPEN H5P_CLS_LINK_CREATE_ID_g) -#define H5P_LINK_ACCESS (H5OPEN H5P_CLS_LINK_ACCESS_ID_g) -#define H5P_VOL_INITIALIZE (H5OPEN H5P_CLS_VOL_INITIALIZE_ID_g) +/** + * Object copy property list class + */ +#define H5P_OBJECT_COPY (H5OPEN H5P_CLS_OBJECT_COPY_ID_g) +/** + * Link creation property list class + */ +#define H5P_LINK_CREATE (H5OPEN H5P_CLS_LINK_CREATE_ID_g) +/** + * Link access property list class + */ +#define H5P_LINK_ACCESS (H5OPEN H5P_CLS_LINK_ACCESS_ID_g) +/** + * VOL initialization property list class + */ +#define H5P_VOL_INITIALIZE (H5OPEN 
H5P_CLS_VOL_INITIALIZE_ID_g) +/** + * Reference access property list class + */ #define H5P_REFERENCE_ACCESS (H5OPEN H5P_CLS_REFERENCE_ACCESS_ID_g) /* * The library's default property lists */ -#define H5P_FILE_CREATE_DEFAULT (H5OPEN H5P_LST_FILE_CREATE_ID_g) -#define H5P_FILE_ACCESS_DEFAULT (H5OPEN H5P_LST_FILE_ACCESS_ID_g) -#define H5P_DATASET_CREATE_DEFAULT (H5OPEN H5P_LST_DATASET_CREATE_ID_g) -#define H5P_DATASET_ACCESS_DEFAULT (H5OPEN H5P_LST_DATASET_ACCESS_ID_g) -#define H5P_DATASET_XFER_DEFAULT (H5OPEN H5P_LST_DATASET_XFER_ID_g) -#define H5P_FILE_MOUNT_DEFAULT (H5OPEN H5P_LST_FILE_MOUNT_ID_g) -#define H5P_GROUP_CREATE_DEFAULT (H5OPEN H5P_LST_GROUP_CREATE_ID_g) -#define H5P_GROUP_ACCESS_DEFAULT (H5OPEN H5P_LST_GROUP_ACCESS_ID_g) -#define H5P_DATATYPE_CREATE_DEFAULT (H5OPEN H5P_LST_DATATYPE_CREATE_ID_g) -#define H5P_DATATYPE_ACCESS_DEFAULT (H5OPEN H5P_LST_DATATYPE_ACCESS_ID_g) -#define H5P_MAP_CREATE_DEFAULT (H5OPEN H5P_LST_MAP_CREATE_ID_g) -#define H5P_MAP_ACCESS_DEFAULT (H5OPEN H5P_LST_MAP_ACCESS_ID_g) +/** + * File creation default property list + */ +#define H5P_FILE_CREATE_DEFAULT (H5OPEN H5P_LST_FILE_CREATE_ID_g) +/** + * File access default property list + */ +#define H5P_FILE_ACCESS_DEFAULT (H5OPEN H5P_LST_FILE_ACCESS_ID_g) +/** + * Dataset creation default property list + */ +#define H5P_DATASET_CREATE_DEFAULT (H5OPEN H5P_LST_DATASET_CREATE_ID_g) +/** + * Dataset access default property list + */ +#define H5P_DATASET_ACCESS_DEFAULT (H5OPEN H5P_LST_DATASET_ACCESS_ID_g) +/** + * Dataset transfer default property list + */ +#define H5P_DATASET_XFER_DEFAULT (H5OPEN H5P_LST_DATASET_XFER_ID_g) +/** + * File mount default property list + */ +#define H5P_FILE_MOUNT_DEFAULT (H5OPEN H5P_LST_FILE_MOUNT_ID_g) +/** + * Group creation default property list + */ +#define H5P_GROUP_CREATE_DEFAULT (H5OPEN H5P_LST_GROUP_CREATE_ID_g) +/** + * Group access default property list + */ +#define H5P_GROUP_ACCESS_DEFAULT (H5OPEN H5P_LST_GROUP_ACCESS_ID_g) +/** + * Datytype creation default property list + */ +#define H5P_DATATYPE_CREATE_DEFAULT (H5OPEN H5P_LST_DATATYPE_CREATE_ID_g) +/** + * Datytype access default property list + */ +#define H5P_DATATYPE_ACCESS_DEFAULT (H5OPEN H5P_LST_DATATYPE_ACCESS_ID_g) +/** + * Map creation default property list + */ +#define H5P_MAP_CREATE_DEFAULT (H5OPEN H5P_LST_MAP_CREATE_ID_g) +/** + * Map access default property list + */ +#define H5P_MAP_ACCESS_DEFAULT (H5OPEN H5P_LST_MAP_ACCESS_ID_g) +/** + * Attribute creation default property list + */ #define H5P_ATTRIBUTE_CREATE_DEFAULT (H5OPEN H5P_LST_ATTRIBUTE_CREATE_ID_g) +/** + * Attribute access default property list + */ #define H5P_ATTRIBUTE_ACCESS_DEFAULT (H5OPEN H5P_LST_ATTRIBUTE_ACCESS_ID_g) -#define H5P_OBJECT_COPY_DEFAULT (H5OPEN H5P_LST_OBJECT_COPY_ID_g) -#define H5P_LINK_CREATE_DEFAULT (H5OPEN H5P_LST_LINK_CREATE_ID_g) -#define H5P_LINK_ACCESS_DEFAULT (H5OPEN H5P_LST_LINK_ACCESS_ID_g) -#define H5P_VOL_INITIALIZE_DEFAULT (H5OPEN H5P_LST_VOL_INITIALIZE_ID_g) +/** + * Object copy default property list + */ +#define H5P_OBJECT_COPY_DEFAULT (H5OPEN H5P_LST_OBJECT_COPY_ID_g) +/** + * Link creation default property list + */ +#define H5P_LINK_CREATE_DEFAULT (H5OPEN H5P_LST_LINK_CREATE_ID_g) +/** + * Link access default property list + */ +#define H5P_LINK_ACCESS_DEFAULT (H5OPEN H5P_LST_LINK_ACCESS_ID_g) +/** + * VOL initialization default property list + */ +#define H5P_VOL_INITIALIZE_DEFAULT (H5OPEN H5P_LST_VOL_INITIALIZE_ID_g) +/** + * Reference access default property list + */ #define 
H5P_REFERENCE_ACCESS_DEFAULT (H5OPEN H5P_LST_REFERENCE_ACCESS_ID_g) - -/* Common creation order flags (for links in groups and attributes on objects) */ +/** + * Attribute creation order is tracked but not necessarily indexed + */ #define H5P_CRT_ORDER_TRACKED 0x0001 +/** + * Attribute creation order is indexed (requires #H5P_CRT_ORDER_TRACKED) + */ #define H5P_CRT_ORDER_INDEXED 0x0002 - /** * Default value of type \ref hid_t for all property list classes */ @@ -5634,6 +5760,79 @@ H5_DLL herr_t H5Pset_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t H5_DLL herr_t H5Pset_page_buffer_size(hid_t plist_id, size_t buf_size, unsigned min_meta_per, unsigned min_raw_per); +/** + * \ingroup FAPL + * + * \brief Relax file integrity checks that may issue errors for some valid files + * + * \fapl_id{plist_id} + * \param[in] flags Relaxed integrity checks flag. Valid values are: + * \li #H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS + * suppresses integrity checks for detecting + * unusually high values for the number of unused bits in + * numeric datatype classes (H5T_INTEGER, H5T_FLOAT, and + * H5T_BITFIELD). Integrity checks are triggered when + * the precision for a datatype (i.e. the number of bits + * containing actual data) is less than half of the + * datatype's size and the datatype is greater than + * 1 byte in size. For example, a datatype with a + * precision of 15 bits and a size of 4 bytes (i.e. 32 bits) + * will issue an error, but a datatype with 17 bits of + * precision and a size of 4 bytes will not issue an + * error, nor will a datatype with a precision of 1, 2, or + * 3 bits and a size of 1 byte issue an error. + * \li #H5F_RFIC_ALL relaxes all integrity checks above. + * + * \return \herr_t + * + * \details Incorrectly encoded or corrupted metadata in a native HDF5 + * format file can cause incorrect library behavior when the metadata + * has no checksum. Integrity checks within the library detect these + * circumstances and issue errors when incorrect metadata is found. + * Unfortunately, some of the integrity checks for detecting these + * circumstances may incorrectly issue an error for a valid HDF5 file + * that was intentionally created with these configurations. + * Setting the appropriate flag(s) with this routine will relax the + * file integrity checks for these valid files and suppress errors + * when accessing objects with these configurations. + * + * The library will also issue errors when these configurations are + * used to create objects, preventing applications from unintentionally + * creating them. Setting the appropriate flag with this routine will + * also suppress those errors on creation, although using this routine + * and the appropriate flag(s) will still be required when accessing + * files created with these configurations. + * + * A more complete solution that avoids errors on both object creation + * and access is to use the H5Pset_libver_bounds routine with a low + * bound of at least #H5F_LIBVER_V18 when creating objects with these + * configurations. This will cause the library to checksum a file's + * metadata, allowing accidental data corruption to be correctly + * detected and errors correctly issued without ambiguity. 
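A minimal usage sketch of the new routine documented above, assuming a pre-existing file (the file name is hypothetical and error checks are trimmed for brevity):

#include "hdf5.h"

int main(void)
{
    /* Relax only the unused-bits check so a valid file containing such
     * datatypes can be opened without tripping the new integrity checks. */
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    H5Pset_relax_file_integrity_checks(fapl, H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS);

    hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, fapl); /* hypothetical file */
    if (file >= 0)
        H5Fclose(file);
    H5Pclose(fapl);
    return 0;
}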
+ * + * \since 1.14.4 + * + */ +H5_DLL herr_t H5Pset_relax_file_integrity_checks(hid_t plist_id, uint64_t flags); +/** + * \ingroup FAPL + * + * \brief Retrieve relaxed file integrity check flags + * + * \fapl_id{plist_id} + * \param[out] flags Relaxed file integrity check flags + * + * \return \herr_t + * + * \details H5Pget_relax_file_integrity_checks() retrieves the relaxed file + * integrity check value into \p flags for the file access property + * list specified in \p plist_id. + * + * \since 1.14.4 + * + */ +H5_DLL herr_t H5Pget_relax_file_integrity_checks(hid_t plist_id, uint64_t *flags); + /* Dataset creation property list (DCPL) routines */ /** * \ingroup DCPL diff --git a/src/H5Rint.c b/src/H5Rint.c index 4606a57d3f7..3df70ba48c3 100644 --- a/src/H5Rint.c +++ b/src/H5Rint.c @@ -956,7 +956,10 @@ H5R__decode(const unsigned char *buf, size_t *nbytes, H5R_ref_priv_t *ref) const uint8_t *p = (const uint8_t *)buf; size_t buf_size = 0, decode_size = 0; uint8_t flags; - herr_t ret_value = SUCCEED; + bool decoded_filename = false; /* Whether filename was decoded, for error handling */ + bool decoded_attrname = false; /* Whether attribute name was decoded, for error handling */ + bool decoded_dataspace = false; /* Whether dataspace was decoded, for error handling */ + herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE @@ -990,6 +993,7 @@ H5R__decode(const unsigned char *buf, size_t *nbytes, H5R_ref_priv_t *ref) /* Decode file name */ H5R_DECODE(H5R__decode_string, &ref->info.obj.filename, p, buf_size, decode_size, "Cannot decode filename"); + decoded_filename = true; } else ref->info.obj.filename = NULL; @@ -997,22 +1001,28 @@ H5R__decode(const unsigned char *buf, size_t *nbytes, H5R_ref_priv_t *ref) switch (ref->type) { case H5R_OBJECT2: break; + case H5R_DATASET_REGION2: /* Decode dataspace */ H5R_DECODE(H5R__decode_region, &ref->info.reg.space, p, buf_size, decode_size, "Cannot decode region"); + decoded_dataspace = true; break; + case H5R_ATTR: /* Decode attribute name */ H5R_DECODE(H5R__decode_string, &ref->info.attr.name, p, buf_size, decode_size, "Cannot decode attribute name"); + decoded_attrname = true; break; + case H5R_OBJECT1: case H5R_DATASET_REGION1: case H5R_BADTYPE: case H5R_MAXTYPE: assert("invalid reference type" && 0); HGOTO_ERROR(H5E_REFERENCE, H5E_UNSUPPORTED, FAIL, "internal error (invalid reference type)"); + default: assert("unknown reference type" && 0); HGOTO_ERROR(H5E_REFERENCE, H5E_UNSUPPORTED, FAIL, "internal error (unknown reference type)"); @@ -1031,6 +1041,22 @@ H5R__decode(const unsigned char *buf, size_t *nbytes, H5R_ref_priv_t *ref) *nbytes = decode_size; done: + if (ret_value < 0) { + if (decoded_filename) { + H5MM_xfree(ref->info.obj.filename); + ref->info.obj.filename = NULL; + } + if (decoded_attrname) { + H5MM_xfree(ref->info.attr.name); + ref->info.attr.name = NULL; + } + if (decoded_dataspace) { + if (H5S_close(ref->info.reg.space) < 0) + HDONE_ERROR(H5E_REFERENCE, H5E_CLOSEERROR, FAIL, "unable to release dataspace"); + ref->info.reg.space = NULL; + } + } + FUNC_LEAVE_NOAPI(ret_value) } /* end H5R__decode() */ @@ -1175,7 +1201,7 @@ H5R__decode_region(const unsigned char *buf, size_t *nbytes, H5S_t **space_ptr) const uint8_t *p_end = p + *nbytes - 1; size_t buf_size = 0; unsigned rank; - H5S_t *space; + H5S_t *space = NULL; herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE @@ -1216,6 +1242,10 @@ H5R__decode_region(const unsigned char *buf, size_t *nbytes, H5S_t **space_ptr) *space_ptr = space; done: + if (ret_value < 0) + if (space && H5S_close(space) < 
0) + HDONE_ERROR(H5E_REFERENCE, H5E_CLOSEERROR, FAIL, "unable to release dataspace"); + FUNC_LEAVE_NOAPI(ret_value) } /* end H5R__decode_region() */ diff --git a/src/H5S.c b/src/H5S.c index 1345e0d5634..8d64426edd8 100644 --- a/src/H5S.c +++ b/src/H5S.c @@ -614,6 +614,10 @@ H5S__extent_copy_real(H5S_extent_t *dst, const H5S_extent_t *src, bool copy_max) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "can't copy shared information"); done: + if (ret_value < 0) + if (dst->size) + dst->size = H5FL_ARR_FREE(hsize_t, dst->size); + FUNC_LEAVE_NOAPI(ret_value) } /* end H5S__extent_copy_real() */ diff --git a/src/H5Shyper.c b/src/H5Shyper.c index 291742e37cb..6a1bf58e0dc 100644 --- a/src/H5Shyper.c +++ b/src/H5Shyper.c @@ -9177,8 +9177,8 @@ H5S__check_spans_overlap(const H5S_hyper_span_info_t *spans1, const H5S_hyper_sp assert(spans2); /* Use low & high bounds to try to avoid spinning through the span lists */ - if (H5S_RANGE_OVERLAP(spans1->low_bounds[0], spans1->high_bounds[0], spans2->low_bounds[0], - spans2->high_bounds[0])) { + if (H5_RANGE_OVERLAP(spans1->low_bounds[0], spans1->high_bounds[0], spans2->low_bounds[0], + spans2->high_bounds[0])) { H5S_hyper_span_t *span1, *span2; /* Hyperslab spans */ /* Walk over spans, comparing them for overlap */ @@ -9186,7 +9186,7 @@ H5S__check_spans_overlap(const H5S_hyper_span_info_t *spans1, const H5S_hyper_sp span2 = spans2->head; while (span1 && span2) { /* Check current two spans for overlap */ - if (H5S_RANGE_OVERLAP(span1->low, span1->high, span2->low, span2->high)) { + if (H5_RANGE_OVERLAP(span1->low, span1->high, span2->low, span2->high)) { /* Check for spans in lowest dimension already */ if (span1->down) { /* Sanity check */ @@ -9764,8 +9764,8 @@ H5S__hyper_regular_and_single_block(H5S_t *space, const hsize_t start[], const h block_end = (start[u] + block[u]) - 1; /* Check for overlap */ - if (!H5S_RANGE_OVERLAP(space->select.sel_info.hslab->diminfo.opt[u].start, select_end, start[u], - block_end)) { + if (!H5_RANGE_OVERLAP(space->select.sel_info.hslab->diminfo.opt[u].start, select_end, start[u], + block_end)) { overlap = false; break; } /* end if */ @@ -9811,8 +9811,8 @@ H5S__hyper_regular_and_single_block(H5S_t *space, const hsize_t start[], const h block_end = (start[u] + block[u]) - 1; /* Check for overlap */ - if (!H5S_RANGE_OVERLAP(space->select.sel_info.hslab->diminfo.opt[u].start, select_end, start[u], - block_end)) { + if (!H5_RANGE_OVERLAP(space->select.sel_info.hslab->diminfo.opt[u].start, select_end, start[u], + block_end)) { overlap = false; break; } /* end if */ @@ -10445,7 +10445,7 @@ H5S_combine_hyperslab(const H5S_t *old_space, H5S_seloper_t op, const hsize_t st } /* end for */ /* Check bound box of both spaces to see if they overlap */ - if (H5S_RANGE_OVERLAP(old_low_bounds[0], old_high_bounds[0], new_low_bounds[0], new_high_bounds[0])) + if (H5_RANGE_OVERLAP(old_low_bounds[0], old_high_bounds[0], new_low_bounds[0], new_high_bounds[0])) overlapped = true; /* Non-overlapping situations can be handled in special ways */ @@ -11414,8 +11414,8 @@ H5S__hyper_proj_int_iterate(H5S_hyper_span_info_t *ss_span_info, const H5S_hyper /* Check for non-overlapping bounds */ check_intersect = true; for (u = 0; u < (udata->ss_rank - depth); u++) - if (!H5S_RANGE_OVERLAP(ss_span_info->low_bounds[u], ss_span_info->high_bounds[u], - sis_span_info->low_bounds[u], sis_span_info->high_bounds[u])) { + if (!H5_RANGE_OVERLAP(ss_span_info->low_bounds[u], ss_span_info->high_bounds[u], + sis_span_info->low_bounds[u], sis_span_info->high_bounds[u])) { 
check_intersect = false; break; } /* end if */ @@ -11440,7 +11440,7 @@ H5S__hyper_proj_int_iterate(H5S_hyper_span_info_t *ss_span_info, const H5S_hyper /* Main loop */ do { /* Check if spans overlap */ - if (H5S_RANGE_OVERLAP(ss_low, ss_span->high, sis_low, sis_span->high)) { + if (H5_RANGE_OVERLAP(ss_low, ss_span->high, sis_low, sis_span->high)) { high = MIN(ss_span->high, sis_span->high); if (ss_span->down) { /* Add skipped elements if there's a pre-gap */ diff --git a/src/H5Spkg.h b/src/H5Spkg.h index fddd5e3c5fa..e851f548f94 100644 --- a/src/H5Spkg.h +++ b/src/H5Spkg.h @@ -89,15 +89,6 @@ * H5S_UNLIMITED) */ #define H5S_MAX_SIZE ((hsize_t)(hssize_t)(-2)) -/* Macro for checking if two ranges overlap one another */ -/* - * Check for the inverse of whether the ranges are disjoint. If they are - * disjoint, then the low bound of one of the ranges must be greater than the - * high bound of the other. - */ -/* (Assumes that low & high bounds are _inclusive_) */ -#define H5S_RANGE_OVERLAP(L1, H1, L2, H2) (!((L1) > (H2) || (L2) > (H1))) - /* * Dataspace extent information */ diff --git a/src/H5Spoint.c b/src/H5Spoint.c index 0e07869ea25..d642ea2bed0 100644 --- a/src/H5Spoint.c +++ b/src/H5Spoint.c @@ -575,8 +575,14 @@ H5S__point_add(H5S_t *space, H5S_seloper_t op, size_t num_elem, const hsize_t *c for (u = 0; u < num_elem; u++) { unsigned dim; /* Counter for dimensions */ + /* The following allocation relies on the size of an hcoords_t being + * the same as an 'H5S_pnt_node_t *', so fail now if that's not true + */ + HDcompile_assert(sizeof(hcoords_t) >= sizeof(H5S_pnt_node_t *)); + /* Allocate space for the new node */ - if (NULL == (new_node = (H5S_pnt_node_t *)H5FL_ARR_MALLOC(hcoords_t, space->extent.rank))) + /* Note: allocating "rank + 1" to allow for 'next' pointer */ + if (NULL == (new_node = (H5S_pnt_node_t *)H5FL_ARR_MALLOC(hcoords_t, space->extent.rank + 1))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point node"); /* Initialize fields in node */ @@ -795,7 +801,7 @@ H5S__copy_pnt_list(const H5S_pnt_list_t *src, unsigned rank) assert(rank > 0); /* Allocate room for the head of the point list */ - if (NULL == (dst = H5FL_MALLOC(H5S_pnt_list_t))) + if (NULL == (dst = H5FL_CALLOC(H5S_pnt_list_t))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, NULL, "can't allocate point list node"); curr = src->head; @@ -803,8 +809,14 @@ H5S__copy_pnt_list(const H5S_pnt_list_t *src, unsigned rank) while (curr) { H5S_pnt_node_t *new_node; /* New point information node */ + /* The following allocation relies on the size of an hcoords_t being + * the same as an 'H5S_pnt_node_t *', so fail now if that's not true + */ + HDcompile_assert(sizeof(hcoords_t) >= sizeof(H5S_pnt_node_t *)); + /* Create new point */ - if (NULL == (new_node = (H5S_pnt_node_t *)H5FL_ARR_MALLOC(hcoords_t, rank))) + /* Note: allocating "rank + 1" to allow for 'next' pointer */ + if (NULL == (new_node = (H5S_pnt_node_t *)H5FL_ARR_MALLOC(hcoords_t, rank + 1))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, NULL, "can't allocate point node"); new_node->next = NULL; @@ -2274,7 +2286,7 @@ H5S__point_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *of HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release selection"); /* Allocate room for the head of the point list */ - if (NULL == (new_space->select.sel_info.pnt_lst = H5FL_MALLOC(H5S_pnt_list_t))) + if (NULL == (new_space->select.sel_info.pnt_lst = H5FL_CALLOC(H5S_pnt_list_t))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point list 
node"); /* Check if the new space's rank is < or > base space's rank */ @@ -2293,8 +2305,14 @@ H5S__point_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *of base_node = base_space->select.sel_info.pnt_lst->head; prev_node = NULL; while (base_node) { + /* The following allocation relies on the size of an hcoords_t being + * the same as an 'H5S_pnt_node_t *', so fail now if that's not true + */ + HDcompile_assert(sizeof(hcoords_t) >= sizeof(H5S_pnt_node_t *)); + /* Create new point */ - if (NULL == (new_node = (H5S_pnt_node_t *)H5FL_ARR_MALLOC(hcoords_t, new_space->extent.rank))) + /* Note: allocating "rank + 1" to allow for 'next' pointer */ + if (NULL == (new_node = (H5S_pnt_node_t *)H5FL_ARR_MALLOC(hcoords_t, new_space->extent.rank + 1))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point node"); new_node->next = NULL; @@ -2335,8 +2353,14 @@ H5S__point_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *of base_node = base_space->select.sel_info.pnt_lst->head; prev_node = NULL; while (base_node) { + /* The following allocation relies on the size of an hcoords_t being + * the same as an 'H5S_pnt_node_t *', so fail now if that's not true + */ + HDcompile_assert(sizeof(hcoords_t) >= sizeof(H5S_pnt_node_t *)); + /* Create new point */ - if (NULL == (new_node = (H5S_pnt_node_t *)H5FL_ARR_MALLOC(hcoords_t, new_space->extent.rank))) + /* Note: allocating "rank + 1" to allow for 'next' pointer */ + if (NULL == (new_node = (H5S_pnt_node_t *)H5FL_ARR_MALLOC(hcoords_t, new_space->extent.rank + 1))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point node"); new_node->next = NULL; diff --git a/src/H5Sselect.c b/src/H5Sselect.c index d67b8e6f7e0..ef9994ec1b2 100644 --- a/src/H5Sselect.c +++ b/src/H5Sselect.c @@ -1945,7 +1945,7 @@ H5S_select_intersect_block(H5S_t *space, const hsize_t *start, const hsize_t *en /* Loop over selection bounds and block, checking for overlap */ for (u = 0; u < space->extent.rank; u++) /* If selection bounds & block don't overlap, can leave now */ - if (!H5S_RANGE_OVERLAP(low[u], high[u], start[u], end[u])) + if (!H5_RANGE_OVERLAP(low[u], high[u], start[u], end[u])) HGOTO_DONE(false); } /* end if */ diff --git a/src/H5T.c b/src/H5T.c index 91d9b42d95b..81a49e717f4 100644 --- a/src/H5T.c +++ b/src/H5T.c @@ -41,6 +41,7 @@ #include "H5Pprivate.h" /* Property lists */ #include "H5Tpkg.h" /* Datatypes */ #include "H5VLprivate.h" /* Virtual Object Layer */ +#include "H5VMprivate.h" /* Vectors and arrays */ /****************/ /* Local Macros */ @@ -48,6 +49,12 @@ #define H5T_ENCODE_VERSION 0 +/* + * The default number of slots allocated in the + * datatype conversion path table. 
+ */ +#define H5T_DEF_CONV_TABLE_SLOTS 128 + /* * Type initialization macros * @@ -372,6 +379,10 @@ static herr_t H5T__set_size(H5T_t *dt, size_t size); static herr_t H5T__close_cb(H5T_t *dt, void **request); static H5T_path_t *H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_conv_func_t *conv); +static herr_t H5T__path_find_init_path_table(void); +static herr_t H5T__path_find_init_new_path(H5T_path_t *path, const H5T_t *src, const H5T_t *dst, + H5T_conv_func_t *conv, H5T_conv_ctx_t *conv_ctx); +static herr_t H5T__path_free(H5T_path_t *path, H5T_conv_ctx_t *conv_ctx); static bool H5T_path_match(H5T_path_t *path, H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5VL_object_t *owned_vol_obj, H5T_conv_t func); static bool H5T_path_match_find_type_with_volobj(const H5T_t *datatype, const H5VL_object_t *owned_vol_obj); @@ -1661,52 +1672,16 @@ H5T_top_term_package(void) /* Unregister all conversion functions */ if (H5T_g.path) { - int i, nprint = 0; - - for (i = 0; i < H5T_g.npaths; i++) { - H5T_path_t *path; - - path = H5T_g.path[i]; - assert(path); - if (path->conv.u.app_func) { - H5T__print_stats(path, &nprint /*in,out*/); - path->cdata.command = H5T_CONV_FREE; - if (path->conv.is_app) { - if ((path->conv.u.app_func)(H5I_INVALID_HID, H5I_INVALID_HID, &(path->cdata), 0, 0, 0, - NULL, NULL, H5CX_get_dxpl()) < 0) { -#ifdef H5T_DEBUG - if (H5DEBUG(T)) { - fprintf(H5DEBUG(T), - "H5T: conversion function " - "0x%016zx failed to free private data for " - "%s (ignored)\n", - (size_t)path->conv.u.app_func, path->name); - } /* end if */ -#endif - H5E_clear_stack(NULL); /*ignore the error*/ - } /* end if */ - } /* end if */ - else { - if ((path->conv.u.lib_func)(NULL, NULL, &(path->cdata), NULL, 0, 0, 0, NULL, NULL) < 0) { -#ifdef H5T_DEBUG - if (H5DEBUG(T)) { - fprintf(H5DEBUG(T), - "H5T: conversion function " - "0x%016zx failed to free private data for " - "%s (ignored)\n", - (size_t)path->conv.u.lib_func, path->name); - } /* end if */ -#endif - H5E_clear_stack(NULL); /*ignore the error*/ - } /* end if */ - } /* end else */ - } /* end if */ - - if (path->src) - (void)H5T_close_real(path->src); - if (path->dst) - (void)H5T_close_real(path->dst); - path = H5FL_FREE(H5T_path_t, path); + H5T_conv_ctx_t conv_ctx = {0}; + + conv_ctx.u.free.src_type_id = H5I_INVALID_HID; + conv_ctx.u.free.dst_type_id = H5I_INVALID_HID; + + for (int i = 0; i < H5T_g.npaths; i++) { + H5T_path_t *path = H5T_g.path[i]; + + (void)H5T__path_free(path, &conv_ctx); + H5T_g.path[i] = NULL; } /* end for */ @@ -2654,7 +2629,6 @@ H5T__register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5T_con H5T_t *tmp_dtype = NULL; /*temporary destination datatype */ hid_t tmp_sid = H5I_INVALID_HID; /*temporary datatype ID */ hid_t tmp_did = H5I_INVALID_HID; /*temporary datatype ID */ - int nprint = 0; /*number of paths shut down */ int i; /*counter */ herr_t ret_value = SUCCEED; /*return value */ @@ -2686,7 +2660,7 @@ H5T__register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5T_con } /* end if */ } /* end if */ else { - H5T_conv_ctx_t tmp_ctx = {0}; + H5T_conv_ctx_t conv_ctx = {0}; /* * Get the datatype conversion exception callback structure. @@ -2694,7 +2668,7 @@ H5T__register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5T_con * pushed, since we could have arrived here during library * initialization of the H5T package. 
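The H5T_top_term_package() rewrite above funnels path teardown through the new H5T__path_free() helper instead of repeating the free logic at each call site. A schematic of that consolidation with invented names and none of the free-list or statistics handling:

#include <stdio.h>
#include <stdlib.h>

/* Schematic of the consolidation: shutdown and unregistration both call one
 * teardown helper instead of duplicating the free logic (invented names). */
typedef struct path {
    char *name;
} path_t;

static void path_free(path_t *p)
{
    if (!p)
        return;
    free(p->name); /* per-path resources are released in exactly one place */
    free(p);
}

static void table_term(path_t **table, int *npaths)
{
    for (int i = 0; i < *npaths; i++) {
        path_free(table[i]);
        table[i] = NULL; /* mirror the patch: each slot is cleared after freeing */
    }
    *npaths = 0;
}

int main(void)
{
    path_t *table[2] = {calloc(1, sizeof(path_t)), calloc(1, sizeof(path_t))};
    int     npaths   = 2;

    table_term(table, &npaths);
    printf("paths remaining: %d\n", npaths);
    return 0;
}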
*/ - if (!conv->is_app && H5CX_pushed() && (H5CX_get_dt_conv_cb(&tmp_ctx.u.init.cb_struct) < 0)) + if (!conv->is_app && H5CX_pushed() && (H5CX_get_dt_conv_cb(&conv_ctx.u.init.cb_struct) < 0)) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTGET, FAIL, "unable to get conversion exception callback"); /* Add function to end of soft list */ @@ -2757,8 +2731,8 @@ H5T__register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5T_con continue; } /* end if */ } /* end if */ - else if ((conv->u.lib_func)(old_path->src, old_path->dst, &cdata, &tmp_ctx, 0, 0, 0, NULL, NULL) < - 0) { + else if ((conv->u.lib_func)(old_path->src, old_path->dst, &cdata, &conv_ctx, 0, 0, 0, NULL, + NULL) < 0) { if (H5E_clear_stack(NULL) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTRESET, FAIL, "unable to clear current error stack"); continue; @@ -2781,37 +2755,10 @@ H5T__register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5T_con new_path = NULL; /*so we don't free it on error*/ /* Free old path */ - H5T__print_stats(old_path, &nprint); - old_path->cdata.command = H5T_CONV_FREE; - if (old_path->conv.is_app) { - if ((old_path->conv.u.app_func)(tmp_sid, tmp_did, &(old_path->cdata), 0, 0, 0, NULL, NULL, - H5CX_get_dxpl()) < 0) { -#ifdef H5T_DEBUG - if (H5DEBUG(T)) - fprintf(H5DEBUG(T), - "H5T: conversion function 0x%016zx " - "failed to free private data for %s (ignored)\n", - (size_t)old_path->conv.u.app_func, old_path->name); -#endif - } /* end if */ - } /* end if */ - else if ((old_path->conv.u.lib_func)(old_path->src, old_path->dst, &(old_path->cdata), NULL, 0, 0, - 0, NULL, NULL) < 0) { -#ifdef H5T_DEBUG - if (H5DEBUG(T)) - fprintf(H5DEBUG(T), - "H5T: conversion function 0x%016zx " - "failed to free private data for %s (ignored)\n", - (size_t)old_path->conv.u.lib_func, old_path->name); -#endif - } /* end if */ - - if (H5T_close_real(old_path->src) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, FAIL, "unable to close datatype"); - if (H5T_close_real(old_path->dst) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, FAIL, "unable to close datatype"); - - old_path = H5FL_FREE(H5T_path_t, old_path); + conv_ctx.u.free.src_type_id = tmp_sid; + conv_ctx.u.free.dst_type_id = tmp_did; + if (H5T__path_free(old_path, &conv_ctx) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTFREE, FAIL, "unable to free datatype conversion path"); /* Release temporary atoms */ if (tmp_sid >= 0) { @@ -2931,12 +2878,15 @@ herr_t H5T_unregister(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5VL_object_t *owned_vol_obj, H5T_conv_t func) { - H5T_path_t *path = NULL; /*conversion path */ - H5T_soft_t *soft = NULL; /*soft conversion information */ - int nprint = 0; /*number of paths shut down */ - int i; /*counter */ + H5T_conv_ctx_t conv_ctx = {0}; /* Conversion context object */ + H5T_path_t *path = NULL; /* Conversion path */ + H5T_soft_t *soft = NULL; /* Soft conversion information */ + herr_t ret_value = SUCCEED; - FUNC_ENTER_NOAPI_NOERR + FUNC_ENTER_NOAPI(FAIL) + + conv_ctx.u.free.src_type_id = H5I_INVALID_HID; + conv_ctx.u.free.dst_type_id = H5I_INVALID_HID; /* * Remove matching entries from the soft list if: @@ -2954,7 +2904,7 @@ H5T_unregister(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5VL_o * source or destination types matches the given VOL object. 
*/ if ((H5T_PERS_DONTCARE == pers || H5T_PERS_SOFT == pers) && !owned_vol_obj) { - for (i = H5T_g.nsoft - 1; i >= 0; --i) { + for (int i = H5T_g.nsoft - 1; i >= 0; --i) { soft = H5T_g.soft + i; assert(soft); if (name && *name && strcmp(name, soft->name) != 0) @@ -2968,11 +2918,11 @@ H5T_unregister(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5VL_o memmove(H5T_g.soft + i, H5T_g.soft + i + 1, (size_t)(H5T_g.nsoft - (i + 1)) * sizeof(H5T_soft_t)); --H5T_g.nsoft; - } /* end for */ - } /* end if */ + } + } /* Remove matching conversion paths, except no-op path */ - for (i = H5T_g.npaths - 1; i > 0; --i) { + for (int i = H5T_g.npaths - 1; i > 0; --i) { bool nomatch; path = H5T_g.path[i]; @@ -2990,45 +2940,20 @@ H5T_unregister(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst, H5VL_o * the list to be recalculated to avoid the removed function. */ path->cdata.recalc = true; - } /* end if */ + } else { /* Remove from table */ memmove(H5T_g.path + i, H5T_g.path + i + 1, (size_t)(H5T_g.npaths - (i + 1)) * sizeof(H5T_path_t *)); --H5T_g.npaths; - /* Shut down path */ - H5T__print_stats(path, &nprint); - path->cdata.command = H5T_CONV_FREE; - if (path->conv.is_app) { - if ((path->conv.u.app_func)(H5I_INVALID_HID, H5I_INVALID_HID, &(path->cdata), 0, 0, 0, NULL, - NULL, H5CX_get_dxpl()) < 0) { -#ifdef H5T_DEBUG - if (H5DEBUG(T)) - fprintf(H5DEBUG(T), - "H5T: conversion function 0x%016zx failed " - "to free private data for %s (ignored)\n", - (size_t)path->conv.u.app_func, path->name); -#endif - } /* end if */ - } /* end if */ - else if ((path->conv.u.lib_func)(NULL, NULL, &(path->cdata), NULL, 0, 0, 0, NULL, NULL) < 0) { -#ifdef H5T_DEBUG - if (H5DEBUG(T)) - fprintf(H5DEBUG(T), - "H5T: conversion function 0x%016zx failed " - "to free private data for %s (ignored)\n", - (size_t)path->conv.u.lib_func, path->name); -#endif - } /* end if */ - (void)H5T_close_real(path->src); - (void)H5T_close_real(path->dst); - path = H5FL_FREE(H5T_path_t, path); - H5E_clear_stack(NULL); /*ignore all shutdown errors*/ - } /* end else */ - } /* end for */ + if (H5T__path_free(path, &conv_ctx) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTFREE, FAIL, "unable to free datatype conversion path"); + } + } - FUNC_LEAVE_NOAPI(SUCCEED) +done: + FUNC_LEAVE_NOAPI(ret_value) } /* end H5T_unregister() */ /*------------------------------------------------------------------------- @@ -5055,6 +4980,60 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, bool superset) FUNC_LEAVE_NOAPI(ret_value) } /* end H5T_cmp() */ +/*------------------------------------------------------------------------- + * Function: H5T__bsearch_path_table + * + * Purpose: Performs a binary search on the type conversion path table. + * If `last_cmp` is non-NULL, the value of the last comparison + * is returned through it. If `idx` is non-NULL, the idx into + * the path table where the matching path was found is returned + * through it. If no matching path is found, the value for + * `idx` will be the index into the path table where a path + * entry with source and destination datatypes matching src and + * dst should be inserted. In this case, the caller should be + * sure to increment the index value by 1 if the value of the + * last comparison is > 0. 
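The `last_cmp`/`idx` contract described above is easier to see in isolation. Below is a minimal, self-contained sketch of the same search pattern, using a sorted array of ints in place of the conversion path table; slot 0 is treated as reserved (like the no-op path), and every name here is illustrative rather than HDF5 API.

    #include <assert.h>
    #include <stdio.h>

    /* Search table[1..n-1] for key. On a miss, *idx is the insertion point,
     * except that the caller must bump it by one when *last_cmp ends up > 0,
     * mirroring the contract documented for H5T__bsearch_path_table().
     */
    static const int *
    bsearch_with_insert_hint(const int *table, int n, int key, int *last_cmp, int *idx)
    {
        int        lt = 1, rt = n, md = 1; /* slot 0 is reserved, search [1, n) */
        int        cmp   = -1;
        const int *found = NULL;

        while (cmp && lt < rt) {
            md  = (lt + rt) / 2;
            cmp = (key > table[md]) - (key < table[md]);
            if (cmp < 0)
                rt = md;
            else if (cmp > 0)
                lt = md + 1;
            else
                found = &table[md];
        }

        if (last_cmp)
            *last_cmp = cmp;
        if (idx)
            *idx = md;

        return found;
    }

    int
    main(void)
    {
        int        table[] = {-1 /* reserved "no-op" slot */, 10, 20, 40};
        const int *hit;
        int        cmp, idx;

        /* Miss in the middle: last comparison was < 0, so idx is already correct */
        hit = bsearch_with_insert_hint(table, 4, 30, &cmp, &idx);
        if (!hit && cmp > 0)
            idx++;
        printf("30 -> insert at index %d\n", idx); /* prints 3 */

        /* Miss past the end: last comparison was > 0, so idx is bumped to 4 */
        hit = bsearch_with_insert_hint(table, 4, 50, &cmp, &idx);
        if (!hit && cmp > 0)
            idx++;
        printf("50 -> insert at index %d\n", idx); /* prints 4 */

        /* Exact match returns the entry itself */
        hit = bsearch_with_insert_hint(table, 4, 20, &cmp, &idx);
        assert(hit == &table[2]);

        return 0;
    }

The real table orders entries by H5T_cmp() on the source type and then on the destination type, but the insertion-hint handling is the same as in this sketch.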
+ * + * Return: Success: Pointer to the path in the path table + * Failure: NULL if no matching path is found in the table + * + *------------------------------------------------------------------------- + */ +static void * +H5T__bsearch_path_table(const H5T_t *src, const H5T_t *dst, int *last_cmp, int *idx) +{ + int cmp; + int lt, rt, md; + void *ret_value = NULL; + + FUNC_ENTER_PACKAGE_NOERR + + lt = md = 1; + rt = H5T_g.npaths; + cmp = -1; + + while (cmp && lt < rt) { + md = (lt + rt) / 2; + assert(H5T_g.path[md]); + cmp = H5T_cmp(src, H5T_g.path[md]->src, false); + if (0 == cmp) + cmp = H5T_cmp(dst, H5T_g.path[md]->dst, false); + if (cmp < 0) + rt = md; + else if (cmp > 0) + lt = md + 1; + else + ret_value = H5T_g.path[md]; + } + + if (last_cmp) + *last_cmp = cmp; + if (idx) + *idx = md; + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5T__bsearch_path_table() */ + /*------------------------------------------------------------------------- * Function: H5T_path_find * @@ -5102,16 +5081,20 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst) * Function: H5T__path_find_real * * Purpose: Finds the path which converts type SRC to type DST, creating - * a new path if necessary. If FUNC is non-zero then it is set - * as the hard conversion function for that path regardless of - * whether the path previously existed. Changing the conversion - * function of a path causes statistics to be reset to zero - * after printing them. The NAME is used only when creating a - * new path and is just for debugging. + * a new path if necessary. * - * If SRC and DST are both null pointers then the special no-op - * conversion path is used. This path is always stored as the - * first path in the path table. + * If `conv->u.app_func`/`conv->u.lib_func` is non-NULL then it + * is set as the hard conversion function for that path + * regardless of whether the path previously existed. Changing + * the conversion function of a path causes statistics to be + * reset to zero after printing them. `name` is used only when + * creating a new path and is just for debugging. + * + * If no "force conversion" flags are set for either the source + * or destination datatype and the two datatypes compare equal + * to each other, then the special no-op conversion path is + * used. This path is always stored as the first path in the + * path table. * * Return: Success: Pointer to the path, valid until the path * database is modified. 
@@ -5124,20 +5107,17 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst) static H5T_path_t * H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_conv_func_t *conv) { - H5T_conv_ctx_t tmp_ctx = {0}; /* temporary conversion context object */ - int lt, rt; /* left and right edges */ - int md; /* middle */ - int cmp; /* comparison result */ - int old_npaths; /* Previous number of paths in table */ - H5T_path_t *table = NULL; /* path existing in the table */ - H5T_path_t *path = NULL; /* new path */ - H5T_t *tmp_stype = NULL; /* temporary source datatype */ - H5T_t *tmp_dtype = NULL; /* temporary destination datatype */ - hid_t src_id = H5I_INVALID_HID; /* source datatype identifier */ - hid_t dst_id = H5I_INVALID_HID; /* destination datatype identifier */ - int i; /* counter */ - int nprint = 0; /* lines of output printed */ - H5T_path_t *ret_value = NULL; /* Return value */ + H5T_conv_ctx_t tmp_ctx = {0}; /* Temporary conversion context object */ + H5T_path_t *matched_path = NULL; /* Path existing in the table */ + H5T_path_t *path = NULL; /* Pointer to current path */ + bool noop_conv = false; /* Whether this is a no-op conversion */ + bool new_path = false; /* Whether we're creating a new path */ + bool new_api_func = false; /* If the caller is an API function specifying a new conversion function */ + bool new_lib_func = false; /* If the caller is a private function specifying a new conversion function */ + int old_npaths; /* Previous number of paths in table */ + int last_cmp = 0; /* Value of last comparison during binary search */ + int path_idx = 0; /* Index into path table for path */ + H5T_path_t *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE @@ -5156,66 +5136,21 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co if (H5CX_pushed() && (H5CX_get_dt_conv_cb(&tmp_ctx.u.init.cb_struct) < 0)) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTGET, NULL, "unable to get conversion exception callback"); - /* - * Make sure the first entry in the table is the no-op conversion path. - */ - if (0 == H5T_g.npaths) { - if (NULL == (H5T_g.path = (H5T_path_t **)H5MM_malloc(128 * sizeof(H5T_path_t *)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, - "memory allocation failed for type conversion path table"); - H5T_g.apaths = 128; - if (NULL == (H5T_g.path[0] = H5FL_CALLOC(H5T_path_t))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, - "memory allocation failed for no-op conversion path"); - snprintf(H5T_g.path[0]->name, sizeof(H5T_g.path[0]->name), "no-op"); - H5T_g.path[0]->conv.is_app = false; - H5T_g.path[0]->conv.u.lib_func = H5T__conv_noop; - H5T_g.path[0]->cdata.command = H5T_CONV_INIT; - if (H5T__conv_noop(NULL, NULL, &(H5T_g.path[0]->cdata), &tmp_ctx, 0, 0, 0, NULL, NULL) < 0) { -#ifdef H5T_DEBUG - if (H5DEBUG(T)) - fprintf(H5DEBUG(T), "H5T: unable to initialize no-op conversion function (ignored)\n"); -#endif - /* Ignore any errors from the conversion function */ - if (H5E_clear_stack(NULL) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTRESET, NULL, "unable to clear current error stack"); - } /* end if */ - H5T_g.path[0]->is_noop = true; - H5T_g.npaths = 1; - } /* end if */ + /* Make sure the path table is initialized */ + if ((0 == H5T_g.npaths) && (H5T__path_find_init_path_table() < 0)) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to initialize type conversion path table"); - /* Find the conversion path. If source and destination types are equal - * then use entry[0], otherwise do a binary search over the - * remaining entries. 
- * - * Only allow the no-op conversion to occur if no "force conversion" flags - * are set + /* Find the conversion path. If no "force conversion" flags are + * set and the source and destination types are equal, then use + * the no-op conversion path. Otherwise, do a binary search over + * the remaining entries. */ - if (src->shared->force_conv == false && dst->shared->force_conv == false && - 0 == H5T_cmp(src, dst, true)) { - table = H5T_g.path[0]; - cmp = 0; - md = 0; - } /* end if */ - else { - lt = md = 1; - rt = H5T_g.npaths; - cmp = -1; - - while (cmp && lt < rt) { - md = (lt + rt) / 2; - assert(H5T_g.path[md]); - cmp = H5T_cmp(src, H5T_g.path[md]->src, false); - if (0 == cmp) - cmp = H5T_cmp(dst, H5T_g.path[md]->dst, false); - if (cmp < 0) - rt = md; - else if (cmp > 0) - lt = md + 1; - else - table = H5T_g.path[md]; - } /* end while */ - } /* end else */ + noop_conv = + src->shared->force_conv == false && dst->shared->force_conv == false && 0 == H5T_cmp(src, dst, true); + if (noop_conv) + matched_path = H5T_g.path[0]; + else + matched_path = H5T__bsearch_path_table(src, dst, &last_cmp, &path_idx); /* Keep a record of the number of paths in the table, in case one of the * initialization calls below (hard or soft) causes more entries to be @@ -5223,13 +5158,18 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co */ old_npaths = H5T_g.npaths; + /* Set a few convenience variables */ + new_api_func = (matched_path && conv->is_app && conv->u.app_func); + new_lib_func = (matched_path && !conv->is_app && conv->u.lib_func); + /* If we didn't find the path, if the caller is an API function specifying * a new hard conversion function, or if the caller is a private function * specifying a new hard conversion and the path is a soft conversion, then * create a new path and add the new function to the path. */ - if (!table || (table && conv->is_app && conv->u.app_func) || - (table && !table->is_hard && !conv->is_app && conv->u.lib_func)) { + new_path = !matched_path || new_api_func || (new_lib_func && !matched_path->is_hard); + + if (new_path) { if (NULL == (path = H5FL_CALLOC(H5T_path_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for type conversion path"); if (name && *name) { @@ -5244,18 +5184,168 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to copy datatype for conversion path"); } /* end if */ else - path = table; + path = matched_path; + + /* Initialize the path if it's a new path */ + if (new_path && H5T__path_find_init_new_path(path, src, dst, conv, &tmp_ctx) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to initialize new conversion path"); + + /* Fail if the path still doesn't have a conversion function at this point */ + if (!path->conv.u.app_func) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "no appropriate function for conversion path"); + + /* Check if paths were inserted into the table through a recursive call + * and re-compute the correct location for this path if so. 
- QAK, 1/26/02 + */ + if (old_npaths != H5T_g.npaths) + matched_path = H5T__bsearch_path_table(src, dst, &last_cmp, &path_idx); + + /* Replace an existing table entry or add a new entry */ + if (matched_path && new_path) { + assert(matched_path == H5T_g.path[path_idx]); + + tmp_ctx.u.free.src_type_id = H5I_INVALID_HID; + tmp_ctx.u.free.dst_type_id = H5I_INVALID_HID; + if (H5T__path_free(matched_path, &tmp_ctx) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTFREE, NULL, "unable to free datatype conversion path"); + + H5T_g.path[path_idx] = path; + } + else if (new_path) { + /* Make space in the table for the new path if necessary */ + if ((size_t)H5T_g.npaths >= H5T_g.apaths) { + size_t na = MAX(H5T_DEF_CONV_TABLE_SLOTS, 2 * H5T_g.apaths); + H5T_path_t **x; + + if (NULL == (x = H5MM_realloc(H5T_g.path, na * sizeof(H5T_path_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); + H5T_g.apaths = na; + H5T_g.path = x; + } + + /* Adjust final location in table for new path if the last comparison + * of paths during binary search was > 0, then shift down all path + * entries in the table starting at that location to make room for + * the new path + */ + assert(last_cmp != 0); + if (last_cmp > 0) + path_idx++; + memmove(H5T_g.path + path_idx + 1, H5T_g.path + path_idx, + (size_t)(H5T_g.npaths - path_idx) * sizeof(H5T_path_t *)); + + H5T_g.npaths++; + H5T_g.path[path_idx] = path; + } + + ret_value = path; + +done: + if (!ret_value && path && new_path) { + if (path->src && (H5T_close_real(path->src) < 0)) + HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, NULL, "unable to close datatype"); + if (path->dst && (H5T_close_real(path->dst) < 0)) + HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, NULL, "unable to close datatype"); + path = H5FL_FREE(H5T_path_t, path); + } - /* If a hard conversion function is specified and none is defined for the - * path, or the caller is an API function, or the caller is a private function but - * the existing path is a soft function, then add the new conversion to the path - * and initialize its conversion data. + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5T__path_find_real() */ + +/*------------------------------------------------------------------------- + * Function: H5T__path_find_init_path_table + * + * Purpose: Helper function to allocate and initialize the table holding + * pointers to datatype conversion paths. Sets the no-op + * conversion path as the first entry in the table. 
+ * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5T__path_find_init_path_table(void) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + assert(0 == H5T_g.npaths); + + if (NULL == (H5T_g.path = H5MM_malloc(H5T_DEF_CONV_TABLE_SLOTS * sizeof(H5T_path_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, + "memory allocation failed for type conversion path table"); + H5T_g.apaths = H5T_DEF_CONV_TABLE_SLOTS; + H5T_g.path[0] = NULL; + + /* + * Allocate a path for the no-op conversion function + * and set it as the first entry in the table */ - if (conv->u.app_func && - (!table || (table && conv->is_app) || (table && !table->is_hard && !conv->is_app))) { - assert(path != table); - assert(NULL == path->conv.u.app_func); + if (NULL == (H5T_g.path[0] = H5FL_CALLOC(H5T_path_t))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for no-op conversion path"); + + /* Initialize the no-op path */ + snprintf(H5T_g.path[0]->name, sizeof(H5T_g.path[0]->name), "no-op"); + H5T_g.path[0]->conv.is_app = false; + H5T_g.path[0]->conv.u.lib_func = H5T__conv_noop; + H5T_g.path[0]->cdata.command = H5T_CONV_INIT; + + if (H5T__conv_noop(NULL, NULL, &(H5T_g.path[0]->cdata), NULL, 0, 0, 0, NULL, NULL) < 0) { +#ifdef H5T_DEBUG + if (H5DEBUG(T)) + fprintf(H5DEBUG(T), "H5T: unable to initialize no-op conversion function (ignored)\n"); +#endif + /* Ignore any errors from the conversion function */ + if (H5E_clear_stack(NULL) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTRESET, FAIL, "unable to clear current error stack"); + } /* end if */ + + H5T_g.path[0]->is_noop = true; + H5T_g.npaths = 1; + +done: + if (ret_value < 0) { + if (H5T_g.path) + H5FL_FREE(H5T_path_t, H5T_g.path[0]); + H5MM_free(H5T_g.path); + } + + FUNC_LEAVE_NOAPI(ret_value) +} + +/*------------------------------------------------------------------------- + * Function: H5T__path_find_init_new_path + * + * Purpose: Helper function to initialize a new conversion path that's + * being added to the path conversion table. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5T__path_find_init_new_path(H5T_path_t *path, const H5T_t *src, const H5T_t *dst, H5T_conv_func_t *conv, + H5T_conv_ctx_t *conv_ctx) +{ + H5T_t *tmp_stype = NULL; /* temporary source datatype */ + H5T_t *tmp_dtype = NULL; /* temporary destination datatype */ + hid_t src_id = H5I_INVALID_HID; /* source datatype identifier */ + hid_t dst_id = H5I_INVALID_HID; /* destination datatype identifier */ + herr_t status = SUCCEED; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + assert(path); + assert(conv); + assert(conv_ctx); + assert(NULL == path->conv.u.app_func); + /* If a hard conversion function was specified, initialize that + * function and finish setting up the new path. 
+ */ + if (conv->u.app_func) { path->cdata.command = H5T_CONV_INIT; if (conv->is_app) { /* Copy the conversion path's source and destination datatypes and @@ -5263,48 +5353,48 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co * conversion function */ if (path->src && (NULL == (tmp_stype = H5T_copy(path->src, H5T_COPY_ALL)))) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "unable to copy source datatype"); + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, FAIL, "unable to copy source datatype"); if (path->dst && (NULL == (tmp_dtype = H5T_copy(path->dst, H5T_COPY_ALL)))) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "unable to copy destination datatype"); + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, FAIL, "unable to copy destination datatype"); if (tmp_stype && ((src_id = H5I_register(H5I_DATATYPE, tmp_stype, false)) < 0)) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register ID for source datatype"); if (tmp_dtype && ((dst_id = H5I_register(H5I_DATATYPE, tmp_dtype, false)) < 0)) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register ID for destination datatype"); - if ((conv->u.app_func)(src_id, dst_id, &(path->cdata), 0, 0, 0, NULL, NULL, H5CX_get_dxpl()) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to initialize conversion function"); - } /* end if */ - else if ((conv->u.lib_func)(path->src, path->dst, &(path->cdata), &tmp_ctx, 0, 0, 0, NULL, NULL) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to initialize conversion function"); + status = (conv->u.app_func)(src_id, dst_id, &(path->cdata), 0, 0, 0, NULL, NULL, H5CX_get_dxpl()); + } + else + status = (conv->u.lib_func)(path->src, path->dst, &(path->cdata), conv_ctx, 0, 0, 0, NULL, NULL); + + if (status < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to initialize conversion function"); if (src_id >= 0) { if (H5I_dec_ref(src_id) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDEC, NULL, "can't decrement reference on temporary ID"); + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDEC, FAIL, "can't decrement reference on temporary ID"); src_id = H5I_INVALID_HID; tmp_stype = NULL; } if (dst_id >= 0) { if (H5I_dec_ref(dst_id) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDEC, NULL, "can't decrement reference on temporary ID"); + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDEC, FAIL, "can't decrement reference on temporary ID"); dst_id = H5I_INVALID_HID; tmp_dtype = NULL; } path->conv = *conv; path->is_hard = true; - } /* end if */ + } /* - * If the path doesn't have a function by now (because it's a new path - * and the caller didn't supply a hard function) then scan the soft list - * for an applicable function and add it to the path. This can't happen - * for the no-op conversion path. + * Otherwise, scan the soft list for an applicable function + * and add it to the path. 
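As background for the soft-list scan below: the entries in H5T_g.soft come from soft registrations made through the public H5Tregister() API (the library registers its own soft conversions the same way). The skeleton that follows is a hedged sketch of such a registration; the callback name, its empty command handlers, and the int-to-int pairing are illustrative only and not part of this change.

    #include "hdf5.h"

    /* Application-side conversion callback: the library drives it with
     * H5T_CONV_INIT / H5T_CONV_CONV / H5T_CONV_FREE via cdata->command.
     */
    static herr_t
    sketch_int_to_int(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts, size_t buf_stride,
                      size_t bkg_stride, void *buf, void *bkg, hid_t dxpl_id)
    {
        (void)src_id;
        (void)dst_id;
        (void)nelmts;
        (void)buf_stride;
        (void)bkg_stride;
        (void)buf;
        (void)bkg;
        (void)dxpl_id;

        switch (cdata->command) {
            case H5T_CONV_INIT: /* check that the src/dst pair is convertible, set up cdata->priv */
                break;
            case H5T_CONV_CONV: /* convert nelmts elements in buf */
                break;
            case H5T_CONV_FREE: /* release anything stashed in cdata->priv */
                break;
            default:
                return -1;
        }

        return 0;
    }

    int
    main(void)
    {
        /* Soft registration: the library may bind this function to any path whose
         * source/destination datatype classes match the pair given here.
         */
        if (H5Tregister(H5T_PERS_SOFT, "sketch_int_to_int", H5T_NATIVE_INT, H5T_NATIVE_INT,
                        sketch_int_to_int) < 0)
            return 1;

        H5Tunregister(H5T_PERS_SOFT, "sketch_int_to_int", H5T_NATIVE_INT, H5T_NATIVE_INT, sketch_int_to_int);

        return 0;
    }

A hard registration (H5T_PERS_HARD) instead supplies the conversion for one specific src/dst pair, which roughly corresponds to the `conv->u.app_func` branch handled just above.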
*/ assert(path->conv.u.app_func || (src && dst)); - for (i = H5T_g.nsoft - 1; i >= 0 && !path->conv.u.app_func; --i) { + for (int i = H5T_g.nsoft - 1; i >= 0 && !path->conv.u.app_func; --i) { bool path_init_error = false; if (src->shared->type != H5T_g.soft[i].src || dst->shared->type != H5T_g.soft[i].dst) @@ -5316,39 +5406,32 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co * register an ID for them so we can pass these to the application * conversion function */ - assert(tmp_stype == NULL); - assert(tmp_dtype == NULL); if (NULL == (tmp_stype = H5T_copy(path->src, H5T_COPY_ALL))) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "unable to copy source datatype"); + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, FAIL, "unable to copy source datatype"); if (NULL == (tmp_dtype = H5T_copy(path->dst, H5T_COPY_ALL))) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "unable to copy destination datatype"); + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, FAIL, "unable to copy destination datatype"); - assert(src_id == H5I_INVALID_HID); - assert(dst_id == H5I_INVALID_HID); if ((src_id = H5I_register(H5I_DATATYPE, tmp_stype, false)) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register ID for source datatype"); if ((dst_id = H5I_register(H5I_DATATYPE, tmp_dtype, false)) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register ID for destination datatype"); - if ((H5T_g.soft[i].conv.u.app_func)(src_id, dst_id, &(path->cdata), 0, 0, 0, NULL, NULL, - H5CX_get_dxpl()) < 0) { - memset(&(path->cdata), 0, sizeof(H5T_cdata_t)); - /*ignore the error*/ - if (H5E_clear_stack(NULL) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTRESET, NULL, "unable to clear current error stack"); - path_init_error = true; - } /* end if */ - } /* end if */ - else if ((H5T_g.soft[i].conv.u.lib_func)(path->src, path->dst, &(path->cdata), &tmp_ctx, 0, 0, 0, - NULL, NULL) < 0) { + status = (H5T_g.soft[i].conv.u.app_func)(src_id, dst_id, &(path->cdata), 0, 0, 0, NULL, NULL, + H5CX_get_dxpl()); + } + else + status = (H5T_g.soft[i].conv.u.lib_func)(path->src, path->dst, &(path->cdata), conv_ctx, 0, 0, 0, + NULL, NULL); + + if (status < 0) { memset(&(path->cdata), 0, sizeof(H5T_cdata_t)); - /*ignore the error*/ + /* ignore the error */ if (H5E_clear_stack(NULL) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTRESET, NULL, "unable to clear current error stack"); + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTRESET, FAIL, "unable to clear current error stack"); path_init_error = true; - } /* end if */ + } /* Finish operation, if no error */ if (!path_init_error) { @@ -5356,133 +5439,105 @@ H5T__path_find_real(const H5T_t *src, const H5T_t *dst, const char *name, H5T_co path->name[H5T_NAMELEN - 1] = '\0'; path->conv = H5T_g.soft[i].conv; path->is_hard = false; - } /* end else */ + } if (src_id >= 0) { if (H5I_dec_ref(src_id) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDEC, NULL, "can't decrement reference on temporary ID"); + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDEC, FAIL, "can't decrement reference on temporary ID"); src_id = H5I_INVALID_HID; tmp_stype = NULL; } if (dst_id >= 0) { if (H5I_dec_ref(dst_id) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDEC, NULL, "can't decrement reference on temporary ID"); + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDEC, FAIL, "can't decrement reference on temporary ID"); dst_id = H5I_INVALID_HID; tmp_dtype = NULL; } - } /* end for */ - if (!path->conv.u.app_func) - 
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "no appropriate function for conversion path"); - - /* Check if paths were inserted into the table through a recursive call - * and re-compute the correct location for this path if so. - QAK, 1/26/02 - */ - if (old_npaths != H5T_g.npaths) { - lt = md = 1; - rt = H5T_g.npaths; - cmp = -1; - - while (cmp && lt < rt) { - md = (lt + rt) / 2; - assert(H5T_g.path[md]); - cmp = H5T_cmp(src, H5T_g.path[md]->src, false); - if (0 == cmp) - cmp = H5T_cmp(dst, H5T_g.path[md]->dst, false); - if (cmp < 0) - rt = md; - else if (cmp > 0) - lt = md + 1; - else - table = H5T_g.path[md]; - } /* end while */ - } /* end if */ - - /* Replace an existing table entry or add a new entry */ - if (table && path != table) { - assert(table == H5T_g.path[md]); - H5T__print_stats(table, &nprint /*in,out*/); - table->cdata.command = H5T_CONV_FREE; - if (table->conv.is_app) { - if ((table->conv.u.app_func)(H5I_INVALID_HID, H5I_INVALID_HID, &(table->cdata), 0, 0, 0, NULL, - NULL, H5CX_get_dxpl()) < 0) { -#ifdef H5T_DEBUG - if (H5DEBUG(T)) - fprintf(H5DEBUG(T), "H5T: conversion function 0x%016zx free failed for %s (ignored)\n", - (size_t)path->conv.u.app_func, path->name); -#endif - /*ignore the failure*/ - if (H5E_clear_stack(NULL) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTRESET, NULL, "unable to clear current error stack"); - } /* end if */ - } /* end if */ - else if ((table->conv.u.lib_func)(NULL, NULL, &(table->cdata), NULL, 0, 0, 0, NULL, NULL) < 0) { -#ifdef H5T_DEBUG - if (H5DEBUG(T)) - fprintf(H5DEBUG(T), "H5T: conversion function 0x%016zx free failed for %s (ignored)\n", - (size_t)path->conv.u.lib_func, path->name); -#endif - /*ignore the failure*/ - if (H5E_clear_stack(NULL) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTRESET, NULL, "unable to clear current error stack"); - } /* end if */ - if (table->src && (H5T_close_real(table->src) < 0)) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, NULL, "unable to close datatype"); - if (table->dst && (H5T_close_real(table->dst) < 0)) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, NULL, "unable to close datatype"); - table = H5FL_FREE(H5T_path_t, table); - table = path; - H5T_g.path[md] = path; - } /* end if */ - else if (path != table) { - assert(cmp); - if ((size_t)H5T_g.npaths >= H5T_g.apaths) { - size_t na = MAX(128, 2 * H5T_g.apaths); - H5T_path_t **x; - - if (NULL == (x = (H5T_path_t **)H5MM_realloc(H5T_g.path, na * sizeof(H5T_path_t *)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); - H5T_g.apaths = na; - H5T_g.path = x; - } /* end if */ - if (cmp > 0) - md++; - memmove(H5T_g.path + md + 1, H5T_g.path + md, (size_t)(H5T_g.npaths - md) * sizeof(H5T_path_t *)); - H5T_g.npaths++; - H5T_g.path[md] = path; - table = path; - } /* end else-if */ - - /* Set return value */ - ret_value = path; + } done: - if (!ret_value && path && path != table) { - if (path->src && (H5T_close_real(path->src) < 0)) - HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, NULL, "unable to close datatype"); - if (path->dst && (H5T_close_real(path->dst) < 0)) - HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, NULL, "unable to close datatype"); - path = H5FL_FREE(H5T_path_t, path); - } /* end if */ - if (src_id >= 0) { if (H5I_dec_ref(src_id) < 0) - HDONE_ERROR(H5E_DATATYPE, H5E_CANTDEC, NULL, "can't decrement reference on temporary ID"); + HDONE_ERROR(H5E_DATATYPE, H5E_CANTDEC, FAIL, "can't decrement reference on temporary ID"); } else if (tmp_stype) { if (H5T_close(tmp_stype) < 0) - HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, NULL, "can't 
close temporary datatype"); + HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, FAIL, "can't close temporary datatype"); } if (dst_id >= 0) { if (H5I_dec_ref(dst_id) < 0) - HDONE_ERROR(H5E_DATATYPE, H5E_CANTDEC, NULL, "can't decrement reference on temporary ID"); + HDONE_ERROR(H5E_DATATYPE, H5E_CANTDEC, FAIL, "can't decrement reference on temporary ID"); } else if (tmp_dtype) { if (H5T_close(tmp_dtype) < 0) - HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, NULL, "can't close temporary datatype"); + HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, FAIL, "can't close temporary datatype"); } FUNC_LEAVE_NOAPI(ret_value) -} /* end H5T__path_find_real() */ +} + +/*------------------------------------------------------------------------- + * Function: H5T__path_free + * + * Purpose: Helper function to free a datatype conversion path. This + * function assumes that the 'free' member of the passed in + * 'conv_ctx' has been initialized. + * + * Return: Non-negative on success/Negative on failure + * + *------------------------------------------------------------------------- + */ +static herr_t +H5T__path_free(H5T_path_t *path, H5T_conv_ctx_t *conv_ctx) +{ + herr_t status = SUCCEED; + int nprint = 0; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + assert(path); + assert(conv_ctx); + + if (path->conv.u.app_func) { + H5T__print_stats(path, &nprint); + + path->cdata.command = H5T_CONV_FREE; + + if (path->conv.is_app) + status = (path->conv.u.app_func)(conv_ctx->u.free.src_type_id, conv_ctx->u.free.dst_type_id, + &(path->cdata), 0, 0, 0, NULL, NULL, H5CX_get_dxpl()); + else + status = + (path->conv.u.lib_func)(path->src, path->dst, &(path->cdata), conv_ctx, 0, 0, 0, NULL, NULL); + + if (status < 0) { + /* Ignore any error from shutting down the path */ + if (H5E_clear_stack(NULL) < 0) + /* Push error, but keep going */ + HDONE_ERROR(H5E_DATATYPE, H5E_CANTRESET, FAIL, "unable to clear current error stack"); + +#ifdef H5T_DEBUG + if (H5DEBUG(T)) { + fprintf(H5DEBUG(T), "H5T: conversion function 0x%016zx free failed for %s (ignored)\n", + path->conv.is_app ? (size_t)path->conv.u.app_func : (size_t)path->conv.u.lib_func, + path->name); + } +#endif + } + } + + if (path->src && (H5T_close_real(path->src) < 0)) + /* Push error, but keep going */ + HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, FAIL, "unable to close source datatype"); + if (path->dst && (H5T_close_real(path->dst) < 0)) + /* Push error, but keep going */ + HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, FAIL, "unable to close destination datatype"); + + path = H5FL_FREE(H5T_path_t, path); + + FUNC_LEAVE_NOAPI(ret_value) +} /*------------------------------------------------------------------------- * Function: H5T_path_match @@ -6637,3 +6692,51 @@ H5T__get_path_table_npaths(void) FUNC_LEAVE_NOAPI(ret_value) } + +/*------------------------------------------------------------------------- + * Function: H5T_is_numeric_with_unusual_unused_bits + * + * Purpose: Detect if a datatype is a numeric datatype (int, float, or + * bitfield) with an unusual # of unused bits. This means + * that the precision (i.e. the # of bits used) is less than + * the size of the datatype, at power-of-two boundaries. + * + * Return: true/false on success, can't fail + * + *------------------------------------------------------------------------- + */ +bool +H5T_is_numeric_with_unusual_unused_bits(const H5T_t *dt) +{ + bool ret_value = false; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + /* Sanity check */ + assert(dt); + assert(dt->shared); + + /* Is the correct type? 
*/ + if (H5T_INTEGER == dt->shared->type || H5T_FLOAT == dt->shared->type || + H5T_BITFIELD == dt->shared->type) { +#if LDBL_MANT_DIG == 106 + /* This currently won't work for the IBM long double type */ + if (H5T_FLOAT == dt->shared->type && dt->shared->size == 16 && + (dt->shared->u.atomic.prec == 64 || dt->shared->u.atomic.prec == 128)) + HGOTO_DONE(false); +#endif + + /* Has unused bits? */ + if (dt->shared->u.atomic.prec < (dt->shared->size * 8)) { + unsigned surround_bits = + 1U << (1 + H5VM_log2_gen((dt->shared->u.atomic.prec + dt->shared->u.atomic.offset) - 1)); + + /* Unused bits are unusually large? */ + if (dt->shared->size > 1 && ((dt->shared->size * 8) > surround_bits)) + HGOTO_DONE(true); + } + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5T_is_numeric_with_unusual_unused_bits() */ diff --git a/src/H5Tcommit.c b/src/H5Tcommit.c index 5c4b4be9553..00a88984375 100644 --- a/src/H5Tcommit.c +++ b/src/H5Tcommit.c @@ -424,11 +424,13 @@ H5T__commit_anon(H5F_t *file, H5T_t *type, hid_t tcpl_id) herr_t H5T__commit(H5F_t *file, H5T_t *type, hid_t tcpl_id) { - H5O_loc_t temp_oloc; /* Temporary object header location */ - H5G_name_t temp_path; /* Temporary path */ - bool loc_init = false; /* Have temp_oloc and temp_path been initialized? */ - size_t dtype_size; /* Size of the datatype message */ - herr_t ret_value = SUCCEED; /* Return value */ + H5O_t *oh = NULL; /* Pointer to actual object header */ + H5O_loc_t temp_oloc; /* Temporary object header location */ + H5G_name_t temp_path; /* Temporary path */ + bool loc_init = false; /* Have temp_oloc and temp_path been initialized? */ + bool ohdr_created = false; /* Has the object header been created yet? */ + size_t dtype_size; /* Size of the datatype message */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -481,9 +483,23 @@ H5T__commit(H5F_t *file, H5T_t *type, hid_t tcpl_id) */ if (H5O_create(file, dtype_size, (size_t)1, tcpl_id, &temp_oloc) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to create datatype object header"); - if (H5O_msg_create(&temp_oloc, H5O_DTYPE_ID, H5O_MSG_FLAG_CONSTANT | H5O_MSG_FLAG_DONTSHARE, - H5O_UPDATE_TIME, type) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to update type header message"); + ohdr_created = true; + + /* Pin the object header */ + if (NULL == (oh = H5O_pin(&temp_oloc))) + HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin object header"); + + /* Check for creating committed datatype with unusual datatype */ + if (!(H5O_has_chksum(oh) || (H5F_RFIC_FLAGS(file) & H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS)) && + H5T_is_numeric_with_unusual_unused_bits(type)) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, + "creating committed datatype with unusual datatype, see documentation for " + "H5Pset_relax_file_integrity_checks for details."); + + /* Insert the datatype message */ + if (H5O_msg_append_oh(file, oh, H5O_DTYPE_ID, H5O_MSG_FLAG_CONSTANT | H5O_MSG_FLAG_DONTSHARE, + H5O_UPDATE_TIME, type) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to insert type header message"); /* Copy the new object header's location into the datatype, taking ownership of it */ if (H5O_loc_copy_shallow(&(type->oloc), &temp_oloc) < 0) @@ -510,23 +526,39 @@ H5T__commit(H5F_t *file, H5T_t *type, hid_t tcpl_id) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "cannot mark datatype in memory"); done: + if (oh && H5O_unpin(oh) < 0) + HDONE_ERROR(H5E_DATATYPE, H5E_CANTUNPIN, FAIL, "unable to unpin object header"); + if (ret_value < 0) { - if (loc_init) { - 
H5O_loc_free(&temp_oloc); - H5G_name_free(&temp_path); - } /* end if */ - if ((type->shared->state == H5T_STATE_TRANSIENT || type->shared->state == H5T_STATE_RDONLY) && - (type->sh_loc.type == H5O_SHARE_TYPE_COMMITTED)) { - if (H5O_dec_rc_by_loc(&(type->oloc)) < 0) + /* Close & delete the object header on failure */ + if (ohdr_created) { + H5O_loc_t *oloc_ptr; /* Pointer to object header location */ + + /* Point at correct object header location, depending on state when failure occurred */ + if (loc_init) + oloc_ptr = &temp_oloc; + else + oloc_ptr = &(type->oloc); + if (H5O_dec_rc_by_loc(oloc_ptr) < 0) HDONE_ERROR(H5E_DATATYPE, H5E_CANTDEC, FAIL, "unable to decrement refcount on newly created object"); - if (H5O_close(&(type->oloc), NULL) < 0) + if (H5O_close(oloc_ptr, NULL) < 0) HDONE_ERROR(H5E_DATATYPE, H5E_CLOSEERROR, FAIL, "unable to release object header"); - if (H5O_delete(file, type->sh_loc.u.loc.oh_addr) < 0) + if (H5O_delete(file, oloc_ptr->addr) < 0) HDONE_ERROR(H5E_DATATYPE, H5E_CANTDELETE, FAIL, "unable to delete object header"); + } + + /* Release the location info, if the datatype doesn't own it */ + if (loc_init) { + H5O_loc_free(&temp_oloc); + H5G_name_free(&temp_path); + } + + /* Reset the shared state for the datatype */ + if ((type->shared->state == H5T_STATE_TRANSIENT || type->shared->state == H5T_STATE_RDONLY) && + (type->sh_loc.type == H5O_SHARE_TYPE_COMMITTED)) type->sh_loc.type = H5O_SHARE_TYPE_UNSHARED; - } /* end if */ - } /* end if */ + } FUNC_LEAVE_NOAPI(ret_value) } /* H5T__commit() */ diff --git a/src/H5Tconv.c b/src/H5Tconv.c index 422e3110497..55b6d7d01da 100644 --- a/src/H5Tconv.c +++ b/src/H5Tconv.c @@ -19,18 +19,21 @@ /****************/ #include "H5Tmodule.h" /* This source code file is part of the H5T module */ +#define H5R_FRIEND /* Suppress error about including H5Rpkg */ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5CXprivate.h" /* API Contexts */ -#include "H5Dprivate.h" /* Datasets */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5FLprivate.h" /* Free Lists */ -#include "H5Iprivate.h" /* IDs */ -#include "H5MMprivate.h" /* Memory management */ -#include "H5Tpkg.h" /* Datatypes */ +#include "H5private.h" /* Generic Functions */ +#include "H5CXprivate.h" /* API Contexts */ +#include "H5Dprivate.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5Iprivate.h" /* IDs */ +#include "H5MMprivate.h" /* Memory management */ +#include "H5Pprivate.h" /* Property lists */ +#include "H5Rpkg.h" /* References */ +#include "H5Tpkg.h" /* Datatypes */ /****************/ /* Local Macros */ @@ -3445,26 +3448,33 @@ H5T__conv_vlen(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, const H5T bool write_to_file = false; /* Flag to indicate writing to file */ htri_t parent_is_vlen; /* Flag to indicate parent is vlen datatype */ size_t bg_seq_len = 0; /* The number of elements in the background sequence */ - H5T_t *tsrc_cpy = NULL; /*temporary copy of source base datatype */ - H5T_t *tdst_cpy = NULL; /*temporary copy of destination base datatype */ - hid_t tsrc_id = H5I_INVALID_HID; /*temporary type atom */ - hid_t tdst_id = H5I_INVALID_HID; /*temporary type atom */ - uint8_t *s = NULL; /*source buffer */ - uint8_t *d = NULL; /*destination buffer */ - uint8_t *b = NULL; /*background buffer */ - ssize_t s_stride, d_stride; /*src and dst strides */ - ssize_t b_stride; /*bkg stride */ - size_t safe; /*how many elements are safe to process 
in each pass */ - size_t src_base_size; /*source base size*/ - size_t dst_base_size; /*destination base size*/ - void *conv_buf = NULL; /*temporary conversion buffer */ - size_t conv_buf_size = 0; /*size of conversion buffer in bytes */ - void *tmp_buf = NULL; /*temporary background buffer */ - size_t tmp_buf_size = 0; /*size of temporary bkg buffer */ - bool nested = false; /*flag of nested VL case */ - bool need_ids = false; /*whether we need IDs for the datatypes */ - size_t elmtno; /*element number counter */ - herr_t ret_value = SUCCEED; /* Return value */ + H5T_t *tsrc_cpy = NULL; /* Temporary copy of source base datatype */ + H5T_t *tdst_cpy = NULL; /* Temporary copy of destination base datatype */ + hid_t tsrc_id = H5I_INVALID_HID; /* Temporary type atom */ + hid_t tdst_id = H5I_INVALID_HID; /* Temporary type atom */ + uint8_t *s = NULL; /* Source buffer */ + uint8_t *d = NULL; /* Destination buffer */ + uint8_t *b = NULL; /* Background buffer */ + ssize_t s_stride = 0; /* Src stride */ + ssize_t d_stride = 0; /* Dst stride */ + ssize_t b_stride; /* Bkg stride */ + size_t safe = 0; /* How many elements are safe to process in each pass */ + size_t src_base_size; /* Source base size*/ + size_t dst_base_size; /* Destination base size*/ + void *conv_buf = NULL; /* Temporary conversion buffer */ + size_t conv_buf_size = 0; /* Size of conversion buffer in bytes */ + void *tmp_buf = NULL; /* Temporary background buffer */ + size_t tmp_buf_size = 0; /* Size of temporary bkg buffer */ + bool nested = false; /* Flag of nested VL case */ + bool need_ids = false; /* Whether we need IDs for the datatypes */ + size_t elmtno = 0; /* Element number counter */ + size_t orig_d_stride = 0; /* Original destination stride (used for error handling) */ + size_t orig_nelmts = nelmts; /* Original # of elements to convert (used for error handling) */ + bool convert_forward = + true; /* Current direction of conversion (forward or backward, used for error handling) */ + bool conversions_made = + false; /* Flag to indicate conversions have been performed, used for error handling */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -3601,6 +3611,10 @@ H5T__conv_vlen(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, const H5T if (write_to_file && parent_is_vlen && bkg != NULL) nested = true; + /* Save info for unraveling on errors */ + orig_d_stride = (size_t)d_stride; + convert_forward = !(d_stride > s_stride); + /* The outer loop of the type conversion macro, controlling which */ /* direction the buffer is walked */ while (nelmts > 0) { @@ -3782,6 +3796,9 @@ H5T__conv_vlen(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, const H5T } /* end if */ } /* end else */ + /* Indicate that elements have been converted, in case of error */ + conversions_made = true; + /* Advance pointers */ s += s_stride; d += d_stride; @@ -3801,6 +3818,49 @@ H5T__conv_vlen(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, const H5T } /* end switch */ done: + /* Release converted elements on error */ + if (ret_value < 0 && conversions_made) { + size_t dest_count; + + /* Set up for first pass to destroy references */ + if (nelmts < orig_nelmts || (convert_forward && elmtno < safe)) { + dest_count = orig_nelmts - nelmts; + + /* Set pointer to correct location, based on direction chosen */ + if (convert_forward) { + d = (uint8_t *)buf; + dest_count += elmtno; /* Include partial iteration in first pass, for forward conversions */ + } + else + d = (uint8_t *)buf + (nelmts * orig_d_stride); + + /* 
Destroy vlen elements that have already been converted */ + while (dest_count > 0) { + H5T_vlen_reclaim_elmt(d, dst); /* Ignore errors at this point */ + d += orig_d_stride; + dest_count--; + } + } + + /* Do any remaining partial iteration, if converting backwards */ + if (!convert_forward && elmtno < safe) { + dest_count = elmtno; + + /* Set pointer to correct location */ + if (d_stride > 0) + d = (uint8_t *)buf + ((nelmts - safe) * orig_d_stride); + else + d = (uint8_t *)buf + ((nelmts - elmtno) * orig_d_stride); + + /* Destroy references that have already been converted */ + while (dest_count > 0) { + H5T_vlen_reclaim_elmt(d, dst); /* Ignore errors at this point */ + d += orig_d_stride; + dest_count--; + } + } + } + if (tsrc_id >= 0) { if (H5I_dec_ref(tsrc_id) < 0) HDONE_ERROR(H5E_DATATYPE, H5E_CANTDEC, FAIL, "can't decrement reference on temporary ID"); @@ -4033,16 +4093,23 @@ H5T__conv_ref(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, const H5T_conv_ctx_t H5_ATTR_UNUSED *conv_ctx, size_t nelmts, size_t buf_stride, size_t bkg_stride, void *buf, void *bkg) { - uint8_t *s = NULL; /* source buffer */ - uint8_t *d = NULL; /* destination buffer */ - uint8_t *b = NULL; /* background buffer */ - ssize_t s_stride, d_stride; /* src and dst strides */ - ssize_t b_stride; /* bkg stride */ - size_t safe; /* how many elements are safe to process in each pass */ - void *conv_buf = NULL; /* temporary conversion buffer */ - size_t conv_buf_size = 0; /* size of conversion buffer in bytes */ - size_t elmtno; /* element number counter */ - herr_t ret_value = SUCCEED; /* return value */ + uint8_t *s = NULL; /* source buffer */ + uint8_t *d = NULL; /* destination buffer */ + uint8_t *b = NULL; /* background buffer */ + ssize_t s_stride = 0; /* src stride */ + ssize_t d_stride = 0; /* dst stride */ + ssize_t b_stride; /* bkg stride */ + size_t safe = 0; /* how many elements are safe to process in each pass */ + void *conv_buf = NULL; /* temporary conversion buffer */ + size_t conv_buf_size = 0; /* size of conversion buffer in bytes */ + size_t elmtno = 0; /* element number counter */ + size_t orig_d_stride = 0; /* Original destination stride (used for error handling) */ + size_t orig_nelmts = nelmts; /* Original # of elements to convert (used for error handling) */ + bool convert_forward = + true; /* Current direction of conversion (forward or backward, used for error handling) */ + bool conversions_made = + false; /* Flag to indicate conversions have been performed, used for error handling */ + herr_t ret_value = SUCCEED; /* return value */ FUNC_ENTER_PACKAGE @@ -4103,6 +4170,10 @@ H5T__conv_ref(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, else b_stride = 0; + /* Save info for unraveling on errors */ + orig_d_stride = (size_t)d_stride; + convert_forward = !(d_stride > s_stride); + /* The outer loop of the type conversion macro, controlling which */ /* direction the buffer is walked */ while (nelmts > 0) { @@ -4202,6 +4273,9 @@ H5T__conv_ref(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, } /* end else */ } /* end else */ + /* Indicate that elements have been converted, in case of error */ + conversions_made = true; + /* Advance pointers */ s += s_stride; d += d_stride; @@ -4221,6 +4295,52 @@ H5T__conv_ref(const H5T_t *src, const H5T_t *dst, H5T_cdata_t *cdata, } /* end switch */ done: + /* Release converted elements on error */ + if (ret_value < 0 && conversions_made) { + H5R_ref_priv_t ref_priv; + size_t dest_count; + + /* Set up for first pass to destroy references */ + if 
(nelmts < orig_nelmts || (convert_forward && elmtno < safe)) { + dest_count = orig_nelmts - nelmts; + + /* Set pointer to correct location, based on direction chosen */ + if (convert_forward) { + d = (uint8_t *)buf; + dest_count += elmtno; /* Include partial iteration in first pass, for forward conversions */ + } + else + d = (uint8_t *)buf + (nelmts * orig_d_stride); + + /* Destroy references that have already been converted */ + while (dest_count > 0) { + memcpy(&ref_priv, d, sizeof(H5R_ref_priv_t)); + H5R__destroy(&ref_priv); /* Ignore errors at this point */ + d += orig_d_stride; + dest_count--; + } + } + + /* Do any remaining partial iteration, if converting backwards */ + if (!convert_forward && elmtno < safe) { + dest_count = elmtno; + + /* Set pointer to correct location */ + if (d_stride > 0) + d = (uint8_t *)buf + ((nelmts - safe) * orig_d_stride); + else + d = (uint8_t *)buf + ((nelmts - elmtno) * orig_d_stride); + + /* Destroy references that have already been converted */ + while (dest_count > 0) { + memcpy(&ref_priv, d, sizeof(H5R_ref_priv_t)); + H5R__destroy(&ref_priv); /* Ignore errors at this point */ + d += orig_d_stride; + dest_count--; + } + } + } + /* Release the conversion buffer (always allocated, except on errors) */ if (conv_buf) conv_buf = H5FL_BLK_FREE(ref_seq, conv_buf); diff --git a/src/H5Tpkg.h b/src/H5Tpkg.h index 59600f8eea0..8eb7b639cf0 100644 --- a/src/H5Tpkg.h +++ b/src/H5Tpkg.h @@ -158,7 +158,10 @@ struct H5T_stats_t { H5_timevals_t times; /*total time for conversion */ }; -/* Context struct for information used during datatype conversions */ +/* Context struct for information used during datatype conversions. + * Which union member is valid to read from is dictated by the + * accompanying H5T_cdata_t structure's H5T_cmd_t member value. 
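The rule spelled out in the comment above (the H5T_cmd_t command in the accompanying H5T_cdata_t decides which union member may be read) is the usual tagged-union discipline. A minimal generic sketch of that discipline follows, with purely illustrative names; these are not the HDF5 structures.

    #include <stdio.h>

    typedef enum { CMD_INIT, CMD_CONV, CMD_FREE } cmd_t;

    typedef struct {
        union {
            struct { int cb_is_set; }       init;    /* read only when command == CMD_INIT */
            struct { int recursive; }       conv;    /* read only when command == CMD_CONV */
            struct { long src_id, dst_id; } release; /* read only when command == CMD_FREE */
        } u;
    } ctx_t;

    static void
    dispatch(cmd_t command, const ctx_t *ctx)
    {
        switch (command) {
            case CMD_INIT:
                printf("init: cb_is_set=%d\n", ctx->u.init.cb_is_set);
                break;
            case CMD_CONV:
                printf("conv: recursive=%d\n", ctx->u.conv.recursive);
                break;
            case CMD_FREE:
                printf("free: src=%ld dst=%ld\n", ctx->u.release.src_id, ctx->u.release.dst_id);
                break;
        }
    }

    int
    main(void)
    {
        ctx_t ctx = {0};

        /* A caller freeing a path fills in only the members valid for that command */
        ctx.u.release.src_id = -1; /* analogous to setting u.free.src_type_id = H5I_INVALID_HID */
        ctx.u.release.dst_id = -1;
        dispatch(CMD_FREE, &ctx);

        return 0;
    }

In the patch itself, H5T_top_term_package(), H5T_unregister(), and H5T__register() follow this pattern: they set u.free.src_type_id and u.free.dst_type_id before handing the context to H5T__path_free().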
+ */ typedef struct H5T_conv_ctx_t { union { /* @@ -187,7 +190,14 @@ typedef struct H5T_conv_ctx_t { bool recursive; } conv; - /* No fields currently defined for H5T_cmd_t H5T_CONV_FREE */ + /* + * Fields only valid during conversion function free process + * (H5T_cmd_t H5T_CONV_FREE) + */ + struct H5T_conv_ctx_free_fields { + hid_t src_type_id; + hid_t dst_type_id; + } free; } u; } H5T_conv_ctx_t; diff --git a/src/H5Tprivate.h b/src/H5Tprivate.h index 7d8f27615a0..46b2c92fa83 100644 --- a/src/H5Tprivate.h +++ b/src/H5Tprivate.h @@ -139,7 +139,7 @@ H5_DLL herr_t H5T_convert(H5T_path_t *tpath, const H5T_t *src_type, const H5T_t size_t buf_stride, size_t bkg_stride, void *buf, void *bkg); H5_DLL herr_t H5T_reclaim(const H5T_t *type, struct H5S_t *space, void *buf); H5_DLL herr_t H5T_reclaim_cb(void *elem, const H5T_t *dt, unsigned ndim, const hsize_t *point, void *op_data); -H5_DLL herr_t H5T_vlen_reclaim_elmt(void *elem, H5T_t *dt); +H5_DLL herr_t H5T_vlen_reclaim_elmt(void *elem, const H5T_t *dt); H5_DLL htri_t H5T_set_loc(H5T_t *dt, H5VL_object_t *file, H5T_loc_t loc); H5_DLL htri_t H5T_is_sensible(const H5T_t *dt); H5_DLL uint32_t H5T_hash(H5F_t *file, const H5T_t *dt); @@ -157,6 +157,7 @@ H5_DLL bool H5T_already_vol_managed(const H5T_t *dt); H5_DLL htri_t H5T_is_vl_storage(const H5T_t *dt); H5_DLL herr_t H5T_invoke_vol_optional(H5T_t *dt, H5VL_optional_args_t *args, hid_t dxpl_id, void **req, H5VL_object_t **vol_obj_ptr); +H5_DLL bool H5T_is_numeric_with_unusual_unused_bits(const H5T_t *dt); /* Reference specific functions */ H5_DLL H5R_type_t H5T_get_ref_type(const H5T_t *dt); diff --git a/src/H5Tvlen.c b/src/H5Tvlen.c index 6cf11c77234..971dc3c5d22 100644 --- a/src/H5Tvlen.c +++ b/src/H5Tvlen.c @@ -1053,7 +1053,7 @@ H5T__vlen_reclaim(void *elem, const H5T_t *dt, H5T_vlen_alloc_info_t *alloc_info *------------------------------------------------------------------------- */ herr_t -H5T_vlen_reclaim_elmt(void *elem, H5T_t *dt) +H5T_vlen_reclaim_elmt(void *elem, const H5T_t *dt) { H5T_vlen_alloc_info_t vl_alloc_info; /* VL allocation info */ herr_t ret_value = SUCCEED; /* return value */ diff --git a/src/H5Zscaleoffset.c b/src/H5Zscaleoffset.c index fba1c7d6c2e..048344b763c 100644 --- a/src/H5Zscaleoffset.c +++ b/src/H5Zscaleoffset.c @@ -1210,6 +1210,8 @@ H5Z__filter_scaleoffset(unsigned flags, size_t cd_nelmts, const unsigned cd_valu minbits_mask <<= i * 8; minbits |= minbits_mask; } + if (minbits >= p.size * 8) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, 0, "minimum number of bits exceeds size of type"); /* retrieval of minval takes into consideration situation where sizeof * unsigned long long (datatype of minval) may change from compression diff --git a/src/H5encode.h b/src/H5encode.h index 5be75d57f77..690aee104d7 100644 --- a/src/H5encode.h +++ b/src/H5encode.h @@ -212,7 +212,7 @@ n = 0; \ (p) += 8; \ for (_i = 0; _i < sizeof(int64_t); _i++) \ - n = (n << 8) | *(--p); \ + n = (int64_t)(((uint64_t)n << 8) | *(--p)); \ (p) += 8; \ } while (0) @@ -224,7 +224,7 @@ n = 0; \ (p) += 8; \ for (_i = 0; _i < sizeof(uint64_t); _i++) \ - n = (n << 8) | *(--p); \ + n = (uint64_t)(((uint64_t)n << 8) | *(--p)); \ (p) += 8; \ } while (0) diff --git a/src/H5private.h b/src/H5private.h index a3b37e5d834..65a99cfc6ed 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -318,6 +318,15 @@ /* limit the middle value to be within a range (inclusive) */ #define RANGE(LO, X, HI) MAX(LO, MIN(X, HI)) +/* Macro for checking if two ranges overlap one another */ +/* + * Check for the inverse of whether the ranges are 
disjoint. If they are + * disjoint, then the low bound of one of the ranges must be greater than the + * high bound of the other. + */ +/* (Assumes that low & high bounds are _inclusive_) */ +#define H5_RANGE_OVERLAP(L1, H1, L2, H2) (!((L1) > (H2) || (L2) > (H1))) + /* absolute value */ #ifndef ABS #define ABS(a) (((a) >= 0) ? (a) : -(a)) @@ -336,9 +345,28 @@ #define H5_EXP2(n) (1 << (n)) /* Check if a read of size bytes starting at ptr would overflow past - * the last valid byte, pointed to by buffer_end. + * the last valid byte, pointed to by buffer_end. Note that 'size' + * is expected to be of type size_t. Providing values of other + * datatypes may cause warnings due to the comparison against + * PTRDIFF_MAX and comparison of < 0 after conversion to ptrdiff_t. + * For the time being, these can be suppressed with + * H5_GCC_CLANG_DIAG_OFF("type-limits")/H5_GCC_CLANG_DIAG_ON("type-limits") */ -#define H5_IS_BUFFER_OVERFLOW(ptr, size, buffer_end) (((ptr) + (size)-1) > (buffer_end)) +/* clang-format off */ +#define H5_IS_BUFFER_OVERFLOW(ptr, size, buffer_end) \ + ( \ + /* Trivial case */ \ + ((size) != 0) && \ + ( \ + /* Bad precondition */ \ + ((ptr) > (buffer_end)) || \ + /* Account for (likely unintentional) negative 'size' */ \ + (((size_t)(size) <= PTRDIFF_MAX) && ((ptrdiff_t)(size) < 0)) || \ + /* Typical overflow */ \ + ((size_t)(size) > (size_t)((((const uint8_t *)buffer_end) - ((const uint8_t *)ptr)) + 1)) \ + ) \ + ) +/* clang-format on */ /* Variant of H5_IS_BUFFER_OVERFLOW, used with functions such as H5Tdecode() * that don't take a size parameter, where we need to skip the bounds checks. @@ -347,7 +375,7 @@ * the entire library. */ #define H5_IS_KNOWN_BUFFER_OVERFLOW(skip, ptr, size, buffer_end) \ - (skip ? false : ((ptr) + (size)-1) > (buffer_end)) + (skip ? false : H5_IS_BUFFER_OVERFLOW(ptr, size, buffer_end)) /* * HDF Boolean type. @@ -451,8 +479,7 @@ (X) >= (Y)) #define H5_addr_cmp(X,Y) (H5_addr_eq((X), (Y)) ? 0 : \ (H5_addr_lt((X), (Y)) ? 
-1 : 1)) -#define H5_addr_overlap(O1,L1,O2,L2) (((O1) < (O2) && ((O1) + (L1)) > (O2)) || \ - ((O1) >= (O2) && (O1) < ((O2) + (L2)))) +#define H5_addr_overlap(O1,L1,O2,L2) H5_RANGE_OVERLAP(O1, ((O1)+(L1)-1), O2, ((O2)+(L2)-1)) /* clang-format on */ /* diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake index ec92a76ad95..4dcbccfd53e 100644 --- a/test/CMakeTests.cmake +++ b/test/CMakeTests.cmake @@ -163,6 +163,8 @@ set (HDF5_REFERENCE_TEST_FILES test_filters_le.h5 th5s.h5 tlayouto.h5 + tmisc38a.h5 + tmisc38b.h5 tmtimen.h5 tmtimeo.h5 tsizeslheap.h5 diff --git a/test/chunk_info.c b/test/chunk_info.c index 9533b2aacee..fba429f2b2b 100644 --- a/test/chunk_info.c +++ b/test/chunk_info.c @@ -120,14 +120,15 @@ static const char *FILENAME[] = {"tchunk_info_earliest", void reinit_vars(unsigned *read_flt_msk, haddr_t *addr, hsize_t *size); /* Helper function containing common code that verifies indexing type - and number of chunks */ -static int verify_idx_nchunks(hid_t dset, hid_t dspace, H5D_chunk_index_t exp_idx_type, + * and number of chunks + */ +static herr_t verify_idx_nchunks(hid_t dset, hid_t dspace, H5D_chunk_index_t exp_idx_type, hsize_t exp_num_chunks); -static int verify_get_chunk_info(hid_t dset, hid_t dspace, hsize_t chk_index, hsize_t exp_chk_size, +static herr_t verify_get_chunk_info(hid_t dset, hid_t dspace, hsize_t chk_index, hsize_t exp_chk_size, const hsize_t *exp_offset, unsigned exp_flt_msk); -static int verify_get_chunk_info_by_coord(hid_t dset, hsize_t *offset, hsize_t exp_chk_size, +static herr_t verify_get_chunk_info_by_coord(hid_t dset, hsize_t *offset, hsize_t exp_chk_size, unsigned exp_flt_msk); -static int verify_empty_chunk_info(hid_t dset, hsize_t *offset); +static herr_t verify_empty_chunk_info(hid_t dset, hsize_t *offset); static const char *index_type_str(H5D_chunk_index_t idx_type); /*------------------------------------------------------------------------- @@ -135,10 +136,7 @@ static const char *index_type_str(H5D_chunk_index_t idx_type); * * Purpose: Wipes out variables for the next use, used in various tests. * - * Return: Won't fail - * - * Date: September 2018 - * + * Return: void *------------------------------------------------------------------------- */ void @@ -158,14 +156,10 @@ reinit_vars(unsigned *read_flt_msk, haddr_t *addr, hsize_t *size) * Purpose: Verifies that H5Dget_chunk_info returns correct * values for a chunk. * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Date: August 2019 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ -static int +static herr_t verify_get_chunk_info(hid_t dset, hid_t dspace, hsize_t chk_index, hsize_t exp_chk_size, const hsize_t *exp_offset, unsigned exp_flt_msk) { @@ -200,14 +194,10 @@ verify_get_chunk_info(hid_t dset, hid_t dspace, hsize_t chk_index, hsize_t exp_c * Purpose: Verifies that H5Dget_chunk_info_by_coord returns correct * values for a chunk. * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Date: August 2019 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ -static int +static herr_t verify_get_chunk_info_by_coord(hid_t dset, hsize_t *offset, hsize_t exp_chk_size, unsigned exp_flt_msk) { uint32_t read_flt_msk = 0; /* Read filter mask */ @@ -237,14 +227,10 @@ verify_get_chunk_info_by_coord(hid_t dset, hsize_t *offset, hsize_t exp_chk_size * Purpose: Verifies that H5Dget_chunk_info_by_coord returns correct * values for an empty chunk. 
* - * Return: Success: SUCCEED - * Failure: FAIL - * - * Date: August 2018 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ -static int +static herr_t verify_empty_chunk_info(hid_t dset, hsize_t *offset) { uint32_t read_flt_msk = 0; /* Read filter mask */ @@ -274,9 +260,6 @@ verify_empty_chunk_info(hid_t dset, hsize_t *offset) * * Return: Success: a valid indexing scheme string * Failure: a note indicating the indexing type is invalid - * - * Date: August 2019 - * *------------------------------------------------------------------------- */ static const char * @@ -297,7 +280,7 @@ index_type_str(H5D_chunk_index_t idx_type) return ("Version 1 B-tree index type (default)"); case H5D_CHUNK_IDX_NTYPES: default: - return ("invalid index type"); + return "invalid index type"; } } /* index_type_str */ @@ -307,14 +290,10 @@ index_type_str(H5D_chunk_index_t idx_type) * Purpose: Reads the chunks within the boundary {start,end} and verify * the values against the populated data. * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Date: August 2019 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ -static int +static herr_t verify_selected_chunks(hid_t dset, hid_t plist, const hsize_t *start, const hsize_t *end) { int read_buf[CHUNK_NX][CHUNK_NY]; @@ -328,14 +307,16 @@ verify_selected_chunks(hid_t dset, hid_t plist, const hsize_t *start, const hsiz memset(&read_buf, 0, sizeof(read_buf)); /* Initialize the array of chunk data for all NUM_CHUNKS chunks, this is - the same as the written data and will be used to verify the read data */ + * the same as the written data and will be used to verify the read data + */ for (n = 0; n < NUM_CHUNKS; n++) for (ii = 0; ii < CHUNK_NX; ii++) for (jj = 0; jj < CHUNK_NY; jj++) expected_buf[n][ii][jj] = (int)(ii * jj) + 1; /* Read each chunk within the boundary of {start,end} and verify the - values against the expected data */ + * values against the expected data + */ chk_index = 0; for (ii = start[0]; ii < end[0]; ii++) for (jj = start[1]; jj < end[1]; jj++, chk_index++) { @@ -369,14 +350,10 @@ verify_selected_chunks(hid_t dset, hid_t plist, const hsize_t *start, const hsiz * a subset of chunks. This function opens the dataset then * closes it after writing. * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Date: August 2019 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ -static int +static herr_t write_selected_chunks(hid_t dset, hid_t plist, const hsize_t *start, const hsize_t *end, unsigned flt_msk) { int direct_buf[NUM_CHUNKS][CHUNK_NX][CHUNK_NY]; /* Data in chunks */ @@ -392,7 +369,8 @@ write_selected_chunks(hid_t dset, hid_t plist, const hsize_t *start, const hsize direct_buf[n][ii][jj] = (int)(ii * jj) + 1; /* Write NUM_CHUNKS_WRITTEN chunks at the following logical coords: - (0,2) (0,3) (1,2) (1,3) */ + * (0,2) (0,3) (1,2) (1,3) + */ chk_index = 0; for (ii = start[0]; ii < end[0]; ii++) for (jj = start[1]; jj < end[1]; jj++, chk_index++) { @@ -414,14 +392,10 @@ write_selected_chunks(hid_t dset, hid_t plist, const hsize_t *start, const hsize * Purpose: Verifies that chunk indexing scheme and number of chunks of * the dataset match the expected values. 
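The verification helpers in this test wrap the chunk query API. A minimal sketch of the underlying call pattern, assuming an open chunked dataset identifier dset (names illustrative):

    hsize_t  nchunks   = 0;
    hsize_t  offset[2] = {0, 0};
    hsize_t  size      = 0;
    unsigned filter_mask = 0;
    haddr_t  addr      = HADDR_UNDEF;

    /* Number of chunks with allocated storage */
    if (H5Dget_num_chunks(dset, H5S_ALL, &nchunks) < 0)
        TEST_ERROR;
    /* Chunk #0 by index: logical offset, filter mask, file address, size */
    if (H5Dget_chunk_info(dset, H5S_ALL, 0, offset, &filter_mask, &addr, &size) < 0)
        TEST_ERROR;
    /* The same chunk, looked up by its logical offset */
    if (H5Dget_chunk_info_by_coord(dset, offset, &filter_mask, &addr, &size) < 0)
        TEST_ERROR;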
* - * Return: Success: SUCCEED - * Failure: FAIL - * - * Date: August 2019 - * + * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ -static int +static herr_t verify_idx_nchunks(hid_t dset, hid_t dspace, H5D_chunk_index_t exp_idx_type, hsize_t exp_num_chunks) { H5D_chunk_index_t idx_type; /* Dataset chunk index type */ @@ -461,8 +435,7 @@ verify_idx_nchunks(hid_t dset, hid_t dspace, H5D_chunk_index_t exp_idx_type, hsi * * Purpose: Test getting various chunk information * - * Return: Success: SUCCEED - * Failure: FAIL + * Return: # of errors * * Note: Note that the dataspace argument in these new functions is * currently not used. The functionality involved the dataspace @@ -472,12 +445,9 @@ verify_idx_nchunks(hid_t dset, hid_t dspace, H5D_chunk_index_t exp_idx_type, hsi * This function tests the new API functions added for EED-343: * H5Dget_num_chunks, H5Dget_chunk_info, and * H5Dget_chunk_info_by_coord for high bound up to 1.8. - * - * Date: September 2018 - * *------------------------------------------------------------------------- */ -static herr_t +static int test_get_chunk_info_highest_v18(hid_t fapl) { char filename[FILENAME_BUF_SIZE]; /* File name */ @@ -517,7 +487,8 @@ test_get_chunk_info_highest_v18(hid_t fapl) h5_fixname(FILENAME[H5F_LIBVER_V18], fapl, filename, sizeof filename); /* Set version bounds for creating the file. High bound to V18 to test - chunked dataset that use B-tree v1 structures to index chunks. */ + * chunked dataset that use B-tree v1 structures to index chunks. + */ if (H5Pset_libver_bounds(fapl, H5F_LIBVER_EARLIEST, H5F_LIBVER_V18) < 0) TEST_ERROR; @@ -589,7 +560,8 @@ test_get_chunk_info_highest_v18(hid_t fapl) #endif /* end H5_HAVE_FILTER_DEFLATE */ /* Write only NUM_CHUNKS_WRITTEN chunks at the following logical coords: - (0,2) (0,3) (1,2) (1,3) */ + * (0,2) (0,3) (1,2) (1,3) + */ n = 0; for (ii = START_CHK_X; ii < END_CHK_X; ii++) for (jj = START_CHK_Y; jj < END_CHK_Y; jj++, n++) { @@ -622,7 +594,8 @@ test_get_chunk_info_highest_v18(hid_t fapl) FAIL_PUTS_ERROR("unexpected number of chunks"); /* Get and verify info of the last written chunk again, passing in H5S_ALL - this time */ + * this time + */ offset[0] = 6; offset[1] = 12; if (verify_get_chunk_info(dset, H5S_ALL, NUM_CHUNKS_WRITTEN - 1, chunk_size, offset, flt_msk) == FAIL) @@ -696,7 +669,8 @@ test_get_chunk_info_highest_v18(hid_t fapl) FAIL_PUTS_ERROR(" Attempt to get info of a non-existing chunk."); /* Attempt to get info of a chunk given its coords from an empty dataset, - should succeed with the returned address as HADDR_UNDEF and size as 0 */ + * should succeed with the returned address as HADDR_UNDEF and size as 0 + */ offset[0] = EMPTY_CHK_X; offset[1] = EMPTY_CHK_Y; if (verify_empty_chunk_info(dset, offset) == FAIL) @@ -710,7 +684,8 @@ test_get_chunk_info_highest_v18(hid_t fapl) ************************************************************************/ /* Set space allocation to early so that chunk query functions will - retrieve chunk information even though the dataset is empty */ + * retrieve chunk information even though the dataset is empty + */ if (H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR; @@ -733,7 +708,8 @@ test_get_chunk_info_highest_v18(hid_t fapl) TEST_ERROR; /* Attempt to get info of a chunk from an empty dataset, verify the - returned address and size in the case of H5D_ALLOC_TIME_EARLY */ + * returned address and size in the case of H5D_ALLOC_TIME_EARLY + */ chk_index = NONEXIST_CHK_INDEX; 
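    /* With H5D_ALLOC_TIME_EARLY all chunks are allocated when the dataset is
     * created, so the queries below are expected to return a defined file
     * address and the full chunk size even though nothing has been written yet.
     */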
reinit_vars(&read_flt_msk, &addr, &size); ret = H5Dget_chunk_info(dset, dspace, chk_index, out_offset, &read_flt_msk, &addr, &size); @@ -758,7 +734,8 @@ test_get_chunk_info_highest_v18(hid_t fapl) TEST_ERROR; /* Attempt to get info of a chunk given its coords from an empty dataset, - verify the returned address and size */ + * verify the returned address and size + */ offset[0] = 0; offset[1] = 0; if (H5Dget_chunk_info_by_coord(dset, offset, &read_flt_msk, &addr, &size) < 0) @@ -781,7 +758,7 @@ test_get_chunk_info_highest_v18(hid_t fapl) TEST_ERROR; PASSED(); - return SUCCEED; + return 0; error: H5E_BEGIN_TRY @@ -793,8 +770,7 @@ test_get_chunk_info_highest_v18(hid_t fapl) } H5E_END_TRY - H5_FAILED(); - return FAIL; + return 1; } /* test_get_chunk_info_highest_v18() */ /*------------------------------------------------------------------------- @@ -803,18 +779,14 @@ test_get_chunk_info_highest_v18(hid_t fapl) * Purpose: Test getting various chunk information when Single Chunk * index type is used * - * Return: Success: SUCCEED - * Failure: FAIL + * Return: # of errors * * Note: Note that the dataspace argument in these new functions are * currently not used. The functionality involved the dataspace * will be implemented in the next version. - * - * Date: November 2018 - * *------------------------------------------------------------------------- */ -static herr_t +static int test_chunk_info_single_chunk(const char *filename, hid_t fapl) { hid_t chunkfile = H5I_INVALID_HID; /* File ID */ @@ -863,8 +835,7 @@ test_chunk_info_single_chunk(const char *filename, hid_t fapl) if (H5Dclose(dset) < 0) TEST_ERROR; - /* ...open it again to test the chunk query functions on a single empty - chunk */ + /* ...open it again to test the chunk query functions on a single empty chunk */ if ((dset = H5Dopen2(chunkfile, SINGLE_CHUNK_DSET_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; @@ -908,7 +879,8 @@ test_chunk_info_single_chunk(const char *filename, hid_t fapl) FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord failed\n"); /* Attempt to get chunk info given an invalid chunk index and verify - * that failure occurs */ + * that failure occurs + */ chk_index = INVALID_CHK_INDEX; reinit_vars(&read_flt_msk, &addr, &size); H5E_BEGIN_TRY @@ -928,7 +900,7 @@ test_chunk_info_single_chunk(const char *filename, hid_t fapl) TEST_ERROR; PASSED(); - return SUCCEED; + return 0; error: H5E_BEGIN_TRY @@ -940,8 +912,7 @@ test_chunk_info_single_chunk(const char *filename, hid_t fapl) } H5E_END_TRY - H5_FAILED(); - return FAIL; + return 1; } /* test_chunk_info_single_chunk() */ /*------------------------------------------------------------------------- @@ -950,18 +921,14 @@ test_chunk_info_single_chunk(const char *filename, hid_t fapl) * Purpose: Test getting various chunk information when Implicit * index type is used * - * Return: Success: SUCCEED - * Failure: FAIL + * Return: # of errors * * Note: Note that the dataspace argument in these new functions are * currently not used. The functionality involved the dataspace * will be implemented in the next version. 
- * - * Date: November 2018 - * *------------------------------------------------------------------------- */ -static herr_t +static int test_chunk_info_implicit(char *filename, hid_t fapl) { hid_t chunkfile = H5I_INVALID_HID; /* File ID */ @@ -1016,7 +983,8 @@ test_chunk_info_implicit(char *filename, hid_t fapl) FAIL_PUTS_ERROR("Verification and write failed\n"); /* Write NUM_CHUNKS_WRITTEN chunks at the following logical coords: - (0,2) (0,3) (1,2) (1,3) */ + * (0,2) (0,3) (1,2) (1,3) + */ if (write_selected_chunks(dset, H5P_DEFAULT, start, end, flt_msk) == FAIL) FAIL_PUTS_ERROR("Writing to selected chunks failed\n"); @@ -1030,8 +998,9 @@ test_chunk_info_implicit(char *filename, hid_t fapl) FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info failed\n"); /* Get info of a chunk and verify its information. Note that - all chunks in this dataset are allocated because of the property - H5D_ALLOC_TIME_EARLY */ + * all chunks in this dataset are allocated because of the property + * H5D_ALLOC_TIME_EARLY + */ if (verify_get_chunk_info_by_coord(dset, offset, CHK_SIZE, flt_msk) == FAIL) FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord failed\n"); } @@ -1047,7 +1016,7 @@ test_chunk_info_implicit(char *filename, hid_t fapl) TEST_ERROR; PASSED(); - return SUCCEED; + return 0; error: H5E_BEGIN_TRY @@ -1059,8 +1028,7 @@ test_chunk_info_implicit(char *filename, hid_t fapl) } H5E_END_TRY - H5_FAILED(); - return FAIL; + return 1; } /* test_chunk_info_implicit() */ /*------------------------------------------------------------------------- @@ -1069,18 +1037,14 @@ test_chunk_info_implicit(char *filename, hid_t fapl) * Purpose: Test getting various chunk information when Fixed Array * index type is used * - * Return: Success: SUCCEED - * Failure: FAIL + * Return: # of errors * * Note: Note that the dataspace argument in these new functions are * currently not used. The functionality involved the dataspace * will be implemented in the next version. 
- * - * Date: November 2018 - * *------------------------------------------------------------------------- */ -static herr_t +static int test_chunk_info_fixed_array(const char *filename, hid_t fapl) { hid_t chunkfile = H5I_INVALID_HID; /* File ID */ @@ -1138,7 +1102,8 @@ test_chunk_info_fixed_array(const char *filename, hid_t fapl) FAIL_PUTS_ERROR("Verification and write failed\n"); /* Write NUM_CHUNKS_WRITTEN chunks at the following logical coords: - (0,2) (0,3) (1,2) (1,3) */ + * (0,2) (0,3) (1,2) (1,3) + */ if (write_selected_chunks(dset, H5P_DEFAULT, start, end, flt_msk) == FAIL) FAIL_PUTS_ERROR("Writing to selected chunks failed\n"); @@ -1180,17 +1145,18 @@ test_chunk_info_fixed_array(const char *filename, hid_t fapl) /* Read and verify values of selected chunks */ if (verify_selected_chunks(dset, H5P_DEFAULT, start, end) < 0) + FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord on selected chunks failed\n"); - /* Release resource */ - if (H5Dclose(dset) < 0) - TEST_ERROR; + /* Release resource */ + if (H5Dclose(dset) < 0) + TEST_ERROR; if (H5Sclose(dspace) < 0) TEST_ERROR; if (H5Fclose(chunkfile) < 0) TEST_ERROR; PASSED(); - return SUCCEED; + return 0; error: H5E_BEGIN_TRY @@ -1202,8 +1168,7 @@ test_chunk_info_fixed_array(const char *filename, hid_t fapl) } H5E_END_TRY - H5_FAILED(); - return FAIL; + return 1; } /* test_chunk_info_fixed_array() */ /*------------------------------------------------------------------------- @@ -1212,18 +1177,14 @@ test_chunk_info_fixed_array(const char *filename, hid_t fapl) * Purpose: Test getting various chunk information when Extensible Array * index type is used * - * Return: Success: SUCCEED - * Failure: FAIL + * Return: # of errors * * Note: Note that the dataspace argument in these new functions are * currently not used. The functionality involved the dataspace * will be implemented in the next version. - * - * Date: November 2018 - * *------------------------------------------------------------------------- */ -static herr_t +static int test_chunk_info_extensible_array(const char *filename, hid_t fapl) { hid_t chunkfile = H5I_INVALID_HID; /* File ID */ @@ -1282,7 +1243,8 @@ test_chunk_info_extensible_array(const char *filename, hid_t fapl) FAIL_PUTS_ERROR("Verification and write failed\n"); /* Write NUM_CHUNKS_WRITTEN chunks at the following logical coords: - (0,2) (0,3) (1,2) (1,3) */ + * (0,2) (0,3) (1,2) (1,3) + */ if (write_selected_chunks(dset, H5P_DEFAULT, start, end, flt_msk) == FAIL) FAIL_PUTS_ERROR("Writing to selected chunks failed\n"); @@ -1339,7 +1301,7 @@ test_chunk_info_extensible_array(const char *filename, hid_t fapl) TEST_ERROR; PASSED(); - return SUCCEED; + return 0; error: H5E_BEGIN_TRY @@ -1351,8 +1313,7 @@ test_chunk_info_extensible_array(const char *filename, hid_t fapl) } H5E_END_TRY - H5_FAILED(); - return FAIL; + return 1; } /* test_chunk_info_extensible_array() */ /*------------------------------------------------------------------------- @@ -1361,18 +1322,14 @@ test_chunk_info_extensible_array(const char *filename, hid_t fapl) * Purpose: Test getting various chunk information when Version 2 B-trees * index type is used * - * Return: Success: SUCCEED - * Failure: FAIL + * Return: # of errors * * Note: Note that the dataspace argument in these new functions are * currently not used. The functionality involved the dataspace * will be implemented in the next version. 
- * - * Date: November 2018 - * *------------------------------------------------------------------------- */ -static herr_t +static int test_chunk_info_version2_btrees(const char *filename, hid_t fapl) { hid_t chunkfile = H5I_INVALID_HID; /* File ID */ @@ -1431,7 +1388,8 @@ test_chunk_info_version2_btrees(const char *filename, hid_t fapl) FAIL_PUTS_ERROR("Verification and write failed\n"); /* Write NUM_CHUNKS_WRITTEN chunks at the following logical coords: - (0,2) (0,3) (1,2) (1,3) */ + * (0,2) (0,3) (1,2) (1,3) + */ if (write_selected_chunks(dset, H5P_DEFAULT, start, end, flt_msk) == FAIL) FAIL_PUTS_ERROR("Writing to selected chunks failed\n"); @@ -1488,7 +1446,7 @@ test_chunk_info_version2_btrees(const char *filename, hid_t fapl) TEST_ERROR; PASSED(); - return SUCCEED; + return 0; error: H5E_BEGIN_TRY @@ -1500,8 +1458,7 @@ test_chunk_info_version2_btrees(const char *filename, hid_t fapl) } H5E_END_TRY - H5_FAILED(); - return FAIL; + return 1; } /* test_chunk_info_version2_btrees() */ typedef struct chunk_iter_info_t { @@ -1557,18 +1514,14 @@ iter_cb_fail(const hsize_t H5_ATTR_UNUSED *offset, unsigned H5_ATTR_UNUSED filte * Purpose: Tests basic operations to ensure the chunk query functions * work properly. * - * Return: Success: SUCCEED - * Failure: FAIL + * Return: # of errors * * Note: Note that the dataspace argument in these new functions are * currently not used. The functionality involved the dataspace * will be implemented in the next version. - * - * Date: August 2019 - * *------------------------------------------------------------------------- */ -static herr_t +static int test_basic_query(hid_t fapl) { char filename[FILENAME_BUF_SIZE]; /* File name */ @@ -1757,7 +1710,7 @@ test_basic_query(hid_t fapl) HDremove(filename); PASSED(); - return SUCCEED; + return 0; error: H5E_BEGIN_TRY @@ -1769,8 +1722,7 @@ test_basic_query(hid_t fapl) } H5E_END_TRY - H5_FAILED(); - return FAIL; + return 1; } /* test_basic_query() */ /*------------------------------------------------------------------------- @@ -1778,18 +1730,14 @@ test_basic_query(hid_t fapl) * * Purpose: Test attempting to use chunk query functions incorrectly. * - * Return: Success: SUCCEED - * Failure: FAIL + * Return: # of errors * * Note: Note that the dataspace argument in these new functions are * currently not used. The functionality involved the dataspace * will be implemented in the next version. - * - * Date: August 2019 - * *------------------------------------------------------------------------- */ -static herr_t +static int test_failed_attempts(const char *filename, hid_t fapl) { hid_t chunkfile = H5I_INVALID_HID; /* File ID */ @@ -1881,7 +1829,7 @@ test_failed_attempts(const char *filename, hid_t fapl) TEST_ERROR; PASSED(); - return SUCCEED; + return 0; error: H5E_BEGIN_TRY @@ -1892,8 +1840,7 @@ test_failed_attempts(const char *filename, hid_t fapl) } H5E_END_TRY - H5_FAILED(); - return FAIL; + return 1; } /* test_failed_attempts() */ /*------------------------------------------------------------------------- @@ -1901,8 +1848,7 @@ test_failed_attempts(const char *filename, hid_t fapl) * * Purpose: Test getting various chunk information in version 1.10. * - * Return: Success: SUCCEED - * Failure: FAIL + * Return: # of errors * * Note: Note that the dataspace argument in these new functions are * currently not used. 
The functionality involved the dataspace @@ -1912,12 +1858,9 @@ test_failed_attempts(const char *filename, hid_t fapl) * This function tests the new API functions added for HDFFV-10677: * H5Dget_num_chunks, H5Dget_chunk_info, and * H5Dget_chunk_info_by_coord for low bound beyond 1.8. - * - * Date: October 2018 - * *------------------------------------------------------------------------- */ -static herr_t +static int test_get_chunk_info_v110(hid_t fapl) { char filename[FILENAME_BUF_SIZE]; /* File name */ @@ -1973,26 +1916,21 @@ test_get_chunk_info_v110(hid_t fapl) } /* for low libver bound */ - return SUCCEED; + return 0; error: - H5_FAILED(); - return FAIL; + return 1; } /* test_get_chunk_info_v110() */ /*------------------------------------------------------------------------- * Function: test_flt_msk_with_skip_compress * - * Purpose: Test getting chunk info when compression filter is skipped. - * - * Return: Success: SUCCEED - * Failure: FAIL - * - * Date: August 2019 (based on direct_chunk.c/test_skip_compress_write1) + * Purpose: Test getting chunk info when compression filter is skipped * + * Return: # of errors *------------------------------------------------------------------------- */ -static herr_t +static int test_flt_msk_with_skip_compress(hid_t fapl) { char filename[FILENAME_BUF_SIZE]; /* File name */ @@ -2166,7 +2104,7 @@ test_flt_msk_with_skip_compress(hid_t fapl) HDremove(filename); PASSED(); - return SUCCEED; + return 0; error: H5E_BEGIN_TRY @@ -2180,17 +2118,325 @@ test_flt_msk_with_skip_compress(hid_t fapl) } H5E_END_TRY - H5_FAILED(); - return FAIL; + return 1; } /* test_flt_msk_with_skip_compress() */ +#define UBLOCK_FILE_NAME "file_with_userblock.h5" +#define NO_UBLOCK_FILE_NAME "file_without_userblock.h5" +#define UBLOCK_DSET_NAME "ublock_dset" +#define UBLOCK_SIZE 2048 + +/* Helper function to create userblock files and datasets */ +static herr_t +create_userblock_file(const char *filename, hid_t fcpl_id, hid_t fapl_id) +{ + hid_t fid = H5I_INVALID_HID; + hid_t did = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + + /* The chunk size is set to 1 so we get a lot of chunks without + * writing a lot of data. 
+ */ + int rank = 1; + hsize_t dims = {256}; + hsize_t chunk_dims = {1}; + + int *data = NULL; + + /* Create a new file */ + if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, fapl_id)) < 0) + TEST_ERROR; + + /* Create file data space for the dataset */ + if ((sid = H5Screate_simple(rank, &dims, &dims)) < 0) + TEST_ERROR; + + /* Create dataset create property list with chunking */ + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + if (H5Pset_chunk(dcpl_id, rank, &chunk_dims) < 0) + TEST_ERROR; + + /* Create a new dataset */ + if ((did = H5Dcreate2(fid, UBLOCK_DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Create some arbitrary data */ + if (NULL == (data = (int *)malloc(256 * sizeof(int)))) + TEST_ERROR; + for (int i = 0; i < 256; i++) + data[i] = i; + + /* Write the data to the dataset */ + if (H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0) + TEST_ERROR; + + /* Close everything */ + if (H5Pclose(dcpl_id) < 0) + TEST_ERROR; + if (H5Dclose(did) < 0) + TEST_ERROR; + if (H5Sclose(sid) < 0) + TEST_ERROR; + if (H5Fclose(fid) < 0) + TEST_ERROR; + + free(data); + + return SUCCEED; +error: + H5E_BEGIN_TRY + { + H5Pclose(dcpl_id); + H5Dclose(did); + H5Sclose(sid); + H5Fclose(fid); + } + H5E_END_TRY + + free(data); + + return FAIL; +} + +/* op_data for the userblock iterator */ +struct ub_op_data { + haddr_t *addresses; + hsize_t i; + hsize_t max; +}; + +/* Callback function for iterating over dataset chunks is files both with + * and without a userblock + */ +static int +ublock_iter_cb(const hsize_t H5_ATTR_UNUSED *offset, unsigned H5_ATTR_UNUSED filter_mask, haddr_t addr, + hsize_t H5_ATTR_UNUSED size, void *op_data) +{ + struct ub_op_data *od = (struct ub_op_data *)op_data; + + /* Error if we try to iterate over too many chunks */ + if (od->i == od->max) + return H5_ITER_ERROR; + + /* Store the address for later comparison */ + od->addresses[od->i] = addr; + od->i += 1; + + return H5_ITER_CONT; +} + +/*------------------------------------------------------------------------- + * Function: test_chunk_address_with_userblock + * + * Purpose: Test that chunk addresses are correct when a file has + * a userblock + * + * Return: # of errors + *------------------------------------------------------------------------- + */ +static int +test_chunk_address_with_userblock(hid_t fapl_id) +{ + hid_t fid = H5I_INVALID_HID; + hid_t fid_ub = H5I_INVALID_HID; + hid_t did = H5I_INVALID_HID; + hid_t did_ub = H5I_INVALID_HID; + hid_t fcpl_id = H5I_INVALID_HID; + + hsize_t num_chunks = HSIZE_UNDEF; + hsize_t num_chunks_ub = 0; + + haddr_t *addresses = NULL; + haddr_t *addresses_ub = NULL; + + struct ub_op_data od; + struct ub_op_data od_ub; + + int fd = -1; + int fd_ub = -1; + + bool default_vfd_compatible; + + TESTING("if chunk addresses are correct when a file has a userblock"); + + if (h5_driver_is_default_vfd_compatible(fapl_id, &default_vfd_compatible) < 0) + TEST_ERROR; + if (!default_vfd_compatible) { + puts(" -- SKIPPED for incompatible VFD --"); + return 0; + } + + /* Create files with and without a userblock */ + if (create_userblock_file(NO_UBLOCK_FILE_NAME, H5P_DEFAULT, fapl_id) < 0) + TEST_ERROR; + + if ((fcpl_id = H5Pcreate(H5P_FILE_CREATE)) == H5I_INVALID_HID) + TEST_ERROR; + if (H5Pset_userblock(fcpl_id, UBLOCK_SIZE) < 0) + TEST_ERROR; + + if (create_userblock_file(UBLOCK_FILE_NAME, fcpl_id, fapl_id) < 0) + TEST_ERROR; + + /* Open both files and datasets */ + if ((fid = H5Fopen(NO_UBLOCK_FILE_NAME, 
H5F_ACC_RDONLY, fapl_id)) == H5I_INVALID_HID) + TEST_ERROR; + if ((did = H5Dopen2(fid, UBLOCK_DSET_NAME, H5P_DEFAULT)) == H5I_INVALID_HID) + TEST_ERROR; + if ((fid_ub = H5Fopen(UBLOCK_FILE_NAME, H5F_ACC_RDONLY, fapl_id)) == H5I_INVALID_HID) + TEST_ERROR; + if ((did_ub = H5Dopen2(fid_ub, UBLOCK_DSET_NAME, H5P_DEFAULT)) == H5I_INVALID_HID) + TEST_ERROR; + + /* Get the number of chunks */ + if (H5Dget_num_chunks(did, H5S_ALL, &num_chunks) < 0) + TEST_ERROR; + if (H5Dget_num_chunks(did_ub, H5S_ALL, &num_chunks_ub) < 0) + TEST_ERROR; + + if (num_chunks != num_chunks_ub) + TEST_ERROR; + + /* Check the chunk information to make sure that the userblock file takes + * the block's size into account. + */ + for (hsize_t i = 0; i < num_chunks; i++) { + haddr_t addr = HADDR_UNDEF; + haddr_t addr_ub = 0; + + /* H5Dget_chunk_info() */ + if (H5Dget_chunk_info(did, H5S_ALL, i, NULL, NULL, &addr, NULL) < 0) + TEST_ERROR; + if (H5Dget_chunk_info(did_ub, H5S_ALL, i, NULL, NULL, &addr_ub, NULL) < 0) + TEST_ERROR; + + if (addr + UBLOCK_SIZE != addr_ub) + TEST_ERROR; + + addr = HADDR_UNDEF; + addr_ub = 0; + + /* H5Dget_chunk_info_by_coord() */ + if (H5Dget_chunk_info_by_coord(did, &i, NULL, &addr, NULL) < 0) + TEST_ERROR; + if (H5Dget_chunk_info_by_coord(did_ub, &i, NULL, &addr_ub, NULL) < 0) + TEST_ERROR; + + if (addr + UBLOCK_SIZE != addr_ub) + TEST_ERROR; + } + + /* Allocate arrays to hold the chunk addresses */ + if (NULL == (addresses = (haddr_t *)calloc(num_chunks, sizeof(haddr_t)))) + TEST_ERROR; + if (NULL == (addresses_ub = (haddr_t *)calloc(num_chunks, sizeof(haddr_t)))) + TEST_ERROR; + + od.addresses = addresses; + od.i = 0; + od.max = num_chunks; + + od_ub.addresses = addresses_ub; + od_ub.i = 0; + od_ub.max = num_chunks; + + /* Iterate over the chunks, storing the chunk addresses */ + if (H5Dchunk_iter(did, H5P_DEFAULT, ublock_iter_cb, &od) < 0) + TEST_ERROR; + if (H5Dchunk_iter(did_ub, H5P_DEFAULT, ublock_iter_cb, &od_ub) < 0) + TEST_ERROR; + + /* Compare the chunk addresses to ensure the userblock file takes the + * chunk's size into account. 
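Chunk addresses reported by the query and iteration functions are absolute file offsets, so a userblock shifts every address by its size. A minimal sketch of creating such a file (file name illustrative; the userblock size must be 0 or a power of two of at least 512 bytes):

    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
    H5Pset_userblock(fcpl, 2048);   /* reserve a 2048-byte userblock at the start of the file */
    hid_t fid = H5Fcreate("with_userblock.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
    /* ... create and write a chunked dataset ... */
    /* A chunk at haddr A in an otherwise identical file without a userblock is
     * expected at A + 2048 here, which is what the comparisons below verify. */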
+ */ + if (od.i != od_ub.i) + TEST_ERROR; + for (hsize_t i = 0; i < num_chunks; i++) + if (od.addresses[i] + UBLOCK_SIZE != od_ub.addresses[i]) + TEST_ERROR; + + /* Compare the raw chunk data */ + if ((fd = HDopen(NO_UBLOCK_FILE_NAME, O_RDONLY)) < 0) + TEST_ERROR; + if ((fd_ub = HDopen(UBLOCK_FILE_NAME, O_RDONLY)) < 0) + TEST_ERROR; + + for (hsize_t i = 0; i < num_chunks; i++) { + int data = -1; + int data_ub = -1; + + if (HDlseek(fd, (off_t)(od.addresses[i]), SEEK_SET) < 0) + TEST_ERROR; + if (HDlseek(fd_ub, (off_t)(od_ub.addresses[i]), SEEK_SET) < 0) + TEST_ERROR; + + if (HDread(fd, &data, sizeof(int)) != sizeof(int)) + TEST_ERROR; + if (HDread(fd_ub, &data_ub, sizeof(int)) != sizeof(int)) + TEST_ERROR; + + if (data != data_ub) + TEST_ERROR; + } + + HDclose(fd); + fd = -1; + HDclose(fd_ub); + fd_ub = -1; + + /* Close everything */ + if (H5Pclose(fcpl_id) < 0) + TEST_ERROR; + if (H5Dclose(did) < 0) + TEST_ERROR; + if (H5Dclose(did_ub) < 0) + TEST_ERROR; + if (H5Fclose(fid) < 0) + TEST_ERROR; + if (H5Fclose(fid_ub) < 0) + TEST_ERROR; + + free(addresses); + free(addresses_ub); + + if (H5Fdelete(UBLOCK_FILE_NAME, fapl_id) < 0) + TEST_ERROR; + if (H5Fdelete(NO_UBLOCK_FILE_NAME, fapl_id) < 0) + TEST_ERROR; + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(fcpl_id); + H5Dclose(did); + H5Dclose(did_ub); + H5Fclose(fid); + H5Fclose(fid_ub); + } + H5E_END_TRY + + if (fd >= 0) + HDclose(fd); + if (fd_ub >= 0) + HDclose(fd_ub); + + free(addresses); + free(addresses_ub); + + return 1; +} /* test_chunk_address_with_userblock() */ + /*------------------------------------------------------------------------- * Function: main * * Purpose: Tests functions related to chunk information * * Return: EXIT_SUCCESS/EXIT_FAILURE - * *------------------------------------------------------------------------- */ int @@ -2203,19 +2449,22 @@ main(void) /* Create a copy of file access property list */ if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) - TEST_ERROR; + goto error; /* Test basic operations on the chunk query functions */ - nerrors += test_basic_query(fapl) < 0 ? 1 : 0; + nerrors += test_basic_query(fapl); /* Tests getting chunk information of version 1.8 and prior */ - nerrors += test_get_chunk_info_highest_v18(fapl) < 0 ? 1 : 0; + nerrors += test_get_chunk_info_highest_v18(fapl); /* Tests getting chunk information of version 1.10 */ - nerrors += test_get_chunk_info_v110(fapl) < 0 ? 1 : 0; + nerrors += test_get_chunk_info_v110(fapl); /* Tests getting filter mask when compression filter is skipped */ - nerrors += test_flt_msk_with_skip_compress(fapl) < 0 ? 1 : 0; + nerrors += test_flt_msk_with_skip_compress(fapl); + + /* Test that chunk addresses are correct when files have a userblock */ + nerrors += test_chunk_address_with_userblock(fapl); if (nerrors) goto error; @@ -2227,14 +2476,13 @@ main(void) return EXIT_SUCCESS; error: + H5E_BEGIN_TRY + { + H5Pclose(fapl); + } + H5E_END_TRY + nerrors = MAX(1, nerrors); printf("***** %d QUERY CHUNK INFO TEST%s FAILED! *****\n", nerrors, 1 == nerrors ? 
"" : "S"); return EXIT_FAILURE; } - -/**************************************************************************** - Additional tests to be added: -- do the query when extending the dataset (shrink or expand) -- verify that invalid input parameters are handled properly - -****************************************************************************/ diff --git a/test/cmpd_dtransform.c b/test/cmpd_dtransform.c index 8fd3788a9ab..be05b008969 100644 --- a/test/cmpd_dtransform.c +++ b/test/cmpd_dtransform.c @@ -30,7 +30,8 @@ main(void) { hsize_t dima[] = {1}; hsize_t dims[] = {LENGTH}; - hid_t str_dtyp_id = H5I_INVALID_HID, att_dtyp_id = H5I_INVALID_HID; + hid_t str_dtyp_id = H5I_INVALID_HID; + hid_t att_dtyp_id = H5I_INVALID_HID; hid_t file_id = H5I_INVALID_HID; hid_t fspace_id = H5I_INVALID_HID; hid_t dset_id = H5I_INVALID_HID; @@ -43,54 +44,58 @@ main(void) att_t *atts = NULL; att_t *atts_res = NULL; + printf("Testing writing compound attributes followed by data w/ transform.\n"); + + TESTING("data types are reset properly"); + /* Compound datatype */ - if (NULL == (atts = malloc(sizeof(att_t)))) + if (NULL == (atts = (att_t *)calloc(1, sizeof(att_t)))) TEST_ERROR; strcpy(atts[0].name, "Name"); strcpy(atts[0].unit, "Unit"); /* String type */ if ((str_dtyp_id = H5Tcopy(H5T_C_S1)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tset_size(str_dtyp_id, 64) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Attribute type */ if ((att_dtyp_id = H5Tcreate(H5T_COMPOUND, sizeof(att_t))) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(att_dtyp_id, "NAME", HOFFSET(att_t, name), str_dtyp_id) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tinsert(att_dtyp_id, "UNIT", HOFFSET(att_t, unit), str_dtyp_id) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create file. */ if ((file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create file dataspace. */ if ((fspace_id = H5Screate_simple(1, dims, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create dataset. 
*/ if ((dset_id = H5Dcreate2(file_id, "test_dset", H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Write the attribute (compound) to the dataset */ if ((att_dspc_id = H5Screate_simple(1, dima, NULL)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if ((att_attr_id = H5Acreate2(dset_id, "ATTRIBUTES", att_dtyp_id, att_dspc_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Awrite(att_attr_id, att_dtyp_id, atts) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Create dataset transfer property list */ if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Pset_data_transform(dxpl_id, expr) < 0) { printf("**** ERROR: H5Pset_data_transform (expression: %s) ****\n", expr); - FAIL_STACK_ERROR; + TEST_ERROR; } if (NULL == (data = malloc(LENGTH * sizeof(int)))) @@ -104,13 +109,13 @@ main(void) /* Write the data */ if (H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, data) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Read attribute */ if (NULL == (atts_res = malloc(sizeof(att_t)))) TEST_ERROR; if (H5Aread(att_attr_id, att_dtyp_id, atts_res) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify attribute */ if (strcmp(atts_res[0].name, atts[0].name) != 0) @@ -120,37 +125,40 @@ main(void) /* Read the data */ if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; /* Verify data */ for (unsigned idx = 0; idx < LENGTH; idx++) if (data[idx] != data_res[idx]) TEST_ERROR; - free(atts); - free(atts_res); - free(data); - free(data_res); - /* Close all identifiers. */ if (H5Pclose(dxpl_id) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Aclose(att_attr_id) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(att_dspc_id) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Dclose(dset_id) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Sclose(fspace_id) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Fclose(file_id) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(att_dtyp_id) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Tclose(str_dtyp_id) < 0) - FAIL_STACK_ERROR; + TEST_ERROR; - return 0; + free(atts); + free(atts_res); + free(data); + free(data_res); + + HDremove(FILENAME); + + PASSED(); + return EXIT_SUCCESS; error: H5E_BEGIN_TRY @@ -166,14 +174,10 @@ main(void) } H5E_END_TRY - if (atts) - free(atts); - if (atts_res) - free(atts_res); - if (data) - free(data); - if (data_res) - free(data_res); + free(atts); + free(atts_res); + free(data); + free(data_res); - return 1; + return EXIT_FAILURE; } diff --git a/test/dt_arith.c b/test/dt_arith.c index 3c2189650dd..83d64bcef00 100644 --- a/test/dt_arith.c +++ b/test/dt_arith.c @@ -878,7 +878,7 @@ test_particular_fp_integer(void) /*------------------------------------------------------------------------- * Function: test_derived_flt * - * Purpose: Tests user-define and query functions of floating-point types. + * Purpose: Tests user-defined and query functions of floating-point types. 
* * Return: Success: 0 * @@ -903,7 +903,7 @@ test_derived_flt(void) char str[256]; /*message string */ unsigned int i, j; - TESTING("user-define and query functions of floating-point types"); + TESTING("user-defined and query functions of floating-point types"); /* Create File */ h5_fixname(FILENAME[0], H5P_DEFAULT, filename, sizeof filename); @@ -1324,7 +1324,7 @@ test_derived_flt(void) /*------------------------------------------------------------------------- * Function: test_derived_integer * - * Purpose: Tests user-define and query functions of integer types. + * Purpose: Tests user-defined and query functions of integer types. * * Return: Success: 0 * @@ -1347,7 +1347,7 @@ test_derived_integer(void) char str[256]; /*message string */ unsigned int i, j; - TESTING("user-define and query functions of integer types"); + TESTING("user-defined and query functions of integer types"); /* Create File */ h5_fixname(FILENAME[1], H5P_DEFAULT, filename, sizeof filename); @@ -5990,11 +5990,11 @@ main(void) /* Test H5Tcompiler_conv() for querying hard conversion. */ nerrors += (unsigned long)test_hard_query(); - /* Test user-define, query functions and software conversion + /* Test user-defined, query functions and software conversion * for user-defined floating-point types */ nerrors += (unsigned long)test_derived_flt(); - /* Test user-define, query functions and software conversion + /* Test user-defined, query functions and software conversion * for user-defined integer types */ nerrors += (unsigned long)test_derived_integer(); diff --git a/test/h5test.c b/test/h5test.c index fcba51fa2fc..b6fc4c86953 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -843,13 +843,13 @@ h5_get_vfd_fapl(hid_t fapl) } else if (!strcmp(tok, "multi")) { /* Multi-file driver, general case of the split driver */ - H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; - hid_t memb_fapl[H5FD_MEM_NTYPES]; - const char *memb_name[H5FD_MEM_NTYPES]; - char *sv[H5FD_MEM_NTYPES]; - haddr_t memb_addr[H5FD_MEM_NTYPES]; - H5FD_mem_t mt; - const int multi_memname_maxlen = 1024; + H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; + hid_t memb_fapl[H5FD_MEM_NTYPES]; + const char *memb_name[H5FD_MEM_NTYPES]; + char *sv[H5FD_MEM_NTYPES]; + haddr_t memb_addr[H5FD_MEM_NTYPES]; + H5FD_mem_t mt; + const size_t multi_memname_maxlen = 1024; memset(memb_map, 0, sizeof(memb_map)); memset(memb_fapl, 0, sizeof(memb_fapl)); diff --git a/test/testfiles/tmisc38a.h5 b/test/testfiles/tmisc38a.h5 new file mode 100644 index 00000000000..cf516f0a2ab Binary files /dev/null and b/test/testfiles/tmisc38a.h5 differ diff --git a/test/testfiles/tmisc38b.h5 b/test/testfiles/tmisc38b.h5 new file mode 100644 index 00000000000..7f58280ac8d Binary files /dev/null and b/test/testfiles/tmisc38b.h5 differ diff --git a/test/tmisc.c b/test/tmisc.c index e08d76cb7ac..a9d94a5ec97 100644 --- a/test/tmisc.c +++ b/test/tmisc.c @@ -337,9 +337,19 @@ typedef struct { See https://nvd.nist.gov/vuln/detail/CVE-2020-10812 */ #define CVE_2020_10812_FILENAME "cve_2020_10812.h5" -#define MISC38_FILE "type_conversion_path_table_issue.h5" -#define MISC39_FILE "set_est_link_info.h5" -#define MISC40_FILE "obj_props_intermediate.h5" +/* Definitions for misc. 
test #38 */ +#define MISC38A_FILE "tmisc38a.h5" +#define MISC38A_DSETNAME "Fletcher_float_data_be" +#define MISC38B_FILE "tmisc38b.h5" +#define MISC38B_DSETNAME "unusual_datatype" +#define MISC38C_FILE "tmisc38c.h5" +#define MISC38C_DSETNAME "dset_unusual_datatype" +#define MISC38C_TYPENAME "type_unusual_datatype" +#define MISC38C_ATTRNAME "attr_unusual_datatype" + +#define MISC39_FILE "type_conversion_path_table_issue.h5" +#define MISC40_FILE "set_est_link_info.h5" +#define MISC41_FILE "obj_props_intermediate.h5" /**************************************************************** ** @@ -3972,7 +3982,7 @@ test_misc21(void) /* Allocate space for the buffer */ buf = (char *)calloc(MISC21_SPACE_DIM0 * MISC21_SPACE_DIM1, 1); - CHECK(buf, NULL, "calloc"); + CHECK_PTR(buf, "calloc"); /* Create the file */ fid = H5Fcreate(MISC21_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); @@ -4025,6 +4035,7 @@ test_misc21(void) static void test_misc22(void) { + hid_t fapl; /* File access property list */ hid_t fid, sid, dcpl, dsid, dcpl2; char *buf; hsize_t dims[2] = {MISC22_SPACE_DIM0, MISC22_SPACE_DIM1}, @@ -4057,12 +4068,24 @@ test_misc22(void) /* Allocate space for the buffer */ buf = (char *)calloc(MISC22_SPACE_DIM0 * MISC22_SPACE_DIM1, 8); - CHECK(buf, NULL, "calloc"); + CHECK_PTR(buf, "calloc"); + + /* Create a file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, FAIL, "H5Pcreate"); + + /* Set property to allow unusual datatypes to be created */ + ret = H5Pset_relax_file_integrity_checks(fapl, H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS); + CHECK(ret, FAIL, "H5Pset_relax_file_integrity_checks"); /* Create the file */ - fid = H5Fcreate(MISC22_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + fid = H5Fcreate(MISC22_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(fid, FAIL, "H5Fcreate"); + /* Close file access property list */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + /* Create the dataspace for the dataset */ sid = H5Screate_simple(MISC22_SPACE_RANK, dims, NULL); CHECK(sid, FAIL, "H5Screate_simple"); @@ -6264,6 +6287,223 @@ test_misc37(void) /**************************************************************** ** ** test_misc38(): +** Test for seg fault issue when opening dataset with corrupted +** object header. 
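This test leans on the "relax file integrity checks" file-access property exercised throughout the patch. A minimal sketch of opening a file containing such an unusual datatype (error handling omitted):

    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    /* Allow numeric datatypes with unusually many unused bits to be opened */
    H5Pset_relax_file_integrity_checks(fapl, H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS);
    hid_t fid = H5Fopen("tmisc38b.h5", H5F_ACC_RDONLY, fapl);
    /* Without the flag, opening the dataset with the unusual datatype is
     * expected to fail gracefully instead of crashing. */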
+** +****************************************************************/ +static void +test_misc38(void) +{ + const char *testfile = H5_get_srcdir_filename(MISC38A_FILE); /* Corrected test file name */ + const char *testfile2 = H5_get_srcdir_filename(MISC38B_FILE); /* Corrected test file name */ + bool driver_is_default_compatible; + hid_t fapl = H5I_INVALID_HID; /* File access property list */ + hid_t fid = H5I_INVALID_HID; /* File ID */ + hid_t did = H5I_INVALID_HID; /* Dataset ID */ + hid_t sid = H5I_INVALID_HID; /* Dataspace ID */ + hid_t tid = H5I_INVALID_HID; /* Datatype ID */ + hid_t gid = H5I_INVALID_HID; /* Group ID */ + hid_t aid = H5I_INVALID_HID; /* Attribute ID */ + size_t type_size; /* Size of dataset's datatype */ + uint64_t rfic_flags; /* Value of RFIC flags property for FAPL & file */ + herr_t ret; + + /* Output message about test being performed */ + MESSAGE(5, ("Fix for detecting numeric datatypes with unusually large numbers of unused bits")); + + ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible); + CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible"); + + if (!driver_is_default_compatible) { + printf("-- SKIPPED --\n"); + return; + } + + fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); + + /* This should fail due to the illegal datatype encoding in the corrupted + * object header. + * It should fail gracefully and not seg fault + */ + H5E_BEGIN_TRY + { + did = H5Dopen2(fid, MISC38A_DSETNAME, H5P_DEFAULT); + } + H5E_END_TRY + VERIFY(did, H5I_INVALID_HID, "H5Dopen2"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Create a file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, H5I_INVALID_HID, "H5Pcreate"); + + /* Get property to allow unusual datatypes to be opened */ + rfic_flags = H5F_RFIC_ALL; + ret = H5Pget_relax_file_integrity_checks(fapl, &rfic_flags); + CHECK(ret, FAIL, "H5Pget_relax_file_integrity_checks"); + VERIFY(rfic_flags, 0, "H5Pget_relax_file_integrity_checks"); + + /* Set property to allow unusual datatypes to be opened */ + ret = H5Pset_relax_file_integrity_checks(fapl, H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS); + CHECK(ret, FAIL, "H5Pset_relax_file_integrity_checks"); + + /* Get property to allow unusual datatypes to be opened */ + rfic_flags = 0; + ret = H5Pget_relax_file_integrity_checks(fapl, &rfic_flags); + CHECK(ret, FAIL, "H5Pget_relax_file_integrity_checks"); + VERIFY(rfic_flags, H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS, "H5Pget_relax_file_integrity_checks"); + + /* Open valid file */ + fid = H5Fopen(testfile2, H5F_ACC_RDONLY, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fopen"); + + /* Close file access property list */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Open dataset w/unusual datatype + * It should succeed and not return an error or seg fault + */ + did = H5Dopen2(fid, MISC38B_DSETNAME, H5P_DEFAULT); + CHECK(did, H5I_INVALID_HID, "H5Dopen2"); + + /* Get the dataset's datatype */ + tid = H5Dget_type(did); + CHECK(tid, H5I_INVALID_HID, "H5Dget_type"); + + type_size = H5Tget_size(tid); + CHECK(type_size, 0, "H5Tget_size"); + VERIFY(type_size, 1000, "H5Tget_size"); + + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + + /* Check that property is handled correctly */ + fapl = H5Fget_access_plist(fid); + CHECK(fapl, H5I_INVALID_HID, "H5Fget_access_plist"); + + /* Get property to allow unusual datatypes to be opened */ + rfic_flags = 
0; + ret = H5Pget_relax_file_integrity_checks(fapl, &rfic_flags); + CHECK(ret, FAIL, "H5Pget_relax_file_integrity_checks"); + VERIFY(rfic_flags, H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS, "H5Pget_relax_file_integrity_checks"); + + /* Close file access property list */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + /* Close file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + + /* Create objects with unusual datatypes and verify correct behavior */ + for (unsigned u = 0; u < 3; u++) { + /* Create a file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + CHECK(fapl, H5I_INVALID_HID, "H5Pcreate"); + + if (1 == u) { + /* Set property to allow unusual datatypes to be opened */ + ret = H5Pset_relax_file_integrity_checks(fapl, H5F_RFIC_UNUSUAL_NUM_UNUSED_NUMERIC_BITS); + CHECK(ret, FAIL, "H5Pset_relax_file_integrity_checks"); + } + else if (2 == u) { + /* Use a later version of the file format, with checksummed object headers */ + ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + CHECK(ret, FAIL, "H5Pset_libver_bounds"); + } + + fid = H5Fcreate(MISC38C_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); + + /* Close file access property list */ + ret = H5Pclose(fapl); + CHECK(ret, FAIL, "H5Pclose"); + + sid = H5Screate(H5S_SCALAR); + CHECK(sid, H5I_INVALID_HID, "H5Screate"); + + tid = H5Tcopy(H5T_NATIVE_INT); + CHECK(tid, H5I_INVALID_HID, "H5Tcopy"); + + /* Set type to have unusual size, for precision */ + ret = H5Tset_size(tid, 1000); + CHECK(ret, FAIL, "H5Tset_size"); + + /* Create a dataset with the unusual datatype */ + H5E_BEGIN_TRY + { + did = H5Dcreate2(fid, MISC38C_DSETNAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY + if (u > 0) { + CHECK(did, H5I_INVALID_HID, "H5Dcreate2"); + + ret = H5Dclose(did); + CHECK(ret, FAIL, "H5Dclose"); + } + else { + VERIFY(did, H5I_INVALID_HID, "H5Dcreate2"); + } + + gid = H5Gopen2(fid, "/", H5P_DEFAULT); + CHECK(gid, H5I_INVALID_HID, "H5Gopen2"); + + /* Create an attribute with the unusual datatype */ + H5E_BEGIN_TRY + { + aid = H5Acreate2(gid, MISC38C_ATTRNAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY + if (u > 0) { + CHECK(aid, H5I_INVALID_HID, "H5Acreate2"); + + ret = H5Aclose(aid); + CHECK(ret, FAIL, "H5Aclose"); + } + else { + VERIFY(aid, H5I_INVALID_HID, "H5Acreate2"); + } + + ret = H5Gclose(gid); + CHECK(ret, FAIL, "H5Gclose"); + + ret = H5Sclose(sid); + CHECK(ret, FAIL, "H5Sclose"); + + /* Create a committed datatype with the unusual datatype */ + H5E_BEGIN_TRY + { + ret = H5Tcommit2(fid, MISC38C_TYPENAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + } + H5E_END_TRY + if (u > 0) { + CHECK(ret, FAIL, "H5Tcommit2"); + } + else { + VERIFY(ret, FAIL, "H5Tcommit2"); + } + + if (tid != H5I_INVALID_HID) { + ret = H5Tclose(tid); + CHECK(ret, FAIL, "H5Tclose"); + } + + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); + } +} /* end test_misc38() */ + +/**************************************************************** +** +** test_misc39(): ** Test for issue where the type conversion path table cache ** would grow continuously when variable-length datatypes ** are involved due to file VOL object comparisons causing @@ -6271,7 +6511,7 @@ test_misc37(void) ** ****************************************************************/ static void -test_misc38(void) +test_misc39(void) { H5VL_object_t *file_vol_obj = NULL; const char *buf[] = {"attr_value"}; @@ -6309,7 +6549,7 @@ test_misc38(void) */ init_npaths = 
H5T__get_path_table_npaths(); - file_id = H5Fcreate(MISC38_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + file_id = H5Fcreate(MISC39_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(file_id, H5I_INVALID_HID, "H5Fcreate"); /* Check if native VOL is being used */ @@ -6444,7 +6684,7 @@ test_misc38(void) CHECK_PTR(vlen_rbuf, "vlen varstr read buf allocation"); for (size_t i = 0; i < 10; i++) { - file_id = H5Fopen(MISC38_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); + file_id = H5Fopen(MISC39_FILE, H5F_ACC_RDONLY, H5P_DEFAULT); CHECK(file_id, H5I_INVALID_HID, "H5Fopen"); /* Retrieve file's VOL object field for further use */ @@ -6546,7 +6786,7 @@ test_misc38(void) /**************************************************************** ** -** test_misc39(): Ensure H5Pset_est_link_info() handles large +** test_misc40(): Ensure H5Pset_est_link_info() handles large ** values ** ** H5Pset_est_link_info() values can be set to large values, @@ -6560,7 +6800,7 @@ test_misc38(void) ** ****************************************************************/ static void -test_misc39(void) +test_misc40(void) { hid_t fid = H5I_INVALID_HID; /* File ID */ hid_t gid = H5I_INVALID_HID; /* Group ID */ @@ -6581,7 +6821,7 @@ test_misc39(void) CHECK(ret, FAIL, "H5Pset_libver_bounds"); /* Create the file */ - fid = H5Fcreate(MISC39_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + fid = H5Fcreate(MISC40_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); CHECK(fid, H5I_INVALID_HID, "H5Fcreate"); /* Compose group creation property list */ @@ -6639,16 +6879,16 @@ test_misc39(void) ret = H5Pclose(gcpl); CHECK(ret, FAIL, "H5Pclose"); -} /* end test_misc39() */ +} /* end test_misc40() */ /**************************************************************** ** -** test_misc40(): Test that object creation properties are propagated +** test_misc41(): Test that object creation properties are propagated ** to intermediate groups. 
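test_misc41() below checks that creation properties reach automatically created intermediate groups. A minimal sketch of the mechanism, assuming an open file identifier fid (path and properties illustrative):

    hid_t lcpl = H5Pcreate(H5P_LINK_CREATE);
    H5Pset_create_intermediate_group(lcpl, 1);   /* create missing parent groups */

    hid_t gcpl = H5Pcreate(H5P_GROUP_CREATE);
    /* ... set group creation properties to be propagated ... */

    /* Creating /A/B/C also creates /A and /A/B; those intermediate groups are
     * expected to pick up the creation properties as well. */
    hid_t gid = H5Gcreate2(fid, "/A/B/C", lcpl, gcpl, H5P_DEFAULT);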
** ****************************************************************/ static void -test_misc40(void) +test_misc41(void) { hid_t lcpl = H5I_INVALID_HID; hid_t gcpl = H5I_INVALID_HID; @@ -6675,7 +6915,7 @@ test_misc40(void) status = H5Pset_create_intermediate_group(lcpl, 1); CHECK(status, FAIL, "H5Pset_create_intermediate_group"); - fid = H5Fcreate(MISC40_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + fid = H5Fcreate(MISC41_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(fid, FAIL, "H5Fcreate"); /* @@ -6856,7 +7096,7 @@ test_misc40(void) status = H5Pclose(lcpl); CHECK(status, FAIL, "H5Pclose"); -} /* end test_misc40() */ +} /* end test_misc41() */ /**************************************************************** ** @@ -6930,9 +7170,10 @@ test_misc(void) test_misc35(); /* Test behavior of free-list & allocation statistics API calls */ test_misc36(); /* Exercise H5atclose and H5is_library_terminating */ test_misc37(); /* Test for seg fault failure at file close */ - test_misc38(); /* Test for type conversion path table issue */ - test_misc39(); /* Ensure H5Pset_est_link_info() handles large values */ - test_misc40(); /* Test object properties propagated to intermediate groups */ + test_misc38(); /* Test for seg fault when opening corrupted object header */ + test_misc39(); /* Test for type conversion path table issue */ + test_misc40(); /* Ensure H5Pset_est_link_info() handles large values */ + test_misc41(); /* Test object properties propagated to intermediate groups */ } /* test_misc() */ @@ -6988,9 +7229,10 @@ cleanup_misc(void) #ifndef H5_NO_DEPRECATED_SYMBOLS H5Fdelete(MISC31_FILE, H5P_DEFAULT); #endif /* H5_NO_DEPRECATED_SYMBOLS */ - H5Fdelete(MISC38_FILE, H5P_DEFAULT); + H5Fdelete(MISC38C_FILE, H5P_DEFAULT); H5Fdelete(MISC39_FILE, H5P_DEFAULT); H5Fdelete(MISC40_FILE, H5P_DEFAULT); + H5Fdelete(MISC41_FILE, H5P_DEFAULT); } H5E_END_TRY } /* end cleanup_misc() */ diff --git a/tools/lib/h5tools_str.c b/tools/lib/h5tools_str.c index 47893ec9390..44e9e681371 100644 --- a/tools/lib/h5tools_str.c +++ b/tools/lib/h5tools_str.c @@ -1207,54 +1207,58 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai /* * Object references -- show the type and OID of the referenced object. 
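The h5tools_str.c change below guards the object-reference printing path against a failed dereference. A minimal sketch of the pattern (illustrative; container and vp come from the surrounding tool code):

    hid_t obj = H5Rdereference2(container, H5P_DEFAULT, H5R_OBJECT, vp);
    if (obj >= 0) {
        H5O_info2_t oi;

        H5Oget_info3(obj, &oi, H5O_INFO_BASIC);  /* only query a valid handle */
        /* ... append the object type and token to the output string ... */
        H5Oclose(obj);
    }
    else {
        /* dangling or invalid reference: print nothing instead of crashing */
    }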
                     */
-                    H5O_info2_t oi;
-                    char       *obj_tok_str = NULL;
-
                     H5TOOLS_DEBUG("H5T_REFERENCE:H5T_STD_REF_OBJ");
                     obj = H5Rdereference2(container, H5P_DEFAULT, H5R_OBJECT, vp);
-                    H5Oget_info3(obj, &oi, H5O_INFO_BASIC);
+                    if (obj >= 0) {
+                        H5O_info2_t oi;
+                        char       *obj_tok_str = NULL;
+
+                        H5Oget_info3(obj, &oi, H5O_INFO_BASIC);
+
+                        /* Print object type and close object */
+                        switch (oi.type) {
+                            case H5O_TYPE_GROUP:
+                                h5tools_str_append(str, H5_TOOLS_GROUP);
+                                break;
+
+                            case H5O_TYPE_DATASET:
+                                h5tools_str_append(str, H5_TOOLS_DATASET);
+                                break;
+
+                            case H5O_TYPE_NAMED_DATATYPE:
+                                h5tools_str_append(str, H5_TOOLS_DATATYPE);
+                                break;
+
+                            case H5O_TYPE_MAP:
+                                h5tools_str_append(str, H5_TOOLS_MAP);
+                                break;
+
+                            case H5O_TYPE_UNKNOWN:
+                            case H5O_TYPE_NTYPES:
+                            default:
+                                h5tools_str_append(str, "%u-", (unsigned)oi.type);
+                                break;
+                        }

-                    /* Print object type and close object */
-                    switch (oi.type) {
-                        case H5O_TYPE_GROUP:
-                            h5tools_str_append(str, H5_TOOLS_GROUP);
-                            break;
+                        /* Print OID */
+                        H5Otoken_to_str(obj, &oi.token, &obj_tok_str);

-                        case H5O_TYPE_DATASET:
-                            h5tools_str_append(str, H5_TOOLS_DATASET);
-                            break;
+                        H5Oclose(obj);

-                        case H5O_TYPE_NAMED_DATATYPE:
-                            h5tools_str_append(str, H5_TOOLS_DATATYPE);
-                            break;
+                        if (info->obj_hidefileno)
+                            h5tools_str_append(str, info->obj_format, obj_tok_str);
+                        else
+                            h5tools_str_append(str, info->obj_format, oi.fileno, obj_tok_str);

-                        case H5O_TYPE_MAP:
-                            h5tools_str_append(str, H5_TOOLS_MAP);
-                            break;
+                        if (obj_tok_str) {
+                            H5free_memory(obj_tok_str);
+                            obj_tok_str = NULL;
+                        }

-                        case H5O_TYPE_UNKNOWN:
-                        case H5O_TYPE_NTYPES:
-                        default:
-                            h5tools_str_append(str, "%u-", (unsigned)oi.type);
-                            break;
+                        h5tools_str_sprint_old_reference(str, container, H5R_OBJECT, vp);
                     }
-
-                    /* Print OID */
-                    H5Otoken_to_str(obj, &oi.token, &obj_tok_str);
-
-                    H5Oclose(obj);
-
-                    if (info->obj_hidefileno)
-                        h5tools_str_append(str, info->obj_format, obj_tok_str);
                     else
-                        h5tools_str_append(str, info->obj_format, oi.fileno, obj_tok_str);
-
-                    if (obj_tok_str) {
-                        H5free_memory(obj_tok_str);
-                        obj_tok_str = NULL;
-                    }
-
-                    h5tools_str_sprint_old_reference(str, container, H5R_OBJECT, vp);
+                        h5tools_str_append(str, "");
                 } /* end else if (H5Tequal(type, H5T_STD_REF_OBJ)) */
             }
             break;
diff --git a/tools/test/h5dump/CMakeTests.cmake b/tools/test/h5dump/CMakeTests.cmake
index 3a863bdee72..2369f63c746 100644
--- a/tools/test/h5dump/CMakeTests.cmake
+++ b/tools/test/h5dump/CMakeTests.cmake
@@ -1285,7 +1285,7 @@
   ADD_H5_COMP_TEST (tfletcher32 0 0 --enable-error-stack -H -p -d fletcher32 tfilters.h5)

   # nbit
-  ADD_H5_COMP_TEST (tnbit 0 10 --enable-error-stack -H -p -d nbit tfilters.h5)
+  ADD_H5_COMP_TEST (tnbit 0 1 --enable-error-stack -H -p -d nbit tfilters.h5)

   # scaleoffset
   ADD_H5_COMP_TEST (tscaleoffset 0 4 --enable-error-stack -H -p -d scaleoffset tfilters.h5)
diff --git a/tools/test/h5dump/expected/tnbit.ddl b/tools/test/h5dump/expected/tnbit.ddl
index 35c111fb5b3..3801c1bf655 100644
--- a/tools/test/h5dump/expected/tnbit.ddl
+++ b/tools/test/h5dump/expected/tnbit.ddl
@@ -1,10 +1,10 @@
 HDF5 "tfilters.h5" {
 DATASET "nbit" {
-   DATATYPE 32-bit little-endian integer 3-bit precision
+   DATATYPE 32-bit little-endian integer 17-bit precision
    DATASPACE SIMPLE { ( 20, 10 ) / ( 20, 10 ) }
    STORAGE_LAYOUT {
      CHUNKED ( 10, 5 )
-     SIZE XXXX (10.XXX:1 COMPRESSION)
+     SIZE XXXX (1.XXX:1 COMPRESSION)
    }
    FILTERS {
      COMPRESSION NBIT
diff --git a/tools/test/h5dump/expected/treadintfilter.ddl b/tools/test/h5dump/expected/treadintfilter.ddl
index fbad3f67369..7a670a2a25d 100644
--- a/tools/test/h5dump/expected/treadintfilter.ddl
+++ b/tools/test/h5dump/expected/treadintfilter.ddl
@@ -78,29 +78,29 @@ DATASET "fletcher32" {
    }
 }
 DATASET "nbit" {
-   DATATYPE 32-bit little-endian integer 3-bit precision
+   DATATYPE 32-bit little-endian integer 17-bit precision
    DATASPACE SIMPLE { ( 20, 10 ) / ( 20, 10 ) }
    DATA {
-   (0,0): 0, 1, 2, 3, -4, -3, -2, -1, 0, 1,
-   (1,0): 2, 3, -4, -3, -2, -1, 0, 1, 2, 3,
-   (2,0): -4, -3, -2, -1, 0, 1, 2, 3, -4, -3,
-   (3,0): -2, -1, 0, 1, 2, 3, -4, -3, -2, -1,
-   (4,0): 0, 1, 2, 3, -4, -3, -2, -1, 0, 1,
-   (5,0): 2, 3, -4, -3, -2, -1, 0, 1, 2, 3,
-   (6,0): -4, -3, -2, -1, 0, 1, 2, 3, -4, -3,
-   (7,0): -2, -1, 0, 1, 2, 3, -4, -3, -2, -1,
-   (8,0): 0, 1, 2, 3, -4, -3, -2, -1, 0, 1,
-   (9,0): 2, 3, -4, -3, -2, -1, 0, 1, 2, 3,
-   (10,0): -4, -3, -2, -1, 0, 1, 2, 3, -4, -3,
-   (11,0): -2, -1, 0, 1, 2, 3, -4, -3, -2, -1,
-   (12,0): 0, 1, 2, 3, -4, -3, -2, -1, 0, 1,
-   (13,0): 2, 3, -4, -3, -2, -1, 0, 1, 2, 3,
-   (14,0): -4, -3, -2, -1, 0, 1, 2, 3, -4, -3,
-   (15,0): -2, -1, 0, 1, 2, 3, -4, -3, -2, -1,
-   (16,0): 0, 1, 2, 3, -4, -3, -2, -1, 0, 1,
-   (17,0): 2, 3, -4, -3, -2, -1, 0, 1, 2, 3,
-   (18,0): -4, -3, -2, -1, 0, 1, 2, 3, -4, -3,
-   (19,0): -2, -1, 0, 1, 2, 3, -4, -3, -2, -1
+   (0,0): 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+   (1,0): 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+   (2,0): 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+   (3,0): 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+   (4,0): 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+   (5,0): 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+   (6,0): 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+   (7,0): 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+   (8,0): 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+   (9,0): 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+   (10,0): 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+   (11,0): 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+   (12,0): 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+   (13,0): 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+   (14,0): 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+   (15,0): 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+   (16,0): 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+   (17,0): 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+   (18,0): 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+   (19,0): 190, 191, 192, 193, 194, 195, 196, 197, 198, 199
    }
 }
 DATASET "scaleoffset" {
diff --git a/tools/test/h5dump/h5dumpgentest.c b/tools/test/h5dump/h5dumpgentest.c
index bf0bfd4a251..c7d5fe66c28 100644
--- a/tools/test/h5dump/h5dumpgentest.c
+++ b/tools/test/h5dump/h5dumpgentest.c
@@ -5615,7 +5615,7 @@ gent_filters(void)
     assert(ret >= 0);

     tid = H5Tcopy(H5T_NATIVE_INT);
-    H5Tset_precision(tid, H5Tget_size(tid) - 1);
+    H5Tset_precision(tid, (H5Tget_size(tid) * 4) + 1);
     ret = make_dset(fid, "nbit", sid, tid, dcpl, buf1);
     assert(ret >= 0);
diff --git a/tools/test/h5dump/testfiles/tfilters.h5 b/tools/test/h5dump/testfiles/tfilters.h5
index 7c33e55aae1..23d68a9b72b 100644
Binary files a/tools/test/h5dump/testfiles/tfilters.h5 and b/tools/test/h5dump/testfiles/tfilters.h5 differ
diff --git a/tools/test/h5dump/testh5dump.sh.in b/tools/test/h5dump/testh5dump.sh.in
index daba93b3509..0964d7dda6d 100644
--- a/tools/test/h5dump/testh5dump.sh.in
+++ b/tools/test/h5dump/testh5dump.sh.in
@@ -1430,7 +1430,7 @@ TOOLTEST tshuffle.ddl --enable-error-stack -H -p -d shuffle tfilters.h5
 # fletcher32
 TOOLTESTC 0 tfletcher32.ddl --enable-error-stack -H -p -d fletcher32 tfilters.h5
 # nbit
-TOOLTESTC 10 tnbit.ddl --enable-error-stack -H -p -d nbit tfilters.h5
+TOOLTESTC 1 tnbit.ddl --enable-error-stack -H -p -d nbit tfilters.h5
 # scaleoffset
 TOOLTESTC 4 tscaleoffset.ddl --enable-error-stack -H -p -d scaleoffset tfilters.h5
 # all
diff --git a/tools/test/h5stat/expected/h5stat_filters-F.ddl b/tools/test/h5stat/expected/h5stat_filters-F.ddl
index d44445bb2e3..065d0c97477 100644
--- a/tools/test/h5stat/expected/h5stat_filters-F.ddl
+++ b/tools/test/h5stat/expected/h5stat_filters-F.ddl
@@ -4,12 +4,12 @@ File space information for file metadata (in bytes):
 	Superblock extension: 0
 	User block: 0
 	Object headers: (total/unused)
-		Groups: 48/8
-		Datasets(exclude compact data): 4136/1344
+		Groups: 40/0
+		Datasets(exclude compact data): 4128/1088
 		Datatypes: 80/0
 	Groups:
 		B-tree/List: 1200
-		Heap: 288
+		Heap: 384
 	Attributes:
 		B-tree/List: 0
 		Heap: 0
diff --git a/tools/test/h5stat/expected/h5stat_filters-UD.ddl b/tools/test/h5stat/expected/h5stat_filters-UD.ddl
index 4efafd13c40..9f6335aaed6 100644
--- a/tools/test/h5stat/expected/h5stat_filters-UD.ddl
+++ b/tools/test/h5stat/expected/h5stat_filters-UD.ddl
@@ -1,5 +1,5 @@
 Filename: h5stat_filters.h5
 File space information for datasets' metadata (in bytes):
-	Object headers (total/unused): 4136/1344
+	Object headers (total/unused): 4128/1088
 	Index for Chunked datasets: 31392
 	Heap: 72
diff --git a/tools/test/h5stat/expected/h5stat_filters-d.ddl b/tools/test/h5stat/expected/h5stat_filters-d.ddl
index 6e6dd6140dd..eee7e1845d4 100644
--- a/tools/test/h5stat/expected/h5stat_filters-d.ddl
+++ b/tools/test/h5stat/expected/h5stat_filters-d.ddl
@@ -12,7 +12,7 @@ Dataset dimension information:
 	# of datasets with dimension size 100 - 999: 1
 Total # of datasets: 1
 Dataset storage information:
-	Total raw data size: 8659
+	Total raw data size: 9046
 	Total external raw data size: 400
 Dataset layout information:
 	Dataset layout counts[COMPACT]: 1
diff --git a/tools/test/h5stat/expected/h5stat_filters-dT.ddl b/tools/test/h5stat/expected/h5stat_filters-dT.ddl
index b14ca9f9745..e513b3a5f28 100644
--- a/tools/test/h5stat/expected/h5stat_filters-dT.ddl
+++ b/tools/test/h5stat/expected/h5stat_filters-dT.ddl
@@ -12,7 +12,7 @@ Dataset dimension information:
 	# of datasets with dimension size 100 - 999: 1
 Total # of datasets: 1
 Dataset storage information:
-	Total raw data size: 8659
+	Total raw data size: 9046
 	Total external raw data size: 400
 Dataset layout information:
 	Dataset layout counts[COMPACT]: 1
diff --git a/tools/test/h5stat/expected/h5stat_filters.ddl b/tools/test/h5stat/expected/h5stat_filters.ddl
index 9f9e146f08f..7383f0b31ff 100644
--- a/tools/test/h5stat/expected/h5stat_filters.ddl
+++ b/tools/test/h5stat/expected/h5stat_filters.ddl
@@ -12,12 +12,12 @@ File space information for file metadata (in bytes):
 	Superblock extension: 0
 	User block: 0
 	Object headers: (total/unused)
-		Groups: 48/8
-		Datasets(exclude compact data): 4136/1344
+		Groups: 40/0
+		Datasets(exclude compact data): 4128/1088
 		Datatypes: 80/0
 	Groups:
 		B-tree/List: 1200
-		Heap: 288
+		Heap: 384
 	Attributes:
 		B-tree/List: 0
 		Heap: 0
@@ -50,7 +50,7 @@ Dataset dimension information:
 	# of datasets with dimension size 100 - 999: 1
 Total # of datasets: 1
 Dataset storage information:
-	Total raw data size: 8659
+	Total raw data size: 9046
 	Total external raw data size: 400
 Dataset layout information:
 	Dataset layout counts[COMPACT]: 1
@@ -91,9 +91,9 @@ Free-space section bins:
 File space management strategy: H5F_FSPACE_STRATEGY_FSM_AGGR
 File space page size: 4096 bytes
 Summary of file space information:
-	File metadata: 37312 bytes
-	Raw data: 8659 bytes
+	File metadata: 37392 bytes
+	Raw data: 9046 bytes
 	Amount/Percent of tracked free space: 0 bytes/0.0%
-	Unaccounted space: 301 bytes
-Total space: 46272 bytes
+	Unaccounted space: 258 bytes
+Total space: 46696 bytes
 External raw data: 400 bytes
diff --git a/tools/test/h5stat/testfiles/h5stat_filters.h5 b/tools/test/h5stat/testfiles/h5stat_filters.h5
index 5b5f4bb7a68..23d68a9b72b 100644
Binary files a/tools/test/h5stat/testfiles/h5stat_filters.h5 and b/tools/test/h5stat/testfiles/h5stat_filters.h5 differ