diff --git a/.github/workflows/aocc-auto.yml b/.github/workflows/aocc-auto.yml index a1927eb0403..8b33173db53 100644 --- a/.github/workflows/aocc-auto.yml +++ b/.github/workflows/aocc-auto.yml @@ -74,29 +74,38 @@ jobs: --enable-build-mode=${{ inputs.build_mode }} \ --enable-shared \ --enable-parallel \ + --enable-subfiling-vfd \ LDFLAGS="-L/home/runner/work/hdf5/hdf5/aocc-compiler-4.2.0/lib \ -L/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/lib" - name: Autotools Build shell: bash - env: - NPROCS: 2 run: | export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/bin:/usr/local/bin:$PATH make -j3 working-directory: ${{ runner.workspace }}/build + # ph5diff tests are in the tools/tests directory so they will get run + # here, so leave NPROCS set here as well - name: Autotools Run Tests env: NPROCS: 2 run: | export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/bin:/usr/local/bin:$PATH - make check -j + cd test && make check -j2 && cd .. + cd tools && make check -j2 && cd .. + cd hl && make check -j2 && cd .. working-directory: ${{ runner.workspace }}/build - - name: Autotools Install + - name: Autotools Run Parallel Tests env: NPROCS: 2 + run: | + export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/bin:/usr/local/bin:$PATH + cd testpar && make check && cd .. + working-directory: ${{ runner.workspace }}/build + + - name: Autotools Install run: | export PATH=/home/runner/work/hdf5/hdf5/openmpi-4.1.6-install/bin:/usr/local/bin:$PATH make install diff --git a/.github/workflows/aocc-cmake.yml b/.github/workflows/aocc-cmake.yml index 71966caf41e..d4cff890e63 100644 --- a/.github/workflows/aocc-cmake.yml +++ b/.github/workflows/aocc-cmake.yml @@ -71,6 +71,7 @@ jobs: -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} \ -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF \ -DHDF5_ENABLE_PARALLEL:BOOL=ON \ + -DHDF5_ENABLE_SUBFILING_VFD:BOOL=ON \ -DHDF5_BUILD_CPP_LIB:BOOL=OFF \ -DLIBAEC_USE_LOCALCONTENT=OFF \ -DZLIB_USE_LOCALCONTENT=OFF \ @@ -89,5 +90,11 @@ jobs: - name: CMake Run Tests shell: bash run: | - ctest . --parallel 2 -C ${{ inputs.build_mode }} -V + ctest . -E MPI_TEST --parallel 2 -C ${{ inputs.build_mode }} -V + working-directory: ${{ runner.workspace }}/build + + - name: CMake Run Parallel Tests + shell: bash + run: | + ctest . 
-R MPI_TEST -C ${{ inputs.build_mode }} -V working-directory: ${{ runner.workspace }}/build diff --git a/.github/workflows/autotools.yml b/.github/workflows/autotools.yml index a430dc6b51c..a202076005a 100644 --- a/.github/workflows/autotools.yml +++ b/.github/workflows/autotools.yml @@ -100,6 +100,12 @@ jobs: name: "Autotools TestExpress Workflows" uses: ./.github/workflows/testxpr-auto.yml + call-release-auto-julia: + name: "Autotools Julia Workflows" + uses: ./.github/workflows/julia-auto.yml + with: + build_mode: "production" + # workflow-msys2-autotools: # name: "CMake msys2 Workflows" # uses: ./.github/workflows/msys2-auto.yml diff --git a/.github/workflows/clang-format-check.yml b/.github/workflows/clang-format-check.yml index c4d68a8f004..8fd5b5fb8f7 100644 --- a/.github/workflows/clang-format-check.yml +++ b/.github/workflows/clang-format-check.yml @@ -1,8 +1,10 @@ name: clang-format Check on: pull_request: + permissions: contents: read + jobs: formatting-check: name: Formatting Check @@ -10,8 +12,9 @@ jobs: if: "!contains(github.event.head_commit.message, 'skip-ci')" steps: - uses: actions/checkout@v4.1.7 + - name: Run clang-format style check for C and Java code - uses: DoozyX/clang-format-lint-action@v0.17 + uses: DoozyX/clang-format-lint-action@v0.18.2 with: source: '.' extensions: 'c,h,cpp,hpp,java' diff --git a/.github/workflows/clang-format-fix.yml b/.github/workflows/clang-format-fix.yml index 2ce9f6e9dad..a99d32d38ad 100644 --- a/.github/workflows/clang-format-fix.yml +++ b/.github/workflows/clang-format-fix.yml @@ -11,8 +11,10 @@ name: clang-format Commit Changes on: workflow_dispatch: push: + permissions: contents: read + jobs: formatting-check: name: Commit Format Changes @@ -21,9 +23,10 @@ jobs: permissions: contents: write # In order to allow EndBug/add-and-commit to commit changes steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@v4.1.7 + - name: Fix C and Java formatting issues detected by clang-format - uses: DoozyX/clang-format-lint-action@d3c7f85989e3b6416265a0d12f8b4a8aa8b0c4ff # v0.13 + uses: DoozyX/clang-format-lint-action@v0.18.2 with: source: '.' 
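Note: the aocc and main-cmake-par changes above split testing into a serial pass and an MPI pass: ctest first excludes every MPI_TEST target (-E) and then runs only those targets (-R), while the Autotools jobs run make check per directory with NPROCS=2 exported for the parallel harness. A minimal local sketch of the same split, assuming a parallel-enabled build tree; the build path and the Release config are placeholders, not values from this diff:

    #!/usr/bin/env bash
    # Sketch: serial tests first, then MPI tests, mirroring the -E/-R split above.
    set -euo pipefail
    cd "$HOME/hdf5/build"                      # assumed build directory

    # Serial tests: exclude anything matching MPI_TEST, run two tests at a time.
    ctest . -E MPI_TEST --parallel 2 -C Release -V

    # Parallel (MPI) tests: run only MPI_TEST targets, one at a time so the
    # mpiexec ranks are not oversubscribed on a two-core runner.
    ctest . -R MPI_TEST -C Release -V
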
extensions: 'c,h,cpp,hpp,java' @@ -31,6 +34,7 @@ jobs: inplace: True style: file exclude: './config ./hl/src/H5LTanalyze.c ./hl/src/H5LTparse.c ./hl/src/H5LTparse.h ./src/H5Epubgen.h ./src/H5Einit.h ./src/H5Eterm.h ./src/H5Edefin.h ./src/H5version.h ./src/H5overflow.h' + - uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # v9.1.4 with: author_name: github-actions diff --git a/.github/workflows/cmake-bintest.yml b/.github/workflows/cmake-bintest.yml index 73d66f2414d..379db3a2ca5 100644 --- a/.github/workflows/cmake-bintest.yml +++ b/.github/workflows/cmake-bintest.yml @@ -161,7 +161,7 @@ jobs: - name: Get published binary (MacOS_latest) uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: tgz-osx-${{ inputs.build_mode }}-binary + name: tgz-macos14_clang-${{ inputs.build_mode }}-binary path: ${{ github.workspace }} - name: Uncompress hdf5 binary (MacOS_latest) @@ -192,7 +192,7 @@ jobs: id: setup-fortran with: compiler: gcc - version: 12 + version: 14 - name: Run ctest (MacOS_latest) id: run-ctest @@ -201,6 +201,6 @@ jobs: HDF5_PLUGIN_PATH: ${{ steps.set-hdf5lib-name.outputs.HDF5_PLUGIN_PATH }} run: | cd "${{ steps.set-hdf5lib-name.outputs.HDF5_ROOT }}/share/HDF5Examples" - cmake --workflow --preset=ci-StdShar-OSX-Clang --fresh + cmake --workflow --preset=ci-StdShar-MACOS-Clang --fresh shell: bash diff --git a/.github/workflows/cmake-ctest.yml b/.github/workflows/cmake-ctest.yml index 04dc2b664fc..b927b3ce403 100644 --- a/.github/workflows/cmake-ctest.yml +++ b/.github/workflows/cmake-ctest.yml @@ -23,6 +23,12 @@ on: required: true default: snapshots secrets: + APPLE_CERTS_BASE64: + required: true + APPLE_CERTS_BASE64_PASSWD: + required: true + KEYCHAIN_PASSWD: + required: true AZURE_TENANT_ID: required: true AZURE_CLIENT_ID: @@ -40,11 +46,34 @@ permissions: contents: read jobs: + check-secret: + name: Check Secrets exists + runs-on: ubuntu-latest + outputs: + sign-state: ${{ steps.set-signing-state.outputs.BINSIGN }} + steps: + - name: Identify Signing Status + id: set-signing-state + env: + signing_secret: ${{ secrets.AZURE_ENDPOINT }} + run: | + if [[ '${{ env.signing_secret }}' == '' ]] + then + SIGN_VAL=$(echo 'notexists') + else + SIGN_VAL=$(echo 'exists') + fi + echo "BINSIGN=$SIGN_VAL" >> $GITHUB_OUTPUT + shell: bash + + - run: echo "signing is ${{ steps.set-signing-state.outputs.BINSIGN }}." 
+ build_and_test_win: # Windows w/ MSVC + CMake # name: "Windows MSVC CTest" runs-on: windows-latest + needs: [check-secret] steps: - name: Install Dependencies (Windows) run: choco install ninja @@ -57,20 +86,6 @@ jobs: - name: Enable Developer Command Prompt uses: ilammy/msvc-dev-cmd@v1.13.0 - - name: Check Secrets exists - id: set-signing-state - env: - super_secret: ${{ secrets.AZURE_ENDPOINT }} - run: | - if [[ '${{ env.super_secret }}' == '' ]] - then - SIGN_VAL=$(echo "false") - else - SIGN_VAL=$(echo "true") - fi - echo "BINSIGN=$SIGN_VAL" >> $GITHUB_OUTPUT - shell: bash - - name: Set file base name (Windows) id: set-file-base run: | @@ -107,7 +122,27 @@ jobs: run: 7z x ${{ steps.set-file-base.outputs.FILE_BASE }}.zip shell: bash + - name: Install TrustedSigning (Windows) + run: | + Invoke-WebRequest -Uri https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile .\nuget.exe + .\nuget.exe install Microsoft.Windows.SDK.BuildTools -Version 10.0.22621.3233 -x + .\nuget.exe install Microsoft.Trusted.Signing.Client -Version 1.0.53 -x + shell: pwsh + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} + + - name: create-json + id: create-json + uses: jsdaniell/create-json@v1.2.3 + with: + name: "credentials.json" + dir: '${{ steps.set-file-base.outputs.SOURCE_BASE }}' + json: '{"Endpoint": "${{ secrets.AZURE_ENDPOINT }}","CodeSigningAccountName": "${{ secrets.AZURE_CODE_SIGNING_NAME }}","CertificateProfileName": "${{ secrets.AZURE_CERT_PROFILE_NAME }}"}' + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} + - name: Run ctest (Windows) + env: + BINSIGN: ${{ needs.check-secret.outputs.sign-state }} + SIGNTOOLDIR: ${{ github.workspace }}/Microsoft.Windows.SDK.BuildTools/bin/10.0.22621.0/x64 run: | cd "${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}" cmake --workflow --preset=${{ inputs.preset_name }}-MSVC --fresh @@ -127,7 +162,7 @@ jobs: file-digest: SHA256 timestamp-rfc3161: http://timestamp.acs.microsoft.com timestamp-digest: SHA256 - if: ${{ steps.set-signing-state.BINSIGN == 'true' }} + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} - name: Publish binary (Windows) id: publish-ctest-binary @@ -146,13 +181,7 @@ jobs: id: publish-ctest-msi-binary run: | mkdir "${{ runner.workspace }}/buildmsi" - mkdir "${{ runner.workspace }}/buildmsi/hdf5" - Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING -Destination ${{ runner.workspace }}/buildmsi/hdf5/ - Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING_LBNL_HDF5 -Destination ${{ runner.workspace }}/buildmsi/hdf5/ - Copy-Item -Path ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-MSVC/README.md -Destination ${{ runner.workspace }}/buildmsi/hdf5/ - Copy-Item -Path ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-MSVC/* -Destination ${{ runner.workspace }}/buildmsi/hdf5/ -Include *.msi - cd "${{ runner.workspace }}/buildmsi" - 7z a -tzip ${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi.zip hdf5 + Copy-Item -Path ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-MSVC/* -Destination ${{ runner.workspace }}/buildmsi/${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi -Include *.msi shell: pwsh - name: List files in the space (Windows) @@ -173,7 +202,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: msi-vs2022_cl-binary - path: ${{ runner.workspace }}/buildmsi/${{ steps.set-file-base.outputs.FILE_BASE 
}}-win-vs2022_cl.msi.zip + path: ${{ runner.workspace }}/buildmsi/${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` build_and_test_linux: @@ -181,6 +210,7 @@ jobs: # name: "Ubuntu gcc CMake" runs-on: ubuntu-latest + needs: [check-secret] steps: - name: Install CMake Dependencies (Linux) run: | @@ -243,26 +273,14 @@ jobs: id: publish-ctest-deb-binary run: | mkdir "${{ runner.workspace }}/builddeb" - mkdir "${{ runner.workspace }}/builddeb/hdf5" - cp ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING ${{ runner.workspace }}/builddeb/hdf5 - cp ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING_LBNL_HDF5 ${{ runner.workspace }}/builddeb/hdf5 - cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-GNUC/README.md ${{ runner.workspace }}/builddeb/hdf5 - cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-GNUC/*.deb ${{ runner.workspace }}/builddeb/hdf5 - cd "${{ runner.workspace }}/builddeb" - tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.deb.tar.gz hdf5 + cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-GNUC/*.deb ${{ runner.workspace }}/builddeb/${{ steps.set-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.deb shell: bash - name: Publish rpm binary (Linux) id: publish-ctest-rpm-binary run: | mkdir "${{ runner.workspace }}/buildrpm" - mkdir "${{ runner.workspace }}/buildrpm/hdf5" - cp ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING ${{ runner.workspace }}/buildrpm/hdf5 - cp ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING_LBNL_HDF5 ${{ runner.workspace }}/buildrpm/hdf5 - cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-GNUC/README.md ${{ runner.workspace }}/buildrpm/hdf5 - cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-GNUC/*.rpm ${{ runner.workspace }}/buildrpm/hdf5 - cd "${{ runner.workspace }}/buildrpm" - tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm.tar.gz hdf5 + cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-GNUC/*.rpm ${{ runner.workspace }}/buildrpm/${{ steps.set-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm shell: bash - name: List files in the space (Linux) @@ -282,14 +300,14 @@ jobs: uses: actions/upload-artifact@v4 with: name: deb-ubuntu-2204_gcc-binary - path: ${{ runner.workspace }}/builddeb/${{ steps.set-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.deb.tar.gz + path: ${{ runner.workspace }}/builddeb/${{ steps.set-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.deb if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` - name: Save published binary rpm (Linux) uses: actions/upload-artifact@v4 with: name: rpm-ubuntu-2204_gcc-binary - path: ${{ runner.workspace }}/buildrpm/${{ steps.set-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm.tar.gz + path: ${{ runner.workspace }}/buildrpm/${{ steps.set-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` # Save doxygen files created by ctest script @@ -305,6 +323,7 @@ jobs: # name: "MacOS Clang CMake" runs-on: macos-latest + needs: [check-secret] steps: - name: Install Dependencies (MacOS_latest) run: brew install ninja @@ -314,6 +333,28 @@ jobs: with: version: "1.9.7" + - name: Install the Apple certificate and provisioning profile + shell: bash + env: + 
BUILD_CERTIFICATE_BASE64: ${{ secrets.APPLE_CERTS_BASE64 }} + P12_PASSWORD: ${{ secrets.APPLE_CERTS_BASE64_PASSWD }} + KEYCHAIN_PASSWD: ${{ secrets.KEYCHAIN_PASSWD }} + run: | + # create variables + CERTIFICATE_PATH=$RUNNER_TEMP/build_certificate.p12 + KEYCHAIN_FILE=${{ vars.KEYCHAIN_NAME }}.keychain + # import certificate from secrets + echo $BUILD_CERTIFICATE_BASE64 | base64 --decode > $CERTIFICATE_PATH + security -v create-keychain -p $KEYCHAIN_PASSWD $KEYCHAIN_FILE + security -v list-keychain -d user -s $KEYCHAIN_FILE + security -v list-keychains + security -v set-keychain-settings -lut 21600 $KEYCHAIN_FILE + security -v unlock-keychain -p $KEYCHAIN_PASSWD $KEYCHAIN_FILE + # import certificate to keychain + security -v import $CERTIFICATE_PATH -P $P12_PASSWORD -A -t cert -f pkcs12 -k $KEYCHAIN_FILE + security -v set-key-partition-list -S apple-tool:,codesign:,apple: -k $KEYCHAIN_PASSWD $KEYCHAIN_FILE + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} + - name: Set up JDK 19 uses: actions/setup-java@v4 with: @@ -354,15 +395,104 @@ jobs: id: setup-fortran with: compiler: gcc - version: 12 + version: 14 - name: Run ctest (MacOS_latest) id: run-ctest + env: + BINSIGN: ${{ needs.check-secret.outputs.sign-state }} + SIGNER: ${{ vars.SIGNER }} run: | cd "${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}" - cmake --workflow --preset=${{ inputs.preset_name }}-OSX-Clang --fresh + cmake --workflow --preset=${{ inputs.preset_name }}-macos-Clang --fresh + shell: bash + + - name: Sign dmg (MacOS_latest) + id: sign-dmg + env: + KEYCHAIN_PASSWD: ${{ secrets.KEYCHAIN_PASSWD }} + KEYCHAIN_NAME: ${{ vars.KEYCHAIN_NAME }} + SIGNER: ${{ vars.SIGNER }} + NOTARY_USER: ${{ vars.NOTARY_USER }} + NOTARY_KEY: ${{ vars.NOTARY_KEY }} + run: | + /usr/bin/codesign --force --timestamp --options runtime --entitlements ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/config/cmake/distribution.entitlements --verbose=4 --strict --sign ${{ env.SIGNER }} --deep ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-macos-Clang/*.dmg + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} shell: bash + - name: Check dmg timestamp (MacOS_latest) + run: | + /usr/bin/codesign -dvv ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-macos-Clang/*.dmg + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} + shell: bash + + - name: Verify dmg (MacOS_latest) + run: | + /usr/bin/hdiutil verify ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-macos-Clang/*.dmg + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} + shell: bash + + - name: Notarize dmg (MacOS_latest) + id: notarize-dmg + env: + KEYCHAIN_PASSWD: ${{ secrets.KEYCHAIN_PASSWD }} + KEYCHAIN_NAME: ${{ vars.KEYCHAIN_NAME }} + SIGNER: ${{ vars.SIGNER }} + NOTARY_USER: ${{ vars.NOTARY_USER }} + NOTARY_KEY: ${{ vars.NOTARY_KEY }} + run: | + jsonout=$(/usr/bin/xcrun notarytool submit --wait --output-format json --apple-id ${{ env.NOTARY_USER }} --password ${{ env.NOTARY_KEY }} --team-id ${{ env.SIGNER }} ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-macos-Clang/*.dmg) + echo "JSONOUT=$jsonout" >> $GITHUB_OUTPUT + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} + shell: bash + + - name: Get ID token (MacOS_latest) + id: get-id-token + run: | + echo "notary result is ${{ fromJson(steps.notarize-dmg.outputs.JSONOUT) }}" + token=${{ fromJson(steps.notarize-dmg.outputs.JSONOUT).id }} + echo "ID_TOKEN=$token" >> "$GITHUB_OUTPUT" + if: ${{ 
needs.check-secret.outputs.sign-state == 'exists' }} + shell: bash + + - name: post notary check (MacOS_latest) + id: post-notary + env: + KEYCHAIN_PASSWD: ${{ secrets.KEYCHAIN_PASSWD }} + KEYCHAIN_NAME: ${{ vars.KEYCHAIN_NAME }} + SIGNER: ${{ vars.SIGNER }} + NOTARY_USER: ${{ vars.NOTARY_USER }} + NOTARY_KEY: ${{ vars.NOTARY_KEY }} + run: | + { + echo 'NOTARYOUT<> $GITHUB_OUTPUT + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} + shell: bash + + - name: Get notary info (MacOS_latest) + id: get-notary-info + run: | + echo "notary info is ${{ steps.post-notary.outputs.NOTARYOUT }}." + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} + shell: bash + + - name: Staple dmg (MacOS_latest) + id: staple-dmg + env: + KEYCHAIN_PASSWD: ${{ secrets.KEYCHAIN_PASSWD }} + KEYCHAIN_NAME: ${{ vars.KEYCHAIN_NAME }} + SIGNER: ${{ vars.SIGNER }} + NOTARY_USER: ${{ vars.NOTARY_USER }} + NOTARY_KEY: ${{ vars.NOTARY_KEY }} + run: | + /usr/bin/xcrun stapler staple ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-macos-Clang/*.dmg + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} + shell: bash + continue-on-error: true + - name: Publish binary (MacOS_latest) id: publish-ctest-binary run: | @@ -370,23 +500,17 @@ jobs: mkdir "${{ runner.workspace }}/build/hdf5" cp ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING ${{ runner.workspace }}/build/hdf5 cp ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING_LBNL_HDF5 ${{ runner.workspace }}/build/hdf5 - cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-Clang/README.md ${{ runner.workspace }}/build/hdf5 - cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-Clang/*.tar.gz ${{ runner.workspace }}/build/hdf5 + cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-macos-Clang/README.md ${{ runner.workspace }}/build/hdf5 + cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-macos-Clang/*.tar.gz ${{ runner.workspace }}/build/hdf5 cd "${{ runner.workspace }}/build" - tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}-osx.tar.gz hdf5 + tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}-macos14_clang.tar.gz hdf5 shell: bash - name: Publish dmg binary (MacOS_latest) id: publish-ctest-dmg-binary run: | mkdir "${{ runner.workspace }}/builddmg" - mkdir "${{ runner.workspace }}/builddmg/hdf5" - cp ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING ${{ runner.workspace }}/builddmg/hdf5 - cp ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING_LBNL_HDF5 ${{ runner.workspace }}/builddmg/hdf5 - cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-Clang/README.md ${{ runner.workspace }}/builddmg/hdf5 - cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-Clang/*.dmg ${{ runner.workspace }}/builddmg/hdf5 - cd "${{ runner.workspace }}/builddmg" - tar -zcvf ${{ steps.set-file-base.outputs.FILE_BASE }}-osx.dmg.tar.gz hdf5 + cp ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-macos-Clang/*.dmg ${{ runner.workspace }}/builddmg/${{ steps.set-file-base.outputs.FILE_BASE }}-macos14_clang.dmg shell: bash - name: List files in the space (MacOS_latest) @@ -398,15 +522,15 @@ jobs: - name: Save published binary (MacOS_latest) uses: actions/upload-artifact@v4 with: - name: tgz-osx-binary - path: ${{ runner.workspace }}/build/${{ steps.set-file-base.outputs.FILE_BASE }}-osx.tar.gz + name: tgz-macos14_clang-binary + path: ${{ runner.workspace 
}}/build/${{ steps.set-file-base.outputs.FILE_BASE }}-macos14_clang.tar.gz if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` - name: Save published dmg binary (MacOS_latest) uses: actions/upload-artifact@v4 with: - name: tgz-osx-dmg-binary - path: ${{ runner.workspace }}/builddmg/${{ steps.set-file-base.outputs.FILE_BASE }}-osx.dmg.tar.gz + name: tgz-macos14_clang-dmg-binary + path: ${{ runner.workspace }}/builddmg/${{ steps.set-file-base.outputs.FILE_BASE }}-macos14_clang.dmg if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` build_and_test_S3_linux: @@ -487,24 +611,11 @@ jobs: # name: "Windows Intel CTest" runs-on: windows-latest + needs: [check-secret] steps: - name: Install Dependencies (Windows_intel) run: choco install ninja - - name: Check Secrets exists - id: set-signing-state - env: - super_secret: ${{ secrets.AZURE_ENDPOINT }} - run: | - if [[ '${{ env.super_secret }}' == '' ]] - then - SIGN_VAL=$(echo "false") - else - SIGN_VAL=$(echo "true") - fi - echo "BINSIGN=$SIGN_VAL" >> $GITHUB_OUTPUT - shell: bash - - name: add oneAPI to env uses: fortran-lang/setup-fortran@v1 id: setup-fortran @@ -548,11 +659,30 @@ jobs: run: 7z x ${{ steps.set-file-base.outputs.FILE_BASE }}.zip shell: bash + - name: Install TrustedSigning (Windows) + run: | + Invoke-WebRequest -Uri https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile .\nuget.exe + .\nuget.exe install Microsoft.Windows.SDK.BuildTools -Version 10.0.22621.3233 -x + .\nuget.exe install Microsoft.Trusted.Signing.Client -Version 1.0.53 -x + shell: pwsh + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} + + - name: create-json + id: create-json + uses: jsdaniell/create-json@v1.2.3 + with: + name: "credentials.json" + dir: '${{ steps.set-file-base.outputs.SOURCE_BASE }}' + json: '{"Endpoint": "${{ secrets.AZURE_ENDPOINT }}","CodeSigningAccountName": "${{ secrets.AZURE_CODE_SIGNING_NAME }}","CertificateProfileName": "${{ secrets.AZURE_CERT_PROFILE_NAME }}"}' + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} + - name: Run ctest (Windows_intel) with oneapi env: FC: ${{ steps.setup-fortran.outputs.fc }} CC: ${{ steps.setup-fortran.outputs.cc }} CXX: ${{ steps.setup-fortran.outputs.cxx }} + BINSIGN: ${{ needs.check-secret.outputs.sign-state }} + SIGNTOOLDIR: ${{ github.workspace }}/Microsoft.Windows.SDK.BuildTools/bin/10.0.22621.0/x64 run: | cd "${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}" cmake --workflow --preset=${{ inputs.preset_name }}-win-Intel --fresh @@ -567,12 +697,12 @@ jobs: endpoint: ${{ secrets.AZURE_ENDPOINT }} trusted-signing-account-name: ${{ secrets.AZURE_CODE_SIGNING_NAME }} certificate-profile-name: ${{ secrets.AZURE_CERT_PROFILE_NAME }} - files-folder: ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-MSVC + files-folder: ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-Intel files-folder-filter: msi file-digest: SHA256 timestamp-rfc3161: http://timestamp.acs.microsoft.com timestamp-digest: SHA256 - if: ${{ steps.set-signing-state.BINSIGN == 'true' }} + if: ${{ needs.check-secret.outputs.sign-state == 'exists' }} - name: Publish binary (Windows_intel) id: publish-ctest-binary @@ -591,13 +721,7 @@ jobs: id: publish-ctest-msi-binary run: | mkdir "${{ runner.workspace }}/buildmsi" - mkdir "${{ runner.workspace }}/buildmsi/hdf5" - Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING -Destination ${{ runner.workspace 
}}/buildmsi/hdf5/ - Copy-Item -Path ${{ runner.workspace }}/hdf5/${{ steps.set-file-base.outputs.SOURCE_BASE }}/COPYING_LBNL_HDF5 -Destination ${{ runner.workspace }}/buildmsi/hdf5/ - Copy-Item -Path ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-Intel/README.md -Destination ${{ runner.workspace }}/buildmsi/hdf5/ - Copy-Item -Path ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-Intel/* -Destination ${{ runner.workspace }}/buildmsi/hdf5/ -Include *.msi - cd "${{ runner.workspace }}/buildmsi" - 7z a -tzip ${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip hdf5 + Copy-Item -Path ${{ runner.workspace }}/hdf5/build/${{ inputs.preset_name }}-Intel/* -Destination ${{ runner.workspace }}/buildmsi/${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi -Include *.msi shell: pwsh - name: List files in the space (Windows_intel) @@ -618,7 +742,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: msi-vs2022_intel-binary - path: ${{ runner.workspace }}/buildmsi/${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip + path: ${{ runner.workspace }}/buildmsi/${{ steps.set-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` build_and_test_linux_intel: @@ -626,6 +750,7 @@ jobs: # name: "Ubuntu Intel CMake" runs-on: ubuntu-latest + needs: [check-secret] steps: - name: Install CMake Dependencies (Linux_intel) run: | diff --git a/.github/workflows/cmake.yml b/.github/workflows/cmake.yml index 21d201922de..7fe99c2f3cb 100644 --- a/.github/workflows/cmake.yml +++ b/.github/workflows/cmake.yml @@ -103,4 +103,8 @@ jobs: name: "CMake TestExpress Workflows" uses: ./.github/workflows/testxpr-cmake.yml - + call-release-cmake-julia: + name: "CMake Julia Workflows" + uses: ./.github/workflows/julia-cmake.yml + with: + build_mode: "Release" diff --git a/.github/workflows/daily-build.yml b/.github/workflows/daily-build.yml index ad53474b91c..2b7d6aef3bf 100644 --- a/.github/workflows/daily-build.yml +++ b/.github/workflows/daily-build.yml @@ -45,6 +45,9 @@ jobs: # use_tag: snapshot use_environ: snapshots secrets: + APPLE_CERTS_BASE64: ${{ secrets.APPLE_CERTS_BASE64 }} + APPLE_CERTS_BASE64_PASSWD: ${{ secrets.APPLE_CERTS_BASE64_PASSWD }} + KEYCHAIN_PASSWD: ${{ secrets.KEYCHAIN_PASSWD }} AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }} AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} @@ -64,7 +67,7 @@ jobs: if: ${{ needs.call-workflow-tarball.outputs.has_changes == 'true' }} call-workflow-release: - needs: [call-workflow-tarball, call-workflow-ctest, call-workflow-abi] + needs: [get-old-names, call-workflow-tarball, call-workflow-ctest, call-workflow-abi] permissions: contents: write # In order to allow tag creation uses: ./.github/workflows/release-files.yml @@ -85,5 +88,5 @@ jobs: file_base: ${{ needs.get-old-names.outputs.hdf5-name }} use_tag: snapshot use_environ: snapshots - if: ${{ needs.call-workflow-tarball.outputs.has_changes == 'true' }} + if: ${{ (needs.call-workflow-tarball.outputs.has_changes == 'true') && (needs.get-old-names.outputs.hdf5-name != needs.call-workflow-tarball.outputs.file_base) }} diff --git a/.github/workflows/hdfeos5.yml b/.github/workflows/hdfeos5.yml deleted file mode 100644 index 0d5cf969382..00000000000 --- a/.github/workflows/hdfeos5.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: hdfeos5 dev - -# Triggers the workflow on push or pull request or on demand -on: - workflow_dispatch: - 
push: - pull_request: - branches: [ develop ] - paths-ignore: - - '.github/CODEOWNERS' - - '.github/FUNDING.yml' - - 'doc/**' - - 'release_docs/**' - - 'ACKNOWLEDGEMENTS' - - 'COPYING**' - - '**.md' - -# Using concurrency to cancel any in-progress job or run -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} - cancel-in-progress: true - -permissions: - contents: read - -jobs: - build: - name: Build hdfeos5 - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.7 - - - name: Install Autotools Dependencies (Linux) - run: | - sudo apt update - sudo apt install automake autoconf libtool libtool-bin - - name: Install HDF5 - run: | - ./autogen.sh - ./configure --prefix=/usr/local --disable-tests --with-default-api-version=v16 - make - sudo make install - - name: Install HDF-EOS5 - run: | - wget -O HDF-EOS5.2.0.tar.gz "https://git.earthdata.nasa.gov/projects/DAS/repos/hdfeos5/raw/hdf-eos5-2.0-src.tar.gz?at=refs%2Fheads%2FHDFEOS5_2.0" - tar zxvf HDF-EOS5.2.0.tar.gz - cd hdf-eos5-2.0 - ./configure CC=/usr/local/bin/h5cc --prefix=/usr/local/ --enable-install-include - make - make check - sudo make install diff --git a/.github/workflows/intel-auto.yml b/.github/workflows/intel-auto.yml index ea983c3a76b..6e0380efb4e 100644 --- a/.github/workflows/intel-auto.yml +++ b/.github/workflows/intel-auto.yml @@ -46,7 +46,8 @@ jobs: $GITHUB_WORKSPACE/configure \ --enable-build-mode=${{ inputs.build_mode }} \ --enable-shared \ - --disable-fortran + --enable-cxx \ + --enable-fortran - name: Autotools Build shell: bash diff --git a/.github/workflows/intel-cmake.yml b/.github/workflows/intel-cmake.yml index a8d5b7d49b8..fb703d480b3 100644 --- a/.github/workflows/intel-cmake.yml +++ b/.github/workflows/intel-cmake.yml @@ -43,13 +43,13 @@ jobs: run: | mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" - cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake \ - -G Ninja \ - --log-level=VERBOSE \ + cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake -G Ninja --log-level=VERBOSE \ -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} \ + -DHDF5_BUILD_FORTRAN:BOOL=ON \ + -DHDF5_BUILD_CPP_LIB:BOOL=ON \ -DLIBAEC_USE_LOCALCONTENT=OFF \ -DZLIB_USE_LOCALCONTENT=OFF \ - $GITHUB_WORKSPACE + ${{ github.workspace }} - name: CMake Build (Linux) shell: bash @@ -97,7 +97,7 @@ jobs: run: | mkdir "${{ runner.workspace }}/build" Set-Location -Path "${{ runner.workspace }}\\build" - cmake -C ${{ github.workspace }}/config/cmake/cacheinit.cmake -G Ninja -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} -DHDF5_BUILD_FORTRAN=ON -DLIBAEC_USE_LOCALCONTENT=OFF -DZLIB_USE_LOCALCONTENT=OFF ${{ github.workspace }} + cmake -C ${{ github.workspace }}/config/cmake/cacheinit.cmake -G Ninja -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} -DHDF5_BUILD_FORTRAN=ON -DHDF5_BUILD_CPP_LIB=ON -DLIBAEC_USE_LOCALCONTENT=OFF -DZLIB_USE_LOCALCONTENT=OFF ${{ github.workspace }} - name: CMake Build (Windows) shell: pwsh diff --git a/.github/workflows/julia-auto.yml b/.github/workflows/julia-auto.yml new file mode 100644 index 00000000000..f21fd659413 --- /dev/null +++ b/.github/workflows/julia-auto.yml @@ -0,0 +1,79 @@ +name: hdf5 dev autotools julia + +on: + workflow_call: + inputs: + build_mode: + description: "release vs. 
debug build" + required: true + type: string + +permissions: + contents: read + +jobs: + julia_build_and_test: + name: "julia ${{ inputs.build_mode }}" + runs-on: ubuntu-latest + steps: + - name: Get Sources + uses: actions/checkout@v4.1.7 + + - name: Install Dependencies + shell: bash + run: | + sudo apt-get update + sudo apt-get install autoconf automake libtool libtool-bin libaec-dev + sudo apt-get install doxygen graphviz + sudo apt install -y zlib1g-dev libcurl4-openssl-dev libjpeg-dev wget curl bzip2 + sudo apt install -y m4 flex bison cmake libzip-dev openssl build-essential + + - name: Autotools Configure + shell: bash + run: | + sh ./autogen.sh + mkdir "${{ runner.workspace }}/build" + cd "${{ runner.workspace }}/build" + $GITHUB_WORKSPACE/configure \ + --enable-build-mode=${{ inputs.build_mode }} \ + --disable-fortran \ + --enable-shared \ + --disable-parallel \ + --prefix=/tmp + + - name: Autotools Build + shell: bash + run: | + make -j3 + working-directory: ${{ runner.workspace }}/build + + - name: Install HDF5 + shell: bash + run: | + make install + working-directory: ${{ runner.workspace }}/build + + - name: Install julia + uses: julia-actions/setup-julia@latest + with: + version: '1.6' + arch: 'x64' + + - name: Get julia hdf5 source + uses: actions/checkout@v4.1.7 + with: + repository: JuliaIO/HDF5.jl + path: . + + - name: Generate LocalPreferences + run: | + echo '[HDF5]' >> LocalPreferences.toml + echo 'libhdf5 = "/tmp/lib/libhdf5.so"' >> LocalPreferences.toml + echo 'libhdf5_hl = "/tmp/lib/libhdf5_hl.so"' >> LocalPreferences.toml + + - uses: julia-actions/julia-buildpkg@latest + + - name: Julia Run Tests + uses: julia-actions/julia-runtest@latest + env: + JULIA_DEBUG: Main diff --git a/.github/workflows/julia-cmake.yml b/.github/workflows/julia-cmake.yml new file mode 100644 index 00000000000..113b81bd70c --- /dev/null +++ b/.github/workflows/julia-cmake.yml @@ -0,0 +1,82 @@ +name: hdf5 dev CMake julia + +on: + workflow_call: + inputs: + build_mode: + description: "release vs. debug build" + required: true + type: string + +permissions: + contents: read + +jobs: + julia_build_and_test: + name: "julia ${{ inputs.build_mode }}" + runs-on: ubuntu-latest + steps: + - name: Get Sources + uses: actions/checkout@v4.1.7 + + - name: Install Dependencies + shell: bash + run: | + sudo apt-get update + sudo apt-get install ninja-build doxygen graphviz + sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev + sudo apt install -y libaec-dev zlib1g-dev wget curl bzip2 flex bison cmake libzip-dev openssl build-essential + + - name: CMake Configure + shell: bash + run: | + mkdir "${{ runner.workspace }}/build" + cd "${{ runner.workspace }}/build" + cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake -G Ninja \ + -DCMAKE_BUILD_TYPE=${{ inputs.build_mode }} \ + -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF \ + -DHDF5_ENABLE_PARALLEL:BOOL=OFF \ + -DHDF5_BUILD_CPP_LIB:BOOL=OFF \ + -DLIBAEC_USE_LOCALCONTENT=OFF \ + -DZLIB_USE_LOCALCONTENT=OFF \ + -DHDF5_BUILD_FORTRAN:BOOL=OFF \ + -DHDF5_BUILD_JAVA:BOOL=OFF \ + -DCMAKE_INSTALL_PREFIX=/tmp \ + $GITHUB_WORKSPACE + + - name: CMake Build + shell: bash + run: | + cmake --build . --parallel 3 --config ${{ inputs.build_mode }} + working-directory: ${{ runner.workspace }}/build + + - name: Install HDF5 + shell: bash + run: | + cmake --install . 
+ working-directory: ${{ runner.workspace }}/build + + - name: Install julia + uses: julia-actions/setup-julia@latest + with: + version: '1.6' + arch: 'x64' + + - name: Get julia hdf5 source + uses: actions/checkout@v4.1.7 + with: + repository: JuliaIO/HDF5.jl + path: . + + - name: Generate LocalPreferences + run: | + echo '[HDF5]' >> LocalPreferences.toml + echo 'libhdf5 = "/tmp/lib/libhdf5.so"' >> LocalPreferences.toml + echo 'libhdf5_hl = "/tmp/lib/libhdf5_hl.so"' >> LocalPreferences.toml + + - uses: julia-actions/julia-buildpkg@latest + + - name: Julia Run Tests + uses: julia-actions/julia-runtest@latest + env: + JULIA_DEBUG: Main diff --git a/.github/workflows/main-auto-par.yml b/.github/workflows/main-auto-par.yml index bd1b1c45902..ad893d04724 100644 --- a/.github/workflows/main-auto-par.yml +++ b/.github/workflows/main-auto-par.yml @@ -50,8 +50,9 @@ jobs: - name: Get Sources uses: actions/checkout@v4.1.7 - # AUTOTOOLS CONFIGURE - name: Autotools Configure + env: + NPROCS: 2 run: | sh ./autogen.sh mkdir "${{ runner.workspace }}/build" @@ -62,6 +63,7 @@ jobs: --with-default-api-version=v114 \ --enable-shared \ --enable-parallel \ + --enable-subfiling-vfd \ --disable-cxx \ --enable-fortran \ --disable-java \ @@ -71,7 +73,28 @@ jobs: --with-szlib=yes shell: bash - # BUILD - name: Autotools Build run: make -j3 working-directory: ${{ runner.workspace }}/build + + # ph5diff tests are in the tools/tests directory so they will get run + # here, so leave NPROCS set here as well + - name: Autotools Run Tests + env: + NPROCS: 2 + run: | + cd test && make check -j2 && cd .. + cd tools && make check -j2 && cd .. + cd hl && make check -j2 && cd .. + cd fortran/test && make check -j2 && cd ../.. + working-directory: ${{ runner.workspace }}/build + if: ${{ inputs.thread_safety == 'disable' }} + + - name: Autotools Run Parallel Tests + env: + NPROCS: 2 + run: | + cd testpar && make check && cd .. + cd fortran/testpar && make check -j2 && cd ../.. + working-directory: ${{ runner.workspace }}/build + if: ${{ inputs.thread_safety == 'disable' }} diff --git a/.github/workflows/main-auto-spc.yml b/.github/workflows/main-auto-spc.yml index b7ac7c4f50b..f019bb12501 100644 --- a/.github/workflows/main-auto-spc.yml +++ b/.github/workflows/main-auto-spc.yml @@ -428,7 +428,7 @@ jobs: --enable-cxx \ --disable-fortran \ --enable-java \ - --disable-mirror-vfd \ + --enable-mirror-vfd \ --enable-direct-vfd \ --disable-ros3-vfd \ --with-szlib=yes @@ -491,7 +491,7 @@ jobs: --enable-cxx \ --disable-fortran \ --enable-java \ - --disable-mirror-vfd \ + --enable-mirror-vfd \ --enable-direct-vfd \ --disable-ros3-vfd \ --with-szlib=yes diff --git a/.github/workflows/main-cmake-par.yml b/.github/workflows/main-cmake-par.yml index 9a87dead10d..c00caa713c9 100644 --- a/.github/workflows/main-cmake-par.yml +++ b/.github/workflows/main-cmake-par.yml @@ -47,6 +47,7 @@ jobs: -DBUILD_SHARED_LIBS=ON \ -DHDF5_ENABLE_ALL_WARNINGS=ON \ -DHDF5_ENABLE_PARALLEL:BOOL=ON \ + -DHDF5_ENABLE_SUBFILING_VFD:BOOL=ON \ -DHDF5_BUILD_CPP_LIB:BOOL=OFF \ -DHDF5_BUILD_FORTRAN=ON \ -DHDF5_BUILD_JAVA=OFF \ @@ -62,3 +63,16 @@ jobs: - name: CMake Build run: cmake --build . --parallel 3 --config ${{ inputs.build_mode }} working-directory: ${{ runner.workspace }}/build + + # + # RUN TESTS + # + - name: CMake Run Tests + run: ctest . -E MPI_TEST --parallel 2 -C ${{ inputs.build_mode }} -V + working-directory: ${{ runner.workspace }}/build + if: ${{ matrix.run_tests && (inputs.thread_safety != 'TS') }} + + - name: CMake Run Parallel Tests + run: ctest . 
-R MPI_TEST -C ${{ inputs.build_mode }} -V + working-directory: ${{ runner.workspace }}/build + if: ${{ matrix.run_tests && (inputs.thread_safety != 'TS') }} diff --git a/.github/workflows/main-cmake.yml b/.github/workflows/main-cmake.yml index 50f3400a225..9c58f0ba8e2 100644 --- a/.github/workflows/main-cmake.yml +++ b/.github/workflows/main-cmake.yml @@ -141,7 +141,7 @@ jobs: id: setup-fortran with: compiler: gcc - version: 12 + version: 14 if: ${{ matrix.os == 'macos-latest' }} - name: Install Dependencies @@ -266,7 +266,7 @@ jobs: - name: Save published binary (Mac_latest) uses: actions/upload-artifact@v4 with: - name: tgz-osx-${{ inputs.build_mode }}-binary + name: tgz-macos14_clang-${{ inputs.build_mode }}-binary path: ${{ runner.workspace }}/build/HDF5-*-Darwin.tar.gz if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` if: ${{ (matrix.os == 'macos-latest') && (inputs.thread_safety != 'TS') }} @@ -274,7 +274,7 @@ jobs: - name: Save published dmg binary (Mac_latest) uses: actions/upload-artifact@v4 with: - name: tgz-osx-${{ inputs.build_mode }}-dmg-binary + name: tgz-macos14_clang-${{ inputs.build_mode }}-dmg-binary path: ${{ runner.workspace }}/build/HDF5-*-Darwin.dmg if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` if: ${{ (matrix.os == 'macos-latest') && (inputs.thread_safety != 'TS') }} diff --git a/.github/workflows/nvhpc-auto.yml b/.github/workflows/nvhpc-auto.yml index 775143f84c9..bca490485d7 100644 --- a/.github/workflows/nvhpc-auto.yml +++ b/.github/workflows/nvhpc-auto.yml @@ -35,21 +35,21 @@ jobs: curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list sudo apt-get update -y - sudo apt-get install -y nvhpc-24-5 + sudo apt-get install -y nvhpc-24-7 echo "NVHPCSDK=/opt/nvidia/hpc_sdk" >> $GITHUB_ENV - echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/bin/nvc++" >> $GITHUB_ENV - echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/bin/nvc" >> $GITHUB_ENV - echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/bin/nvfortran" >> $GITHUB_ENV - echo "CC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/comm_libs/openmpi4/bin/mpicc" >> $GITHUB_ENV - echo "FC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/comm_libs/openmpi4/bin/mpifort" >> $GITHUB_ENV - echo "LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/lib" >> $GITHUB_ENV + echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin/nvc++" >> $GITHUB_ENV + echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin/nvc" >> $GITHUB_ENV + echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin/nvfortran" >> $GITHUB_ENV + echo "CC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/comm_libs/openmpi4/bin/mpicc" >> $GITHUB_ENV + echo "FC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/comm_libs/openmpi4/bin/mpifort" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/lib" >> $GITHUB_ENV echo "DESTDIR=/tmp" >> $GITHUB_ENV - name: Autotools Configure shell: bash run: | export RUNPARALLEL="mpiexec -np 2" - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/bin:$PATH + export 
PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin:$PATH sh ./autogen.sh mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" @@ -63,13 +63,33 @@ jobs: - name: Autotools Build shell: bash run: | - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/bin:$PATH + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin:$PATH make -j3 working-directory: ${{ runner.workspace }}/build + # ph5diff tests are in the tools/tests directory so they will get run + # here, so leave NPROCS set here as well + - name: Autotools Run Tests + env: + NPROCS: 2 + run: | + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin:$PATH + cd tools && make check -j2 && cd .. + cd hl && make check -j2 && cd .. + cd fortran && make check -j2 && cd .. + working-directory: ${{ runner.workspace }}/build + + - name: Autotools Run Parallel Tests + env: + NPROCS: 2 + run: | + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin:$PATH + cd testpar && make check && cd .. + working-directory: ${{ runner.workspace }}/build + - name: Autotools Install shell: bash run: | - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/bin:$PATH + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin:$PATH make install working-directory: ${{ runner.workspace }}/build diff --git a/.github/workflows/nvhpc-cmake.yml b/.github/workflows/nvhpc-cmake.yml index 08da821702c..3dbefa22861 100644 --- a/.github/workflows/nvhpc-cmake.yml +++ b/.github/workflows/nvhpc-cmake.yml @@ -34,20 +34,20 @@ jobs: curl https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg echo 'deb [signed-by=/usr/share/keyrings/nvidia-hpcsdk-archive-keyring.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list sudo apt-get update -y - sudo apt-get install -y nvhpc-24-5 + sudo apt-get install -y nvhpc-24-7 echo "NVHPCSDK=/opt/nvidia/hpc_sdk" >> $GITHUB_ENV - echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/bin/nvc++" >> $GITHUB_ENV - echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/bin/nvc" >> $GITHUB_ENV - echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/bin/nvfortran" >> $GITHUB_ENV - echo "CC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/comm_libs/openmpi4/bin/mpicc" >> $GITHUB_ENV - echo "FC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/comm_libs/openmpi4/bin/mpifort" >> $GITHUB_ENV - echo "LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/cuda/12.3/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/lib" >> $GITHUB_ENV + echo "OMPI_CXX=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin/nvc++" >> $GITHUB_ENV + echo "OMPI_CC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin/nvc" >> $GITHUB_ENV + echo "OMPI_FC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin/nvfortran" >> $GITHUB_ENV + echo "CC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/comm_libs/openmpi4/bin/mpicc" >> $GITHUB_ENV + echo "FC=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/comm_libs/openmpi4/bin/mpifort" >> $GITHUB_ENV + echo 
"LD_LIBRARY_PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/cuda/12.3/lib64:/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/lib" >> $GITHUB_ENV echo "DESTDIR=/tmp" >> $GITHUB_ENV - name: CMake Configure shell: bash run: | - export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/24.5/compilers/bin:$PATH + export PATH=/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/comm_libs/openmpi4/bin:/opt/nvidia/hpc_sdk/Linux_x86_64/24.7/compilers/bin:$PATH mkdir "${{ runner.workspace }}/build" cd "${{ runner.workspace }}/build" cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake -G Ninja \ @@ -68,3 +68,16 @@ jobs: run: | cmake --build . --parallel 3 --config ${{ inputs.build_mode }} working-directory: ${{ runner.workspace }}/build + + # Skipping dt_arith and dtransform while we investigate long double failures + - name: CMake Run Tests + shell: bash + run: | + ctest . -E "MPI_TEST|H5TEST-dt_arith|H5TEST-dtransform" --parallel 2 -C ${{ inputs.build_mode }} -V + working-directory: ${{ runner.workspace }}/build + + - name: CMake Run Parallel Tests + shell: bash + run: | + ctest . -R MPI_TEST -C ${{ inputs.build_mode }} -V + working-directory: ${{ runner.workspace }}/build diff --git a/.github/workflows/publish-branch.yml b/.github/workflows/publish-branch.yml index 1e5b99bd0b0..6c52c75308a 100644 --- a/.github/workflows/publish-branch.yml +++ b/.github/workflows/publish-branch.yml @@ -22,7 +22,7 @@ jobs: steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - name: Get Sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@9a9194f87191a7e9055e3e9b95b8cfb13023bb08 # v4.1.7 with: fetch-depth: 0 ref: '${{ github.head_ref || github.ref_name }}' diff --git a/.github/workflows/publish-release.yml b/.github/workflows/publish-release.yml index c153d217d87..dd7f4bfbc86 100644 --- a/.github/workflows/publish-release.yml +++ b/.github/workflows/publish-release.yml @@ -26,7 +26,7 @@ jobs: steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - name: Get Sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@9a9194f87191a7e9055e3e9b95b8cfb13023bb08 # v4.1.7 with: fetch-depth: 0 ref: '${{ github.head_ref || github.ref_name }}' diff --git a/.github/workflows/release-files.yml b/.github/workflows/release-files.yml index b9d76d26dba..af5f98ba6c6 100644 --- a/.github/workflows/release-files.yml +++ b/.github/workflows/release-files.yml @@ -40,7 +40,7 @@ jobs: steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - name: Get Sources - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@9a9194f87191a7e9055e3e9b95b8cfb13023bb08 # v4.1.7 with: fetch-depth: 0 ref: '${{ github.head_ref || github.ref_name }}' @@ -109,13 +109,13 @@ jobs: - name: Get published binary (MacOS) uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: tgz-osx-binary + name: tgz-macos14_clang-binary path: ${{ github.workspace }} - name: Get published dmg binary (MacOS) uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: tgz-osx-dmg-binary + name: tgz-macos14_clang-dmg-binary path: ${{ github.workspace }} - name: Get published binary (Linux) @@ -185,17 +185,17 @@ jobs: sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}.doxygen.zip > ${{ steps.get-file-base.outputs.FILE_BASE 
}}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}.zip >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt - sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-osx.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt - sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-osx.dmg.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-macos14_clang.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-macos14_clang.dmg >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt - sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.deb.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt - sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.deb >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc_s3.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt - sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi.zip >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_intel.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.zip >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt - sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt + sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt sha256sum ${{ steps.get-file-base.outputs.FILE_BASE }}.html.abi.reports.tar.gz >> ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt - name: Create sha256 sums for files for nonversioned files @@ -231,17 +231,17 @@ jobs: ${{ steps.get-file-base.outputs.FILE_BASE }}.doxygen.zip ${{ steps.get-file-base.outputs.FILE_BASE }}.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}.zip - ${{ steps.get-file-base.outputs.FILE_BASE }}-osx.tar.gz - ${{ steps.get-file-base.outputs.FILE_BASE }}-osx.dmg.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE }}-macos14_clang.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE }}-macos14_clang.dmg ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.tar.gz - ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.deb.tar.gz - ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE 
}}-ubuntu-2204_gcc.deb + ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc_s3.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip - ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_intel.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.zip - ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi ${{ steps.get-file-base.outputs.FILE_BASE }}.html.abi.reports.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` @@ -260,17 +260,17 @@ jobs: ${{ steps.get-file-base.outputs.FILE_BASE }}.zip hdf5.tar.gz hdf5.zip - ${{ steps.get-file-base.outputs.FILE_BASE }}-osx.tar.gz - ${{ steps.get-file-base.outputs.FILE_BASE }}-osx.dmg.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE }}-macos14_clang.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE }}-macos14_clang.dmg ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.tar.gz - ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.deb.tar.gz - ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.deb + ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc_s3.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip - ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_intel.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.zip - ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi ${{ steps.get-file-base.outputs.FILE_BASE }}.html.abi.reports.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}.sha256sums.txt if-no-files-found: error # 'warn' or 'ignore' are also available, defaults to `warn` diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9890c8abb45..e41b30c4d37 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -40,12 +40,22 @@ jobs: file_base: ${{ needs.call-workflow-tarball.outputs.file_base }} snap_name: hdf5-${{ needs.call-workflow-tarball.outputs.source_base }} use_environ: release + secrets: + APPLE_CERTS_BASE64: ${{ secrets.APPLE_CERTS_BASE64 }} + APPLE_CERTS_BASE64_PASSWD: ${{ secrets.APPLE_CERTS_BASE64_PASSWD }} + KEYCHAIN_PASSWD: ${{ secrets.KEYCHAIN_PASSWD }} + AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }} + AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} + AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} + AZURE_ENDPOINT: ${{ secrets.AZURE_ENDPOINT }} + AZURE_CODE_SIGNING_NAME: ${{ secrets.AZURE_CODE_SIGNING_NAME }} + AZURE_CERT_PROFILE_NAME: ${{ secrets.AZURE_CERT_PROFILE_NAME }} call-workflow-abi: needs: [log-the-inputs, call-workflow-tarball, call-workflow-ctest] uses: ./.github/workflows/abi-report.yml with: - file_ref: '1_14_3' + file_ref: '1.14.4.3' file_base: ${{ needs.call-workflow-tarball.outputs.file_base }} use_tag: ${{ needs.log-the-inputs.outputs.rel_tag }} use_environ: release diff --git 
a/.github/workflows/remove-files.yml b/.github/workflows/remove-files.yml index e34cc131414..418065c8e00 100644 --- a/.github/workflows/remove-files.yml +++ b/.github/workflows/remove-files.yml @@ -50,14 +50,14 @@ jobs: ${{ steps.get-file-base.outputs.FILE_BASE }}.doxygen.zip ${{ steps.get-file-base.outputs.FILE_BASE }}.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}.zip - ${{ steps.get-file-base.outputs.FILE_BASE }}-osx.tar.gz - ${{ steps.get-file-base.outputs.FILE_BASE }}-osx.dmg.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE }}-macos14_clang.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE }}-macos14_clang.dmg ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.tar.gz - ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.deb.tar.gz - ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm.tar.gz + ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.deb + ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc.rpm ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_gcc_s3.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.zip - ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_cl.msi ${{ steps.get-file-base.outputs.FILE_BASE }}-ubuntu-2204_intel.tar.gz ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.zip - ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi.zip + ${{ steps.get-file-base.outputs.FILE_BASE }}-win-vs2022_intel.msi diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index e67627fd885..d59f8c9bc6b 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -32,7 +32,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + uses: actions/checkout@9a9194f87191a7e9055e3e9b95b8cfb13023bb08 # v4.1.7 with: persist-credentials: false @@ -67,6 +67,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
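The release-files changes above also rewrite the checksum manifest so it lists the new artifact names directly (.deb, .rpm, .msi, and the macos14_clang tarball and dmg) instead of the old .tar.gz/.zip wrappers. The manifest is plain sha256sum output, created by the first redirect and extended by appends; a small sketch with hypothetical file names standing in for the FILE_BASE outputs:

    # Sketch: build a sha256 manifest for release artifacts (names are placeholders).
    FILE_BASE=hdf5-X.Y.Z                                               # assumed base name
    sha256sum ${FILE_BASE}.doxygen.zip               >  ${FILE_BASE}.sha256sums.txt  # first entry creates the file
    sha256sum ${FILE_BASE}.tar.gz                    >> ${FILE_BASE}.sha256sums.txt  # later entries append
    sha256sum ${FILE_BASE}-ubuntu-2204_gcc.deb       >> ${FILE_BASE}.sha256sums.txt
    sha256sum ${FILE_BASE}-macos14_clang.dmg         >> ${FILE_BASE}.sha256sums.txt
    sha256sum ${FILE_BASE}-win-vs2022_cl.msi         >> ${FILE_BASE}.sha256sums.txt
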
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 + uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 with: sarif_file: results.sarif diff --git a/CITATION.cff b/CITATION.cff index 4e611a57468..c96341f138c 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -9,4 +9,4 @@ authors: website: 'https://www.hdfgroup.org' repository-code: 'https://github.com/HDFGroup/hdf5' url: 'https://www.hdfgroup.org/HDF5/' -repository-artifact: 'https://www.hdfgroup.org/downloads/hdf5/' +repository-artifact: 'https://support.hdfgroup.org/downloads/index.html' diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake index 313dbb2e8e3..a2564e68ad5 100644 --- a/CMakeInstallation.cmake +++ b/CMakeInstallation.cmake @@ -268,6 +268,11 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES) endif () set (CPACK_PACKAGE_ICON "${HDF_RESOURCES_DIR}/hdf.bmp") + set (CPACK_ORIG_SOURCE_DIR ${CMAKE_SOURCE_DIR}) + if ("$ENV{BINSIGN}" STREQUAL "exists") + set (CPACK_PRE_BUILD_SCRIPTS ${CMAKE_SOURCE_DIR}/config/cmake/SignPackageFiles.cmake) + endif () + set (CPACK_GENERATOR "TGZ") if (WIN32) set (CPACK_GENERATOR "ZIP") diff --git a/CMakePresets.json b/CMakePresets.json index ac436e2c9e6..472a33df0b6 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -53,6 +53,8 @@ "BLOSC_ZLIB_PACKAGE_NAME": {"type": "STRING", "value": "zlib"}, "BLOSC2_TGZ_NAME": {"type": "STRING", "value": "c-blosc2-2.14.4.tar.gz"}, "BLOSC2_PACKAGE_NAME": {"type": "STRING", "value": "blosc2"}, + "BLOSC2_ZLIB_TGZ_NAME": {"type": "STRING", "value": "zlib-1.3.tar.gz"}, + "BLOSC2_ZLIB_PACKAGE_NAME": {"type": "STRING", "value": "zlib"}, "BZ2_TGZ_NAME": {"type": "STRING", "value": "bzip2-bzip2-1.0.8.tar.gz"}, "BZ2_PACKAGE_NAME": {"type": "STRING", "value": "bz2"}, "FPZIP_TGZ_NAME": {"type": "STRING", "value": "fpzip-1.3.0.tar.gz"}, @@ -153,6 +155,26 @@ "ci-StdShar" ] }, + { + "name": "ci-StdShar-macos-Clang", + "description": "Clang Standard Config for macos (Release)", + "inherits": [ + "ci-macos-Release-Clang", + "ci-CPP", + "ci-Java", + "ci-StdShar" + ] + }, + { + "name": "ci-StdShar-macos-GNUC", + "description": "GNUC Standard Config for macos (Release)", + "inherits": [ + "ci-macos-Release-GNUC", + "ci-CPP", + "ci-Java", + "ci-StdShar" + ] + }, { "name": "ci-StdShar-GNUC", "description": "GNUC Standard Config for x64 (Release)", @@ -201,6 +223,23 @@ "ci-x64-Release-Clang" ] }, + { + "name": "ci-StdShar-macos-Clang", + "description": "Clang Standard Build for macos (Release)", + "configurePreset": "ci-StdShar-macos-Clang", + "inherits": [ + "ci-macos-Release-Clang" + ] + }, + { + "name": "ci-StdShar-macos-GNUC", + "description": "GNUC Standard Build for macos (Release)", + "configurePreset": "ci-StdShar-macos-GNUC", + "verbose": true, + "inherits": [ + "ci-macos-Release-GNUC" + ] + }, { "name": "ci-StdShar-GNUC", "description": "GNUC Standard Build for x64 (Release)", @@ -250,22 +289,24 @@ ] }, { - "name": "ci-StdShar-OSX-Clang", - "configurePreset": "ci-StdShar-Clang", + "name": "ci-StdShar-macos-Clang", + "configurePreset": "ci-StdShar-macos-Clang", "inherits": [ - "ci-x64-Release-Clang" + "ci-macos-Release-Clang" ], "execution": { "noTestsAction": "error", "timeout": 180, "jobs": 2 - }, - "condition": { - "type": "equals", - "lhs": "${hostSystemName}", - "rhs": "Darwin" } }, + { + "name": "ci-StdShar-macos-GNUC", + "configurePreset": "ci-StdShar-macos-GNUC", + "inherits": [ + "ci-macos-Release-GNUC" + ] + }, { "name": 
"ci-StdShar-GNUC", "configurePreset": "ci-StdShar-GNUC", @@ -316,6 +357,16 @@ "configurePreset": "ci-StdShar-Clang", "inherits": "ci-x64-Release-Clang" }, + { + "name": "ci-StdShar-macos-Clang", + "configurePreset": "ci-StdShar-macos-Clang", + "inherits": "ci-macos-Release-Clang" + }, + { + "name": "ci-StdShar-macos-GNUC", + "configurePreset": "ci-StdShar-macos-GNUC", + "inherits": "ci-macos-Release-GNUC" + }, { "name": "ci-StdShar-GNUC", "configurePreset": "ci-StdShar-GNUC", @@ -352,12 +403,12 @@ ] }, { - "name": "ci-StdShar-OSX-Clang", + "name": "ci-StdShar-macos-Clang", "steps": [ - {"type": "configure", "name": "ci-StdShar-Clang"}, - {"type": "build", "name": "ci-StdShar-Clang"}, - {"type": "test", "name": "ci-StdShar-OSX-Clang"}, - {"type": "package", "name": "ci-StdShar-Clang"} + {"type": "configure", "name": "ci-StdShar-macos-Clang"}, + {"type": "build", "name": "ci-StdShar-macos-Clang"}, + {"type": "test", "name": "ci-StdShar-macos-Clang"}, + {"type": "package", "name": "ci-StdShar-macos-Clang"} ] }, { @@ -369,6 +420,15 @@ {"type": "package", "name": "ci-StdShar-GNUC"} ] }, + { + "name": "ci-StdShar-macos-GNUC", + "steps": [ + {"type": "configure", "name": "ci-StdShar-macos-GNUC"}, + {"type": "build", "name": "ci-StdShar-macos-GNUC"}, + {"type": "test", "name": "ci-StdShar-macos-GNUC"}, + {"type": "package", "name": "ci-StdShar-macos-GNUC"} + ] + }, { "name": "ci-StdShar-GNUC-S3", "steps": [ diff --git a/HDF5Examples/C/CMakeLists.txt b/HDF5Examples/C/CMakeLists.txt index 97a9ea1f672..b394212150e 100644 --- a/HDF5Examples/C/CMakeLists.txt +++ b/HDF5Examples/C/CMakeLists.txt @@ -11,7 +11,7 @@ add_subdirectory (${PROJECT_SOURCE_DIR}/H5T) if (${H5_LIBVER_DIR} GREATER 16) # add_subdirectory (${PROJECT_SOURCE_DIR}/Perf) - if (USE_SHARED_LIBS AND HDF_BUILD_FILTERS AND HDF5_ENABLE_PLUGIN_SUPPORT) + if (USE_SHARED_LIBS AND H5EX_BUILD_FILTERS AND HDF5_ENABLE_PLUGIN_SUPPORT) add_subdirectory (${PROJECT_SOURCE_DIR}/H5FLT) endif () endif () diff --git a/HDF5Examples/CMakeLists.txt b/HDF5Examples/CMakeLists.txt index b155fb4025d..5ce806f6253 100644 --- a/HDF5Examples/CMakeLists.txt +++ b/HDF5Examples/CMakeLists.txt @@ -39,8 +39,8 @@ message (STATUS "HDF5 H5_LIBVER_DIR: ${H5_LIBVER_DIR} HDF5_VERSION_MAJOR: ${HDF5 #----------------------------------------------------------------------------- # Option to build JAVA examples #----------------------------------------------------------------------------- -option (HDF_BUILD_JAVA "Build JAVA support" OFF) -if (HDF_BUILD_JAVA) +option (H5EX_BUILD_JAVA "Build JAVA support" OFF) +if (H5EX_BUILD_JAVA) find_package (Java) include (${H5EX_RESOURCES_DIR}/UseJava.cmake) @@ -73,8 +73,8 @@ endif () # Option to Enable MPI Parallel #----------------------------------------------------------------------------- set (CMAKE_MODULE_PATH ${H5EX_RESOURCES_DIR} ${CMAKE_MODULE_PATH}) -option (HDF_ENABLE_PARALLEL "Enable parallel build (requires MPI)" OFF) -if (HDF_ENABLE_PARALLEL) +option (H5EX_ENABLE_PARALLEL "Enable parallel build (requires MPI)" OFF) +if (H5EX_ENABLE_PARALLEL) find_package(MPI REQUIRED) if (MPI_C_FOUND) set (H5_HAVE_PARALLEL 1) @@ -99,55 +99,6 @@ if (H5_HAVE_PARALLEL) INCLUDE_DIRECTORIES (${MPI_C_INCLUDE_DIRS}) endif () -# Determine if a threading package is available on this system -option (HDF5_ENABLE_THREADS "Enable thread support" ON) -set (THREADS_PREFER_PTHREAD_FLAG ON) -find_package (Threads) -if (Threads_FOUND) - set (H5_HAVE_THREADS 1) - set (CMAKE_REQUIRED_LIBRARIES ${CMAKE_THREAD_LIBS_INIT}) - - # Determine which threading package to use - # 
Comment out check for C11 threads for now, since it conflicts with the - # current --std=c99 compile flags at configuration time. When we switch to - # --std=c11, this can be uncommented. - #CHECK_INCLUDE_FILE("threads.h" HAVE_THREADS_H) - if (WIN32) - # When Win32 is available, we use those threads - set (H5_HAVE_WIN_THREADS 1) - elseif (HAVE_THREADS_H) - # When C11 threads are available, those are the top choice - set (H5_HAVE_C11_THREADS 1) - elseif (CMAKE_USE_PTHREADS_INIT) - set (H5_HAVE_PTHREAD_H 1) - else () - message (FATAL_ERROR " **** thread support requires C11 threads, Win32 threads or Pthreads **** ") - endif () - - # Check for compiler support for atomic variables - CHECK_INCLUDE_FILE("stdatomic.h" HAVE_STDATOMIC_H) - if (HAVE_STDATOMIC_H) - set (H5_HAVE_STDATOMIC_H 1) - endif() -endif () - -#----------------------------------------------------------------------------- -# Option to use threadsafe -#----------------------------------------------------------------------------- -option (HDF_ENABLE_THREADSAFE "Enable Threadsafety" OFF) -# Note that HDF_ENABLE_THREADSAFE is the CMake option for determining -# whether to enable thread-safety in the examples. HDF5_ENABLE_THREADSAFE -# is the CMake option determining whether HDF5 was configured with -# thread-safety enabled. -if (HDF_ENABLE_THREADSAFE AND HDF5_ENABLE_THREADSAFE) - # Check for threading package - if (NOT Threads_FOUND) - message (FATAL_ERROR " **** thread-safety option requires a threading package and none was found **** ") - endif () - - set (H5_HAVE_THREADSAFE 1) -endif () - set_directory_properties(PROPERTIES INCLUDE_DIRECTORIES "${H5EX_HDF5_INCLUDE_DIRS}" ) @@ -176,8 +127,8 @@ if (${H5_LIBVER_DIR} GREATER 16) set (H5_FC_FUNC "H5_FC_FUNC(name,NAME) name ## _") set (H5_FC_FUNC_ "H5_FC_FUNC_(name,NAME) name ## _") if (EXISTS "${H5EXAMPLES_SOURCE_DIR}/FORTRAN" AND IS_DIRECTORY "${H5EXAMPLES_SOURCE_DIR}/FORTRAN") - option (HDF_BUILD_FORTRAN "Build examples FORTRAN support" OFF) - if (HDF_BUILD_FORTRAN AND HDF5_BUILD_FORTRAN) + option (H5EX_BUILD_FORTRAN "Build examples FORTRAN support" OFF) + if (H5EX_BUILD_FORTRAN AND HDF5_BUILD_FORTRAN) set (H5EX_LINK_Fortran_LIBS ${H5EX_HDF5_LINK_LIBS}) # Parallel IO usage requires MPI to be Linked and Included @@ -191,10 +142,10 @@ if (${H5_LIBVER_DIR} GREATER 16) configure_file (${H5EX_F90_SRC_DIR}/H5D/h5_version.h.in ${PROJECT_BINARY_DIR}/FORTRAN/H5D/h5_version.h @ONLY) configure_file (${H5EX_F90_SRC_DIR}/H5D/h5_version.h.in ${PROJECT_BINARY_DIR}/FORTRAN/H5G/h5_version.h @ONLY) else () - set (HDF_BUILD_FORTRAN OFF CACHE BOOL "Build examples FORTRAN support" FORCE) + set (H5EX_BUILD_FORTRAN OFF CACHE BOOL "Build examples FORTRAN support" FORCE) endif () else () - set (HDF_BUILD_FORTRAN OFF CACHE BOOL "Build examples FORTRAN support" FORCE) + set (H5EX_BUILD_FORTRAN OFF CACHE BOOL "Build examples FORTRAN support" FORCE) endif () if (${H5_LIBVER_DIR} GREATER 18) @@ -202,29 +153,29 @@ if (${H5_LIBVER_DIR} GREATER 16) # Option to build JAVA examples #----------------------------------------------------------------------------- if (EXISTS "${H5EXAMPLES_SOURCE_DIR}/JAVA" AND IS_DIRECTORY "${H5EXAMPLES_SOURCE_DIR}/JAVA") - option (HDF_BUILD_JAVA "Build examples JAVA support" OFF) + option (H5EX_BUILD_JAVA "Build examples JAVA support" OFF) else () - set (HDF_BUILD_JAVA OFF CACHE BOOL "Build examples JAVA support" FORCE) + set (H5EX_BUILD_JAVA OFF CACHE BOOL "Build examples JAVA support" FORCE) endif () else () - set (HDF_BUILD_JAVA OFF CACHE BOOL "Build examples JAVA support" FORCE) + 
set (H5EX_BUILD_JAVA OFF CACHE BOOL "Build examples JAVA support" FORCE) endif () #----------------------------------------------------------------------------- # Build the CPP Examples #----------------------------------------------------------------------------- if (EXISTS "${H5EXAMPLES_SOURCE_DIR}/CXX" AND IS_DIRECTORY "${H5EXAMPLES_SOURCE_DIR}/CXX") - option (HDF_BUILD_CPP_LIB "Build examples C++ support" OFF) + option (H5EX_BUILD_CPP_LIB "Build examples C++ support" OFF) else () - set (HDF_BUILD_CPP_LIB OFF CACHE BOOL "Build examples C++ support" FORCE) + set (H5EX_BUILD_CPP_LIB OFF CACHE BOOL "Build examples C++ support" FORCE) endif () #----------------------------------------------------------------------------- # Option to build filter examples #----------------------------------------------------------------------------- if (EXISTS "${H5EXAMPLES_SOURCE_DIR}/C/H5FLT" AND IS_DIRECTORY "${H5EXAMPLES_SOURCE_DIR}/C/H5FLT") - option (HDF_BUILD_FILTERS "Build examples PLUGIN filter support" OFF) - if (HDF_BUILD_FILTERS AND HDF5_ENABLE_PLUGIN_SUPPORT) + option (H5EX_BUILD_FILTERS "Build examples PLUGIN filter support" OFF) + if (H5EX_BUILD_FILTERS AND HDF5_ENABLE_PLUGIN_SUPPORT) if(DEFINED ENV{HDF5_PLUGIN_PATH}) message (STATUS "ENV PATH=$ENV{HDF5_PLUGIN_PATH}") set (H5EX_HDF5_PLUGIN_PATH $ENV{HDF5_PLUGIN_PATH}) @@ -236,32 +187,32 @@ if (${H5_LIBVER_DIR} GREATER 16) endif () message (STATUS "H5EX_HDF5_PLUGIN_PATH=${H5EX_HDF5_PLUGIN_PATH}") else () - set (HDF_BUILD_FILTERS OFF CACHE BOOL "Build examples PLUGIN filter support" FORCE) + set (H5EX_BUILD_FILTERS OFF CACHE BOOL "Build examples PLUGIN filter support" FORCE) endif () else () - set (HDF_BUILD_FILTERS OFF CACHE BOOL "Build examples PLUGIN filter support" FORCE) + set (H5EX_BUILD_FILTERS OFF CACHE BOOL "Build examples PLUGIN filter support" FORCE) endif () else () - set (HDF_BUILD_FORTRAN OFF} CACHE BOOL "Build examples FORTRAN support" FORCE) - set (HDF_BUILD_JAVA OFF CACHE BOOL "Build examples JAVA support" FORCE) - set (HDF_BUILD_CPP_LIB OFF CACHE BOOL "Build examples C++ support" FORCE) - set (HDF_BUILD_FILTERS OFF CACHE BOOL "Build examples PLUGIN filter support" FORCE) + set (H5EX_BUILD_FORTRAN OFF} CACHE BOOL "Build examples FORTRAN support" FORCE) + set (H5EX_BUILD_JAVA OFF CACHE BOOL "Build examples JAVA support" FORCE) + set (H5EX_BUILD_CPP_LIB OFF CACHE BOOL "Build examples C++ support" FORCE) + set (H5EX_BUILD_FILTERS OFF CACHE BOOL "Build examples PLUGIN filter support" FORCE) endif () #----------------------------------------------------------------------------- # Build examples #----------------------------------------------------------------------------- add_subdirectory (C) -if (HDF_BUILD_FORTRAN AND HDF5_BUILD_FORTRAN) +if (H5EX_BUILD_FORTRAN AND HDF5_BUILD_FORTRAN) add_subdirectory (FORTRAN) endif () -if (HDF_BUILD_JAVA AND HDF5_BUILD_JAVA) +if (H5EX_BUILD_JAVA AND HDF5_BUILD_JAVA) add_subdirectory (JAVA) endif () -if (HDF_BUILD_CPP_LIB AND HDF5_BUILD_CPP_LIB) +if (H5EX_BUILD_CPP_LIB AND HDF5_BUILD_CPP_LIB) add_subdirectory (CXX) endif () -if (HDF_BUILD_PYTHON) +if (H5EX_BUILD_PYTHON) add_subdirectory (PYTHON) endif () diff --git a/HDF5Examples/CMakePresets.json b/HDF5Examples/CMakePresets.json index 1dc335ea715..0f2c1b5e013 100644 --- a/HDF5Examples/CMakePresets.json +++ b/HDF5Examples/CMakePresets.json @@ -18,21 +18,21 @@ "name": "ci-StdJava", "hidden": true, "cacheVariables": { - "HDF_BUILD_JAVA": "ON" + "H5EX_BUILD_JAVA": "ON" } }, { "name": "ci-StdFortran", "hidden": true, "cacheVariables": { - 
"HDF_BUILD_FORTRAN": "ON" + "H5EX_BUILD_FORTRAN": "ON" } }, { "name": "ci-StdPlugins", "hidden": true, "cacheVariables": { - "HDF_BUILD_FILTERS": "ON" + "H5EX_BUILD_FILTERS": "ON" } }, { @@ -137,7 +137,7 @@ ] }, { - "name": "ci-StdShar-OSX-Clang", + "name": "ci-StdShar-MACOS-Clang", "configurePreset": "ci-StdShar-Clang", "inherits": [ "ci-x64-Release-Clang" @@ -203,11 +203,11 @@ ] }, { - "name": "ci-StdShar-OSX-Clang", + "name": "ci-StdShar-MACOS-Clang", "steps": [ {"type": "configure", "name": "ci-StdShar-Clang"}, {"type": "build", "name": "ci-StdShar-Clang"}, - {"type": "test", "name": "ci-StdShar-OSX-Clang"} + {"type": "test", "name": "ci-StdShar-MACOS-Clang"} ] }, { diff --git a/HDF5Examples/FORTRAN/H5D/CMakeLists.txt b/HDF5Examples/FORTRAN/H5D/CMakeLists.txt index b784df864a2..e7c59b420e2 100644 --- a/HDF5Examples/FORTRAN/H5D/CMakeLists.txt +++ b/HDF5Examples/FORTRAN/H5D/CMakeLists.txt @@ -10,7 +10,7 @@ project (HDF5Examples_FORTRAN_H5D Fortran) # Setup include Directories #----------------------------------------------------------------------------- set_directory_properties(PROPERTIES INCLUDE_DIRECTORIES - "${CMAKE_Fortran_MODULE_DIRECTORY}${HDF_MOD_EXT};${HDF5_F90_BINARY_DIR};${PROJECT_BINARY_DIR};${CMAKE_LIBRARY_OUTPUT_DIRECTORY}" + "${CMAKE_Fortran_MODULE_DIRECTORY}${H5EX_MOD_EXT};${HDF5_F90_BINARY_DIR};${PROJECT_BINARY_DIR};${CMAKE_LIBRARY_OUTPUT_DIRECTORY}" ) #----------------------------------------------------------------------------- diff --git a/HDF5Examples/FORTRAN/H5G/CMakeLists.txt b/HDF5Examples/FORTRAN/H5G/CMakeLists.txt index 4b85e4eba37..6a58c5258ab 100644 --- a/HDF5Examples/FORTRAN/H5G/CMakeLists.txt +++ b/HDF5Examples/FORTRAN/H5G/CMakeLists.txt @@ -10,7 +10,7 @@ project (HDF5Examples_FORTRAN_H5G Fortran) # Setup include Directories #----------------------------------------------------------------------------- set_directory_properties(PROPERTIES INCLUDE_DIRECTORIES - "${CMAKE_Fortran_MODULE_DIRECTORY}${HDF_MOD_EXT};${HDF5_F90_BINARY_DIR};${PROJECT_BINARY_DIR};${CMAKE_LIBRARY_OUTPUT_DIRECTORY}" + "${CMAKE_Fortran_MODULE_DIRECTORY}${H5EX_MOD_EXT};${HDF5_F90_BINARY_DIR};${PROJECT_BINARY_DIR};${CMAKE_LIBRARY_OUTPUT_DIRECTORY}" ) #----------------------------------------------------------------------------- @@ -47,7 +47,7 @@ foreach (example_name ${common_examples}) endif () endforeach () -#if (HDF_ENABLE_F2003) +#if (H5EX_ENABLE_F2003) # foreach (example_name ${f03examples}) # add_executable (${EXAMPLE_VARNAME}_f90_${example_name} ${PROJECT_SOURCE_DIR}/${example_name}.c) # target_compile_options(${EXAMPLE_VARNAME}_f90_${example_name} @@ -149,7 +149,7 @@ if (HDF5_BUILD_TOOLS) ) endif () -#if (HDF_ENABLE_F2003) +#if (H5EX_ENABLE_F2003) # foreach (example_name ${f03examples}) # add_custom_command ( # TARGET ${EXAMPLE_VARNAME}_f90_${example_name} @@ -432,7 +432,7 @@ if (H5EX_BUILD_TESTING) #ADD_H5_CMP_TEST (h5ex_g_traverse) #endif() else () - if (HDF_ENABLE_F2003) + if (H5EX_ENABLE_F2003) ADD_H5_CMP_TEST (h5ex_g_intermediate) ADD_H5_CMP_TEST (h5ex_g_iterate) # ADD_H5_CMP_TEST (h5ex_g_traverse) diff --git a/HDF5Examples/FORTRAN/H5G/Fortran_sourcefiles.cmake b/HDF5Examples/FORTRAN/H5G/Fortran_sourcefiles.cmake index e2e8e9d42b8..f5586e61f8c 100644 --- a/HDF5Examples/FORTRAN/H5G/Fortran_sourcefiles.cmake +++ b/HDF5Examples/FORTRAN/H5G/Fortran_sourcefiles.cmake @@ -23,7 +23,7 @@ if (HDF5_VERSION_STRING VERSION_GREATER_EQUAL "1.10.0") ) endif() else () - if (HDF_ENABLE_F2003) + if (H5EX_ENABLE_F2003) set (common_examples ${common_examples} h5ex_g_intermediate diff --git 
a/HDF5Examples/FORTRAN/H5PAR/CMakeLists.txt b/HDF5Examples/FORTRAN/H5PAR/CMakeLists.txt index c8301c78a5e..e1d147d434c 100644 --- a/HDF5Examples/FORTRAN/H5PAR/CMakeLists.txt +++ b/HDF5Examples/FORTRAN/H5PAR/CMakeLists.txt @@ -10,7 +10,7 @@ project (HDF5Examples_FORTRAN_H5PAR Fortran) # Setup include Directories #----------------------------------------------------------------------------- set_directory_properties(PROPERTIES INCLUDE_DIRECTORIES - "${CMAKE_Fortran_MODULE_DIRECTORY}${HDF_MOD_EXT};${HDF5_F90_BINARY_DIR};${PROJECT_BINARY_DIR};${CMAKE_LIBRARY_OUTPUT_DIRECTORY}" + "${CMAKE_Fortran_MODULE_DIRECTORY}${H5EX_MOD_EXT};${HDF5_F90_BINARY_DIR};${PROJECT_BINARY_DIR};${CMAKE_LIBRARY_OUTPUT_DIRECTORY}" ) #----------------------------------------------------------------------------- diff --git a/HDF5Examples/FORTRAN/H5T/CMakeLists.txt b/HDF5Examples/FORTRAN/H5T/CMakeLists.txt index 63d4dd78b7b..14fd8528664 100644 --- a/HDF5Examples/FORTRAN/H5T/CMakeLists.txt +++ b/HDF5Examples/FORTRAN/H5T/CMakeLists.txt @@ -5,7 +5,7 @@ project (HDF5Examples_FORTRAN_H5T Fortran) # Setup include Directories #----------------------------------------------------------------------------- set_directory_properties(PROPERTIES INCLUDE_DIRECTORIES - "${CMAKE_Fortran_MODULE_DIRECTORY}${HDF_MOD_EXT};${HDF5_F90_BINARY_DIR};${PROJECT_BINARY_DIR};${CMAKE_LIBRARY_OUTPUT_DIRECTORY}" + "${CMAKE_Fortran_MODULE_DIRECTORY}${H5EX_MOD_EXT};${HDF5_F90_BINARY_DIR};${PROJECT_BINARY_DIR};${CMAKE_LIBRARY_OUTPUT_DIRECTORY}" ) #----------------------------------------------------------------------------- @@ -13,7 +13,7 @@ set_directory_properties(PROPERTIES INCLUDE_DIRECTORIES #----------------------------------------------------------------------------- include (Fortran_sourcefiles.cmake) -#if (HDF_ENABLE_F2003) +#if (H5EX_ENABLE_F2003) foreach (example_name ${f03_examples}) add_executable (${EXAMPLE_VARNAME}_f90_${example_name} ${PROJECT_SOURCE_DIR}/${example_name}.F90) target_compile_options(${EXAMPLE_VARNAME}_f90_${example_name} @@ -126,7 +126,7 @@ if (HDF5_BUILD_TOOLS) ) endforeach () - #if (HDF_ENABLE_F2003) + #if (H5EX_ENABLE_F2003) foreach (example_name ${f03_examples}) if (NOT ${example_name} STREQUAL "h5ex_t_convert_F03") if (${example_name} STREQUAL "h5ex_t_vlen_F03" OR ${example_name} STREQUAL "h5ex_t_vlenatt_F03") @@ -404,7 +404,7 @@ if (H5EX_BUILD_TESTING) endif () endmacro () - #if (HDF_ENABLE_F2003) + #if (H5EX_ENABLE_F2003) foreach (example_name ${f03_examples} ${common_examples}) TEST_EXAMPLE (${example_name}) endforeach () diff --git a/HDF5Examples/FORTRAN/TUTR/CMakeLists.txt b/HDF5Examples/FORTRAN/TUTR/CMakeLists.txt index 4b866d4ec17..7c5b2189838 100644 --- a/HDF5Examples/FORTRAN/TUTR/CMakeLists.txt +++ b/HDF5Examples/FORTRAN/TUTR/CMakeLists.txt @@ -5,7 +5,7 @@ project (HDF5Examples_FORTRAN_TUTR Fortran) # Setup include Directories #----------------------------------------------------------------------------- set_directory_properties(PROPERTIES INCLUDE_DIRECTORIES - "${CMAKE_Fortran_MODULE_DIRECTORY}${HDF_MOD_EXT};${HDF5_F90_BINARY_DIR};${PROJECT_BINARY_DIR};${CMAKE_LIBRARY_OUTPUT_DIRECTORY}" + "${CMAKE_Fortran_MODULE_DIRECTORY}${H5EX_MOD_EXT};${HDF5_F90_BINARY_DIR};${PROJECT_BINARY_DIR};${CMAKE_LIBRARY_OUTPUT_DIRECTORY}" ) #----------------------------------------------------------------------------- @@ -13,7 +13,7 @@ set_directory_properties(PROPERTIES INCLUDE_DIRECTORIES #----------------------------------------------------------------------------- include (Fortran_sourcefiles.cmake) -#if (HDF_ENABLE_F2003) +#if 
(H5EX_ENABLE_F2003) foreach (example_name ${f03_examples}) add_executable (${EXAMPLE_VARNAME}_f90_tutr_${example_name} ${PROJECT_SOURCE_DIR}/${example_name}.f90) target_compile_options(${EXAMPLE_VARNAME}_f90_tutr_${example_name} @@ -112,7 +112,7 @@ if (H5EX_BUILD_TESTING) set (last_test "${EXAMPLE_VARNAME}_f90_tutr_${testname}") endmacro () - #if (HDF_ENABLE_F2003) + #if (H5EX_ENABLE_F2003) foreach (example_name ${f03_examples} ${common_examples}) ADD_H5_TEST (${example_name}) endforeach () diff --git a/HDF5Examples/README.md b/HDF5Examples/README.md index 2f0090ba02c..82b3cfec46d 100644 --- a/HDF5Examples/README.md +++ b/HDF5Examples/README.md @@ -9,9 +9,7 @@ in science, engineering, and research communities worldwide. The HDF Group is the developer, maintainer, and steward of HDF5 software. Find more information about The HDF Group, the HDF5 Community, and other HDF5 software projects, -tools, and services at The HDF Group's website. - - https://www.hdfgroup.org/ +tools, and services at [The HDF Group's website](https://www.hdfgroup.org/). @@ -19,46 +17,46 @@ HELP AND SUPPORT ---------------- Information regarding Help Desk and Support services is available at - https://hdfgroup.atlassian.net/servicedesk/customer/portals + https://help.hdfgroup.org FORUM and NEWS -------------- -The following public forums are provided for public announcements and discussions +The [HDF Forum](https://forum.hdfgroup.org) is provided for public announcements and discussions of interest to the general HDF5 Community. - - Homepage of the Forum - https://forum.hdfgroup.org - - - News and Announcement + - News and Announcements https://forum.hdfgroup.org/c/news-and-announcements-from-the-hdf-group - - HDF5 and HDF4 Topics + - HDF5 Topics https://forum.hdfgroup.org/c/hdf5 These forums are provided as an open and public service for searching and reading. Posting requires completing a simple registration and allows one to join in the -conversation. Please read the following instructions pertaining to the Forum's -use and configuration - https://forum.hdfgroup.org/t/quickstart-guide-welcome-to-the-new-hdf-forum +conversation. Please read the [instructions](https://forum.hdfgroup.org/t/quickstart-guide-welcome-to-the-new-hdf-forum +) pertaining to the Forum's use and configuration. HDF5 SNAPSHOTS, PREVIOUS RELEASES AND SOURCE CODE -------------------------------------------- Full Documentation and Programming Resources for this HDF5 can be found at - https://portal.hdfgroup.org/documentation/index.html + https://support.hdfgroup.org/documentation/index.html Periodically development code snapshots are provided at the following URL: - - https://gamma.hdfgroup.org/ftp/pub/outgoing/hdf5/snapshots/ + + https://github.com/HDFGroup/hdf5/releases/tag/snapshot Source packages for current and previous releases are located at: - - https://portal.hdfgroup.org/downloads/ + + hdf5 1.14 releases: + https://support.hdfgroup.org/releases/hdf5/v1_14/index.html + + Archived releases: + https://support.hdfgroup.org/archive/support/ftp/HDF5/releases/index.html Development code is available at our Github location: - + https://github.com/HDFGroup/hdf5.git diff --git a/HDF5Examples/Using_CMake.txt b/HDF5Examples/Using_CMake.txt index 778fa7534b5..baef3565194 100644 --- a/HDF5Examples/Using_CMake.txt +++ b/HDF5Examples/Using_CMake.txt @@ -99,8 +99,8 @@ These steps are described in more detail below. 
is: * H5EX_BUILD_TESTING:BOOL=ON * BUILD_SHARED_LIBS:BOOL=[ON | OFF] - * HDF_BUILD_FORTRAN:BOOL=[ON | OFF] - * HDF_BUILD_JAVA:BOOL=[ON | OFF] + * H5EX_BUILD_FORTRAN:BOOL=[ON | OFF] + * H5EX_BUILD_JAVA:BOOL=[ON | OFF] if the hdf5 library was built with a namespace (i.e. "hdf5::") add: -D HDF5_NAMESPACE:STRING=hdf5:: diff --git a/HDF5Examples/config/cmake/HDFExampleMacros.cmake b/HDF5Examples/config/cmake/HDFExampleMacros.cmake index 9888c06d36a..bbb042177a3 100644 --- a/HDF5Examples/config/cmake/HDFExampleMacros.cmake +++ b/HDF5Examples/config/cmake/HDFExampleMacros.cmake @@ -34,7 +34,7 @@ macro (BASIC_SETTINGS varname) set (CMAKE_C_STANDARD 99) set (CMAKE_C_STANDARD_REQUIRED TRUE) - if (HDF_BUILD_CPP_LIB) + if (H5EX_BUILD_CPP_LIB) ENABLE_LANGUAGE (CXX) set (CMAKE_CXX_STANDARD 98) @@ -66,12 +66,12 @@ macro (BASIC_SETTINGS varname) #----------------------------------------------------------------------------- # Option to allow the user to disable compiler warnings #----------------------------------------------------------------------------- - option (HDF_DISABLE_COMPILER_WARNINGS "Disable compiler warnings" OFF) - if (HDF_DISABLE_COMPILER_WARNINGS) + option (H5EX_DISABLE_COMPILER_WARNINGS "Disable compiler warnings" OFF) + if (H5EX_DISABLE_COMPILER_WARNINGS) # MSVC uses /w to suppress warnings. It also complains if another # warning level is given, so remove it. if (MSVC) - set (HDF_WARNINGS_BLOCKED 1) + set (H5EX_WARNINGS_BLOCKED 1) string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /w") if (CMAKE_CXX_COMPILER_LOADED AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") @@ -84,12 +84,12 @@ macro (BASIC_SETTINGS varname) endif () # Borland uses -w- to suppress warnings. if (BORLAND) - set (HDF_WARNINGS_BLOCKED 1) + set (H5EX_WARNINGS_BLOCKED 1) set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w-") endif () # Most compilers use -w to suppress warnings. - if (NOT HDF_WARNINGS_BLOCKED) + if (NOT H5EX_WARNINGS_BLOCKED) set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w") if (CMAKE_CXX_COMPILER_LOADED AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w") @@ -106,8 +106,8 @@ macro (BASIC_SETTINGS varname) endmacro () macro (PYTHON_SUPPORT) - option (HDF_BUILD_PYTHON "Test Python3 support" OFF) - if (HDF_BUILD_PYTHON) + option (H5EX_BUILD_PYTHON "Test Python3 support" OFF) + if (H5EX_BUILD_PYTHON) find_package (Python3 COMPONENTS Interpreter Development NumPy) if (Python3_FOUND AND Python3_NumPy_FOUND) include (ExternalProject) @@ -122,7 +122,7 @@ macro (PYTHON_SUPPORT) INSTALL_COMMAND python3 -m pip --no-cache-dir install -v . 
) else () - set (HDF_BUILD_PYTHON OFF CACHE BOOL "Test Python3 support" FORCE) + set (H5EX_BUILD_PYTHON OFF CACHE BOOL "Test Python3 support" FORCE) message (STATUS "Python3:${Python3_FOUND} or numpy:${Python3_NumPy_FOUND} not found - disable test of Python examples") endif () endif () @@ -137,16 +137,16 @@ macro (HDF5_SUPPORT) set (FIND_HDF_COMPONENTS C shared) else () set (FIND_HDF_COMPONENTS C static) - set (HDF_BUILD_JAVA OFF CACHE BOOL "Build Java support" FORCE) + set (HDEXF_BUILD_JAVA OFF CACHE BOOL "Build Java support" FORCE) message (STATUS "Using static HDF5 - disable build of Java examples") endif () - if (HDF_BUILD_FORTRAN) + if (H5EX_BUILD_FORTRAN) set (FIND_HDF_COMPONENTS ${FIND_HDF_COMPONENTS} Fortran) endif () - if (HDF_BUILD_CPP_LIB) + if (H5EX_BUILD_CPP_LIB) set (FIND_HDF_COMPONENTS ${FIND_HDF_COMPONENTS} CXX) endif () - if (HDF_BUILD_JAVA) + if (H5EX_BUILD_JAVA) set (FIND_HDF_COMPONENTS ${FIND_HDF_COMPONENTS} Java) set (HDF5_Java_FOUND 1) #default setting for 1.10.1 and earlier endif () @@ -161,13 +161,13 @@ macro (HDF5_SUPPORT) if (NOT HDF5_static_C_FOUND AND NOT HDF5_shared_C_FOUND) #find library from non-dual-binary package set (FIND_HDF_COMPONENTS C) - if (HDF_BUILD_FORTRAN) + if (H5EX_BUILD_FORTRAN) set (FIND_HDF_COMPONENTS ${FIND_HDF_COMPONENTS} Fortran) endif () - if (HDF_BUILD_JAVA) + if (H5EX_BUILD_JAVA) set (FIND_HDF_COMPONENTS ${FIND_HDF_COMPONENTS} Java) endif () - if (HDF_BUILD_CPP_LIB) + if (H5EX_BUILD_CPP_LIB) set (FIND_HDF_COMPONENTS ${FIND_HDF_COMPONENTS} CXX) endif () message (STATUS "HDF5 find comps: ${FIND_HDF_COMPONENTS}") @@ -185,7 +185,7 @@ macro (HDF5_SUPPORT) else () set_property (TARGET ${HDF5_NAMESPACE}h5dump PROPERTY IMPORTED_LOCATION "${HDF5_TOOLS_DIR}/h5dump") endif () - if (HDF_BUILD_JAVA) + if (H5EX_BUILD_JAVA) set (CMAKE_JAVA_INCLUDE_PATH "${CMAKE_JAVA_INCLUDE_PATH};${HDF5_JAVA_INCLUDE_DIRS}") message (STATUS "HDF5 jars:${HDF5_JAVA_INCLUDE_DIRS}") endif () @@ -218,21 +218,21 @@ macro (HDF5_SUPPORT) endif() if (NOT HDF5_static_Fortran_FOUND AND NOT HDF5_shared_Fortran_FOUND) - set (HDF_BUILD_FORTRAN OFF CACHE BOOL "Build FORTRAN support" FORCE) + set (H5EX_BUILD_FORTRAN OFF CACHE BOOL "Build FORTRAN support" FORCE) message (STATUS "HDF5 Fortran libs not found - disable build of Fortran examples") else () - if (HDF_BUILD_FORTRAN AND ${HDF5_BUILD_FORTRAN}) + if (H5EX_BUILD_FORTRAN AND ${HDF5_BUILD_FORTRAN}) if (BUILD_SHARED_LIBS AND HDF5_shared_Fortran_FOUND) set (H5EX_HDF5_LINK_LIBS ${H5EX_HDF5_LINK_LIBS} ${HDF5_FORTRAN_SHARED_LIBRARY}) elseif (HDF5_static_Fortran_FOUND) set (H5EX_HDF5_LINK_LIBS ${H5EX_HDF5_LINK_LIBS} ${HDF5_FORTRAN_STATIC_LIBRARY}) else () - set (HDF_BUILD_FORTRAN OFF CACHE BOOL "Build FORTRAN support" FORCE) + set (H5EX_BUILD_FORTRAN OFF CACHE BOOL "Build FORTRAN support" FORCE) message (STATUS "HDF5 Fortran libs not found - disable build of Fortran examples") endif () endif () endif () - if (HDF_BUILD_JAVA AND HDF5_Java_FOUND) + if (H5EX_BUILD_JAVA AND HDF5_Java_FOUND) if (${HDF5_BUILD_JAVA}) set (CMAKE_JAVA_INCLUDE_PATH "${CMAKE_JAVA_INCLUDE_PATH};${HDF5_JAVA_INCLUDE_DIRS}") get_target_property (libsoname ${HDF5_JAVA_LIBRARY} IMPORTED_SONAME${UPPER_BUILD_TYPE}) @@ -243,11 +243,11 @@ macro (HDF5_SUPPORT) set (H5EX_JAVA_LIBRARIES ${HDF5_JAVA_LIBRARY}) message (STATUS "HDF5 lib:${H5EX_JAVA_LIBRARY} jars:${HDF5_JAVA_INCLUDE_DIRS}}") else () - set (HDF_BUILD_JAVA OFF CACHE BOOL "Build Java support" FORCE) + set (H5EX_BUILD_JAVA OFF CACHE BOOL "Build Java support" FORCE) message (STATUS "HDF5 Java libs not found - disable 
build of Java examples") endif () else () - set (HDF_BUILD_JAVA OFF CACHE BOOL "Build Java support" FORCE) + set (H5EX_BUILD_JAVA OFF CACHE BOOL "Build Java support" FORCE) endif () endif () else () @@ -283,7 +283,7 @@ macro (HDF5_SUPPORT) set (H5EX_HDF5_HAVE_HDF5 1) message (STATUS "HDF5-${HDF5_VERSION_STRING} used") endif () - if (HDF_BUILD_FORTRAN) + if (H5EX_BUILD_FORTRAN) list (APPEND H5EX_HDF5_INCLUDE_DIRS ${HDF5_INCLUDE_DIR_FORTRAN}) endif () message (STATUS "HDF5 link libs: ${H5EX_HDF5_LINK_LIBS} Includes: ${H5EX_HDF5_INCLUDE_DIRS}") diff --git a/HDF5Examples/config/cmake/cacheinit.cmake b/HDF5Examples/config/cmake/cacheinit.cmake index 3e5a8eb8c5a..0ef3a0e1b9c 100644 --- a/HDF5Examples/config/cmake/cacheinit.cmake +++ b/HDF5Examples/config/cmake/cacheinit.cmake @@ -8,17 +8,15 @@ set (BUILD_SHARED_LIBS ON CACHE BOOL "Build Shared Libraries" FORCE) set (H5EX_BUILD_TESTING ON CACHE BOOL "Build HDF5 Unit Testing" FORCE) -#set (HDF_ENABLE_PARALLEL ON CACHE BOOL "Enable parallel build (requires MPI)" FORCE) +#set (H5EX_ENABLE_PARALLEL ON CACHE BOOL "Enable parallel build (requires MPI)" FORCE) -#set (HDF_BUILD_FORTRAN ON CACHE BOOL "Build FORTRAN support" FORCE) +#set (H5EX_BUILD_FORTRAN ON CACHE BOOL "Build FORTRAN support" FORCE) -#set (HDF_BUILD_FILTERS ON CACHE BOOL "Build filter support" FORCE) +#set (H5EX_BUILD_FILTERS ON CACHE BOOL "Build filter support" FORCE) -#set (HDF_ENABLE_F2003 ON CACHE BOOL "Enable FORTRAN 2003 Standard" FORCE) +#set (H5EX_ENABLE_F2003 ON CACHE BOOL "Enable FORTRAN 2003 Standard" FORCE) -#set (HDF_ENABLE_THREADSAFE ON CACHE BOOL "Enable Threadsafety" FORCE) - -#set (HDF_BUILD_JAVA ON CACHE BOOL "Build JAVA support" FORCE) +#set (H5EX_BUILD_JAVA ON CACHE BOOL "Build JAVA support" FORCE) set (HDF5_PACKAGE_NAME "hdf5" CACHE STRING "Name of HDF5 package" FORCE) diff --git a/README.md b/README.md index f5d6a54c695..7c1173f6868 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,6 @@ HDF5 version 1.15.0 currently under development [![develop cmake build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/cmake.yml?branch=develop&label=HDF5%20develop%20CMake%20CI)](https://github.com/HDFGroup/hdf5/actions/workflows/cmake.yml?query=branch%3Adevelop) [![develop autotools build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/autotools.yml?branch=develop&label=HDF5%20develop%20Autotools%20CI)](https://github.com/HDFGroup/hdf5/actions/workflows/autotools.yml?query=branch%3Adevelop) -[![HDF-EOS5 build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/hdfeos5.yml?branch=develop&label=HDF-EOS5)](https://github.com/HDFGroup/hdf5/actions/workflows/hdfeos5.yml?query=branch%3Adevelop) [![netCDF build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/netcdf.yml?branch=develop&label=netCDF)](https://github.com/HDFGroup/hdf5/actions/workflows/netcdf.yml?query=branch%3Adevelop) [![h5py build status](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/h5py.yml?branch=develop&label=h5py)](https://github.com/HDFGroup/hdf5/actions/workflows/h5py.yml?query=branch%3Adevelop) [![CVE regression](https://img.shields.io/github/actions/workflow/status/HDFGroup/hdf5/cve.yml?branch=develop&label=CVE)](https://github.com/HDFGroup/hdf5/actions/workflows/cve.yml?query=branch%3Adevelop) @@ -104,14 +103,18 @@ in which a feature is introduced may change. 
SNAPSHOTS, PREVIOUS RELEASES AND SOURCE CODE -------------------------------------------- Periodically development code snapshots are provided at the following URL: - + https://github.com/HDFGroup/hdf5/releases/tag/snapshot Source packages for current and previous releases are located at: - - https://portal.hdfgroup.org/Downloads + + hdf5 1.14 releases: + https://support.hdfgroup.org/releases/hdf5/v1_14/index.html + + Archived releases: + https://support.hdfgroup.org/archive/support/ftp/HDF5/releases/index.html Development code is available at our Github location: - + https://github.com/HDFGroup/hdf5.git diff --git a/bin/cmakehdf5 b/bin/cmakehdf5 index bdd724f4a14..2ce05d12b81 100755 --- a/bin/cmakehdf5 +++ b/bin/cmakehdf5 @@ -150,7 +150,7 @@ INSTALL_HDF5() install_file=./HDF5-${version}-Linux.sh $install_file --skip-license $* ;; - Darwin) # Mac OSX DMG file + Darwin) # MacOS DMG file # These steps were a kludge. Need proper support from Cmake engineering. echo Darwin install step needs proper implementation. Quit. return 1 diff --git a/config/cmake-presets/hidden-presets.json b/config/cmake-presets/hidden-presets.json index d4e52cab752..8051eb66be0 100644 --- a/config/cmake-presets/hidden-presets.json +++ b/config/cmake-presets/hidden-presets.json @@ -10,7 +10,7 @@ "binaryDir": "${sourceParentDir}/build/${presetName}", "installDir": "${sourceParentDir}/install/${presetName}" }, - { + { "name": "ci-x64", "architecture": { "value": "x64", @@ -21,7 +21,15 @@ { "name": "ci-x86", "architecture": { - "value": "x86", + "value": "Win32", + "strategy": "external" + }, + "hidden": true + }, + { + "name": "ci-arm64", + "architecture": { + "value": "ARM64", "strategy": "external" }, "hidden": true @@ -48,25 +56,29 @@ "CMAKE_C_COMPILER": "cl", "CMAKE_CXX_COMPILER": "cl" }, - "toolset": { - "value": "host=x64", - "strategy": "external" - }, "condition": { "type": "equals", "lhs": "${hostSystemName}", "rhs": "Windows" } }, + { + "name": "ci-macos", + "hidden": true, + "cacheVariables": { + "CMAKE_OSX_ARCHITECTURES": "arm64;x86_64" + }, + "condition": { + "type": "equals", + "lhs": "${hostSystemName}", + "rhs": "Darwin" + } + }, { "name": "ci-Clang", "hidden": true, "cacheVariables": { "CMAKE_TOOLCHAIN_FILE": "config/toolchain/clang.cmake" - }, - "toolset": { - "value": "host=x64", - "strategy": "external" } }, { @@ -79,29 +91,17 @@ "type": "equals", "lhs": "${hostSystemName}", "rhs": "Linux" - }, - "toolset": { - "value": "host=x64", - "strategy": "external" } }, { "name": "ci-Intel", - "hidden": true, - "toolset": { - "value": "host=x64", - "strategy": "external" - } + "hidden": true }, { "name": "ci-Fortran", "hidden": true, "cacheVariables": { "HDF5_BUILD_FORTRAN": "ON" - }, - "toolset": { - "value": "host=x64", - "strategy": "external" } }, { @@ -129,10 +129,6 @@ "hidden": true, "cacheVariables": { "HDF5_BUILD_JAVA": "ON" - }, - "toolset": { - "value": "host=x64", - "strategy": "external" } }, { @@ -201,6 +197,50 @@ "ci-GNUC" ] }, + { + "name": "ci-macos-Debug-Clang", + "description": "Clang/LLVM for x64 (Debug)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-macos", + "ci-Debug", + "ci-Clang" + ] + }, + { + "name": "ci-macos-Release-Clang", + "description": "Clang/LLVM for x64 (Release)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-macos", + "ci-Release", + "ci-Clang" + ] + }, + { + "name": "ci-macos-Debug-GNUC", + "description": "GNUC for x64 (Debug)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-macos", + "ci-Debug", + "ci-GNUC" + ] + }, + { + "name": 
"ci-macos-Release-GNUC", + "description": "GNUC for x64 (Release)", + "hidden": true, + "inherits": [ + "ci-base", + "ci-macos", + "ci-Release", + "ci-GNUC" + ] + }, { "name": "ci-x64-Debug-Intel", "description": "Intel for x64 (Debug)", @@ -328,6 +368,38 @@ "ci-base" ] }, + { + "name": "ci-macos-Debug-Clang", + "configurePreset": "ci-macos-Debug-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-Release-Clang", + "configurePreset": "ci-macos-Release-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-Debug-GNUC", + "configurePreset": "ci-macos-Debug-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-Release-GNUC", + "configurePreset": "ci-macos-Release-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, { "name": "ci-x64-Debug-Intel", "configurePreset": "ci-x64-Debug-Intel", @@ -453,6 +525,38 @@ "ci-base" ] }, + { + "name": "ci-macos-Debug-Clang", + "configurePreset": "ci-macos-Debug-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-Release-Clang", + "configurePreset": "ci-macos-Release-Clang", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-Debug-GNUC", + "configurePreset": "ci-macos-Debug-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, + { + "name": "ci-macos-Release-GNUC", + "configurePreset": "ci-macos-Release-GNUC", + "hidden": true, + "inherits": [ + "ci-base" + ] + }, { "name": "ci-x64-Debug-Intel", "configurePreset": "ci-x64-Debug-Intel", @@ -536,6 +640,18 @@ "hidden": true, "inherits": "ci-base" }, + { + "name": "ci-macos-Release-Clang", + "configurePreset": "ci-macos-Release-Clang", + "hidden": true, + "inherits": "ci-base" + }, + { + "name": "ci-macos-Release-GNUC", + "configurePreset": "ci-macos-Release-GNUC", + "hidden": true, + "inherits": "ci-base" + }, { "name": "ci-x64-Release-Intel", "configurePreset": "ci-x64-Release-Intel", diff --git a/config/cmake/CPack.Info.plist.in b/config/cmake/CPack.Info.plist.in index b936470fc29..f6e316e105b 100644 --- a/config/cmake/CPack.Info.plist.in +++ b/config/cmake/CPack.Info.plist.in @@ -1,5 +1,5 @@ - - + + CFBundleDevelopmentRegion @@ -16,11 +16,16 @@ FMWK CFBundleSignature ???? + + LSApplicationCategoryType + public.app-category.developer-tools CFBundleVersion @CPACK_PACKAGE_VERSION@ CFBundleShortVersionString @CPACK_SHORT_VERSION_STRING@ + NSHumanReadableCopyright + Copyright 2006 by The HDF Group CSResourcesFileMapped - + true diff --git a/config/cmake/FindHDFS.cmake b/config/cmake/FindHDFS.cmake index e401a94fb81..74de99a422d 100644 --- a/config/cmake/FindHDFS.cmake +++ b/config/cmake/FindHDFS.cmake @@ -7,8 +7,8 @@ # HDFS_LIBRARIES, location of libhdfs.so # HDFS_FOUND, whether HDFS is found. 
-exec_program($ENV{HADOOP_HOME}/bin/hadoop ARGS version OUTPUT_VARIABLE Hadoop_VERSION - RETURN_VALUE Hadoop_RETURN) +execute_process(COMMAND $ENV{HADOOP_HOME}/bin/hadoop version OUTPUT_VARIABLE Hadoop_VERSION + RESULT_VARIABLE Hadoop_RETURN) # currently only looking in HADOOP_HOME find_path(HDFS_INCLUDE_DIR hdfs.h PATHS diff --git a/config/cmake/HDF5ExampleCache.cmake b/config/cmake/HDF5ExampleCache.cmake index 7d5b7be0c40..99232cc06ca 100644 --- a/config/cmake/HDF5ExampleCache.cmake +++ b/config/cmake/HDF5ExampleCache.cmake @@ -7,13 +7,13 @@ # set example options to match build options set (H5EX_BUILD_TESTING ${BUILD_TESTING} CACHE BOOL "Enable examples testing" FORCE) set (H5EX_BUILD_EXAMPLES ${HDF5_BUILD_EXAMPLES} CACHE BOOL "Build Examples" FORCE) -set (HDF_BUILD_FORTRAN ${HDF5_BUILD_FORTRAN} CACHE BOOL "Build examples FORTRAN support" FORCE) -set (HDF_BUILD_JAVA ${HDF5_BUILD_JAVA} CACHE BOOL "Build examples JAVA support" FORCE) -set (HDF_BUILD_FILTERS ${HDF5_ENABLE_PLUGIN_SUPPORT} CACHE BOOL "Build examples PLUGIN filter support" FORCE) -set (HDF_BUILD_CPP_LIB ${HDF5_BUILD_CPP_LIB} CACHE BOOL "Build HDF5 C++ Library" FORCE) -set (HDF_BUILD_HL_LIB ${HDF5_BUILD_HL_LIB} CACHE BOOL "Build HIGH Level examples" FORCE) -set (HDF_ENABLE_THREADSAFE ${HDF5_ENABLE_THREADSAFE} CACHE BOOL "Enable examples thread-safety" FORCE) -set (HDF_ENABLE_PARALLEL ${HDF5_ENABLE_PARALLEL} CACHE BOOL "Enable examples parallel build (requires MPI)" FORCE) +set (H5EX_BUILD_FORTRAN ${HDF5_BUILD_FORTRAN} CACHE BOOL "Build examples FORTRAN support" FORCE) +set (H5EX_BUILD_JAVA ${HDF5_BUILD_JAVA} CACHE BOOL "Build examples JAVA support" FORCE) +set (H5EX_BUILD_FILTERS ${HDF5_ENABLE_PLUGIN_SUPPORT} CACHE BOOL "Build examples PLUGIN filter support" FORCE) +set (H5EX_BUILD_CPP_LIB ${HDF5_BUILD_CPP_LIB} CACHE BOOL "Build HDF5 C++ Library" FORCE) +set (H5EX_BUILD_HL_LIB ${HDF5_BUILD_HL_LIB} CACHE BOOL "Build HIGH Level examples" FORCE) +set (H5EX_ENABLE_THREADSAFE ${HDF5_ENABLE_THREADSAFE} CACHE BOOL "Enable examples thread-safety" FORCE) +set (H5EX_ENABLE_PARALLEL ${HDF5_ENABLE_PARALLEL} CACHE BOOL "Enable examples parallel build (requires MPI)" FORCE) set (H5EX_USE_GNU_DIRS ${HDF5_USE_GNU_DIRS} CACHE BOOL "ON to use GNU Coding Standard install directory variables, OFF to use historical settings" FORCE) #preset HDF5 cache vars to this projects libraries instead of searching @@ -46,7 +46,7 @@ if (NOT BUILD_SHARED_LIBS AND BUILD_STATIC_LIBS) set (H5EX_HDF5_LINK_LIBS ${HDF5_LIB_TARGET} CACHE STRING "HDF5 target" FORCE) if (HDF5_BUILD_FORTRAN) set (H5EX_HDF5_LINK_LIBS ${H5EX_HDF5_LINK_LIBS} ${HDF5_F90_LIB_TARGET}) - set (HDF_MOD_EXT "/static" CACHE STRING "Use Static Modules for Examples" FORCE) + set (H5EX_MOD_EXT "/static" CACHE STRING "Use Static Modules for Examples" FORCE) endif () if (HDF5_BUILD_CPP_LIB) set (H5EX_HDF5_LINK_LIBS ${H5EX_HDF5_LINK_LIBS} ${HDF5_CPP_LIB_TARGET}) @@ -56,7 +56,7 @@ else () set (H5EX_HDF5_LINK_LIBS ${HDF5_LIBSH_TARGET} CACHE STRING "HDF5 target" FORCE) if (HDF5_BUILD_FORTRAN) set (H5EX_HDF5_LINK_LIBS ${H5EX_HDF5_LINK_LIBS} ${HDF5_F90_LIBSH_TARGET}) - set (HDF_MOD_EXT "/shared" CACHE STRING "Use Shared Modules for Examples" FORCE) + set (H5EX_MOD_EXT "/shared" CACHE STRING "Use Shared Modules for Examples" FORCE) endif () if (HDF5_BUILD_CPP_LIB) set (H5EX_HDF5_LINK_LIBS ${H5EX_HDF5_LINK_LIBS} ${HDF5_CPP_LIBSH_TARGET}) diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake index d8a444b84d2..e3364826ba6 100644 --- a/config/cmake/HDFCompilerFlags.cmake +++ 
b/config/cmake/HDFCompilerFlags.cmake @@ -54,46 +54,8 @@ if (CMAKE_C_COMPILER_ID STREQUAL "NVHPC" ) set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -s") endif () else () - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Mbounds -g") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Mbounds -gopt") endif () - - # With at least NVHPC 23.5 - 23.9, compiling with -O2 or higher and -DNDEBUG - # appears to have issues that manifest in the tests as incorrect metadata - # checksums being read or memory being corrupted. Compiling without -DNDEBUG - # does not appear to have these issues, but is not ideal due to compiling in - # asserts and other library debug code. Compiling with -O1 also does not appear - # to have these issues, so set maximum optimization level to -O1 for now until - # it can be determined whether these issues are compiler-specific or issues - # in the library. - set (cmake_c_flags_minsizerel_edited "${CMAKE_C_FLAGS_MINSIZEREL}") - string (REPLACE "-O2" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") - string (REPLACE "-O3" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") - string (REPLACE "-O4" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") - string (REPLACE "-Ofast" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") - string (REPLACE "-fast" "" cmake_c_flags_minsizerel_edited "${cmake_c_flags_minsizerel_edited}") - string (STRIP "${cmake_c_flags_minsizerel_edited}" cmake_c_flags_minsizerel_edited) - string (PREPEND cmake_c_flags_minsizerel_edited "-O1 ") - set (CMAKE_C_FLAGS_MINSIZEREL "${cmake_c_flags_minsizerel_edited}") - - set (cmake_c_flags_release_edited "${CMAKE_C_FLAGS_RELEASE}") - string (REPLACE "-O2" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") - string (REPLACE "-O3" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") - string (REPLACE "-O4" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") - string (REPLACE "-Ofast" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") - string (REPLACE "-fast" "" cmake_c_flags_release_edited "${cmake_c_flags_release_edited}") - string (STRIP "${cmake_c_flags_release_edited}" cmake_c_flags_release_edited) - string (PREPEND cmake_c_flags_release_edited "-O1 ") - set (CMAKE_C_FLAGS_RELEASE "${cmake_c_flags_release_edited}") - - set (cmake_c_flags_relwithdebinfo_edited "${CMAKE_C_FLAGS_RELWITHDEBINFO}") - string (REPLACE "-O2" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") - string (REPLACE "-O3" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") - string (REPLACE "-O4" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") - string (REPLACE "-Ofast" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") - string (REPLACE "-fast" "" cmake_c_flags_relwithdebinfo_edited "${cmake_c_flags_relwithdebinfo_edited}") - string (STRIP "${cmake_c_flags_relwithdebinfo_edited}" cmake_c_flags_relwithdebinfo_edited) - string (PREPEND cmake_c_flags_relwithdebinfo_edited "-O1 ") - set (CMAKE_C_FLAGS_RELWITHDEBINFO "${cmake_c_flags_relwithdebinfo_edited}") endif () if (CMAKE_C_COMPILER_ID STREQUAL "GNU") diff --git a/config/cmake/HDFMacros.cmake b/config/cmake/HDFMacros.cmake index 3545d4e9ff2..3be3e6a6a60 100644 --- a/config/cmake/HDFMacros.cmake +++ b/config/cmake/HDFMacros.cmake @@ -327,8 +327,10 @@ macro (HDF_README_PROPERTIES target_fortran) set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO 
2019") elseif (${CMAKE_C_COMPILER_VERSION} MATCHES "^19.3.*") set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO 2022") + elseif (${CMAKE_C_COMPILER_VERSION} MATCHES "^19.4.*") + set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO 2022") else () - set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO ???") + set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO ????") endif () else () set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO ${CMAKE_C_COMPILER_VERSION}") diff --git a/config/cmake/README.md.cmake.in b/config/cmake/README.md.cmake.in index 7f6af3646a2..40294c15255 100644 --- a/config/cmake/README.md.cmake.in +++ b/config/cmake/README.md.cmake.in @@ -75,6 +75,6 @@ For more information see USING_CMake_Examples.txt in the install folder. =========================================================================== Documentation for this release can be found at the following URL: - https://portal.hdfgroup.org/documentation/index.html#hdf5 + https://support.hdfgroup.org/releases/hdf5/@${H5_VERS_MAJOR}@_@${H5_VERS_MINOR}@/@${H5_VERS_MAJOR}@_@${H5_VERS_MINOR}@_@${H5_VERS_RELEASE}@/documentation/doxygen/index.html Bugs should be reported to help@hdfgroup.org. diff --git a/config/cmake/SignPackageFiles.cmake b/config/cmake/SignPackageFiles.cmake new file mode 100644 index 00000000000..81cc5bfff21 --- /dev/null +++ b/config/cmake/SignPackageFiles.cmake @@ -0,0 +1,43 @@ +# This script signs the targets for the package +message(STATUS "Signing script in ${CPACK_TEMPORARY_INSTALL_DIRECTORY} and ${CPACK_PACKAGE_INSTALL_DIRECTORY}") + +# RPM needs ALL_COMPONENTS_IN_ONE added to path between ${CPACK_TEMPORARY_INSTALL_DIRECTORY} and ${CPACK_PACKAGE_INSTALL_DIRECTORY} +if (CPACK_GENERATOR MATCHES "RPM") + set (CPACK_TARGET_FILE_DIRECTORY "${CPACK_TEMPORARY_INSTALL_DIRECTORY}/ALL_COMPONENTS_IN_ONE/${CPACK_PACKAGE_INSTALL_DIRECTORY}") +elseif (CPACK_GENERATOR MATCHES "WIX" OR CPACK_GENERATOR MATCHES "NSIS") + set (CPACK_TARGET_FILE_DIRECTORY "${CPACK_TEMPORARY_INSTALL_DIRECTORY}/libraries") +elseif (CPACK_GENERATOR MATCHES "ZIP") + set (CPACK_TARGET_FILE_DIRECTORY "${CPACK_TEMPORARY_INSTALL_DIRECTORY}") +elseif (CPACK_GENERATOR MATCHES "DragNDrop") + set (CPACK_TARGET_FILE_DIRECTORY "${CPACK_TEMPORARY_INSTALL_DIRECTORY}/ALL_IN_ONE/${CPACK_PACKAGE_INSTALL_DIRECTORY}") +else () + set (CPACK_TARGET_FILE_DIRECTORY "${CPACK_TEMPORARY_INSTALL_DIRECTORY}/${CPACK_PACKAGE_INSTALL_DIRECTORY}") +endif () +file (GLOB target_list LIST_DIRECTORIES false "${CPACK_TARGET_FILE_DIRECTORY}/lib/*" "${CPACK_TARGET_FILE_DIRECTORY}/bin/*" "${CPACK_TARGET_FILE_DIRECTORY}/lib/plugin/*") +foreach (targetfile IN LISTS target_list) + if (WIN32) + # Sign the targets + execute_process (COMMAND $ENV{SIGNTOOLDIR}/signtool + sign /v /debug /fd SHA256 /tr http://timestamp.acs.microsoft.com /td SHA256 + /dlib "Microsoft.Trusted.Signing.Client/bin/x64/Azure.CodeSigning.Dlib.dll" /dmdf ${CPACK_ORIG_SOURCE_DIR}/credentials.json + ${targetfile} + ) + execute_process ( + COMMAND ${CMAKE_COMMAND} -E echo "Signing the target ${targetfile}" + ) + elseif (APPLE) + # Sign the targets + execute_process (COMMAND codesign + --force --timestamp --options runtime --entitlements ${CPACK_ORIG_SOURCE_DIR}/config/cmake/distribution.entitlements + --verbose=4 --strict --sign "$ENV{SIGNER}" + ${targetfile} + ) + execute_process ( + COMMAND ${CMAKE_COMMAND} -E echo "Signing the target ${targetfile}" + ) + else () + execute_process ( + COMMAND ${CMAKE_COMMAND} -E echo "Signing the target ${targetfile}" + ) + 
endif () +endforeach () diff --git a/config/cmake/cacheinit.cmake b/config/cmake/cacheinit.cmake index 4e56a1d5383..75936d1d298 100644 --- a/config/cmake/cacheinit.cmake +++ b/config/cmake/cacheinit.cmake @@ -130,6 +130,14 @@ set (BLOSC2_TGZ_NAME "c-blosc2-2.14.4.tar.gz" CACHE STRING "Use BLOSC2 from comp set (BLOSC2_PACKAGE_NAME "blosc2" CACHE STRING "Name of BLOSC2 package" FORCE) +set (BLOSC2_ZLIB_GIT_URL "https://github.com/madler/zlib.git" CACHE STRING "Use ZLIB from GitHub repository" FORCE) +set (BLOSC2_ZLIB_GIT_BRANCH "develop" CACHE STRING "" FORCE) + +set (BLOSC2_ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.3" CACHE STRING "Use PLUGINS from original location" FORCE) +set (BLOSC2_ZLIB_TGZ_NAME "zlib-1.3.tar.gz" CACHE STRING "Use ZLib from compressed file" FORCE) + +set (BLOSC2_ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of BLOSC2_ZLIB package" FORCE) + ######## # bzip2 ######## diff --git a/config/cmake/distribution.entitlements b/config/cmake/distribution.entitlements new file mode 100644 index 00000000000..0e0df6c7627 --- /dev/null +++ b/config/cmake/distribution.entitlements @@ -0,0 +1,16 @@ + + + + + com.apple.security.cs.allow-jit + + com.apple.security.cs.allow-unsigned-executable-memory + + com.apple.security.cs.disable-executable-page-protection + + com.apple.security.cs.disable-library-validation + + com.apple.security.cs.allow-dyld-environment-variables + + + diff --git a/config/cmake/examples/CTestScript.cmake b/config/cmake/examples/CTestScript.cmake index 657806ce3c2..b1bfa8a9fc1 100644 --- a/config/cmake/examples/CTestScript.cmake +++ b/config/cmake/examples/CTestScript.cmake @@ -137,7 +137,7 @@ set (CTEST_CONFIGURE_COMMAND #----------------------------------------------------------------------------- ## -- set output to english -set ($ENV{LC_MESSAGES} "en_EN") +set (ENV{LC_MESSAGES} "en_EN") #----------------------------------------------------------------------------- configure_file(${CTEST_SOURCE_DIRECTORY}/config/cmake/CTestCustom.cmake ${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake) diff --git a/config/cmake/examples/HDF5_Examples.cmake.in b/config/cmake/examples/HDF5_Examples.cmake.in index d5a6051d346..962bfea147a 100644 --- a/config/cmake/examples/HDF5_Examples.cmake.in +++ b/config/cmake/examples/HDF5_Examples.cmake.in @@ -42,6 +42,14 @@ if(DEFINED CTEST_SCRIPT_ARG) endforeach() endif() +if(${CTEST_VSVERS} STREQUAL "64_VS2022") # 64-bit Visual Studio 2022 + set(CTEST_CMAKE_GENERATOR "Visual Studio 17 2022") + set(CMAKE_GENERATOR_ARCHITECTURE "x64") +elseif(${VS_VERS} STREQUAL "64_VS2019") # 64-bit Visual Studio 2019 + set(CTEST_CMAKE_GENERATOR "Visual Studio 16 2019") + set(CMAKE_GENERATOR_ARCHITECTURE "x64") +endif() + ################################################################### ### Following Line is one of [Release, RelWithDebInfo, Debug] ##### set(CTEST_CONFIGURATION_TYPE "$ENV{CMAKE_CONFIG_TYPE}") diff --git a/config/cmake/examples/HDF5_Examples_options.cmake b/config/cmake/examples/HDF5_Examples_options.cmake index 684ec5bf641..2fe145c4704 100644 --- a/config/cmake/examples/HDF5_Examples_options.cmake +++ b/config/cmake/examples/HDF5_Examples_options.cmake @@ -14,14 +14,13 @@ #### format: set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DXXX:YY=ZZZZ") ### #### DEFAULT: ### #### BUILD_SHARED_LIBS:BOOL=OFF ### -#### HDF_BUILD_C:BOOL=ON ### -#### HDF_BUILD_CXX:BOOL=OFF ### -#### HDF_BUILD_FORTRAN:BOOL=OFF ### -#### HDF_BUILD_JAVA:BOOL=OFF ### -#### HDF_BUILD_FILTERS:BOOL=OFF ### -#### BUILD_TESTING:BOOL=OFF ### 
-#### HDF_ENABLE_PARALLEL:BOOL=OFF ### -#### HDF_ENABLE_THREADSAFE:BOOL=OFF ### +#### H5EX_BUILD_C:BOOL=ON ### +#### H5EX_BUILD_CXX:BOOL=OFF ### +#### H5EX_BUILD_FORTRAN:BOOL=OFF ### +#### H5EX_BUILD_JAVA:BOOL=OFF ### +#### H5EX_BUILD_FILTERS:BOOL=OFF ### +#### H5EX_BUILD_TESTING:BOOL=OFF ### +#### H5EX_ENABLE_PARALLEL:BOOL=OFF ### ############################################################################################# ### uncomment/comment and change the following lines for other configuration options @@ -44,38 +43,34 @@ ############################################################################################# #### languages #### ### disable C builds -#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_C:BOOL=OFF") +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DH5EX_BUILD_C:BOOL=OFF") ### enable C++ builds -#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_CXX:BOOL=ON") +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DH5EX_BUILD_CXX:BOOL=ON") ### enable Fortran builds -#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=ON") +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DH5EX_BUILD_FORTRAN:BOOL=ON") ### enable JAVA builds -#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_JAVA:BOOL=ON") +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DH5EX_BUILD_JAVA:BOOL=ON") ############################################################################################# ### enable FILTERS builds -#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_FILTERS:BOOL=ON") +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DH5EX_BUILD_FILTERS:BOOL=ON") ### default HDF5_PLUGIN_PATH to where the filter libraries are located #set(ENV{HDF5_PLUGIN_PATH} "${INSTALLDIR}/lib/plugin") ############################################################################################# ### enable parallel program builds -#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_ENABLE_PARALLEL:BOOL=ON") +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DH5EX_ENABLE_PARALLEL:BOOL=ON") ############################################################################################# ### match the hdf5 library namespace set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_NAMESPACE:STRING=hdf5::") -############################################################################################# -### enable threadsafe program builds -#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_ENABLE_THREADSAFE:BOOL=ON") - ############################################################################################# ### enable test program builds, requires reference files in testfiles subdirectory -#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_TESTING:BOOL=ON") -#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCOMPARE_TESTING:BOOL=ON") +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DH5EX_BUILD_TESTING:BOOL=ON") +#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DH5EX_COMPARE_TESTING:BOOL=ON") ############################################################################################# diff --git a/config/cmake/grepTest.cmake b/config/cmake/grepTest.cmake index 529649e4e7c..44aa1975510 100644 --- a/config/cmake/grepTest.cmake +++ b/config/cmake/grepTest.cmake @@ -116,7 +116,7 @@ if (TEST_ERRREF) RESULT_VARIABLE TEST_ERRREF_RESULT ) endif () - message (FATAL_ERROR "Failed: The error output of ${TEST_PROGRAM} did not contain ${TEST_ERRREF}") + message (FATAL_ERROR "Failed: The error output of ${TEST_PROGRAM} did not contain '${TEST_ERRREF}'. 
Error output was: '${TEST_ERR_STREAM}'") endif () endif () endif () diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in index 5ee4d85e4d3..325492ca9ec 100644 --- a/config/cmake/hdf5-config.cmake.in +++ b/config/cmake/hdf5-config.cmake.in @@ -92,7 +92,7 @@ if (${HDF5_PACKAGE_NAME}_ENABLE_PARALLEL) find_package(MPI QUIET REQUIRED) endif () -if (${HDF5_PACKAGE_NAME}_ENABLE_THREADSAFE OR ${HDF5_PACKAGE_NAME}_ENABLE_SUBFILING_VFD) +if (${HDF5_PACKAGE_NAME}_ENABLE_THREADS) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads QUIET REQUIRED) endif () diff --git a/config/cmake/scripts/CTestScript.cmake b/config/cmake/scripts/CTestScript.cmake index ad6cd44014b..46037f573b1 100644 --- a/config/cmake/scripts/CTestScript.cmake +++ b/config/cmake/scripts/CTestScript.cmake @@ -51,15 +51,15 @@ endif () set (BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}") # Launchers work only with Makefile and Ninja generators. -if(NOT "${CTEST_CMAKE_GENERATOR}" MATCHES "Make|Ninja" OR LOCAL_SKIP_TEST) - set(CTEST_USE_LAUNCHERS 0) - set(ENV{CTEST_USE_LAUNCHERS_DEFAULT} 0) - set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=OFF") -else() - set(CTEST_USE_LAUNCHERS 1) - set(ENV{CTEST_USE_LAUNCHERS_DEFAULT} 1) - set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON") -endif() +if (NOT "${CTEST_CMAKE_GENERATOR}" MATCHES "Make|Ninja" OR LOCAL_SKIP_TEST) + set (CTEST_USE_LAUNCHERS 0) + set (ENV{CTEST_USE_LAUNCHERS_DEFAULT} 0) + set (BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=OFF") +else () + set (CTEST_USE_LAUNCHERS 1) + set (ENV{CTEST_USE_LAUNCHERS_DEFAULT} 1) + set (BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON") +endif () #----------------------------------------------------------------------------- # MAC machines need special option @@ -206,7 +206,7 @@ endif () #----------------------------------------------------------------------------- ## -- set output to english -set ($ENV{LC_MESSAGES} "en_EN") +set (ENV{LC_MESSAGES} "en_EN") # Print summary information. foreach (v diff --git a/config/commence.am b/config/commence.am index 1f0dc2edda9..05e4b653d89 100644 --- a/config/commence.am +++ b/config/commence.am @@ -37,7 +37,7 @@ LIBH5CPP=$(top_builddir)/c++/src/libhdf5_cpp.la LIBH5JNI=$(top_builddir)/java/src/jni/libhdf5_java.la LIBH5TOOLS=$(top_builddir)/tools/lib/libh5tools.la LIBH5_HL=$(top_builddir)/hl/src/libhdf5_hl.la -LIBH5F_HL=$(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la +LIBH5F_HL=$(top_builddir)/hl/fortran/src/libhdf5_hl_fortran.la LIBH5CPP_HL=$(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la # Install directories that automake doesn't know about diff --git a/config/lt_vers.am b/config/lt_vers.am index fade5bf1594..7ef44760918 100644 --- a/config/lt_vers.am +++ b/config/lt_vers.am @@ -19,17 +19,18 @@ LT_VERS_INTERFACE = 1000 LT_VERS_AGE = 0 LT_VERS_REVISION = 0 -## If the API changes *at all*, increment LT_VERS_INTERFACE and +## 1. If the API changes *at all*, increment LT_VERS_INTERFACE and ## reset LT_VERS_REVISION to 0. ## -## If the API changes but no function signatures are removed or +## 2. If the API changes but no function signatures are removed or ## changed, also increment LT_VERS_AGE. -## If any functions are removed from the API, or their signatures +## +## 3. 
If any functions are removed from the API, or their signatures ## are changed reset LT_VERS_AGE to 0 to indicate that previous ## versions of the API are not necessarily compatible with this ## version. ## -## If the source changes but there are no API changes, increment +## 4. If the source changes but there are no API changes, increment ## LT_VERS_REVISION. This will happen automatically when ## bin/h5vers is run, but doing it manually shouldn't hurt ## anything. diff --git a/config/nvidia-flags b/config/nvidia-flags index c140edd9830..39bca831314 100644 --- a/config/nvidia-flags +++ b/config/nvidia-flags @@ -92,7 +92,7 @@ if test "X-nvc" = "X-$cc_vendor" -o "X-nvcc" = "X-$cc_vendor"; then ########### NO_SYMBOLS_CFLAGS="-s" - SYMBOLS_CFLAGS="-g" + SYMBOLS_CFLAGS="-gopt" ############# # Profiling # @@ -106,9 +106,8 @@ if test "X-nvc" = "X-$cc_vendor" -o "X-nvcc" = "X-$cc_vendor"; then # Optimization # ################ - HIGH_OPT_CFLAGS="-O1" # -O2+ currently has test failures. - #DEBUG_OPT_CFLAGS="-gopt -O2" - DEBUG_OPT_CFLAGS="-gopt -O1" # -O2+ currently has test failures. + HIGH_OPT_CFLAGS="-O3" + DEBUG_OPT_CFLAGS="-O1" # -O0 can be very slow NO_OPT_CFLAGS="-O0" ################# diff --git a/config/toolchain/mingw64.cmake b/config/toolchain/mingw64.cmake index 1b138919087..d4d2e4e0532 100644 --- a/config/toolchain/mingw64.cmake +++ b/config/toolchain/mingw64.cmake @@ -1,4 +1,4 @@ -set(TOOLCHAIN_PREFIX x86_64-w64-mingw32) +set (TOOLCHAIN_PREFIX x86_64-w64-mingw32) set (CMAKE_SYSTEM_NAME Windows) set (CMAKE_C_COMPILER ${TOOLCHAIN_PREFIX}-gcc) set (CMAKE_CXX_COMPILER ${TOOLCHAIN_PREFIX}-g++) diff --git a/configure.ac b/configure.ac index d2406ed81d2..c51e19ddca3 100644 --- a/configure.ac +++ b/configure.ac @@ -3833,10 +3833,10 @@ AC_DEFINE_UNQUOTED([DEFAULT_PLUGINDIR], ["$default_plugindir"], ## for the speed optimization of hard conversions. Soft conversions can ## actually benefit little. 
## -AC_MSG_CHECKING([whether exception handling functions is checked during data conversions]) +AC_MSG_CHECKING([whether exception handling functions are checked during data conversions]) AC_ARG_ENABLE([dconv-exception], [AS_HELP_STRING([--enable-dconv-exception], - [if exception handling functions is checked during + [Check exception handling functions during data conversions [default=yes]])], [DCONV_EXCEPTION=$enableval], [DCONV_EXCEPTION=yes]) @@ -3857,7 +3857,7 @@ fi AC_MSG_CHECKING([whether data accuracy is guaranteed during data conversions]) AC_ARG_ENABLE([dconv-accuracy], [AS_HELP_STRING([--enable-dconv-accuracy], - [if data accuracy is guaranteed during + [Guarantee data accuracy during data conversions [default=yes]])], [DATA_ACCURACY=$enableval], [DATA_ACCURACY=yes]) diff --git a/doc/img/release-schedule.plantuml b/doc/img/release-schedule.plantuml index 741160e31d3..d0fd79b1426 100644 --- a/doc/img/release-schedule.plantuml +++ b/doc/img/release-schedule.plantuml @@ -23,14 +23,14 @@ Project starts 2023-01-01 [1.12.3] happens 2023-11-30 [1.12] is colored in #88CCEE -[1.14] starts at 2023-01-01 and lasts 122 weeks +[1.14] starts at 2023-01-01 and lasts 118 weeks [1.14.1] happens at 2023-04-30 [1.14.2] happens at 2023-08-31 [1.14.3] happens at 2023-10-31 [1.14.4.2] happens at 2024-04-15 [1.14.4.3] happens at 2024-05-22 [1.14.5] happens at 2024-09-30 -[1.14.6] happens at 2025-04-30 +[1.14.6] happens at 2025-03-31 [1.14.1] displays on same row as [1.14.1] [1.14.2] displays on same row as [1.14.1] [1.14.3] displays on same row as [1.14.1] @@ -39,8 +39,8 @@ Project starts 2023-01-01 [1.14.6] displays on same row as [1.14.1] [1.14] is colored in #B187CF -[1.16] starts at 2025-04-30 and lasts 35 weeks -[1.16.0] happens at 2025-04-30 +[1.16] starts at 2025-03-31 and lasts 35 weeks +[1.16.0] happens at 2025-03-31 [1.16.1] happens at 2025-09-30 [1.16.1] displays on same row as [1.16.0] [1.16] is colored in #02BFA0 diff --git a/doc/img/release-schedule.png b/doc/img/release-schedule.png index 82e1cf5d495..20a0a55986d 100644 Binary files a/doc/img/release-schedule.png and b/doc/img/release-schedule.png differ diff --git a/doc/parallel-compression.md b/doc/parallel-compression.md index 48ed4c3c37d..9879f6efc91 100644 --- a/doc/parallel-compression.md +++ b/doc/parallel-compression.md @@ -64,9 +64,9 @@ H5Dwrite(..., dxpl_id, ...); The following are two simple examples of using the parallel compression feature: -[ph5_filtered_writes.c](https://github.com/HDFGroup/hdf5/blob/develop/HDF5Examples/C/H5PAR/ph5_filtered_writes.c) +[ph5_filtered_writes.c][u1] -[ph5_filtered_writes_no_sel.c](https://github.com/HDFGroup/hdf5/blob/develop/HDF5Examples/C/H5PAR/ph5_filtered_writes_no_sel.c) +[ph5_filtered_writes_no_sel.c][u2] The former contains simple examples of using the parallel compression feature to write to compressed datasets, while the @@ -79,7 +79,7 @@ participate in the collective write call. 
## Multi-dataset I/O support The parallel compression feature is supported when using the -multi-dataset I/O API routines ([H5Dwrite_multi](https://hdfgroup.github.io/hdf5/develop/group___h5_d.html#gaf6213bf3a876c1741810037ff2bb85d8)/[H5Dread_multi](https://hdfgroup.github.io/hdf5/develop/group___h5_d.html#ga8eb1c838aff79a17de385d0707709915)), but the +multi-dataset I/O API routines ([H5Dwrite_multi][u3]/[H5Dread_multi][u4]), but the following should be kept in mind: - Parallel writes to filtered datasets **must** still be collective, @@ -99,7 +99,7 @@ following should be kept in mind: ## Incremental file space allocation support -HDF5's [file space allocation time](https://hdfgroup.github.io/hdf5/develop/group___d_c_p_l.html#ga85faefca58387bba409b65c470d7d851) +HDF5's [file space allocation time][u5] is a dataset creation property that can have significant effects on application performance, especially if the application uses parallel HDF5. In a serial HDF5 application, the default file space @@ -118,7 +118,7 @@ While this strategy has worked in the past, it has some noticeable drawbacks. For one, the larger the chunked dataset being created, the more noticeable overhead there will be during dataset creation as all of the data chunks are being allocated in the HDF5 file. -Further, these data chunks will, by default, be [filled](https://hdfgroup.github.io/hdf5/develop/group___d_c_p_l.html#ga4335bb45b35386daa837b4ff1b9cd4a4) +Further, these data chunks will, by default, be [filled][u6] with HDF5's default fill data value, leading to extraordinary dataset creation overhead and resulting in pre-filling large portions of a dataset that the application might have been planning @@ -126,12 +126,12 @@ to overwrite anyway. Even worse, there will be more initial overhead from compressing that fill data before writing it out, only to have it read back in, unfiltered and modified the first time a chunk is written to. In the past, it was typically suggested that parallel -HDF5 applications should use [H5Pset_fill_time](https://hdfgroup.github.io/hdf5/develop/group___d_c_p_l.html#ga6bd822266b31f86551a9a1d79601b6a2) +HDF5 applications should use [H5Pset_fill_time][u7] with a value of `H5D_FILL_TIME_NEVER` in order to disable writing of the fill value to dataset chunks, but this isn't ideal if the application actually wishes to make use of fill values. -With [improvements made](https://www.hdfgroup.org/2022/03/parallel-compression-improvements-in-hdf5-1-13-1/) +With [improvements made][u8] to the parallel compression feature for the HDF5 1.13.1 release, "incremental" file space allocation is now the default for datasets created in parallel *only if they have filters applied to them*. @@ -154,7 +154,7 @@ optimal performance out of the parallel compression feature. ### Begin with a good chunking strategy -[Starting with a good chunking strategy](https://portal.hdfgroup.org/documentation/hdf5-docs/chunking_in_hdf5.html) +[Starting with a good chunking strategy][u9] will generally have the largest impact on overall application performance. The different chunking parameters can be difficult to fine-tune, but it is essential to start with a well-performing @@ -166,7 +166,7 @@ chosen chunk size becomes a very important factor when compression is involved, as data chunks have to be completely read and re-written to perform partial writes to the chunk. 
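For illustration, a minimal sketch of such a setup is shown below. The chunk dimensions, dataset name, and gzip level are arbitrary example choices; `file_id`, `mem_space_id`, `file_space_id`, and `buf` are assumed to already exist, with the file opened through an MPI-IO file access property list, and error checking is omitted:

```c
hsize_t dims[2]       = {1024, 1024};
hsize_t chunk_dims[2] = {128, 128};   /* chosen to match the application's write pattern */

/* Chunked, gzip-compressed dataset layout (illustrative values) */
hid_t dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
H5Pset_chunk(dcpl_id, 2, chunk_dims);
H5Pset_deflate(dcpl_id, 6);

hid_t space_id = H5Screate_simple(2, dims, NULL);
hid_t dset_id  = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space_id,
                            H5P_DEFAULT, dcpl_id, H5P_DEFAULT);

/* Writes to a filtered dataset must be collective */
hid_t dxpl_id = H5Pcreate(H5P_DATASET_XFER);
H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);

/* Each rank selects its own hyperslab of the file dataspace before writing */
H5Dwrite(dset_id, H5T_NATIVE_INT, mem_space_id, file_space_id, dxpl_id, buf);
```

Choosing chunk dimensions that line up with each rank's selection keeps partial-chunk read-modify-write cycles to a minimum.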
-[Improving I/O performance with HDF5 compressed datasets](https://docs.hdfgroup.org/archive/support/HDF5/doc/TechNotes/TechNote-HDF5-ImprovingIOPerformanceCompressedDatasets.pdf) +[Improving I/O performance with HDF5 compressed datasets][u10] is a useful reference for more information on getting good performance when using a chunked dataset layout. @@ -220,14 +220,14 @@ chunks to end up at addresses in the file that do not align well with the underlying file system, possibly leading to poor performance. As an example, Lustre performance is generally good when writes are aligned with the chosen stripe size. -The HDF5 application can use [H5Pset_alignment](https://hdfgroup.github.io/hdf5/develop/group___f_a_p_l.html#gab99d5af749aeb3896fd9e3ceb273677a) +The HDF5 application can use [H5Pset_alignment][u11] to have a bit more control over where objects in the HDF5 file end up. However, do note that setting the alignment of objects generally wastes space in the file and has the potential to dramatically increase its resulting size, so caution should be used when choosing the alignment parameters. -[H5Pset_alignment](https://hdfgroup.github.io/hdf5/develop/group___f_a_p_l.html#gab99d5af749aeb3896fd9e3ceb273677a) +[H5Pset_alignment][u11] has two parameters that control the alignment of objects in the HDF5 file, the "threshold" value and the alignment value. The threshold value specifies that any object greater @@ -264,19 +264,19 @@ in a file, this can create significant amounts of free space in the file over its lifetime and eventually cause performance issues. -An HDF5 application can use [H5Pset_file_space_strategy](https://hdfgroup.github.io/hdf5/develop/group___f_c_p_l.html#ga167ff65f392ca3b7f1933b1cee1b9f70) +An HDF5 application can use [H5Pset_file_space_strategy][u12] with a value of `H5F_FSPACE_STRATEGY_PAGE` to enable the paged aggregation feature, which can accumulate metadata and raw data for dataset data chunks into well-aligned, configurably sized "pages" for better performance. However, note that using the paged aggregation feature will cause any setting from -[H5Pset_alignment](https://hdfgroup.github.io/hdf5/develop/group___f_a_p_l.html#gab99d5af749aeb3896fd9e3ceb273677a) +[H5Pset_alignment][u11] to be ignored. While an application should be able to get -comparable performance effects by [setting the size of these pages](https://hdfgroup.github.io/hdf5/develop/group___f_c_p_l.html#gad012d7f3c2f1e1999eb1770aae3a4963) to be equal to the value that -would have been set for [H5Pset_alignment](https://hdfgroup.github.io/hdf5/develop/group___f_a_p_l.html#gab99d5af749aeb3896fd9e3ceb273677a), +comparable performance effects by [setting the size of these pages][u13] +to be equal to the value that would have been set for [H5Pset_alignment][u11], this may not necessarily be the case and should be studied. -Note that [H5Pset_file_space_strategy](https://hdfgroup.github.io/hdf5/develop/group___f_c_p_l.html#ga167ff65f392ca3b7f1933b1cee1b9f70) +Note that [H5Pset_file_space_strategy][u12] has a `persist` parameter. This determines whether or not the file free space manager should include extra metadata in the HDF5 file about free space sections in the file. 
If this @@ -300,12 +300,12 @@ hid_t file_id = H5Fcreate("file.h5", H5F_ACC_TRUNC, fcpl_id, fapl_id); While the parallel compression feature requires that the HDF5 application set and maintain collective I/O at the application -interface level (via [H5Pset_dxpl_mpio](https://hdfgroup.github.io/hdf5/develop/group___d_x_p_l.html#ga001a22b64f60b815abf5de8b4776f09e)), +interface level (via [H5Pset_dxpl_mpio][u14]), it does not require that the actual MPI I/O that occurs at the lowest layers of HDF5 be collective; independent I/O may perform better depending on the application I/O patterns and parallel file system performance, among other factors. The -application may use [H5Pset_dxpl_mpio_collective_opt](https://hdfgroup.github.io/hdf5/develop/group___d_x_p_l.html#gacb30d14d1791ec7ff9ee73aa148a51a3) +application may use [H5Pset_dxpl_mpio_collective_opt][u15] to control this setting and see which I/O method provides the best performance. @@ -318,7 +318,7 @@ H5Dwrite(..., dxpl_id, ...); ### Runtime HDF5 Library version -An HDF5 application can use the [H5Pset_libver_bounds](https://hdfgroup.github.io/hdf5/develop/group___f_a_p_l.html#gacbe1724e7f70cd17ed687417a1d2a910) +An HDF5 application can use the [H5Pset_libver_bounds][u16] routine to set the upper and lower bounds on library versions to use when creating HDF5 objects. For parallel compression specifically, setting the library version to the latest available @@ -332,3 +332,20 @@ H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); hid_t file_id = H5Fcreate("file.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); ... ``` + +[u1]: https://github.com/HDFGroup/hdf5/blob/develop/HDF5Examples/C/H5PAR/ph5_filtered_writes.c +[u2]: https://github.com/HDFGroup/hdf5/blob/develop/HDF5Examples/C/H5PAR/ph5_filtered_writes_no_sel.c +[u3]: https://hdfgroup.github.io/hdf5/develop/group___h5_d.html#gaf6213bf3a876c1741810037ff2bb85d8 +[u4]: https://hdfgroup.github.io/hdf5/develop/group___h5_d.html#ga8eb1c838aff79a17de385d0707709915 +[u5]: https://hdfgroup.github.io/hdf5/develop/group___d_c_p_l.html#ga85faefca58387bba409b65c470d7d851 +[u6]: https://hdfgroup.github.io/hdf5/develop/group___d_c_p_l.html#ga4335bb45b35386daa837b4ff1b9cd4a4 +[u7]: https://hdfgroup.github.io/hdf5/develop/group___d_c_p_l.html#ga6bd822266b31f86551a9a1d79601b6a2 +[u8]: https://www.hdfgroup.org/2022/03/04/parallel-compression-improvements-in-hdf5-1-13-1/ +[u9]: https://support.hdfgroup.org/releases/hdf5/documentation/advanced_topics/chunking_in_hdf5.md +[u10]: https://support.hdfgroup.org/releases/hdf5/documentation/hdf5_topics/HDF5ImprovingIOPerformanceCompressedDatasets.pdf +[u11]: https://hdfgroup.github.io/hdf5/develop/group___f_a_p_l.html#gab99d5af749aeb3896fd9e3ceb273677a +[u12]: https://hdfgroup.github.io/hdf5/develop/group___f_c_p_l.html#ga167ff65f392ca3b7f1933b1cee1b9f70 +[u13]: https://hdfgroup.github.io/hdf5/develop/group___f_c_p_l.html#gad012d7f3c2f1e1999eb1770aae3a4963 +[u14]: https://hdfgroup.github.io/hdf5/develop/group___d_x_p_l.html#ga001a22b64f60b815abf5de8b4776f09e +[u15]: https://hdfgroup.github.io/hdf5/develop/group___d_x_p_l.html#gacb30d14d1791ec7ff9ee73aa148a51a3 +[u16]: https://hdfgroup.github.io/hdf5/develop/group___f_a_p_l.html#gacbe1724e7f70cd17ed687417a1d2a910 diff --git a/doxygen/aliases b/doxygen/aliases index da412d5c8f1..3c79b179df0 100644 --- a/doxygen/aliases +++ b/doxygen/aliases @@ -4,17 +4,16 @@ ALIASES += THG="The HDF Group" # Default URLs (Note that md files do not use any aliases) 
################################################################################ # Default URL for HDF Group Files -ALIASES += HDFURL="docs.hdfgroup.org/hdf5" +ALIASES += HDFURL="support.hdfgroup.org" # URL for archived files -ALIASES += ARCURL="docs.hdfgroup.org/archive/support/HDF5/doc" -# URL for RFCs -ALIASES += RFCURL="docs.hdfgroup.org/hdf5/rfc" +ALIASES += ARCURL="\HDFURL/archive/support/HDF5/doc" # URL for documentation -ALIASES += DSPURL="portal.hdfgroup.org/display/HDF5" -ALIASES += DOCURL="portal.hdfgroup.org/documentation/hdf5-docs" +ALIASES += DOCURL="\HDFURL/releases/hdf5/documentation" # URL for downloads -ALIASES += DWNURL="portal.hdfgroup.org/downloads" -ALIASES += AEXURL="support.hdfgroup.org/ftp/HDF5/examples" +ALIASES += DWNURL="\HDFURL/releases/hdf5/downloads" +# URL for RFCs +ALIASES += RFCURL="\DOCURL/rfc" +ALIASES += AEXURL="\HDFURL/archive/support/ftp/HDF5/examples" # doxygen subdir (develop, v1_14) ALIASES += DOXURL="hdfgroup.github.io/hdf5/develop" #branch name (develop, hdf5_1_14) @@ -29,7 +28,7 @@ ALIASES += PLURL="github.com/HDFGroup/hdf5_plugins/blob/master" ALIASES += Bold{1}="\1" ALIASES += Emph{1}="\1" -ALIASES += Code{1}="\1" +ALIASES += TText{1}="\1" ################################################################################ # Return values @@ -249,7 +248,7 @@ ALIASES += es_id{1}="\param[in] \1 Event set identifier" # Others ################################################################################ -ALIASES += cpp_c_api_note="\attention \Bold{C++ Developers using HDF5 C-API functions beware:}\n Several functions in this C-API take function pointers or callbacks as arguments. Examples include H5Pset_elink_cb(), H5Pset_type_conv_cb(), H5Tconvert(), and H5Ewalk2(). Application code must ensure that those callback functions return normally such to allow the HDF5 to manage its resources and maintain a consistent state. For instance, those functions must not use the C \c setjmp / \c longjmp mechanism to leave those callback functions. Within the context of C++, any exceptions thrown within the callback function must be caught, such as with a \Code{catch(…)} statement. Any exception state can be placed within the provided user data function call arguments, and may be thrown again once the calling function has returned. Exceptions raised and not handled inside the callback are not supported as it might leave the HDF5 library in an inconsistent state. Similarly, using C++20 coroutines cannot be used as callbacks, since they do not support plain return statements. If a callback function yields execution to another C++20 coroutine calling HDF5 functions as well, this may lead to undefined behavior." +ALIASES += cpp_c_api_note="\attention \Bold{C++ Developers using HDF5 C-API functions beware:}\n Several functions in this C-API take function pointers or callbacks as arguments. Examples include H5Pset_elink_cb(), H5Pset_type_conv_cb(), H5Tconvert(), and H5Ewalk2(). Application code must ensure that those callback functions return normally such to allow the HDF5 to manage its resources and maintain a consistent state. For instance, those functions must not use the C \c setjmp / \c longjmp mechanism to leave those callback functions. Within the context of C++, any exceptions thrown within the callback function must be caught, such as with a \TText{catch(…)} statement. Any exception state can be placed within the provided user data function call arguments, and may be thrown again once the calling function has returned. 
Exceptions raised and not handled inside the callback are not supported as it might leave the HDF5 library in an inconsistent state. Similarly, using C++20 coroutines cannot be used as callbacks, since they do not support plain return statements. If a callback function yields execution to another C++20 coroutine calling HDF5 functions as well, this may lead to undefined behavior." ALIASES += par_compr_note="\attention If you are planning to use compression with parallel HDF5, ensure that calls to H5Dwrite() occur in collective mode. In other words, all MPI ranks (in the relevant communicator) call H5Dwrite() and pass a dataset transfer property list with the MPI-IO collective option property set to #H5FD_MPIO_COLLECTIVE_IO.\n Note that data transformations are currently \Bold{not} supported when writing to datasets in parallel and with compression enabled." ALIASES += sa_metadata_ops="\sa \li H5Pget_all_coll_metadata_ops() \li H5Pget_coll_metadata_write() \li H5Pset_all_coll_metadata_ops() \li H5Pset_coll_metadata_write() \li \ref maybe_metadata_reads" @@ -259,13 +258,13 @@ ALIASES += sa_metadata_ops="\sa \li H5Pget_all_coll_metadata_ops() \li H5Pget_co ALIASES += ref_cons_semantics="Enabling a Strict Consistency Semantics Model in Parallel HDF5" ALIASES += ref_file_image_ops="HDF5 File Image Operations" -ALIASES += ref_filter_pipe="Data Flow Pipeline for H5Dread()" +ALIASES += ref_filter_pipe="Data Flow Pipeline for H5Dread()" ALIASES += ref_group_impls="Group implementations in HDF5" ALIASES += ref_h5lib_relver="HDF5 Library Release Version Numbers" -ALIASES += ref_mdc_in_hdf5="Metadata Caching in HDF5" -ALIASES += ref_mdc_logging="Metadata Cache Logging" -ALIASES += ref_news_112="New Features in HDF5 Release 1.12" -ALIASES += ref_h5ocopy="Copying Committed Datatypes with H5Ocopy()" +ALIASES += ref_mdc_in_hdf5="Metadata Caching in HDF5" +ALIASES += ref_mdc_logging="Metadata Cache Logging" +ALIASES += ref_news_112="New Features in HDF5 Release 1.12" +ALIASES += ref_h5ocopy="Copying Committed Datatypes with H5Ocopy()" ALIASES += ref_sencode_fmt_change="RFC H5Sencode() / H5Sdecode() Format Change" ALIASES += ref_vlen_strings="\Emph{Creating variable-length string datatypes}" ALIASES += ref_vol_doc="VOL documentation" diff --git a/doxygen/dox/About.dox b/doxygen/dox/About.dox index d4a1db2bc62..73010b0c3de 100644 --- a/doxygen/dox/About.dox +++ b/doxygen/dox/About.dox @@ -33,8 +33,8 @@ Please refer to the \ref RMT for guidance on how to create a new reference manua \subsubsection new_example Adding and Referencing API Examples -For each HDF5 module, such as \Code{H5F}, there is an examples source file called -\Code{H5*_examples.c}. For example, the \Code{H5F} API examples are located in +For each HDF5 module, such as \TText{H5F}, there is an examples source file called +\TText{H5*_examples.c}. For example, the \TText{H5F} API examples are located in H5F_examples.c. Examples are code blocks marked as Doxygen snippets. @@ -83,7 +83,7 @@ as a general reference. All custom commands for this project are located in the aliases -file in the doxygen +file in the doxygen subdirectory of the main HDF5 repo. The custom commands are grouped in sections. Find a suitable section for your command or @@ -94,7 +94,7 @@ ask for help if unsure! For ease of reference, we define custom commands for each RFC in the RFCs section of the aliases -file. For example the custom command \Code{ref_rfc20141210} can be used to insert a +file. 
For example the custom command \TText{ref_rfc20141210} can be used to insert a reference to "RFC: Virtual Object Layer". In other words, the markup \verbatim \ref_rfc20141210 @@ -105,8 +105,8 @@ yields a clickable link: To add a new RFC, add a custom command for the RFC to the aliases -file. The naming convention for the custom command is \Code{ref_rfcYYYYMMDD}, -where \Code{YYYYMMDD} is the ID of the RFC. The URL is composed of the prefix +file. The naming convention for the custom command is \TText{ref_rfcYYYYMMDD}, +where \TText{YYYYMMDD} is the ID of the RFC. The URL is composed of the prefix \verbatim https://\RFCURL/ \endverbatim @@ -116,4 +116,4 @@ be https://\RFCURL/my_great_rfc_name.pdf \endverbatim -*/ \ No newline at end of file +*/ diff --git a/doxygen/dox/CollectiveCalls.dox b/doxygen/dox/CollectiveCalls.dox new file mode 100644 index 00000000000..9f26896262b --- /dev/null +++ b/doxygen/dox/CollectiveCalls.dox @@ -0,0 +1,1265 @@ +/** \page collective_calls Collective Calling Requirements in Parallel HDF5 Applications + * + * \section sec_collective_calls_intro Introduction + * This document addresses two topics of concern + in a parallel computing environment: + + + The term @ref options in the "Additional notes" + column indicates that the first item in the "Function" + column of the same row is a macro that is selectively mapped to one + of the two immediately-following functions. + For example, #H5Acreate is a macro that can be mapped to + either #H5Acreate1 or #H5Acreate2. + This mapping is configurable and is explained in + \ref api-compat-macros. + The macro structure was introduced at HDF5 Release 1.8.0. + * + * \section sec_collective_calls_func Always collective + * The following functions must always be called collectively. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ | API | Function | All processes: same datatype & dataspace | All processes: same access properties | All processes: same creation properties | Available in releases since | Additional notes |
+ | H5A | #H5Acreate / #H5Acreate1 / #H5Acreate2 | X | X | X | 1.8.x | @ref options The function #H5Acreate was renamed to #H5Acreate1 at Release 1.8.0. |
+ |     | #H5Acreate_by_name | X | X | X | 1.8.x | |
+ |     | #H5Adelete | | | | | |
+ |     | #H5Adelete_by_idx | | | | 1.8.x | |
+ |     | #H5Adelete_by_name | | | | 1.8.x | |
+ |     | #H5Arename | | | | 1.6.x | |
+ |     | #H5Arename_by_name | | | | 1.8.x | |
+ |     | #H5Awrite | | | | | Because raw data for an attribute is cached locally, all processes must participate in order to guarantee that future #H5Aread calls return correct results on all processes. |
+ | H5D | #H5Dcreate / #H5Dcreate1 / #H5Dcreate2 | X | X | X | 1.8.x | @ref options The function #H5Dcreate was renamed to #H5Dcreate1 at Release 1.8.0. |
+ |     | #H5Dcreate_anon | X | X | X | 1.8.x | |
+ |     | #H5Dextend | | | | | All processes must participate only if the number of chunks in the dataset actually changes. All processes must use the same dataspace dimensions. |
+ |     | #H5Dset_extent | | | | 1.6.x | All processes must participate only if the number of chunks in the dataset actually changes. All processes must use the same dataspace dimensions. |
+ | H5F | #H5Fclose | | | | | All processes must participate only if this is the last reference to the file identifier. |
+ |     | #H5Fcreate | | X | X | | |
+ |     | #H5Fflush | | | | | |
+ |     | #H5Fmount | | | | | |
+ |     | #H5Fopen | | X | | | |
+ |     | #H5Freopen | | | | | |
+ |     | #H5Funmount | | | | | |
+ | H5G | #H5Gcreate / #H5Gcreate1 / #H5Gcreate2 | | X | X | 1.8.x | @ref options The function #H5Gcreate was renamed to #H5Gcreate1 at Release 1.8.0. |
+ |     | #H5Gcreate_anon | | X | X | 1.8.x | |
+ |     | #H5Glink | | | | | |
+ |     | #H5Glink2 | | | | 1.6.x | |
+ |     | #H5Gmove | | | | | |
+ |     | #H5Gmove2 | | | | 1.6.x | |
+ |     | #H5Gset_comment | | | | | |
+ |     | #H5Gunlink | | | | | |
+ | H5I | #H5Idec_ref | | | | 1.6.x | This function may be called independently if the object identifier does not refer to an object that was collectively opened. |
+ |     | #H5Iinc_ref | | | | 1.6.x | This function may be called independently if the object identifier does not refer to an object that was collectively opened. |
+ | H5L | #H5Lcopy | | | | 1.8.x | |
+ |     | #H5Lcreate_external | | | X | 1.8.x | |
+ |     | #H5Lcreate_hard | | | X | 1.8.x | |
+ |     | #H5Lcreate_soft | | | X | 1.8.x | |
+ |     | #H5Lcreate_ud | | | X | 1.8.x | |
+ |     | #H5Ldelete | | | | 1.8.x | |
+ |     | #H5Ldelete_by_idx | | | | 1.8.x | |
+ |     | #H5Lmove | | | | 1.8.x | |
+ | H5O | #H5Ocopy | | | | 1.8.x | |
+ |     | #H5Odecr_refcount | | | | 1.8.x | |
+ |     | #H5Oincr_refcount | | | | 1.8.x | |
+ |     | #H5Olink | | | | 1.8.x | |
+ |     | #H5Oset_comment | | | | 1.8.x | |
+ |     | #H5Oset_comment_by_name | | | | 1.8.x | |
+ | H5R | #H5Rcreate | | | | | |
+ | H5T | #H5Tcommit / #H5Tcommit1 / #H5Tcommit2 | | X | X | 1.8.x | @ref options The function #H5Tcommit was renamed to #H5Tcommit1 at Release 1.8.0. |
+ |     | #H5Tcommit_anon | | X | X | 1.8.x | |
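For example, a minimal sketch of the pattern these requirements imply is given below. The file name, dataset name, size, and communicator are illustrative, and error checking is omitted; the point is that every rank makes the same calls with identical datatype, dataspace, and property arguments.

\code
/* Executed by every MPI rank with identical arguments */
hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);   /* same access properties */

hid_t file_id = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);

hsize_t dims[1]  = {1000};
hid_t   space_id = H5Screate_simple(1, dims, NULL);         /* same dataspace */
hid_t   dset_id  = H5Dcreate2(file_id, "data", H5T_NATIVE_DOUBLE, space_id,
                              H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); /* same datatype & properties */

H5Dclose(dset_id);
H5Sclose(space_id);
H5Pclose(fapl_id);
H5Fclose(file_id);   /* collective here: last reference to the file identifier */
\endcode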
+ *
+ * \section sec_collective_calls_nomod Collective, unless target object will not be modified
+ * The following functions must normally be called collectively.
+ * If, however, the target object will not be modified,
+ * they may be called independently.
+ *
+ | API | Function | All processes: same datatype & dataspace | All processes: same access properties | All processes: same creation properties | Available in releases since | Additional notes |
+ | H5A | #H5Aclose | | | | | All processes must participate only if all file identifiers for a file have been closed and this is the last outstanding object identifier. |
+ |     | #H5Aopen | | X | | 1.8.x | |
+ |     | #H5Aopen_by_idx | | X | | 1.8.x | |
+ |     | #H5Aopen_by_name | | X | | 1.8.x | |
+ |     | #H5Aopen_idx | | X | | | |
+ |     | #H5Aopen_name | | X | | | |
+ | H5D | #H5Dclose | | | | | All processes must participate only if all file identifiers for a file have been closed and this is the last outstanding object identifier. |
+ |     | #H5Dopen / #H5Dopen1 / #H5Dopen2 | | X | | 1.8.x | @ref options The function #H5Dopen was renamed to #H5Dopen1 at Release 1.8.0. |
+ | H5G | #H5Gclose | | | | | All processes must participate only if all file identifiers for a file have been closed and this is the last outstanding object identifier. |
+ |     | #H5Gopen / #H5Gopen1 / #H5Gopen2 | | X | | 1.8.x | @ref options The function #H5Gopen was renamed to #H5Gopen1 at Release 1.8.0. |
+ | H5I | #H5Iget_file_id | | | | 1.8.x | |
+ | H5O | #H5Oclose | | | | 1.8.x | All processes must participate only if all file identifiers for a file have been closed and this is the last outstanding object identifier. |
+ |     | #H5Oopen | | X | | 1.8.x | |
+ |     | #H5Oopen_by_addr | | X | | 1.8.x | |
+ |     | #H5Oopen_by_idx | | X | | 1.8.x | |
+ | H5R | #H5Rdereference | | | | | |
+ | H5T | #H5Tclose | | | | | All processes must participate only if the datatype is for a committed datatype, all the file identifiers for the file have been closed, and this is the last outstanding object identifier. |
+ |     | #H5Topen / #H5Topen1 / #H5Topen2 | | X | | 1.8.x | @ref options The function #H5Topen was renamed to #H5Topen1 at Release 1.8.0. |
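For example, the sketch below shows read-only access to a dataset that is not modified, so the open, read, and close may be performed by a single rank. It assumes `file_id` is open on all ranks (and remains open, so the close is not the last outstanding identifier), and that the dataset holds 1000 doubles; error checking is omitted.

\code
/* Read-only access: the target dataset is not modified, so this
 * open/read/close sequence may be performed independently. */
hid_t  dset_id = H5Dopen2(file_id, "data", H5P_DEFAULT);
double buf[1000];
H5Dread(dset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
H5Dclose(dset_id);
\endcode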
+ *
+ * \section sec_collective_calls_props Properties
+ * The following properties must be set to the same values
+ * for an object or link in all cases where the object or link is accessed
+ * in a parallel program.
+ *
+ | Function | Available in releases since |
+ | Dataset creation properties | |
+ | #H5Pmodify_filter | 1.8.x |
+ | #H5Premove_filter | 1.8.x |
+ | #H5Pset_alloc_time | |
+ | #H5Pset_chunk | |
+ | #H5Pset_external | |
+ | #H5Pset_fill_time | |
+ | #H5Pset_fill_value | |
+ | #H5Pset_filter | |
+ | #H5Pset_fletcher32 | 1.8.x |
+ | #H5Pset_layout | |
+ | #H5Pset_nbit | 1.8.x |
+ | #H5Pset_shuffle | |
+ | #H5Pset_szip | |
+ | Dataset transfer properties | |
+ | #H5Pset_btree_ratios | |
+ | #H5Pset_buffer | |
+ | #H5Pset_dxpl_mpio | |
+ | #H5Pset_preserve | |
+ | File access properties | |
+ | #H5Pset_alignment | |
+ | #H5Pset_cache | |
+ | #H5Pset_fapl_mpio | |
+ | #H5Pset_fclose_degree | |
+ | #H5Pset_gc_references | |
+ | #H5Fset_latest_format | 1.8.x |
+ | #H5Pset_libver_bounds | 1.8.x |
+ | #H5Pset_mdc_config | |
+ | #H5Pset_meta_block_size | |
+ | #H5Pset_small_data_block_size | |
+ | #H5Pset_sieve_buf_size | |
+
+ | Function | Available in releases since |
+ | File creation properties | |
+ | #H5Pset_istore_k | |
+ | #H5Pset_shared_mesg_index | 1.8.x |
+ | #H5Pset_shared_mesg_nindexes | 1.8.x |
+ | #H5Pset_shared_mesg_phase_change | 1.8.x |
+ | #H5Pset_sizes | |
+ | #H5Pset_sym_k | |
+ | #H5Pset_userblock | |
+ | Group creation properties | |
+ | #H5Pset_est_link_info | 1.8.x |
+ | #H5Pset_link_creation_order | 1.8.x |
+ | #H5Pset_link_phase_change | 1.8.x |
+ | #H5Pset_local_heap_size_hint | 1.8.x |
+ | Link creation properties | |
+ | #H5Pset_char_encoding | 1.8.x |
+ | #H5Pset_create_intermediate_group | 1.8.x |
+ | Object creation properties | |
+ | #H5Pset_attr_phase_change | 1.8.x |
+ | #H5Pset_attr_creation_order | 1.8.x |
+ | #H5Pset_obj_track_times | 1.8.x |
+ | Object copy properties | |
+ | #H5Pset_copy_object | 1.8.x |
+
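As an illustration, the sketch below builds property lists identically on every rank before the corresponding object is created or accessed. The chunk size, fill value, alignment values, and communicator are arbitrary example choices, and error checking is omitted.

\code
/* Every rank constructs identical property lists */
hid_t   dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
hsize_t chunk_dims[2] = {64, 64};
H5Pset_chunk(dcpl_id, 2, chunk_dims);                     /* dataset creation property */
int fill_value = -1;
H5Pset_fill_value(dcpl_id, H5T_NATIVE_INT, &fill_value);

hid_t dxpl_id = H5Pcreate(H5P_DATASET_XFER);
H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);          /* dataset transfer property */

hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL); /* file access property */
H5Pset_alignment(fapl_id, 4096, 1048576);                 /* align objects >= 4 KiB to 1 MiB */
\endcode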
+ + */ diff --git a/doxygen/dox/DDLBNF110.dox b/doxygen/dox/DDLBNF110.dox index 6d6b67ef7fd..b392526417a 100644 --- a/doxygen/dox/DDLBNF110.dox +++ b/doxygen/dox/DDLBNF110.dox @@ -1,7 +1,5 @@ /** \page DDLBNF110 DDL in BNF through HDF5 1.10 -\todo Revise this & break it up! - \section intro110 Introduction This document contains the data description language (DDL) for an HDF5 file. The diff --git a/doxygen/dox/DDLBNF112.dox b/doxygen/dox/DDLBNF112.dox index cfe34c321f9..c6463c23d5c 100644 --- a/doxygen/dox/DDLBNF112.dox +++ b/doxygen/dox/DDLBNF112.dox @@ -1,7 +1,5 @@ /** \page DDLBNF112 DDL in BNF for HDF5 1.12 through HDF5 1.14.3 -\todo Revise this & break it up! - \section intro112 Introduction This document contains the data description language (DDL) for an HDF5 file. The diff --git a/doxygen/dox/DDLBNF114.dox b/doxygen/dox/DDLBNF114.dox index 61e9157e560..baa7a57fea6 100644 --- a/doxygen/dox/DDLBNF114.dox +++ b/doxygen/dox/DDLBNF114.dox @@ -1,7 +1,5 @@ /** \page DDLBNF114 DDL in BNF for HDF5 1.14.4 and above -\todo Revise this & break it up! - \section intro114 Introduction This document contains the data description language (DDL) for an HDF5 file. The diff --git a/doxygen/dox/ExamplesAPI.dox b/doxygen/dox/ExamplesAPI.dox index c48b00e6dbb..dbd24f4d888 100644 --- a/doxygen/dox/ExamplesAPI.dox +++ b/doxygen/dox/ExamplesAPI.dox @@ -30,7 +30,7 @@ Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_alloc.h5 @@ -43,7 +43,7 @@ Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_checksum.h5 @@ -56,7 +56,7 @@ Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_chunk.h5 @@ -69,7 +69,7 @@ Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_compact.h5 @@ -82,7 +82,7 @@ Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_extern.h5 @@ -95,7 +95,7 @@ Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_fillval.h5 @@ -108,7 +108,7 @@ Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_gzip.h5 @@ -121,7 +121,7 @@ Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_hyper.h5 @@ -134,7 +134,7 @@ Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_nbit.h5 @@ -147,7 +147,7 @@ Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_rdwrc.h5 @@ -160,7 +160,7 @@ Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_shuffle.h5 @@ -173,7 +173,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_sofloat.h5 @@ -186,7 +186,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_soint.h5 @@ -199,7 +199,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_szip.h5 @@ -212,7 +212,7 @@ FORTRAN C FORTRAN Java 
-JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_transform.h5 @@ -225,7 +225,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_unlimadd.h5 @@ -238,7 +238,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_unlimgzip.h5 @@ -251,7 +251,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_d_unlimmod.h5 @@ -275,7 +275,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_g_compact.h5 @@ -289,7 +289,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_g_corder.h5 @@ -302,7 +302,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_g_create.h5 @@ -315,7 +315,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_g_intermediate.h5 @@ -328,7 +328,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_g_iterate.h5 @@ -341,7 +341,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_g_phase.h5 @@ -366,7 +366,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_g_visit.h5 @@ -388,9 +388,9 @@ FORTRAN Read / Write Array (Attribute) C -FORTRAN +FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_arrayatt.h5 @@ -401,9 +401,9 @@ FORTRAN Read / Write Array (Dataset) C -FORTRAN +FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_array.h5 @@ -414,9 +414,9 @@ FORTRAN Read / Write Bitfield (Attribute) C -FORTRAN +FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_bitatt.h5 @@ -427,9 +427,9 @@ FORTRAN Read / Write Bitfield (Dataset) C -FORTRAN +FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_bit.h5 @@ -440,9 +440,9 @@ FORTRAN Read / Write Compound (Attribute) C -FORTRAN +FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_cmpdatt.h5 @@ -453,9 +453,9 @@ FORTRAN Read / Write Compound (Dataset) C -FORTRAN +FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_cmpd.h5 @@ -468,7 +468,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_commit.h5 @@ -533,7 +533,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_floatatt.h5 @@ -546,7 +546,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_float.h5 @@ -559,7 +559,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_intatt.h5 @@ -572,7 +572,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_int.h5 @@ -585,7 +585,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_objrefatt.h5 @@ -598,7 +598,7 @@ FORTRAN C FORTRAN Java - JavaObj + JavaObj MATLAB PyHigh PyLow h5ex_t_objref.h5 @@ -611,7 +611,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_opaqueatt.h5 @@ -624,7 +624,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_opaque.h5 @@ -637,7 +637,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_regrefatt.h5 @@ -650,7 +650,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_regref.h5 @@ -661,9 +661,9 @@ FORTRAN Read / Write String (Attribute) C -FORTRAN +FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_stringatt.h5 @@ -676,7 +676,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_string.h5 @@ -709,8 +709,7 @@ FORTRAN Read / Write Variable Length String (Attribute) C -FORTRAN - Java JavaObj MATLAB PyHigh PyLow + FORTRAN Java JavaObj MATLAB PyHigh PyLow h5ex_t_vlstringatt.h5 h5ex_t_vlstringatt.tst @@ -722,7 +721,7 @@ FORTRAN C FORTRAN Java -JavaObj +JavaObj MATLAB PyHigh PyLow h5ex_t_vlstring.h5 @@ -843,7 +842,7 @@ FORTRAN 
Create/Read/Write an Attribute Java -JavaObj +JavaObj HDF5AttributeCreate.txt @@ -851,7 +850,7 @@ FORTRAN Create Datasets Java -JavaObj +JavaObj HDF5DatasetCreate.txt @@ -859,7 +858,7 @@ FORTRAN Read/Write Datasets Java -JavaObj +JavaObj HDF5DatasetRead.txt @@ -867,7 +866,7 @@ FORTRAN Create an Empty File Java -JavaObj +JavaObj HDF5FileCreate.txt @@ -883,9 +882,9 @@ FORTRAN Create Groups Java -JavaObj +JavaObj -HDF5GroupCreate.txt +HDF5GroupCreate.txt Select a Subset of a Dataset @@ -899,9 +898,9 @@ FORTRAN Create Two Datasets Within Groups Java -JavaObj +JavaObj -HDF5GroupDatasetCreate.txt +HDF5GroupDatasetCreate.txt @@ -918,7 +917,7 @@ FORTRAN Creating and Accessing a File C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5 @@ -928,7 +927,7 @@ FORTRAN Creating and Accessing a Dataset C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5 @@ -938,7 +937,7 @@ FORTRAN Writing and Reading Contiguous Hyperslabs C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5 @@ -948,7 +947,7 @@ FORTRAN Writing and Reading Regularly Spaced Data Hyperslabs C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5 @@ -958,7 +957,7 @@ FORTRAN Writing and Reading Pattern Hyperslabs C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5 @@ -968,7 +967,7 @@ FORTRAN Writing and Reading Chunk Hyperslabs C -FORTRAN +FORTRAN MATLAB PyHigh PyLow ph5_.h5 @@ -978,7 +977,8 @@ FORTRAN Using the Subfiling VFD to Write a File Striped Across Multiple Subfiles C - FORTRAN MATLAB PyHigh PyLow +FORTRAN + MATLAB PyHigh PyLow ph5_.h5 ph5_.tst @@ -996,7 +996,8 @@ FORTRAN Collectively Write Datasets with Filters and Not All Ranks have Data C - FORTRAN MATLAB PyHigh PyLow +FORTRAN + MATLAB PyHigh PyLow ph5_.h5 ph5_.tst diff --git a/doxygen/dox/GettingStarted.dox b/doxygen/dox/GettingStarted.dox index aa81ca28744..274598c9537 100644 --- a/doxygen/dox/GettingStarted.dox +++ b/doxygen/dox/GettingStarted.dox @@ -38,7 +38,7 @@ Step by step instructions for learning HDF5 that include programming examples \subsection subsec_learn_tutor The HDF Group Tutorials and Examples These tutorials and examples are available for learning about the HDF5 High Level APIs, tools, -Parallel HDF5, and the HDF5-1.10 VDS and SWMR new features: +Parallel HDF5, and the VDS and SWMR features: - @@ -91,7 +91,7 @@ These examples (C, C++, Fortran, Java, Python) are provided in the HDF5 source c - @@ -107,7 +107,7 @@ These examples (C, C++, Fortran, Java, Python) are provided in the HDF5 source c - @@ -131,7 +131,7 @@ These examples (C, C++, Fortran, Java, Python) are provided in the HDF5 source c - diff --git a/doxygen/dox/LearnBasics3.dox b/doxygen/dox/LearnBasics3.dox index 3e9dd8ea090..13cb4f43abd 100644 --- a/doxygen/dox/LearnBasics3.dox +++ b/doxygen/dox/LearnBasics3.dox @@ -181,8 +181,8 @@ created the dataset layout cannot be changed. The h5repack utility can be used t to a new with a new layout. \section secLBDsetLayoutSource Sources of Information -Chunking in HDF5 -(See the documentation on Advanced Topics in HDF5) +Chunking in HDF5 +(See the documentation on Advanced Topics in HDF5) \see \ref sec_plist in the HDF5 \ref UG.
@@ -201,7 +201,7 @@ certain initial dimensions, then to later increase the size of any of the initia HDF5 requires you to use chunking to define extendible datasets. This makes it possible to extend datasets efficiently without having to excessively reorganize storage. (To use chunking efficiently, -be sure to see the advanced topic, Chunking in HDF5.) +be sure to see the advanced topic, Chunking in HDF5.) The following operations are required in order to extend a dataset: \li Declare the dataspace of the dataset to have unlimited dimensions for all dimensions that might eventually be extended. @@ -243,7 +243,7 @@ Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics \section secLBComDsetCreate Creating a Compressed Dataset HDF5 requires you to use chunking to create a compressed dataset. (To use chunking efficiently, -be sure to see the advanced topic, Chunking in HDF5.) +be sure to see the advanced topic, Chunking in HDF5.) The following operations are required in order to create a compressed dataset: \li Create a dataset creation property list. @@ -251,7 +251,7 @@ The following operations are required in order to create a compressed dataset: \li Create the dataset. \li Close the dataset creation property list and dataset. -For more information on compression, see the FAQ question on Using Compression in HDF5. +For more information on compression, see the FAQ question on Using Compression in HDF5. \section secLBComDsetProg Programming Example @@ -720,7 +720,7 @@ Previous Chapter \ref LBQuiz - Next Chapter \ref LBCompiling Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics -/** @page LBCompiling Compiling HDF5 Applications +@page LBCompiling Compiling HDF5 Applications Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics
@@ -968,16 +968,15 @@ or on WINDOWS you may need to add the path to the bin folder to PATH. \section secLBCompilingCMake Compiling an Application with CMake \subsection subsecLBCompilingCMakeScripts CMake Scripts for Building Applications -Simple scripts are provided for building applications with different languages and options. -See CMake Scripts for Building Applications. +See Using CMake to Build Applications to build applications with different languages and options. For a more complete script (and to help resolve issues) see the script provided with the HDF5 Examples project. \subsection subsecLBCompilingCMakeExamples HDF5 Examples The installed HDF5 can be verified by compiling the HDF5 Examples project, included with the CMake built HDF5 binaries -in the share folder or you can go to the HDF5 Examples github repository. +in the share folder or you can go to the HDF5 Examples in the HDF5 github repository. -Go into the share directory and follow the instructions in USING_CMake_examples.txt to build the examples. +Go into the share directory and follow the instructions in Using CMake to Build Examples to build the examples. In general, users must first set the HDF5_ROOT environment variable to the installed location of the CMake configuration files for HDF5. For example, on Windows the following path might be set: @@ -1031,15 +1030,8 @@ For example, on Unix the log files will be in: There are log files for the configure, test, and build.
-Previous Chapter \ref LBQuizAnswers - Next Chapter \ref LBTraining +Previous Chapter \ref LBQuizAnswers Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics */ - -/ref LBTraining - -
-Navigate back: \ref index "Main" / \ref GettingStarted / \ref LearnBasics - -*/ diff --git a/doxygen/dox/LearnHDFView.dox b/doxygen/dox/LearnHDFView.dox index 2f0a0782e60..2a1ed610ef2 100644 --- a/doxygen/dox/LearnHDFView.dox +++ b/doxygen/dox/LearnHDFView.dox @@ -7,7 +7,7 @@ This tutorial enables you to get a feel for HDF5 by using the HDFView browser. I any programming experience. \section sec_learn_hv_install HDFView Installation -\li Download and install HDFView. It can be downloaded from the Download HDFView page. +\li Download and install HDFView. It can be downloaded from the Download HDFView page. \li Obtain the storm1.txt text file, used in the tutorial. \section sec_learn_hv_begin Begin Tutorial @@ -246,7 +246,7 @@ in the file). Please note that the chunk sizes used in this topic are for demonstration purposes only. For information on chunking and specifying an appropriate chunk size, see the -Chunking in HDF5 documentation. +Chunking in HDF5 documentation. Also see the HDF5 Tutorial topic on \ref secLBComDsetCreate.
@@ -68,7 +68,7 @@ A brief introduction to Parallel HDF5. If you are new to HDF5 please see the @re
-HDF5-1.10 New Features +New Features since HDF5-1.10 \li \ref VDS diff --git a/doxygen/dox/H5AC_cache_config_t.dox b/doxygen/dox/H5AC_cache_config_t.dox index 3faecd5d185..40d83301b2b 100644 --- a/doxygen/dox/H5AC_cache_config_t.dox +++ b/doxygen/dox/H5AC_cache_config_t.dox @@ -24,7 +24,7 @@ * Boolean field indicating whether the trace_file_name * field should be used to open a trace file for the cache. * - * \Emph{*** DEPRECATED ***} Use \Code{H5Fstart/stop} logging functions instead + * \Emph{*** DEPRECATED ***} Use \TText{H5Fstart/stop} logging functions instead * * The trace file is a debugging feature that allow the capture of * top level metadata cache requests for purposes of debugging and/or @@ -42,7 +42,7 @@ * Boolean field indicating whether the current trace * file (if any) should be closed. * - * \Emph{*** DEPRECATED ***} Use \Code{H5Fstart/stop} logging functions instead + * \Emph{*** DEPRECATED ***} Use \TText{H5Fstart/stop} logging functions instead * * See the above comments on the open_trace_file field. This field * should be set to \c FALSE unless there is an open trace file on the @@ -54,7 +54,7 @@ * Full path of the trace file to be opened if the * open_trace_file field is \c TRUE. * - * \Emph{*** DEPRECATED ***} Use \Code{H5Fstart/stop} logging functions instead + * \Emph{*** DEPRECATED ***} Use \TText{H5Fstart/stop} logging functions instead * * In the parallel case, an ascii representation of the mpi rank of * the process will be appended to the file name to yield a unique @@ -78,7 +78,7 @@ * soon as possible and monitor cache size. * * At present, evictions can only be disabled if automatic - * cache resizing is also disabled (that is, \Code{(incr_mode == + * cache resizing is also disabled (that is, \TText{(incr_mode == * H5C_incr__off ) && ( decr_mode == H5C_decr__off )}). There * is no logical reason why this should be so, but it simplifies * implementation and testing, and I can't think of any reason @@ -95,7 +95,7 @@ * \par initial_size * If enabled, this field contain the size the cache is * to be set to upon receipt of this structure. Needless to say, - * initial_size must lie in the closed interval \Code{[min_size, max_size]}. + * initial_size must lie in the closed interval \TText{[min_size, max_size]}. * * \par min_clean_fraction * \c double in the range 0 to 1 indicating the fraction @@ -105,13 +105,13 @@ * \par max_size * Maximum size to which the cache can be adjusted. The * supplied value must fall in the closed interval - * \Code{[MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]}. Also, \c max_size must + * \TText{[MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]}. Also, \c max_size must * be greater than or equal to \c min_size. * * \par min_size * Minimum size to which the cache can be adjusted. The * supplied value must fall in the closed interval - * \Code{[H5C__MIN_MAX_CACHE_SIZE, H5C__MAX_MAX_CACHE_SIZE]}. Also, \c min_size + * \TText{[H5C__MIN_MAX_CACHE_SIZE, H5C__MAX_MAX_CACHE_SIZE]}. Also, \c min_size * must be less than or equal to \c max_size. * * \par epoch_length @@ -122,7 +122,7 @@ * * At the end of an epoch, we discard prior hit rate data and start * collecting afresh. The epoch_length must lie in the closed - * interval \Code{[H5C__MIN_AR_EPOCH_LENGTH, H5C__MAX_AR_EPOCH_LENGTH]}. + * interval \TText{[H5C__MIN_AR_EPOCH_LENGTH, H5C__MAX_AR_EPOCH_LENGTH]}. 
* \endparblock * * @@ -201,8 +201,8 @@ * \li \c H5C_flash_incr__add_space: Let \c x be either the size of a newly * newly inserted entry, or the number of bytes by which the * size of an existing entry has been increased.\n - * If \Code{x > flash_threshold * current max cache size}, - * increase the current maximum cache size by \Code{x * flash_multiple} + * If \TText{x > flash_threshold * current max cache size}, + * increase the current maximum cache size by \TText{x * flash_multiple} * less any free space in the cache, and star a new epoch. For * now at least, pay no attention to the maximum increment. * @@ -213,7 +213,7 @@ * With a little thought, it should be obvious that the above flash * cache size increase algorithm is not sufficient for all circumstances * -- for example, suppose the user round robins through - * \Code{(1/flash_threshold) +1} groups, adding one data set to each on each + * \TText{(1/flash_threshold) +1} groups, adding one data set to each on each * pass. Then all will increase in size at about the same time, requiring * the max cache size to at least double to maintain acceptable * performance, however the above flash increment algorithm will not be @@ -319,7 +319,7 @@ * This field contains the number of epochs an entry must remain * unaccessed before it is evicted in an attempt to reduce the * cache size. If applicable, this field must lie in the range - * \Code{[1, H5C__MAX_EPOCH_MARKERS]}. + * \TText{[1, H5C__MAX_EPOCH_MARKERS]}. * \endparblock * * \par apply_empty_reserve @@ -412,4 +412,4 @@ * received from process zero.\n * To avoid possible messages from the past/future, all caches must * wait until all caches are done before leaving the sync point. - */ \ No newline at end of file + */ diff --git a/doxygen/dox/IntroHDF5.dox b/doxygen/dox/IntroHDF5.dox index 9ef55d3a573..acb497120da 100644 --- a/doxygen/dox/IntroHDF5.dox +++ b/doxygen/dox/IntroHDF5.dox @@ -262,7 +262,7 @@ FORTRAN routines are similar; they begin with “h5*” and end with “_f”.
  • Java routines are similar; the routine names begin with “H5*” and are prefixed with “H5.” as the class. Constants are in the HDF5Constants class and are prefixed with "HDF5Constants.". The function arguments -are usually similar, @see @ref HDF5LIB +are usually similar, see @ref HDF5LIB
  • For example: @@ -616,8 +616,7 @@ on the HDF-EOS Tools and Information Center pag \section secHDF5Examples Examples \li \ref LBExamples \li \ref ExAPI -\li Examples in the Source Code -\li Other Examples +\li Examples in the Source Code \section secHDF5ExamplesCompile How To Compile For information on compiling in C, C++ and Fortran, see: \ref LBCompiling diff --git a/doxygen/dox/IntroParHDF5.dox b/doxygen/dox/IntroParHDF5.dox index b8785d43c9d..58a6e7958b0 100644 --- a/doxygen/dox/IntroParHDF5.dox +++ b/doxygen/dox/IntroParHDF5.dox @@ -96,6 +96,8 @@ Once a file is opened by the processes of a communicator: \li Multiple processes write to the same dataset. \li Each process writes to an individual dataset. +@see \ref collective_calls + Please refer to the Supported Configuration Features Summary in the release notes for the current release of HDF5 for an up-to-date list of the platforms that we support Parallel HDF5 on. diff --git a/doxygen/dox/LearnBasics.dox b/doxygen/dox/LearnBasics.dox index ed83b367b6b..4db515c1a57 100644 --- a/doxygen/dox/LearnBasics.dox +++ b/doxygen/dox/LearnBasics.dox @@ -59,7 +59,7 @@ These examples (C, C++, Fortran, Java, Python) are provided in the HDF5 source c
    Create a file C Fortran C++ Java Python +C Fortran C++ Java Python
    Create a group C Fortran C++ Java Python +C Fortran C++ Java Python
    Create datasets in a group C Fortran C++ Java Python +C Fortran C++ Java Python
    Create a chunked and compressed dataset C Fortran C++ Java Python +C Fortran C++ Java Python
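For reference, a minimal C sketch of the first task in the table above (creating a file) might look like the following. This is an illustration only, not one of the linked examples; the file name is arbitrary.

\code
#include "hdf5.h"

int main(void)
{
    /* Create (or truncate) an HDF5 file with default property lists. */
    hid_t file_id = H5Fcreate("example.h5", H5F_ACC_TRUNC,
                              H5P_DEFAULT, H5P_DEFAULT);
    if (file_id < 0)
        return 1;

    /* ... create groups, datasets, and attributes here ... */

    H5Fclose(file_id);
    return 0;
}
\endcode

For Parallel HDF5 (see the IntroParHDF5 hunk above), the same call would normally be preceded by creating a file access property list and setting the MPI-IO driver on it with H5Pset_fapl_mpio(), so that all processes in the communicator create the file collectively.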
    Many VOL connectors are listed on The HDF Group's VOL plugin registration page, located at: -Registered VOL Connectors. +Registered VOL Connectors. Not all of these VOL connectors are supported by The HDF Group and the level of completeness varies, but the connectors found there can serve as examples of working implementations @@ -195,7 +195,7 @@ contact help@hdfgroup.org for help with this. We name you've chosen will appear on the registered VOL connectors page. As noted above, registered VOL connectors will be listed at: -Registered VOL Connectors +Registered VOL Connectors A new \b conn_version field has been added to the class struct for 1.13. This field is currently not used by the library so its use is determined by the connector author. Best practices for this field will be determined diff --git a/doxygen/dox/ViewTools.dox b/doxygen/dox/ViewTools.dox index 9ae42fe9f40..43686751bfb 100644 --- a/doxygen/dox/ViewTools.dox +++ b/doxygen/dox/ViewTools.dox @@ -986,7 +986,7 @@ See this section for more info \subsubsection subsubsecViewToolsViewDtypes_newref New References References were reworked in HDF5 1.12.0. The new reference datatype is #H5T_STD_REF. The old reference datatypes are deprecated. -@see sec_reference. +see @ref sec_reference. \subsubsection subsubsecViewToolsViewDtypes_objref Object Reference An Object Reference is a reference to an entire object (attribute, dataset, group, or named datatype). diff --git a/doxygen/dox/api-compat-macros.dox b/doxygen/dox/api-compat-macros.dox index 4a1578d7748..a899ef1165d 100644 --- a/doxygen/dox/api-compat-macros.dox +++ b/doxygen/dox/api-compat-macros.dox @@ -52,36 +52,36 @@ functions were retained and renamed to have an earlier number (for, e.g., '1') at the end of the original function name. - For example, consider the function \Code{H5Lvisit} in HDF5 release 1.10 + For example, consider the function \TText{H5Lvisit} in HDF5 release 1.10 as compared with 1.12:
    Original function name and signature in 1.10.0 - \Code{herr_t H5Lvisit(hid_t grp_id, H5_index_t idx_type, H5_iter_order_t order, H5L_iterate_t op, void *op_data)} + \TText{herr_t H5Lvisit(hid_t grp_id, H5_index_t idx_type, H5_iter_order_t order, H5L_iterate_t op, void *op_data)}
    Updated function and signature, introduced in release 1.12.0 - \Code{herr_t H5Lvisit2(hid_t group_id, H5_index_t idx_type, H5_iter_order_t order, H5L_iterate2_t op, void *op_data)} + \TText{herr_t H5Lvisit2(hid_t group_id, H5_index_t idx_type, H5_iter_order_t order, H5L_iterate2_t op, void *op_data)}
    Original function and signature, renamed in release 1.12.0 - \Code{herr_t H5Lvisit1(hid_t group_id, H5_index_t idx_type, H5_iter_order_t order, H5L_iterate1_t op, void *op_data)} + \TText{herr_t H5Lvisit1(hid_t group_id, H5_index_t idx_type, H5_iter_order_t order, H5L_iterate1_t op, void *op_data)}
    API compatibility macro, introduced in release 1.12.0 - \Code{H5Lvisit} -

    The macro, \Code{H5Lvisit}, will be mapped to either \Code{H5Lvisit1} or - \Code{H5Lvisit2}. The mapping is determined by a combination of the + \TText{H5Lvisit} +

    The macro, \TText{H5Lvisit}, will be mapped to either \TText{H5Lvisit1} or + \TText{H5Lvisit2}. The mapping is determined by a combination of the configuration options use to build the HDF5 library and compile-time options used to build the application. The calling parameters used with the - \Code{H5Lvisit} compatibility macro should match the number and type of the - function the macros will be mapped to (\Code{H5Lvisit1} or \Code{H5Lvisit2}). + \TText{H5Lvisit} compatibility macro should match the number and type of the + function the macros will be mapped to (\TText{H5Lvisit1} or \TText{H5Lvisit2}).

    The function names ending in '1' or '2' are referred to as \Emph{versioned names}, and the corresponding functions are referred to as \Emph{versioned functions}. @@ -135,7 +135,7 @@

    \subsection lib-options Library Mapping Options - When the HDF5 library is built, \Code{configure} flags can be used to control the API + When the HDF5 library is built, \TText{configure} flags can be used to control the API compatibility macro mapping behavior exhibited by the library. This behavior can be overridden by application and function mappings. One configure flag excludes deprecated functions from the HDF5 library, making them unavailable to applications linked with the @@ -144,85 +144,85 @@
    Table 1: Library Mapping Options - - - + + + - - + + - - + + - - + + - - + + - - + +
    \Code{configure} flagMacros map to release
    (versioned function; \Code{H5Lvisit} shown)
    Deprecated functions available?
    (\Code{H5Lvisit1})
    \TText{configure} flagMacros map to release
    (versioned function; \TText{H5Lvisit} shown)
    Deprecated functions available?
    (\TText{H5Lvisit1})
    \Code{--with-default-api-version=v112}
    (the default in 1.12)
    1.12.x (\Code{H5Lvisit2})\TText{--with-default-api-version=v112}
    (the default in 1.12)
    1.12.x (\TText{H5Lvisit2}) yes
    \Code{--with-default-api-version=v110}1.10.x (\Code{H5Lvisit1})\TText{--with-default-api-version=v110}1.10.x (\TText{H5Lvisit1}) yes
    \Code{--with-default-api-version=v18}1.8.x (\Code{H5Lvisit1})\TText{--with-default-api-version=v18}1.8.x (\TText{H5Lvisit1}) yes
    \Code{--with-default-api-version=v16}1.6.x (\Code{H5Lvisit1})\TText{--with-default-api-version=v16}1.6.x (\TText{H5Lvisit1}) yes
    \Code{--disable-deprecated-symbols}1.12.x (\Code{H5Lvisit2})\TText{--disable-deprecated-symbols}1.12.x (\TText{H5Lvisit2}) no
    - Refer to the file \Code{libhdf5.settings} in the directory where the HDF5 library is - installed to determine the \Code{configure} flags used to build the library. In particular, + Refer to the file \TText{libhdf5.settings} in the directory where the HDF5 library is + installed to determine the \TText{configure} flags used to build the library. In particular, look for the two lines shown here under \Emph{Features}: - \Code{Default API mapping: v112} + \TText{Default API mapping: v112} - \Code{With deprecated public symbols: yes} + \TText{With deprecated public symbols: yes} \subsection app-options Application Mapping Options When an application using HDF5 APIs is built and linked with the HDF5 library, - compile-time options to \Code{h5cc} can be used to control the API compatibility + compile-time options to \TText{h5cc} can be used to control the API compatibility macro mapping behavior exhibited by the application. The application mapping overrides the behavior specified by the library mapping, and can be overridden on a function-by-function basis by the function mappings. - If the HDF5 library was configured with the \Code{--disable-deprecated-symbols} flag, then + If the HDF5 library was configured with the \TText{--disable-deprecated-symbols} flag, then the deprecated functions will not be available, regardless of the application mapping options.
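To illustrate the application mapping in code (a hedged sketch, not part of this patch; the wrapper function is hypothetical), a call site written against the 1.10 API can be compiled unchanged under a 1.12 library by selecting the 1.10 mapping:

\code
#include "hdf5.h"

/* Written for the 1.10 API, where H5Oget_info takes two arguments.
 * Built against a 1.12 library with
 *     h5cc ... -DH5_USE_110_API ...
 * the H5Oget_info macro and the H5O_info_t struct resolve to their
 * 1.10 versions, so this compiles without source changes.  Under the
 * default 1.12 mapping, H5Oget_info resolves to H5Oget_info3, which
 * takes an additional `fields` argument, and the call site would have
 * to be updated. */
herr_t get_object_info(hid_t loc_id, H5O_info_t *oinfo)
{
    return H5Oget_info(loc_id, oinfo);
}
\endcode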
    Table 2: Application Mapping Options - - - + + + - - + + - - + + - - + + - - + + - - + +
    \Code{h5cc} optionMacros map to release
    (versioned function; \Code{H5Lvisit} shown)
    Deprecated functions available?
    (\Code{H5Lvisit1})
    \TText{h5cc} optionMacros map to release
    (versioned function; \TText{H5Lvisit} shown)
    Deprecated functions available?
    (\TText{H5Lvisit1})
    \Code{-DH5_USE_112_API}
    \Emph{(Default behavior if no option specified.)}
    1.12.x (\Code{HLvisit2})\TText{-DH5_USE_112_API}
    \Emph{(Default behavior if no option specified.)}
    1.12.x (\TText{H5Lvisit2})
    \Emph{*if available in library}
    \Code{-DH5_USE_110_API}1.10.x (\Code{HLvisit1})\TText{-DH5_USE_110_API}1.10.x (\TText{HLvisit1}) yes*
    \Emph{*if available in library}
    \Code{-DH5_USE_18_API}1.8.x (\Code{H5Lvisit1})\TText{-DH5_USE_18_API}1.8.x (\TText{H5Lvisit1}) yes*
    \Emph{*if available in library}
    \Code{-DH5_USE_16_API}1.6.x (\Code{H5Lvisit1})\TText{-DH5_USE_16_API}1.6.x (\TText{H5Lvisit1}) yes*
    \Emph{*if available in library}
    \Code{-DH5_NO_DEPRECATED_SYMBOLS}1.10.x (\Code{H5Lvisit1})\TText{-DH5_NO_DEPRECATED_SYMBOLS}1.10.x (\TText{H5Lvisit1}) no
    @@ -234,15 +234,15 @@ underlying functions on a function-by-function basis. The function mappings override the library and application mappings discussed earlier. - If the HDF5 library was configured with the \Code{--disable-deprecated-symbols} - flag, or \Code{-DH5_NO_DEPRECATED_SYMBOLS} is used to compile the application, + If the HDF5 library was configured with the \TText{--disable-deprecated-symbols} + flag, or \TText{-DH5_NO_DEPRECATED_SYMBOLS} is used to compile the application, then the deprecated functions will not be available, regardless of the function mapping options. For every function with multiple available versions, a compile-time version flag can be defined to selectively map the function macro to the desired versioned function. The function mapping consists of the function name followed by - "\Code{_vers}" which is mapped by number to a specific function or + "\TText{_vers}" which is mapped by number to a specific function or struct: @@ -250,33 +250,33 @@ - - - + + + - - + +
    Function Mapping Mapped to function or struct
    \Code{H5xxx}\Code{H5xxx_vers=1}\Code{H5xxx1}
    \TText{H5xxx}\TText{H5xxx_vers=1}\TText{H5xxx1}
    \Code{H5xxx_vers=2}\Code{H5xxx2}\TText{H5xxx_vers=2}\TText{H5xxx2}
    - For example, in version 1.10 the \Code{H5Rreference} macro can be mapped to - either \Code{H5Rreference1} or \Code{H5Rreference2}. When used, the value of - the \Code{H5Rreference_vers} compile-time version flag determines which + For example, in version 1.10 the \TText{H5Rreference} macro can be mapped to + either \TText{H5Rreference1} or \TText{H5Rreference2}. When used, the value of + the \TText{H5Rreference_vers} compile-time version flag determines which function will be called:
      -
    • When \Code{H5Rreference_vers} is set to \Code{1}, the macro \Code{H5Rreference} - will be mapped to \Code{H5Rreference1}.
      - \Code{H5cc ... -DH5Rreference_vers=1 ...}
    • -
    • When \Code{H5Rdereference_vers} is set to \Code{2}, the macro \Code{H5Rdereference} - will be mapped to \Code{H5Rdereference2}.
      - \Code{h5cc ... -DH5Rreference_vers=2 ...}
    • -
    • When \Code{H5Rreference_vers} is not set, the macro \Code{H5Rreference} will be - mapped to either \Code{H5Rreference1} or \Code{H5Rreference2}, based on the +
    • When \TText{H5Rdereference_vers} is set to \TText{1}, the macro \TText{H5Rdereference} + will be mapped to \TText{H5Rdereference1}.
      + \TText{h5cc ... -DH5Rdereference_vers=1 ...}
    • +
    • When \TText{H5Rdereference_vers} is set to \TText{2}, the macro \TText{H5Rdereference} + will be mapped to \TText{H5Rdereference2}.
      + \TText{h5cc ... -DH5Rdereference_vers=2 ...}
    • +
    • When \TText{H5Rdereference_vers} is not set, the macro \TText{H5Rdereference} will be + mapped to either \TText{H5Rdereference1} or \TText{H5Rdereference2}, based on the application mapping, if one was specified, or on the library mapping.
      - \Code{h5cc ... }
    • + \TText{h5cc ... }
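A minimal sketch (not part of this patch; the wrapper function name is hypothetical) of a call site written for the version-2 signature makes the effect of the version flag concrete:

\code
#include "hdf5.h"

/* Built with:  h5cc ... -DH5Rdereference_vers=2 ...
 * the H5Rdereference macro resolves to H5Rdereference2, whose signature
 * includes an object access property list.  If the application were
 * instead built with -DH5Rdereference_vers=1, this call would have to
 * drop the H5P_DEFAULT argument to match the H5Rdereference1 signature. */
hid_t open_referenced_object(hid_t file_id, const hobj_ref_t *ref)
{
    return H5Rdereference(file_id, H5P_DEFAULT, H5R_OBJECT, ref);
}
\endcode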
    \warning Please be aware that some function mappings use mapped structures, as @@ -285,10 +285,10 @@ plus EVERY function that uses the mapped structure, whether or not that function is used in the application. \Emph{In 1.12, mappings of structures are used by the H5L and H5O function mappings.}\n\n - For example, an application \Code{application.c} only calls \Code{H5Lvisit}, - \Code{H5Ovisit}, and \Code{H5Oget_info_by_name}. To compile this application + For example, an application \TText{application.c} only calls \TText{H5Lvisit}, + \TText{H5Ovisit}, and \TText{H5Oget_info_by_name}. To compile this application with 1.10 APIs in 1.12 with the function specific mappings, then not only must - \Code{H5Lvisit_vers}, \Code{H5Ovisit_vers}, and \Code{H5Oget_info_by_name_vers} + \TText{H5Lvisit_vers}, \TText{H5Ovisit_vers}, and \TText{H5Oget_info_by_name_vers} be specified on the command line, but the mapped structures and every function that uses the mapped structures must be included, as well. The full compile line is shown below: @@ -303,26 +303,26 @@ \subsubsection fun-options-112 Function Mapping Options in Releases 1.12.x - + @@ -330,14 +330,14 @@ @@ -345,14 +345,14 @@ @@ -360,14 +360,14 @@ @@ -375,14 +375,14 @@ @@ -390,14 +390,14 @@ @@ -405,28 +405,28 @@ @@ -434,14 +434,14 @@ @@ -449,13 +449,13 @@ @@ -463,14 +463,14 @@ @@ -478,12 +478,12 @@ @@ -491,12 +491,12 @@ @@ -508,84 +508,84 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - +
    Macro
    (\Code{H5xxx})
    Macro
    (\TText{H5xxx})
    Default function used if no macro specified -
    • Function/struct mapping:\Code{H5xxx_vers=N}
    +
    • Function/struct mapping: \TText{H5xxx_vers=N}
    Function used if specifying 1.10 -
    • Function/struct mapping: \Code{H5xxx_vers=1}
    +
    • Function/struct mapping: \TText{H5xxx_vers=1}
    H5Lget_info() H5Lget_info2()
      -
    • Function mapping:\Code{H5Lget_info_vers=2}
    • -
    • Struct mapping:\Code{H5L_info_t_vers=2}
    • +
    • Function mapping: \TText{H5Lget_info_vers=2}
    • +
    • Struct mapping: \TText{H5L_info_t_vers=2}
    H5Lget_info1()
      -
    • Function mapping \Code{H5Lget_info_vers=1}
    • -
    • Struct mapping: \Code{H5L_info_t_vers=1}
    • +
    • Function mapping: \TText{H5Lget_info_vers=1}
    • +
    • Struct mapping: \TText{H5L_info_t_vers=1}
    H5Lget_info_by_idx() H5Lget_info_by_idx2()
      -
    • Function mapping: \Code{H5Lget_info_by_idx_vers=2}
    • -
    • Struct mapping: \Code{H5L_info_t_vers=2}
    • +
    • Function mapping: \TText{H5Lget_info_by_idx_vers=2}
    • +
    • Struct mapping: \TText{H5L_info_t_vers=2}
    H5Lget_info_by_idx1()
      -
    • Function mapping: \Code{H5Lget_info_by_idx_vers=1}
    • -
    • Struct mapping: \Code{H5L_info_t_vers=1}
    • +
    • Function mapping: \TText{H5Lget_info_by_idx_vers=1}
    • +
    • Struct mapping: \TText{H5L_info_t_vers=1}
    H5Literate() H5Literate2()
      -
    • Function mapping: \Code{H5Literate_vers=2}
    • -
    • Struct mapping: \Code{H5L_iterate_t_vers=2}
    • +
    • Function mapping: \TText{H5Literate_vers=2}
    • +
    • Struct mapping: \TText{H5L_iterate_t_vers=2}
    H5Literate1()
      -
    • Function mapping: \Code{H5Literate_vers=1}
    • -
    • Struct mapping: \Code{H5L_iterate_t_vers=1}
    • +
    • Function mapping: \TText{H5Literate_vers=1}
    • +
    • Struct mapping: \TText{H5L_iterate_t_vers=1}
    H5Literate_by_name() H5Literate_by_name2()
      -
    • Function mapping: \Code{H5Literate_by_name_vers=2}
    • -
    • Struct mapping: \Code{H5L_iterate_t_vers=2}
    • +
    • Function mapping: \TText{H5Literate_by_name_vers=2}
    • +
    • Struct mapping: \TText{H5L_iterate_t_vers=2}
    H5Literate_by_name1()
      -
    • Function mapping: \Code{H5Literate_by_name_vers=1}
    • -
    • Struct mapping: \Code{H5L_iterate_t_vers=1}
    • +
    • Function mapping: \TText{H5Literate_by_name_vers=1}
    • +
    • Struct mapping: \TText{H5L_iterate_t_vers=1}
    H5Lvisit() H5Lvisit2()
      -
    • Function mapping: \Code{H5Lvisit_vers=2}
    • -
    • Struct mapping: \Code{H5L_iterate_t_vers=2}
    • +
    • Function mapping: \TText{H5Lvisit_vers=2}
    • +
    • Struct mapping: \TText{H5L_iterate_t_vers=2}
    H5Lvisit1()
      -
    • Function mapping: \Code{H5Lvisit_vers=1}
    • -
    • Struct mapping: \Code{H5L_iterate_t_vers=1}
    • +
    • Function mapping: \TText{H5Lvisit_vers=1}
    • +
    • Struct mapping: \TText{H5L_iterate_t_vers=1}
    H5Lvisit_by_name() H5Lvisit_by_name2()
      -
    • Function mapping: \Code{H5Lvisit_by_name_vers=2}
    • -
    • Struct mapping: \Code{H5L_iterate_t_vers=2}
    • +
    • Function mapping: \TText{H5Lvisit_by_name_vers=2}
    • +
    • Struct mapping: \TText{H5L_iterate_t_vers=2}
    H5Lvisit_by_name1()
      -
    • Function mapping: \Code{H5Lvisit_by_name_vers=1}
    • -
    • Struct mapping: \Code{H5L_iterate_t_vers=1}
    • +
    • Function mapping: \TText{H5Lvisit_by_name_vers=1}
    • +
    • Struct mapping: \TText{H5L_iterate_t_vers=1}
    H5Oget_info() H5Oget_info3()
      -
    • Function mapping: \Code{H5Oget_info_vers=3}
    • -
    • Struct mapping: \Code{H5O_info_t_vers=2}
    • +
    • Function mapping: \TText{H5Oget_info_vers=3}
    • +
    • Struct mapping: \TText{H5O_info_t_vers=2}
    H5Oget_info1()
      -
    • Function mapping: \Code{H5Oget_info_vers=1}
    • -
    • Struct mapping: \Code{H5O_info_t_vers=1}
    • +
    • Function mapping: \TText{H5Oget_info_vers=1}
    • +
    • Struct mapping: \TText{H5O_info_t_vers=1}
    H5Oget_info_by_idx() H5Oget_info_by_idx3() -
    • Function mapping: \Code{H5Oget_info_by_idx_vers=3}
    • -
    • Struct mapping: \Code{H5O_info_t_vers=2}
    • +
      • Function mapping: \TText{H5Oget_info_by_idx_vers=3}
      • +
      • Struct mapping: \TText{H5O_info_t_vers=2}
    H5Oget_info_by_idx1()
      -
    • Function mapping: \Code{H5Oget_info_by_idx_vers=1}
    • -
    • Struct mapping: \Code{H5O_info_t_vers=1}
    • +
    • Function mapping: \TText{H5Oget_info_by_idx_vers=1}
    • +
    • Struct mapping: \TText{H5O_info_t_vers=1}
    H5Oget_info_by_name() H5Oget_info_by_name3()
      -
    • Function mapping: \Code{H5O_get_info_by_name_vers=3}
    • -
    • Struct mapping: \Code{H5O_info_t_vers=2}
    • +
    • Function mapping: \TText{H5Oget_info_by_name_vers=3}
    • +
    • Struct mapping: \TText{H5O_info_t_vers=2}
    H5Oget_info_by_name1()
      -
    • Function mapping: \Code{H5O_get_info_by_name_vers=1}
    • -
    • Struct mapping: \Code{H5O_info_t_vers=1}
    • +
    • Function mapping: \TText{H5Oget_info_by_name_vers=1}
    • +
    • Struct mapping: \TText{H5O_info_t_vers=1}
    H5Ovisit() H5Ovisit3()
      -
    • Function mapping: \Code{H5Ovisit_vers=3}
    • -
    • Struct mapping: \Code{H5O_iterate_t_vers=2}
    • +
    • Function mapping: \TText{H5Ovisit_vers=3}
    • +
    • Struct mapping: \TText{H5O_iterate_t_vers=2}
    H5Ovisit1() -
    • Function mapping: \Code{H5Ovisit_vers=1}
    • -
    • Struct mapping: \Code{H5O_iterate_t_vers=1}
    • +
      • Function mapping: \TText{H5Ovisit_vers=1}
      • +
      • Struct mapping: \TText{H5O_iterate_t_vers=1}
    H5Ovisit_by_name() H5Ovisit_by_name3()
      -
    • Function mapping: \Code{H5Ovisit_by_name_vers=3}
    • -
    • Struct mapping: \Code{H5O_iterate_t_vers=2}
    • +
    • Function mapping: \TText{H5Ovisit_by_name_vers=3}
    • +
    • Struct mapping: \TText{H5O_iterate_t_vers=2}
    H5Ovisit_by_name1()
      -
    • Function mapping: \Code{H5Ovisit_by_name_vers=1}
    • -
    • Struct mapping: \Code{H5O_iterate_t_vers=1}
    • +
    • Function mapping: \TText{H5Ovisit_by_name_vers=1}
    • +
    • Struct mapping: \TText{H5O_iterate_t_vers=1}
    H5Pencode() H5Pencode2()
      -
    • Function mapping: \Code{H5Pencode_vers=2}
    • +
    • Function mapping: \TText{H5Pencode_vers=2}
    H5Pencode1()
      -
    • Function mapping: \Code{H5Pencode_vers=1}
    • +
    • Function mapping: \TText{H5Pencode_vers=1}
    H5Sencode() H5Sencode2()
      -
    • Function mapping: \Code{H5Sencode_vers=2}
    • +
    • Function mapping: \TText{H5Sencode_vers=2}
    H5Sencode1()
      -
    • Function mapping: \Code{H5Sencode_vers=1}
    • +
    • Function mapping: \TText{H5Sencode_vers=1}
    Macro Default function used
    (if no macro specified)
    Introduced in\Code{h5cc} version flag and value\TText{h5cc} version flag and value Mapped to function or struct
    H5Rdereference() H5Rdereference2() HDF5-1.10.0\Code{-DH5Rdereference_vers=1}\TText{-DH5Rdereference_vers=1} H5Rdereference1()
    \Code{-DH5Rdereference_vers=2}\TText{-DH5Rdereference_vers=2} H5Rdereference2()
    H5Fget_info() H5Fget_info2() HDF5-1.10.0\Code{-DH5Fget_info_vers=1}\TText{-DH5Fget_info_vers=1} H5Fget_info1() with struct \ref H5F_info1_t
    \Code{-DH5Fget_info_vers=2}\TText{-DH5Fget_info_vers=2} H5Fget_info2() with struct \ref H5F_info2_t
    H5Oget_info() H5Oget_info1() HDF5-1.10.3\Code{-DH5Oget_info_vers=1}\TText{-DH5Oget_info_vers=1} H5Oget_info1()
    \Code{-DH5Oget_info_vers=2}\TText{-DH5Oget_info_vers=2} H5Oget_info2()
    H5Oget_info_by_idx() H5Oget_info_by_idx1() HDF5-1.10.3\Code{-DH5Oget_info_by_idx_vers=1}\TText{-DH5Oget_info_by_idx_vers=1} H5Oget_info_by_idx1()
    \Code{-DH5Oget_info_by_idx_vers=2}\TText{-DH5Oget_info_by_idx_vers=2} H5Oget_info_by_idx2()
    H5Oget_info_by_name() H5Oget_info_by_name1() HDF5-1.10.3\Code{-DH5Oget_info_by_name_vers=1}\TText{-DH5Oget_info_by_name_vers=1} H5Oget_info_by_name1()
    \Code{-DH5Oget_info_by_name_vers=2}\TText{-DH5Oget_info_by_name_vers=2} H5Oget_info_by_name2()
    H5Ovisit() H5Ovisit1() HDF5-1.10.3\Code{-DH5Ovisit_vers=1}\TText{-DH5Ovisit_vers=1} H5Ovisit1()
    \Code{-DH5Ovisit_vers=2}\TText{-DH5Ovisit_vers=2} H5Ovisit2()
    H5Ovisit_by_name() H5Ovisit_by_name1() HDF5-1.10.3\Code{-DH5Ovisit_by_name_vers=1}\TText{-DH5Ovisit_by_name_vers=1} H5Ovisit_by_name1()
    \Code{-DH5Ovisit_by_name_vers=2}\TText{-DH5Ovisit_by_name_vers=2} H5Ovisit_by_name2()
    @@ -606,208 +606,208 @@ H5Acreate() - \Code{DH5Acreate_vers=1} + \TText{DH5Acreate_vers=1} H5Acreate1() - \Code{DH5Acreate_vers=2} + \TText{DH5Acreate_vers=2} H5Acreate2() H5Aiterate() - \Code{DH5Aiterate_vers=1} + \TText{DH5Aiterate_vers=1} H5Aiterate1()
    with struct \ref H5A_operator1_t - \Code{DH5Aiterate_vers=2} + \TText{DH5Aiterate_vers=2} H5Aiterate2()
    with struct \ref H5A_operator2_t H5Dcreate() - \Code{DH5Dcreate_vers=1} + \TText{DH5Dcreate_vers=1} H5Dcreate1() - \Code{DH5Dcreate_vers=2} + \TText{DH5Dcreate_vers=2} H5Dcreate2() H5Dopen() - \Code{DH5Dopen_vers=1} + \TText{DH5Dopen_vers=1} H5Dopen1() - \Code{DH5Dopen_vers=2} + \TText{DH5Dopen_vers=2} H5Dopen2() H5Eclear() - \Code{DH5Eclear_vers=1} + \TText{DH5Eclear_vers=1} H5Eclear1() - \Code{DH5Eclear_vers=2} + \TText{DH5Eclear_vers=2} H5Eclear2() H5Eprint() - \Code{DH5Eprint_vers=1} + \TText{DH5Eprint_vers=1} H5Eprint1() - \Code{DH5Eprint_vers=2} + \TText{DH5Eprint_vers=2} H5Eprint2() H5Epush() - \Code{DH5Epush_vers=1} + \TText{DH5Epush_vers=1} H5Epush1() - \Code{DH5Epush_vers=2} + \TText{DH5Epush_vers=2} H5Epush2() H5Eset_auto() - \Code{DH5Eset_auto_vers=1} + \TText{DH5Eset_auto_vers=1} H5Eset_auto1() - \Code{DH5Eset_auto_vers=2} + \TText{DH5Eset_auto_vers=2} H5Eset_auto2() H5Eget_auto() - \Code{DH5Eget_auto_vers=1} + \TText{DH5Eget_auto_vers=1} H5Eget_auto1() - \Code{DH5Eget_auto_vers=2} + \TText{DH5Eget_auto_vers=2} H5Eget_auto2() \ref H5E_auto_t
    struct for H5Eset_auto()
    and H5Eget_auto() - \Code{DH5E_auto_t_vers=1} + \TText{DH5E_auto_t_vers=1} \ref H5E_auto1_t - \Code{DH5E_auto_t_vers=2} + \TText{DH5E_auto_t_vers=2} \ref H5E_auto2_t H5Ewalk() - \Code{DH5Ewalk_vers=1} + \TText{DH5Ewalk_vers=1} H5Ewalk1()
    with callback \ref H5E_walk1_t
    and struct \ref H5E_error1_t - \Code{DH5Ewalk_vers=2} + \TText{DH5Ewalk_vers=2} H5Ewalk2()
    with callback \ref H5E_walk2_t
    and struct \ref H5E_error2_t H5Gcreate() - \Code{DH5Gcreate_vers=1} + \TText{DH5Gcreate_vers=1} H5Gcreate1() - \Code{DH5Gcreate_vers=2} + \TText{DH5Gcreate_vers=2} H5Gcreate2() H5Gopen() - \Code{DH5Gopen_vers=1} + \TText{DH5Gopen_vers=1} H5Gopen1() - \Code{DH5Gopen_vers=2} + \TText{DH5Gopen_vers=2} H5Gopen2() H5Pget_filter() - \Code{DH5Pget_filter_vers=1} + \TText{DH5Pget_filter_vers=1} H5Pget_filter1() - \Code{DH5Pget_filter_vers=2} + \TText{DH5Pget_filter_vers=2} H5Pget_filter2() H5Pget_filter_by_id() - \Code{DH5Pget_filter_by_id_vers=1} + \TText{DH5Pget_filter_by_id_vers=1} H5Pget_filter_by_id1() - \Code{DH5Pget_filter_by_id_vers=2} + \TText{DH5Pget_filter_by_id_vers=2} H5Pget_filter_by_id2() H5Pinsert() - \Code{DH5Pinsert_vers=1} + \TText{DH5Pinsert_vers=1} H5Pinsert1() - \Code{DH5Pinsert_vers=2} + \TText{DH5Pinsert_vers=2} H5Pinsert2() H5Pregister() - \Code{DH5Pregister_vers=1} + \TText{DH5Pregister_vers=1} H5Pregister1() - \Code{DH5Pregister_vers=2} + \TText{DH5Pregister_vers=2} H5Pregister2() H5Rget_obj_type() - \Code{DH5Rget_obj_typevers=1} + \TText{DH5Rget_obj_typevers=1} H5Rget_obj_type1() - \Code{DH5Rget_obj_type_vers=2} + \TText{DH5Rget_obj_type_vers=2} H5Rget_obj_type2() H5Tarray_create() - \Code{DH5Tarray_create_vers=1} + \TText{DH5Tarray_create_vers=1} H5Tarray_create1() - \Code{DH5Tarray_create_vers=2} + \TText{DH5Tarray_create_vers=2} H5Tarray_create2() H5Tcommit() - \Code{DH5Tcommit_vers=1} + \TText{DH5Tcommit_vers=1} H5Tcommit1() - \Code{DH5Tcommit_vers=2} + \TText{DH5Tcommit_vers=2} H5Tcommit2() H5Tget_array_dims() - \Code{DH5Tget_array_dims_vers=1} + \TText{DH5Tget_array_dims_vers=1} H5Tget_array_dims1() - \Code{DH5Tget_array_dims_vers=2} + \TText{DH5Tget_array_dims_vers=2} H5Tget_array_dims2() H5Topen() - \Code{DH5Topen_vers=1} + \TText{DH5Topen_vers=1} H5Topen1() - \Code{DH5Topen_vers=2} + \TText{DH5Topen_vers=2} H5Topen2() \ref H5Z_class_t struct for H5Zregister() - \Code{DH5Z_class_t_vers=1} + \TText{DH5Z_class_t_vers=1} \ref H5Z_class1_t - \Code{DH5Z_class_t_vers=2} + \TText{DH5Z_class_t_vers=2} \ref H5Z_class2_t @@ -819,8 +819,8 @@ h5cc ... -DH5Rdereference_vers=1 -DH5Fget_info_vers=2 ... \endcode As a result of the function and struct mappings in this compile example, all - occurrences of the macro \Code{H5Rdereference} will be mapped to \Code{H5Rdereference1} - and all occurrences of the macro \Code{H5Fget_info} will be mapped to \Code{H5Fget_info2} + occurrences of the macro \TText{H5Rdereference} will be mapped to \TText{H5Rdereference1} + and all occurrences of the macro \TText{H5Fget_info} will be mapped to \TText{H5Fget_info2} for the application being built. The function and struct mappings can be used to guarantee that a given API compatibility @@ -832,17 +832,17 @@ As noted earlier, the function mappings can only reference versioned functions that are included in the HDF5 library, as determined by the configure flag used to build the library. For example, if the HDF5 library being linked with the application was built - with the \Code{--disable-deprecated-symbols} option, version 1 of the underlying functions - would not be available, and the example above that defined \Code{H5Rdereference_vers=1} + with the \TText{--disable-deprecated-symbols} option, version 1 of the underlying functions + would not be available, and the example above that defined \TText{H5Rdereference_vers=1} would not be supported. - The function mappings do not negate any available functions. 
If \Code{H5Rdereference1} + The function mappings do not negate any available functions. If \TText{H5Rdereference1} is available in the installed version of the HDF5 library, and the application was not - compiled with the \Code{-DH5_NO_DEPRECATED_SYMBOLS} flag, the function \Code{H5Rdereference1} + compiled with the \TText{-DH5_NO_DEPRECATED_SYMBOLS} flag, the function \TText{H5Rdereference1} will remain available to the application through its versioned name. Similarly, - \Code{H5Rdereference2} will remain available to the application as \Code{H5Rdereference2}. - The function mapping version flag \Code{H5Rdereference_vers} only controls the mapping of - the API compatibility macro \Code{H5Rdereference} to one of the two available functions. + \TText{H5Rdereference2} will remain available to the application as \TText{H5Rdereference2}. + The function mapping version flag \TText{H5Rdereference_vers} only controls the mapping of + the API compatibility macro \TText{H5Rdereference} to one of the two available functions. This can be especially useful in any case where the programmer does not have direct control over global macro definitions, such as when writing code meant to be copied to multiple @@ -857,8 +857,8 @@ These macros were strictly a forward-looking feature at that time; they were not necessary for compatibility in 1.6.x. These macros were created at that time to enable writing code that could be used with any version of the library after 1.6.8 - and any library compilation options except \Code{H5_NO_DEPRECATED_SYMBOLS}, by always - using the '1' version of versioned functions and types. For example, \Code{H5Dopen1} + and any library compilation options except \TText{H5_NO_DEPRECATED_SYMBOLS}, by always + using the '1' version of versioned functions and types. For example, \TText{H5Dopen1} will always be interpreted in exactly the same manner by any version of the library since 1.6.8. @@ -867,23 +867,23 @@ of an existing application to a new HDF5 release. An incremental migration plan is outlined here:
      -
    1. Build the HDF5 library without specifying any library mapping \Code{configure} +
    2. Build the HDF5 library without specifying any library mapping \TText{configure} flag. In this default mode, the 1.6.x, 1.8.x, and 1.10.x versions of the underlying functions are available, and the API compatibility macros will be mapped to the current HDF5 versioned functions.
    3. -
    4. Compile the application with the \Code{-DH5_USE_NN_API} application mapping +
    5. Compile the application with the \TText{-DH5_USE_NN_API} application mapping option if it was written for use with an earlier HDF5 library. Because the application mapping overrides the library mapping, the macros will all be mapped to the earlier versions of the functions.
    6. Remap one API compatibility macro at a time (or sets of macros), to use the current HDF5 versions. At each stage, use the function mappings to map the macros being worked on to the current versions. For example, use the - \Code{-DH5Rdereference_vers=2} version flag setting to remap the \Code{H5Rdereference} - macro to \Code{H5Rdereference2}, the 1.10.x version. + \TText{-DH5Rdereference_vers=2} version flag setting to remap the \TText{H5Rdereference} + macro to \TText{H5Rdereference2}, the 1.10.x version. During this step, the application code will need to be modified to change the calling parameters used with the API compatibility macros to match the number and type - of the 1.10.x versioned functions. The macro name, for example \Code{H5Rdereference}, + of the 1.10.x versioned functions. The macro name, for example \TText{H5Rdereference}, should continue to be used in the code, to allow for possible re-mappings to later versioned functions in a future release.
    7. After all macros have been migrated to the latest versioned functions in step 3, @@ -891,8 +891,8 @@ uses the library mappings set in step 1, and maps API compatibility macros to the latest versions.
    8. Finally, compile the application with the application mapping - \Code{-DH5_NO_DEPRECATED_SYMBOLS}, and address any failures to complete + \TText{-DH5_NO_DEPRECATED_SYMBOLS}, and address any failures to complete the application migration process.
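As a concrete illustration of the remapping step (step 3 in the plan above), here is a hedged sketch, not part of this patch and with hypothetical function names, of code already moved to the latest H5Ovisit version. Because H5Ovisit uses a mapped callback type, the struct mappings have to move together with the function mapping:

\code
#include <stdio.h>
#include "hdf5.h"

/* Under the default 1.12 mapping (or explicitly with
 *     h5cc ... -DH5Ovisit_vers=3 -DH5O_iterate_t_vers=2 -DH5O_info_t_vers=2 ...
 * ), H5Ovisit resolves to H5Ovisit3 and the callback/info types resolve
 * to their version-2 forms, so the `fields` argument and the callback
 * signature below match.  To keep the 1.10 behavior during migration,
 * build with -DH5Ovisit_vers=1 -DH5O_iterate_t_vers=1 -DH5O_info_t_vers=1
 * and drop the `fields` argument. */
static herr_t print_object_name(hid_t obj, const char *name,
                                const H5O_info_t *info, void *op_data)
{
    (void)obj; (void)info; (void)op_data;
    printf("%s\n", name);
    return 0; /* continue iteration */
}

herr_t list_all_objects(hid_t file_id)
{
    return H5Ovisit(file_id, H5_INDEX_NAME, H5_ITER_NATIVE,
                    print_object_name, NULL, H5O_INFO_BASIC);
}
\endcode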
    - */ \ No newline at end of file + */ diff --git a/doxygen/dox/cookbook/Files.dox b/doxygen/dox/cookbook/Files.dox index 489377153a0..4b133a615cb 100644 --- a/doxygen/dox/cookbook/Files.dox +++ b/doxygen/dox/cookbook/Files.dox @@ -20,7 +20,7 @@ free space tracking information via H5Pset_file_space_strategy(). Free space tracking is supported only in HDF5 versions 1.10.x and higher. This has implications for the accessibility of your HDF5 files and should be considered carefully. If compatibility with previous versions of -HDF5 must be maintained, space reclamation via \Code{h5repack} might be an option.\n +HDF5 must be maintained, space reclamation via \TText{h5repack} might be an option.\n The file space strategy #H5F_FSPACE_STRATEGY_FSM_AGGR is not the only option that supports free-space tracking. #H5F_FSPACE_STRATEGY_PAGE is another option, which adds paged allocation and is used most effectively with page buffering.\n @@ -37,7 +37,7 @@ See \ref CB_MaintainCompat for HDF5 compatibility implications. \subsection CB_RemoveUnusedSpace Removing Unused Space from HDF5 Files \par Problem -Based on estimates or \Code{h5stat} output you know that a large portion +Based on estimates or \TText{h5stat} output you know that a large portion of an HDF5 file consists of free or unaccounted space, and you would like to remove it. @@ -58,7 +58,7 @@ The user block begins at offset 0 and must be at least 512 bytes and a power of 2. The HDF5 library ignores any content between the beginning of the file and the end of the user block.\n You can add or strip a user block to/from an existing HDF5 file with the -\Code{h5jam}/\Code{h5unjam} tool, respectively. +\TText{h5jam}/\TText{h5unjam} tool, respectively. \warning If you try to embed content into the user block for use by other applications, pay close attention to how they handle space beyond the last used byte in the @@ -68,4 +68,4 @@ try to truncate the rest of the file and destroy the HDF5 portion of the file. \par See Also References to related recipes - */ \ No newline at end of file + */ diff --git a/doxygen/dox/high_level/extension.dox b/doxygen/dox/high_level/extension.dox index 20a099a0e0b..fc0da48ee83 100644 --- a/doxygen/dox/high_level/extension.dox +++ b/doxygen/dox/high_level/extension.dox @@ -7,23 +7,23 @@ * for working with region references, hyperslab selections, and bit-fields. * These functions were created as part of a project supporting * NPP/NPOESS Data Production and Exploitation ( - * - * project, + * + * project, * software ). * While they were written to facilitate access to NPP, NPOESS, and JPSS * data in the HDF5 format, these functions may be useful to anyone working * with region references, hyperslab selections, or bit-fields. * * Note that these functions are not part of the standard HDF5 distribution; - * the + * the * software * must be separately downloaded and installed. * * A comprehensive guide to this library, - * + * * User Guide to the HDF5 High-level Library for Handling Region References and Hyperslab Selections * is available at - * https://support.hdfgroup.org/projects/jpss/documentation/HL/UG/NPOESS_HL-UG.pdf. + * https://\PRJURL/jpss/documentation/HL/UG/NPOESS_HL-UG.pdf. * * - \ref H5LRcopy_reference * \n Copies data from the specified dataset to a new location and creates a reference to it. @@ -297,7 +297,7 @@ H5_HLRDLL herr_t H5LRcopy_region(hid_t obj_id, * - #H5_ITER_NATIVE Fastest available order * * For more detailed information on these two parameters, - * @see H5Lvisit(). + * see H5Lvisit(). 
* * \p ref_type specifies the type of the reference to be used. * Valid values include the following: diff --git a/doxygen/dox/rm-template.dox b/doxygen/dox/rm-template.dox index 003d5c4b862..ad5e8387c19 100644 --- a/doxygen/dox/rm-template.dox +++ b/doxygen/dox/rm-template.dox @@ -41,8 +41,8 @@ the - HDF5 File Format Discussion diff --git a/doxygen/examples/Filters.html b/doxygen/examples/Filters.html deleted file mode 100644 index 27207d5d962..00000000000 --- a/doxygen/examples/Filters.html +++ /dev/null @@ -1,450 +0,0 @@ - - - Filters -

    Filters in HDF5

    - - Note: Transient pipelines described in this document have not - been implemented. - -

    Introduction

    - -

    HDF5 allows chunked data to pass through user-defined filters - on the way to or from disk. The filters operate on chunks of an - H5D_CHUNKED dataset can be arranged in a pipeline - so output of one filter becomes the input of the next filter. - -

    Each filter has a two-byte identification number (type - H5Z_filter_t) allocated by The HDF Group and can also be - passed application-defined integer resources to control its - behavior. Each filter also has an optional ASCII comment - string. - -

    - - - - - - - - - - - - - - - - - - - -
    Values for H5Z_filter_tDescription
    0-255These values are reserved for filters predefined and - registered by the HDF5 library and of use to the general - public. They are described in a separate section - below.
    256-511Filter numbers in this range are used for testing only - and can be used temporarily by any organization. No - attempt is made to resolve numbering conflicts since all - definitions are by nature temporary.
    512-65535Reserved for future assignment. Please contact the - HDF5 development team - to reserve a value or range of values for - use by your filters.
    - -

    Defining and Querying the Filter Pipeline

    - -

    Two types of filters can be applied to raw data I/O: permanent - filters and transient filters. The permanent filter pipeline is - defined when the dataset is created while the transient pipeline - is defined for each I/O operation. During an - H5Dwrite() the transient filters are applied first - in the order defined and then the permanent filters are applied - in the order defined. For an H5Dread() the - opposite order is used: permanent filters in reverse order, then - transient filters in reverse order. An H5Dread() - must result in the same amount of data for a chunk as the - original H5Dwrite(). - -

    The permanent filter pipeline is defined by calling - H5Pset_filter() for a dataset creation property - list while the transient filter pipeline is defined by calling - that function for a dataset transfer property list. - -

    -
    herr_t H5Pset_filter (hid_t plist, - H5Z_filter_t filter, unsigned int flags, - size_t cd_nelmts, const unsigned int - cd_values[]) -
    This function adds the specified filter and - corresponding properties to the end of the transient or - permanent output filter pipeline (depending on whether - plist is a dataset creation or dataset transfer - property list). The flags argument specifies certain - general properties of the filter and is documented below. The - cd_values is an array of cd_nelmts integers - which are auxiliary data for the filter. The integer values - will be stored in the dataset object header as part of the - filter information. -
    int H5Pget_nfilters (hid_t plist) -
    This function returns the number of filters defined in the - permanent or transient filter pipeline depending on whether - plist is a dataset creation or dataset transfer - property list. In each pipeline the filters are numbered from - 0 through N-1 where N is the value returned - by this function. During output to the file the filters of a - pipeline are applied in increasing order (the inverse is true - for input). Zero is returned if there are no filters in the - pipeline and a negative value is returned for errors. -
    H5Z_filter_t H5Pget_filter (hid_t plist, - int filter_number, unsigned int *flags, - size_t *cd_nelmts, unsigned int - *cd_values, size_t namelen, char name[]) -
    This is the query counterpart of - H5Pset_filter() and returns information about a - particular filter number in a permanent or transient pipeline - depending on whether plist is a dataset creation or - dataset transfer property list. On input, cd_nelmts - indicates the number of entries in the cd_values - array allocated by the caller while on exit it contains the - number of values defined by the filter. The - filter_number should be a value between zero and - N-1 as described for H5Pget_nfilters() - and the function will return failure (a negative value) if the - filter number is out of range. If name is a pointer - to an array of at least namelen bytes then the filter - name will be copied into that array. The name will be null - terminated if the namelen is large enough. The - filter name returned will be the name appearing in the file or - else the name registered for the filter or else an empty string. -
    - -

    The flags argument to the functions above is a bit vector of - the following fields: - -

    - - - - - - - - - - -
    Values for flagsDescription
    H5Z_FLAG_OPTIONALIf this bit is set then the filter is optional. If - the filter fails (see below) during an - H5Dwrite() operation then the filter is - just excluded from the pipeline for the chunk for which - it failed; the filter will not participate in the - pipeline during an H5Dread() of the chunk. - This is commonly used for compression filters: if the - compression result would be larger than the input then - the compression filter returns failure and the - uncompressed data is stored in the file. If this bit is - clear and a filter fails then the - H5Dwrite() or H5Dread() also - fails.
    - -

    Defining Filters

    - -

    Each filter is bidirectional, handling both input and output to - the file, and a flag is passed to the filter to indicate the - direction. In either case the filter reads a chunk of data from - a buffer, usually performs some sort of transformation on the - data, places the result in the same or new buffer, and returns - the buffer pointer and size to the caller. If something goes - wrong the filter should return zero to indicate a failure. - -

    During output, a filter that fails or isn't defined and is - marked as optional is silently excluded from the pipeline and - will not be used when reading that chunk of data. A required - filter that fails or isn't defined causes the entire output - operation to fail. During input, any filter that has not been - excluded from the pipeline during output and fails or is not - defined will cause the entire input operation to fail. - -

    Filters are defined in two phases. The first phase is to - define a function to act as the filter and link the function - into the application. The second phase is to register the - function, associating the function with an - H5Z_filter_t identification number and a comment. - -

    -
    typedef size_t (*H5Z_func_t)(unsigned int - flags, size_t cd_nelmts, const unsigned int - cd_values[], size_t nbytes, size_t - *buf_size, void **buf) -
    The flags, cd_nelmts, and - cd_values are the same as for the - H5Pset_filter() function with the additional flag - H5Z_FLAG_REVERSE which is set when the filter is - called as part of the input pipeline. The input buffer is - pointed to by *buf and has a total size of - *buf_size bytes but only nbytes are valid - data. The filter should perform the transformation in place if - possible and return the number of valid bytes or zero for - failure. If the transformation cannot be done in place then - the filter should allocate a new buffer with - malloc() and assign it to *buf, - assigning the allocated size of that buffer to - *buf_size. The old buffer should be freed - by calling free(). - -

    -
    herr_t H5Zregister (H5Z_filter_t filter_id, - const char *comment, H5Z_func_t - filter) -
    The filter function is associated with a filter - number and a short ASCII comment which will be stored in the - hdf5 file if the filter is used as part of a permanent - pipeline during dataset creation. -
    - -

    Predefined Filters

    - -

    If zlib version 1.1.2 or later was found - during configuration then the library will define a filter whose - H5Z_filter_t number is - H5Z_FILTER_DEFLATE. Since this compression method - has the potential for generating compressed data which is larger - than the original, the H5Z_FLAG_OPTIONAL flag - should be turned on so such cases can be handled gracefully by - storing the original data instead of the compressed data. The - cd_nvalues should be one with cd_value[0] - being a compression aggression level between zero and nine, - inclusive (zero is the fastest compression while nine results in - the best compression ratio). - -

    A convenience function for adding the - H5Z_FILTER_DEFLATE filter to a pipeline is: - -

    -
    herr_t H5Pset_deflate (hid_t plist, unsigned - aggression) -
    The deflate compression method is added to the end of the - permanent or transient filter pipeline depending on whether - plist is a dataset creation or dataset transfer - property list. The aggression is a number between - zero and nine (inclusive) to indicate the tradeoff between - speed and compression ratio (zero is fastest, nine is best - ratio). -
    - -

    Even if the zlib isn't detected during - configuration the application can define - H5Z_FILTER_DEFLATE as a permanent filter. If the - filter is marked as optional (as with - H5Pset_deflate()) then it will always fail and be - automatically removed from the pipeline. Applications that read - data will fail only if the data is actually compressed; they - won't fail if H5Z_FILTER_DEFLATE was part of the - permanent output pipeline but was automatically excluded because - it didn't exist when the data was written. - -

    zlib can be acquired from - - https://zlib.net. - -

    Example

    - -

    This example shows how to define and register a simple filter - that adds a checksum capability to the data stream. - -

    The function that acts as the filter always returns zero - (failure) if the md5() function was not detected at - configuration time (left as an exercise for the reader). - Otherwise the function is broken down to an input and output - half. The output half calculates a checksum, increases the size - of the output buffer if necessary, and appends the checksum to - the end of the buffer. The input half calculates the checksum - on the first part of the buffer and compares it to the checksum - already stored at the end of the buffer. If the two differ then - zero (failure) is returned, otherwise the buffer size is reduced - to exclude the checksum. - -

    - - - - -
    -

    
    -                  size_t
    -                  md5_filter(unsigned int flags, size_t cd_nelmts,
    -                  const unsigned int cd_values[], size_t nbytes,
    -                  size_t *buf_size, void **buf)
    -                  {
    -                  #ifdef HAVE_MD5
    -                  unsigned char       cksum[16];
    -
    -                  if (flags & H5Z_REVERSE) {
    -                  /* Input */
    -                  assert(nbytes>=16);
    -                  md5(nbytes-16, *buf, cksum);
    -
    -                  /* Compare */
    -                  if (memcmp(cksum, (char*)(*buf)+nbytes-16, 16)) {
    -                  return 0; /*fail*/
    -                  }
    -
    -                  /* Strip off checksum */
    -                  return nbytes-16;
    -
    -                  } else {
    -                  /* Output */
    -                  md5(nbytes, *buf, cksum);
    -
    -                  /* Increase buffer size if necessary */
    -                  if (nbytes+16>*buf_size) {
    -                  *buf_size = nbytes + 16;
    -                  *buf = realloc(*buf, *buf_size);
    -                  }
    -
    -                  /* Append checksum */
    -                  memcpy((char*)(*buf)+nbytes, cksum, 16);
    -                  return nbytes+16;
    -                  }
    -                  #else
    -                  return 0; /*fail*/
    -                  #endif
    -                  }
    -	          
    -
    - -

    Once the filter function is defined it must be registered so - the HDF5 library knows about it. Since we're testing this - filter we choose one of the H5Z_filter_t numbers - from the reserved range. We'll randomly choose 305. - -

    -

    - - - - -
    -

    
    -                  #define FILTER_MD5 305
    -                  herr_t status = H5Zregister(FILTER_MD5, "md5 checksum", md5_filter);
    -	          
    -
    - -

    Now we can use the filter in a pipeline. We could have added - the filter to the pipeline before defining or registering the - filter as long as the filter was defined and registered by time - we tried to use it (if the filter is marked as optional then we - could have used it without defining it and the library would - have automatically removed it from the pipeline for each chunk - written before the filter was defined and registered). - -

    -

    - - - - -
    -

    
    -                  hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    -                  hsize_t chunk_size[3] = {10,10,10};
    -                  H5Pset_chunk(dcpl, 3, chunk_size);
    -                  H5Pset_filter(dcpl, FILTER_MD5, 0, 0, NULL);
    -                  hid_t dset = H5Dcreate(file, "dset", H5T_NATIVE_DOUBLE, space, dcpl);
    -	          
    -
    - -

    6. Filter Diagnostics

    - -

    If the library is compiled with debugging turned on for the H5Z - layer (usually as a result of configure - --enable-debug=z) then filter statistics are printed when - the application exits normally or the library is closed. The - statistics are written to the standard error stream and include - two lines for each filter that was used: one for input and one - for output. The following fields are displayed: - -

    -

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Field NameDescription
    MethodThis is the name of the method as defined with - H5Zregister() with the characters - "< or ">" prepended to indicate - input or output.
    TotalThe total number of bytes processed by the filter - including errors. This is the maximum of the - nbytes argument or the return value. -
    ErrorsThis field shows the number of bytes of the Total - column which can be attributed to errors.
    User, System, ElapsedThese are the amount of user time, system time, and - elapsed time in seconds spent in the filter function. - Elapsed time is sensitive to system load. These times - may be zero on operating systems that don't support the - required operations.
    BandwidthThis is the filter bandwidth which is the total - number of bytes processed divided by elapsed time. - Since elapsed time is subject to system load the - bandwidth numbers cannot always be trusted. - Furthermore, the bandwidth includes bytes attributed to - errors which may significantly taint the value if the - function is able to detect errors without much - expense.
    - -

    -

    - - - - - -
    - Example: Filter Statistics -
    -

    H5Z: filter statistics accumulated ov=
    -                  er life of library:
    -                  Method     Total  Errors  User  System  Elapsed Bandwidth
    -                  ------     -----  ------  ----  ------  ------- ---------
    -                  >deflate  160000   40000  0.62    0.74     1.33 117.5 kBs
    -                  <deflate  120000       0  0.11    0.00     0.12 1.000 MBs
    -	          
    -
    - -
    - - -

    Footnote 1: Dataset chunks can be compressed - through the use of filters. Developers should be aware that - reading and rewriting compressed chunked data can result in holes - in an HDF5 file. In time, enough such holes can increase the - file size enough to impair application or library performance - when working with that file. See - - Freespace Management - in the chapter - - Performance Analysis and Issues.

    - diff --git a/doxygen/examples/H5.format.1.0.html b/doxygen/examples/H5.format.1.0.html index 32e377d4323..00da963c48e 100644 --- a/doxygen/examples/H5.format.1.0.html +++ b/doxygen/examples/H5.format.1.0.html @@ -3441,8 +3441,8 @@

    Name: Data Storage - Filter Pipeline

    library. Values 256 through 511 have been set aside for use when developing/testing new filters. The remaining values are allocated to specific filters by contacting the - HDF5 Development - Team. + HDF5 development team. + diff --git a/doxygen/examples/H5.format.1.1.html b/doxygen/examples/H5.format.1.1.html index 707bdc7c281..418afd5ab88 100644 --- a/doxygen/examples/H5.format.1.1.html +++ b/doxygen/examples/H5.format.1.1.html @@ -5558,9 +5558,9 @@

    Name: Data Storage - Filter Pipeline

    1If you are reading an earlier version of this document, this link may have changed. If the link does not work, use the latest version of this document - on The HDF Group’s website, - - https://support.hdfgroup.org/HDF5/doc/H5.format.html; + on The HDF Group’s website, + + H5.format.html; the link there will always be correct. (Return)

    diff --git a/doxygen/examples/H5DS_Spec.pdf b/doxygen/examples/H5DS_Spec.pdf new file mode 100644 index 00000000000..813f4ded3e1 Binary files /dev/null and b/doxygen/examples/H5DS_Spec.pdf differ diff --git a/doxygen/examples/IOFlow.html b/doxygen/examples/IOFlow.html index e890edbb766..b33196d502a 100644 --- a/doxygen/examples/IOFlow.html +++ b/doxygen/examples/IOFlow.html @@ -1,5 +1,4 @@ - HDF5 Raw I/O Flow Notes diff --git a/doxygen/examples/LibraryReleaseVersionNumbers.html b/doxygen/examples/LibraryReleaseVersionNumbers.html index 57b211cd61b..dedbece0c11 100644 --- a/doxygen/examples/LibraryReleaseVersionNumbers.html +++ b/doxygen/examples/LibraryReleaseVersionNumbers.html @@ -241,7 +241,7 @@

    Version Support from the Library<

    For more information on these and other function calls and macros, - see the HDF5 Reference Manual.

    + see the HDF5 Reference Manual.
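A small, hedged illustration (not part of this patch) of the run-time and compile-time version checks this section refers to:

\code
#include <stdio.h>
#include "hdf5.h"

int main(void)
{
    unsigned maj, min, rel;

    /* Run-time version of the library actually linked in. */
    H5get_libversion(&maj, &min, &rel);
    printf("Linked against HDF5 %u.%u.%u\n", maj, min, rel);

    /* Compile-time check against the headers used for the build. */
#if H5_VERSION_GE(1, 10, 0)
    printf("Built with HDF5 1.10.0 or newer headers\n");
#endif

    return 0;
}
\endcode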

    Use Cases

    diff --git a/doxygen/examples/intro_SWMR.html b/doxygen/examples/intro_SWMR.html deleted file mode 100644 index b1adb62bdb5..00000000000 --- a/doxygen/examples/intro_SWMR.html +++ /dev/null @@ -1,103 +0,0 @@ - - - Introduction to Single-Writer_Multiple-Reader (SWMR) - -

    Introduction to SWMR

    -

    The Single-Writer / Multiple-Reader (SWMR) feature enables multiple processes to read an HDF5 file while it is being written to (by a single process) without using locks or requiring communication between processes.

    -

    tutr-swmr1.png -

    All communication between processes must be performed via the HDF5 file. The HDF5 file under SWMR access must reside on a system that complies with POSIX write() semantics.

    -

    The basic engineering challenge for this to work was to ensure that the readers of an HDF5 file always see a coherent (though possibly not up to date) HDF5 file.

    -

    The issue is that when writing data there is information in the metadata cache in addition to the physical file on disk:

    -

    tutr-swmr2.png -

    However, the readers can only see the state contained in the physical file:

    -

    tutr-swmr3.png -

    The SWMR solution implements dependencies on when the metadata can be flushed to the file. This ensures that metadata cache flush operations occur in the proper order, so that there will never be internal file pointers in the physical file that point to invalid (unflushed) file addresses.

    -

    A beneficial side effect of using SWMR access is better fault tolerance. It is more difficult to corrupt a file when using SWMR.

    -

    Documentation

    -

    SWMR User's Guide

    -

    HDF5 Library APIs

    -
      -
    • H5F_START_SWMR_WRITE — Enables SWMR writing mode for a file
    • -
    • H5DO_APPEND — Appends data to a dataset along a specified dimension
    • -
    • H5P_SET_OBJECT_FLUSH_CB — Sets a callback function to invoke when an object flush occurs in the file
    • -
    • H5P_GET_OBJECT_FLUSH_CB — Retrieves the object flush property values from the file access property list
    • -
    • H5O_DISABLE_MDC_FLUSHES — Prevents metadata entries for an HDF5 object from being flushed from the metadata cache to storage
    • -
    • H5O_ENABLE_MDC_FLUSHES — Enables flushing of dirty metadata entries from a file’s metadata cache
    • -
    • H5O_ARE_MDC_FLUSHES_DISABLED — Determines if an HDF5 object has had flushes of metadata entries disabled
    • -
    -

    Tools

    -
      -
    • h5watch — Outputs new records appended to a dataset as the dataset grows
    • -
    • h5format_convert — Converts the layout format version and chunked indexing types of datasets created with HDF5-1.10 so that applications built with HDF5-1.8 can access them
    • -
    • h5clear — Clears superblock status_flags field, removes metadata cache image, prints EOA and EOF, or sets EOA of a file
    • -
    -

    Design Documents

    -

    Error while fetching page properties report data:

    -

    Programming Model

    -

    Please be aware that the SWMR feature requires that an HDF5 file be created with the latest file format. See H5P_SET_LIBVER_BOUNDS for more information.

    -

    To use SWMR follow the the general programming model for creating and accessing HDF5 files and objects along with the steps described below.

    -

    SWMR Writer:

    -

    The SWMR writer either opens an existing file and objects or creates them as follows.

    -

    Open an existing file:

    -

    Call H5Fopen using the H5F_ACC_SWMR_WRITE flag. -Begin writing datasets. -Periodically flush data. -Create a new file:

    -

    Call H5Fcreate using the latest file format. -Create groups, datasets and attributes, and then close the attributes. -Call H5F_START_SWMR_WRITE to start SWMR access to the file. -Periodically flush data.

    -

    Example Code:

    -

    Create the file using the latest file format property:

    -

    - fapl = H5Pcreate (H5P_FILE_ACCESS); - status = H5Pset_libver_bounds (fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - fid = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); -[Create objects (files, datasets, ...). Close any attributes and named datatype objects. Groups and datasets may remain open before starting SWMR access to them.]

    Start SWMR access to the file:

        status = H5Fstart_swmr_write (fid);

    Reopen the datasets and start writing, periodically flushing data:

        status = H5Dwrite (dset_id, ...);
        status = H5Dflush (dset_id);
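    As an illustration of the write-and-flush step, the sketch below appends one block of integers to a 1-D dataset with an unlimited dimension and then flushes it so that readers can see the new elements. This is a hedged sketch, not code from the library: NELEMS, old_size, and buf are assumed to be defined by the caller, and error checking is omitted.

        hsize_t new_size[1] = { old_size + NELEMS };
        hsize_t start[1]    = { old_size };
        hsize_t count[1]    = { NELEMS };

        H5Dset_extent(dset_id, new_size);                /* grow the unlimited dimension  */
        hid_t fspace = H5Dget_space(dset_id);            /* dataspace with the new extent */
        H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);
        hid_t mspace = H5Screate_simple(1, count, NULL);

        H5Dwrite(dset_id, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);
        H5Dflush(dset_id);                               /* make the new block visible to readers */

        H5Sclose(mspace);
        H5Sclose(fspace);

    The high-level H5DO_APPEND call listed earlier wraps essentially this extend/select/write pattern for the common append case.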

    SWMR Reader:

    The SWMR reader must continually poll for new data:

    • Call H5Fopen using the H5F_ACC_SWMR_READ flag.
    • Poll, checking the size of the dataset to see if there is new data available for reading.
    • Read new data, if any.

    Example Code:

    Open the file using the SWMR read flag:

        fid = H5Fopen (filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, H5P_DEFAULT);

    Open the dataset and then repeatedly poll the dataset, by getting the dimensions, reading new data, and refreshing:

        dset_id = H5Dopen (...);
        space_id = H5Dget_space (...);
        while (...) {
            status = H5Dread (dset_id, ...);
            status = H5Drefresh (dset_id);
            space_id = H5Dget_space (...);
        }
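    A slightly more concrete version of that polling loop is sketched below. It is an illustration only: the file name "data.h5" and dataset name "dset" are placeholders, the actual read is elided, and error checking and the sleep interval are left to the application.

        hid_t   fid, did, sid;
        hsize_t dims[1], last = 0;

        fid = H5Fopen("data.h5", H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, H5P_DEFAULT);
        did = H5Dopen2(fid, "dset", H5P_DEFAULT);

        for (;;) {                                    /* poll until the application decides to stop */
            H5Drefresh(did);                          /* pick up metadata the writer has flushed */
            sid = H5Dget_space(did);
            H5Sget_simple_extent_dims(sid, dims, NULL);
            H5Sclose(sid);

            if (dims[0] > last) {
                /* ... select and H5Dread the elements in [last, dims[0]) ... */
                last = dims[0];
            }
            /* sleep briefly before polling again */
        }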

    Limitations and Scope

    An HDF5 file under SWMR access must reside on a system that complies with POSIX write() semantics. It is also limited in scope as follows:

    The writer process is only allowed to modify raw data of existing datasets by:

    • Appending data along any unlimited dimension.
    • Modifying existing data.

    The following operations are not allowed (and the corresponding HDF5 calls will fail):

    • The writer cannot add new objects to the file.
    • The writer cannot delete objects in the file.
    • The writer cannot modify or append data with variable length, string or region reference datatypes.
    • File space recycling is not allowed. As a result the size of a file modified by a SWMR writer may be larger than a file modified by a non-SWMR writer.

    Tools for Working with SWMR

    Two new tools, h5watch and h5clear, are available for use with SWMR. The other HDF5 utilities have also been modified to recognize SWMR:

    • The h5watch tool allows a user to monitor the growth of a dataset.
    • The h5clear tool clears the status flags in the superblock of an HDF5 file.
    • The rest of the HDF5 tools will exit gracefully but otherwise do not work with SWMR files.

    Programming Example

    A good example of using SWMR is included with the HDF5 tests in the source code. You can run it while reading the file it creates. If you then interrupt the application and reader and look at the resulting file, you will see that the file is still valid. Follow these steps:

    1. Download the HDF5-1.10 source code to a local directory on a filesystem (that complies with POSIX write() semantics). Build the software. No special configuration options are needed to use SWMR.

    2. Invoke two command terminal windows. In one window go into the bin/ directory of the built binaries. In the other window go into the test/ directory of the HDF5-1.10 source code that was just built.

    3. In the window in the test/ directory compile and run use_append_chunk.c. The example writes a three-dimensional dataset by planes (with chunks of size 1 x 256 x 256).

    4. In the other window (in the bin/ directory) run h5watch on the file created by use_append_chunk.c (use_append_chunk.h5). It should be run while use_append_chunk is executing, and you will see valid data displayed with h5watch.

    5. Interrupt use_append_chunk while it is running, and stop h5watch.

    6. Use h5clear to clear the status flags in the superblock of the HDF5 file (use_append_chunk.h5).

    7. View the file with h5dump. You will see that it is a valid file even though the application did not close properly. It will contain data up to the point that it was interrupted.

    diff --git a/doxygen/examples/intro_VDS.html b/doxygen/examples/intro_VDS.html
    deleted file mode 100644
    index 6e573b9b75c..00000000000
    --- a/doxygen/examples/intro_VDS.html
    +++ /dev/null
    @@ -1,72 +0,0 @@

    Introduction to the Virtual Dataset - VDS

    The HDF5 Virtual Dataset (VDS) feature enables users to access data in a collection of HDF5 files as a single HDF5 dataset and to use the HDF5 APIs to work with that dataset.

    For example, your data may be collected into four files:

    [Figure: tutrvds-multimgs.png]

    You can map the datasets in the four files into a single VDS that can be accessed just like any other dataset:

    [Figure: tutrvds-snglimg.png]

    The mapping between a VDS and the HDF5 source datasets is persistent and transparent to an application. If a source file is missing, the fill value will be displayed.

    See the Virtual Dataset (VDS) documentation for complete details regarding the VDS feature.

    The VDS feature was implemented using hyperslab selection (H5S_SELECT_HYPERSLAB). See the tutorial on Reading From or Writing to a Subset of a Dataset for more information on selecting hyperslabs.
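    As a quick reminder, a hyperslab selection is expressed with start/stride/count/block arrays. The sketch below selects a single 1 x 6 block in a 2-D dataspace; it is a generic illustration (space_id and status are assumed to be declared elsewhere), not code taken from the tutorial.

        hsize_t start[2]  = {0, 0};   /* where the selection begins         */
        hsize_t stride[2] = {1, 1};   /* step between selected blocks       */
        hsize_t count[2]  = {1, 1};   /* number of blocks in each dimension */
        hsize_t block[2]  = {1, 6};   /* size of each block                 */

        status = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block);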

    Programming Model

    To create a Virtual Dataset you simply follow the HDF5 programming model and add a few additional API calls to map the source datasets to the VDS.

    Following are the steps for creating a Virtual Dataset:

    • Create the source datasets that will comprise the VDS
    • Create the VDS:
      ‐ Define a datatype and dataspace (can be unlimited)
      ‐ Define the dataset creation property list (including fill value)
      ‐ (Repeat for each source dataset) Map elements from the source dataset to elements of the VDS:
        - Select elements in the source dataset (source selection)
        - Select elements in the virtual dataset (destination selection)
        - Map destination selections to source selections (see Functions for Working with a VDS)
      ‐ Call H5Dcreate using the properties defined above
    • Access the VDS as a regular HDF5 dataset
    • Close the VDS when finished

    Functions for Working with a VDS

    The H5P_SET_VIRTUAL API sets the mapping between virtual and source datasets. It is a dataset creation property, and using it changes the layout of the dataset to H5D_VIRTUAL. As with specifying any dataset creation property list, an instance of the property list is created, modified, passed into the dataset creation call, and then closed:

        dcpl = H5Pcreate (H5P_DATASET_CREATE);

        src_space = H5Screate_simple (...);
        status = H5Sselect_hyperslab (space, ...);
        status = H5Pset_virtual (dcpl, space, SRC_FILE[i], SRC_DATASET[i], src_space);

        dset = H5Dcreate2 (file, DATASET, H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl, H5P_DEFAULT);

        status = H5Pclose (dcpl);

    There are several other APIs introduced with Virtual Datasets, including query functions. For details see the complete list of HDF5 library APIs that support Virtual Datasets.
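    To show how those fragments can fit together, here is one possible end-to-end sketch for the 4 x 6 virtual dataset of Example 1 below, including the fill value. It is only a sketch: file is an assumed open file handle, SRC_FILE and SRC_DATASET are assumed arrays of three source file and dataset names, the dataset name "VDS" is a placeholder, and error checking is omitted.

        hsize_t vdims[2] = {4, 6}, sdims[1] = {6};
        hsize_t start[2] = {0, 0}, count[2] = {1, 1}, block[2] = {1, 6};
        int     fill     = -1;
        int     i;

        hid_t space     = H5Screate_simple(2, vdims, NULL);
        hid_t src_space = H5Screate_simple(1, sdims, NULL);
        hid_t dcpl      = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fill);

        for (i = 0; i < 3; i++) {
            start[0] = (hsize_t)i;              /* map source i to row i of the VDS */
            H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, block);
            H5Pset_virtual(dcpl, space, SRC_FILE[i], SRC_DATASET[i], src_space);
        }

        hid_t dset = H5Dcreate2(file, "VDS", H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl, H5P_DEFAULT);

        H5Dclose(dset);
        H5Pclose(dcpl);
        H5Sclose(src_space);
        H5Sclose(space);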

    Limitations

    • This feature requires HDF5-1.10.
    • The number of source datasets is unlimited. However, there is a limit on the size of each source dataset.

    Programming Examples

    Example 1

    This example creates three HDF5 files, each with a one-dimensional dataset of 6 elements. The datasets in these files are the source datasets that are then used to create a 4 x 6 Virtual Dataset with a fill value of -1. The first three rows of the VDS are mapped to the data from the three source datasets as shown below:

    [Figure: tutrvds-ex.png]

    In this example the three source datasets are mapped to the VDS with this code:

        src_space = H5Screate_simple (RANK1, dims, NULL);
        for (i = 0; i < 3; i++) {
            start[0] = (hsize_t)i;
            /* Select i-th row in the virtual dataset; selection in the source datasets is the same. */
            status = H5Sselect_hyperslab (space, H5S_SELECT_SET, start, NULL, count, block);
            status = H5Pset_virtual (dcpl, space, SRC_FILE[i], SRC_DATASET[i], src_space);
        }

    After the VDS is created and closed, it is reopened. The property list is then queried to determine the layout of the dataset and its mappings, and the data in the VDS is read and printed.
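    A sketch of that query step using the VDS query functions is shown below. It assumes an already opened virtual dataset handle vds_dset and that <stdio.h> is included; error checking is omitted.

        hid_t  dcpl  = H5Dget_create_plist(vds_dset);
        size_t nmaps = 0;
        char   name[256];

        if (H5D_VIRTUAL == H5Pget_layout(dcpl)) {
            H5Pget_virtual_count(dcpl, &nmaps);          /* number of source mappings */
            for (size_t i = 0; i < nmaps; i++) {
                H5Pget_virtual_filename(dcpl, i, name, sizeof(name));
                printf("Mapping %zu: source file %s, ", i, name);
                H5Pget_virtual_dsetname(dcpl, i, name, sizeof(name));
                printf("source dataset %s\n", name);
            }
        }
        H5Pclose(dcpl);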

    This example is in the HDF5 source code and can be obtained from here:

    C Example

    For details on compiling an HDF5 application: [ Compiling HDF5 Applications ]

    Example 2

    This example shows how to use a C-style printf format string for specifying multiple source datasets as one virtual dataset. Only one mapping is required; in other words, only one H5P_SET_VIRTUAL call is needed to map multiple datasets. It creates a 2-dimensional unlimited VDS. Then it re-opens the file, makes queries, and reads the virtual dataset.

    The source datasets are specified as A-0, A-1, A-2, and A-3. These are mapped to the virtual dataset with one call:

        status = H5Pset_virtual (dcpl, vspace, SRCFILE, "/A-%b", src_space);

    The %b indicates that the block count of the selection in the dimension should be used.
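    One way such a single printf-style mapping could be set up is sketched below. This mirrors the shipped example only loosely: DIM0, dcpl, SRCFILE, and src_space are assumed to be defined elsewhere, the virtual dataspace is unlimited in its first dimension, and error checking is omitted.

        hsize_t vdims[2]     = {0, DIM0};
        hsize_t vdims_max[2] = {H5S_UNLIMITED, DIM0};
        hid_t   vspace = H5Screate_simple(2, vdims, vdims_max);

        /* Select an unlimited count of 1 x DIM0 blocks along the first dimension. */
        hsize_t start[2] = {0, 0};
        hsize_t count[2] = {H5S_UNLIMITED, 1};
        hsize_t block[2] = {1, DIM0};
        H5Sselect_hyperslab(vspace, H5S_SELECT_SET, start, NULL, count, block);

        /* One mapping covers A-0, A-1, A-2, ... via the %b (block count) pattern. */
        H5Pset_virtual(dcpl, vspace, SRCFILE, "/A-%b", src_space);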

    C Example

    For details on compiling an HDF5 application: [ Compiling HDF5 Applications ]

    Using h5dump with a VDS

    The h5dump utility can be used to view a VDS. The h5dump output for a VDS looks exactly like that for any other dataset. If h5dump cannot find a source dataset then the fill value will be displayed.

    You can determine that a dataset is a VDS by looking at its properties with h5dump -p. It will display each source dataset mapping, beginning with Mapping 0. Below is an excerpt of the output of h5dump -p on the vds.h5 file created in Example 1. You can see that the entire source file a.h5 is mapped to the first row of the /VDS dataset:

    [Figure: tutrvds-map.png]

    - diff --git a/doxygen/examples/tables/propertyLists.dox b/doxygen/examples/tables/propertyLists.dox index 340e13c26a5..76727b58a59 100644 --- a/doxygen/examples/tables/propertyLists.dox +++ b/doxygen/examples/tables/propertyLists.dox @@ -490,12 +490,12 @@ and one raw data file. #H5Pget_filter Returns information about a filter in a pipeline. -The C function is a macro: \see \ref api-compat-macros. +The C function is a macro: @see @ref api-compat-macros. #H5Pget_filter_by_id Returns information about the specified filter. -The C function is a macro: \see \ref api-compat-macros. +The C function is a macro: @see @ref api-compat-macros. #H5Pmodify_filter @@ -739,12 +739,12 @@ of the library for reading or writing the actual data. #H5Pget_filter Returns information about a filter in a pipeline. The -C function is a macro: \see \ref api-compat-macros. +C function is a macro: @see @ref api-compat-macros. #H5Pget_filter_by_id Returns information about the specified filter. The -C function is a macro: \see \ref api-compat-macros. +C function is a macro: @see @ref api-compat-macros. #H5Pget_nfilters diff --git a/doxygen/hdf5doxy_layout.xml b/doxygen/hdf5doxy_layout.xml index d895b2dd5bd..8bd7aaf4c08 100644 --- a/doxygen/hdf5doxy_layout.xml +++ b/doxygen/hdf5doxy_layout.xml @@ -5,12 +5,12 @@ - + + --> diff --git a/fortran/src/h5fc.in b/fortran/src/h5fc.in index 3c044257a49..e0e35f5a1b8 100644 --- a/fortran/src/h5fc.in +++ b/fortran/src/h5fc.in @@ -296,7 +296,7 @@ if test "x$do_link" = "xyes"; then shared_link="" # conditionally link with the hl library if test "X$HL" = "Xhl"; then - libraries=" $libraries -lhdf5hl_fortran -lhdf5_hl -lhdf5_fortran -lhdf5 " + libraries=" $libraries -lhdf5_hl_fortran -lhdf5_hl -lhdf5_fortran -lhdf5 " else libraries=" $libraries -lhdf5_fortran -lhdf5 " fi @@ -330,8 +330,8 @@ if test "x$do_link" = "xyes"; then -lhdf5_fortran) new_libraries="$new_libraries ${libdir}/libhdf5_fortran.a" ;; - -lhdf5hl_fortran) - new_libraries="$new_libraries ${libdir}/libhdf5hl_fortran.a" + -lhdf5_hl_fortran) + new_libraries="$new_libraries ${libdir}/libhdf5_hl_fortran.a" ;; *) new_libraries="$new_libraries $lib" diff --git a/hl/fortran/src/Makefile.am b/hl/fortran/src/Makefile.am index 5834c9932d7..eef7e8b4dc1 100644 --- a/hl/fortran/src/Makefile.am +++ b/hl/fortran/src/Makefile.am @@ -24,10 +24,10 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/hl/src -I$(top_builddir)/hl/src AM_FCFLAGS+=-I$(top_builddir)/fortran/src $(F9XMODFLAG)$(top_builddir)/fortran/src # Our main target, the high-level fortran library -lib_LTLIBRARIES=libhdf5hl_fortran.la +lib_LTLIBRARIES=libhdf5_hl_fortran.la # Add libtool numbers to the HDF5 HL Fortran library (from config/lt_vers.am) -libhdf5hl_fortran_la_LDFLAGS= -version-info $(LT_HL_F_VERS_INTERFACE):$(LT_HL_F_VERS_REVISION):$(LT_HL_F_VERS_AGE) $(AM_LDFLAGS) +libhdf5_hl_fortran_la_LDFLAGS= -version-info $(LT_HL_F_VERS_INTERFACE):$(LT_HL_F_VERS_REVISION):$(LT_HL_F_VERS_AGE) $(AM_LDFLAGS) # Some Fortran compilers can't build shared libraries, so sometimes we # want to build a shared C library and a static Fortran library. If so, @@ -43,26 +43,11 @@ endif #endif # List sources to include in the HDF5 HL Fortran library. -libhdf5hl_fortran_la_SOURCES=H5DSfc.c H5LTfc.c H5IMfc.c H5IMcc.c H5TBfc.c \ +libhdf5_hl_fortran_la_SOURCES=H5DSfc.c H5LTfc.c H5IMfc.c H5IMcc.c H5TBfc.c \ H5DSff.F90 H5LTff.F90 H5TBff.F90 H5IMff.F90 H5DOff.F90 H5LTff_gen.F90 H5TBff_gen.F90 # HDF5 HL Fortran library depends on HDF5 Library. 
-libhdf5hl_fortran_la_LIBADD=$(LIBH5_HL) $(LIBH5F) - -# The name of the lib file doesn't follow the same pattern as the other hl lib -# files, namely libhdf5_hl_*. Add a symlink with the compliant name to the -# actual lib file. -install-exec-hook: - cd $(DESTDIR)$(libdir) && \ - if test -f libhdf5hl_fortran.a -a \ - ! -f libhdf5_hl_fortran.a; then \ - $(LN_S) libhdf5hl_fortran.a libhdf5_hl_fortran.a; \ - fi; \ - if test -f libhdf5hl_fortran.so -a \ - ! -f libhdf5_hl_fortran.so; then \ - $(LN_S) libhdf5hl_fortran.so libhdf5_hl_fortran.so; \ - fi; - +libhdf5_hl_fortran_la_LIBADD=$(LIBH5_HL) $(LIBH5F) # Fortran module files can have different extensions and different names # (e.g., different capitalizations) on different platforms. Write rules diff --git a/hl/fortran/test/Makefile.am b/hl/fortran/test/Makefile.am index a74f8ef0fbf..adaa59db4cc 100644 --- a/hl/fortran/test/Makefile.am +++ b/hl/fortran/test/Makefile.am @@ -15,7 +15,9 @@ ## # # HDF5 High-Level Fortran Makefile(.in) - +# +# Autoconf cannot figure out dependencies between modules; disable parallel make +.NOTPARALLEL: include $(top_srcdir)/config/commence.am AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_builddir)/src -I$(top_srcdir)/hl/src diff --git a/hl/src/H5DOpublic.h b/hl/src/H5DOpublic.h index 661ca7a2abe..b3ea31d67e1 100644 --- a/hl/src/H5DOpublic.h +++ b/hl/src/H5DOpublic.h @@ -161,7 +161,7 @@ H5_HLDLL herr_t H5DOappend(hid_t dset_id, hid_t dxpl_id, unsigned axis, size_t e * from one datatype to another, and the filter pipeline to write the chunk. * Developers should have experience with these processes before * using this function. Please see - * + * * Using the Direct Chunk Write Function * for more information. * diff --git a/hl/src/H5DSpublic.h b/hl/src/H5DSpublic.h index 4afe51180f9..a7c9c9326a8 100644 --- a/hl/src/H5DSpublic.h +++ b/hl/src/H5DSpublic.h @@ -117,7 +117,7 @@ H5_HLDLL herr_t H5DSwith_new_ref(hid_t obj_id, hbool_t *with_new_ref); * * Entries are created in the #DIMENSION_LIST and * #REFERENCE_LIST attributes, as defined in section 4.2 of - * + * * HDF5 Dimension Scale Specification. * * Fails if: @@ -147,7 +147,7 @@ H5_HLDLL herr_t H5DSattach_scale(hid_t did, hid_t dsid, unsigned int idx); * dimension \p idx of dataset \p did. This deletes the entries in the * #DIMENSION_LIST and #REFERENCE_LIST attributes, * as defined in section 4.2 of - * + * * HDF5 Dimension Scale Specification. * * Fails if: @@ -180,7 +180,7 @@ H5_HLDLL herr_t H5DSdetach_scale(hid_t did, hid_t dsid, unsigned int idx); * as defined above. Creates the CLASS attribute, set to the value * "DIMENSION_SCALE" and an empty #REFERENCE_LIST attribute, * as described in - * + * * HDF5 Dimension Scale Specification. * (PDF, see section 4.2). * diff --git a/hl/src/H5LTpublic.h b/hl/src/H5LTpublic.h index 18f7502209f..f5eea20eb60 100644 --- a/hl/src/H5LTpublic.h +++ b/hl/src/H5LTpublic.h @@ -1386,8 +1386,8 @@ H5_HLDLL herr_t H5LTget_attribute_info(hid_t loc_id, const char *obj_name, const * \p lang_type definition of HDF5 datatypes. * Currently, only the DDL(#H5LT_DDL) is supported. * The complete DDL definition of HDF5 datatypes can be found in - * the last chapter of the - * + * the specifications chapter of the + * * HDF5 User's Guide. * * \par Example @@ -1424,8 +1424,8 @@ H5_HLDLL hid_t H5LTtext_to_dtype(const char *text, H5LT_lang_t lang_type); * * Currently only DDL (#H5LT_DDL) is supported for \p lang_type. 
* The complete DDL definition of HDF5 data types can be found in - * the last chapter of the - * + * the specifications chapter of the + * * HDF5 User's Guide. * * \par Example @@ -1625,7 +1625,7 @@ H5_HLDLL htri_t H5LTpath_valid(hid_t loc_id, const char *path, hbool_t check_obj * \note **Recommended Reading:** * \note This function is part of the file image operations feature set. * It is highly recommended to study the guide - * + * * HDF5 File Image Operations before using this feature set.\n * See the “See Also” section below for links to other elements of * HDF5 file image operations. diff --git a/hl/tools/gif2h5/decompress.c b/hl/tools/gif2h5/decompress.c index e87a60cf7af..62a22922ff4 100644 --- a/hl/tools/gif2h5/decompress.c +++ b/hl/tools/gif2h5/decompress.c @@ -296,6 +296,10 @@ Decompress(GIFIMAGEDESC *GifImageDesc, GIFHEAD *GifHead) * Build the hash table on-the-fly. No table is stored in the * file. */ + if (FreeCode >= 4096) { + printf("Error: FreeCode out of bounds\n"); + exit(EXIT_FAILURE); + } Prefix[FreeCode] = OldCode; Suffix[FreeCode] = FinChar; OldCode = InCode; diff --git a/java/src/jni/exceptionImp.c b/java/src/jni/exceptionImp.c index 4cf03ac9f28..6b2004ddeb4 100644 --- a/java/src/jni/exceptionImp.c +++ b/java/src/jni/exceptionImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5Constants.c b/java/src/jni/h5Constants.c index 41395a4413f..aeec71fb9f4 100644 --- a/java/src/jni/h5Constants.c +++ b/java/src/jni/h5Constants.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5Imp.c b/java/src/jni/h5Imp.c index 898b52ad3ed..6092419c256 100644 --- a/java/src/jni/h5Imp.c +++ b/java/src/jni/h5Imp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5aImp.c b/java/src/jni/h5aImp.c index 54c862eff6c..b6ed1c4c3e1 100644 --- a/java/src/jni/h5aImp.c +++ b/java/src/jni/h5aImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5dImp.c b/java/src/jni/h5dImp.c index f6318b222d4..363936b76e9 100644 --- a/java/src/jni/h5dImp.c +++ b/java/src/jni/h5dImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5eImp.c b/java/src/jni/h5eImp.c index d52a4f72cd0..89c9362626f 100644 --- a/java/src/jni/h5eImp.c +++ b/java/src/jni/h5eImp.c @@ -21,9 +21,6 @@ extern "C" { * Each routine wraps a single HDF entry point, generally with the * analogous arguments and return codes. * - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * */ #include diff --git a/java/src/jni/h5fImp.c b/java/src/jni/h5fImp.c index 9295383ef4d..6bd17a786cb 100644 --- a/java/src/jni/h5fImp.c +++ b/java/src/jni/h5fImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5gImp.c b/java/src/jni/h5gImp.c index fce68022649..54b72b6c09a 100644 --- a/java/src/jni/h5gImp.c +++ b/java/src/jni/h5gImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5iImp.c b/java/src/jni/h5iImp.c index de70e1e424f..728c3b14ed5 100644 --- a/java/src/jni/h5iImp.c +++ b/java/src/jni/h5iImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5jni.h b/java/src/jni/h5jni.h index ad867083ba9..b1bd968ba7c 100644 --- a/java/src/jni/h5jni.h +++ b/java/src/jni/h5jni.h @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #include #include "H5version.h" #include diff --git a/java/src/jni/h5lImp.c b/java/src/jni/h5lImp.c index 0d9ac7dfc01..7d487999f96 100644 --- a/java/src/jni/h5lImp.c +++ b/java/src/jni/h5lImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5oImp.c b/java/src/jni/h5oImp.c index 15daeafde6b..60a6e4fbf90 100644 --- a/java/src/jni/h5oImp.c +++ b/java/src/jni/h5oImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pACPLImp.c b/java/src/jni/h5pACPLImp.c index 4635fa7373b..7c9895a6de1 100644 --- a/java/src/jni/h5pACPLImp.c +++ b/java/src/jni/h5pACPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pDAPLImp.c b/java/src/jni/h5pDAPLImp.c index 01c3983c2cc..44378a1dc5e 100644 --- a/java/src/jni/h5pDAPLImp.c +++ b/java/src/jni/h5pDAPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pDCPLImp.c b/java/src/jni/h5pDCPLImp.c index a624fd96987..ebe12cb5455 100644 --- a/java/src/jni/h5pDCPLImp.c +++ b/java/src/jni/h5pDCPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pDXPLImp.c b/java/src/jni/h5pDXPLImp.c index 31f6d02b860..3b519ef2709 100644 --- a/java/src/jni/h5pDXPLImp.c +++ b/java/src/jni/h5pDXPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pFAPLImp.c b/java/src/jni/h5pFAPLImp.c index af56336fb55..24b7f357e50 100644 --- a/java/src/jni/h5pFAPLImp.c +++ b/java/src/jni/h5pFAPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pFCPLImp.c b/java/src/jni/h5pFCPLImp.c index 7c1b44add5f..56b4e921aae 100644 --- a/java/src/jni/h5pFCPLImp.c +++ b/java/src/jni/h5pFCPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pGAPLImp.c b/java/src/jni/h5pGAPLImp.c index 0ee65710ac5..b38bd4b3b23 100644 --- a/java/src/jni/h5pGAPLImp.c +++ b/java/src/jni/h5pGAPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pGCPLImp.c b/java/src/jni/h5pGCPLImp.c index 49d79dc2366..b71558012ce 100644 --- a/java/src/jni/h5pGCPLImp.c +++ b/java/src/jni/h5pGCPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pImp.c b/java/src/jni/h5pImp.c index c952ccb9dff..6c17984ae24 100644 --- a/java/src/jni/h5pImp.c +++ b/java/src/jni/h5pImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pLAPLImp.c b/java/src/jni/h5pLAPLImp.c index 3048c155413..36813e33fc9 100644 --- a/java/src/jni/h5pLAPLImp.c +++ b/java/src/jni/h5pLAPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pLCPLImp.c b/java/src/jni/h5pLCPLImp.c index ecabadd29bc..e27a9eb1570 100644 --- a/java/src/jni/h5pLCPLImp.c +++ b/java/src/jni/h5pLCPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pOCPLImp.c b/java/src/jni/h5pOCPLImp.c index 7cd9b5c721f..a743cbaa7f4 100644 --- a/java/src/jni/h5pOCPLImp.c +++ b/java/src/jni/h5pOCPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pOCpyPLImp.c b/java/src/jni/h5pOCpyPLImp.c index c4d2ed7fd14..a78aaa259f0 100644 --- a/java/src/jni/h5pOCpyPLImp.c +++ b/java/src/jni/h5pOCpyPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5pStrCPLImp.c b/java/src/jni/h5pStrCPLImp.c index 0045efa342e..3382f0aea30 100644 --- a/java/src/jni/h5pStrCPLImp.c +++ b/java/src/jni/h5pStrCPLImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5plImp.c b/java/src/jni/h5plImp.c index 3c87fd52a99..9632e9e2609 100644 --- a/java/src/jni/h5plImp.c +++ b/java/src/jni/h5plImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5rImp.c b/java/src/jni/h5rImp.c index f97f803f90e..4ccad5457a2 100644 --- a/java/src/jni/h5rImp.c +++ b/java/src/jni/h5rImp.c @@ -10,11 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5sImp.c b/java/src/jni/h5sImp.c index 55fb268434f..738db67ffee 100644 --- a/java/src/jni/h5sImp.c +++ b/java/src/jni/h5sImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5tImp.c b/java/src/jni/h5tImp.c index 309454b16e4..316455715ac 100644 --- a/java/src/jni/h5tImp.c +++ b/java/src/jni/h5tImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5util.c b/java/src/jni/h5util.c index 9c441729a39..fb619aa619d 100644 --- a/java/src/jni/h5util.c +++ b/java/src/jni/h5util.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5util.h b/java/src/jni/h5util.h index 5af96afaee9..011aaec428f 100644 --- a/java/src/jni/h5util.h +++ b/java/src/jni/h5util.h @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifndef H5UTIL_H__ #define H5UTIL_H__ diff --git a/java/src/jni/h5vlImp.c b/java/src/jni/h5vlImp.c index 2bf0b8d6b0a..47e532a5609 100644 --- a/java/src/jni/h5vlImp.c +++ b/java/src/jni/h5vlImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/h5zImp.c b/java/src/jni/h5zImp.c index e6d37bfa3af..9c387fa33ee 100644 --- a/java/src/jni/h5zImp.c +++ b/java/src/jni/h5zImp.c @@ -10,12 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ diff --git a/java/src/jni/nativeData.c b/java/src/jni/nativeData.c index d25951ff436..d014b64579d 100644 --- a/java/src/jni/nativeData.c +++ b/java/src/jni/nativeData.c @@ -10,11 +10,6 @@ * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -/* - * For details of the HDF libraries, see the HDF Documentation at: - * https://portal.hdfgroup.org/documentation/index.html - * - */ /* * This module contains the implementation of all the native methods * used for number conversion. This is represented by the Java diff --git a/release_docs/INSTALL b/release_docs/INSTALL index 9373192912b..afd077f772b 100644 --- a/release_docs/INSTALL +++ b/release_docs/INSTALL @@ -49,7 +49,7 @@ CONTENTS include the Szip library with the encoder enabled. These can be found here: - https://www.hdfgroup.org/downloads/hdf5/ + https://support.hdfgroup.org/downloads/index.html Please notice that if HDF5 configure cannot find a valid Szip library, configure will not fail; in this case, the compression filter will diff --git a/release_docs/INSTALL_Autotools.txt b/release_docs/INSTALL_Autotools.txt index a2c948198e8..dc394be4521 100644 --- a/release_docs/INSTALL_Autotools.txt +++ b/release_docs/INSTALL_Autotools.txt @@ -334,7 +334,7 @@ III. Full installation instructions for source distributions (or '--with-pthread=DIR') flag to the configure script. For further information, see: - https://portal.hdfgroup.org/display/knowledge/Questions+about+thread-safety+and+concurrent+access + https://support.hdfgroup.org/releases/hdf5/documentation/gen_topics/Questions+about+thread-safety+and+concurrent+access The high-level, C++, Fortran and Java interfaces are not compatible with the thread-safety option because the lock is not hoisted @@ -492,7 +492,7 @@ IV. Using the Library For information on using HDF5 see the documentation, tutorials and examples found here: - https://portal.hdfgroup.org/documentation/index.html + https://support.hdfgroup.org/documentation/index.html A summary of the features included in the built HDF5 installation can be found in the libhdf5.settings file in the same directory as the static and/or diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt index 2322b763662..b2bd84c20f3 100644 --- a/release_docs/INSTALL_CMake.txt +++ b/release_docs/INSTALL_CMake.txt @@ -59,7 +59,7 @@ HDF Group recommends using the ctest script mode to build HDF5. 
------------------------------------------------------------------------- Individual files needed as mentioned in this document ------------------------------------------------------------------------- -Download from https://github.com/HDFGroup/hdf5/tree/master/config/cmake/scripts: +Download from https://github.com/HDFGroup/hdf5/blob/develop/config/cmake/scripts: CTestScript.cmake -- CMake build script HDF5config.cmake -- CMake configuration script @@ -144,6 +144,7 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: ------bin ------include ------lib + --------plugins ------cmake On Linux, change to the install destination directory @@ -162,6 +163,7 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: ------bin ------include ------lib + --------plugins ------share On Mac you will find HDF5-1.15."X"-Darwin.dmg in the myhdfstuff folder. Click @@ -173,6 +175,7 @@ To build HDF5 with the SZIP and ZLIB external libraries you will need to: ------bin ------include ------lib + --------plugins ------share By default the installation will create the bin, include, lib and cmake @@ -239,6 +242,7 @@ Notes: This short set of instructions is written for users who want to ------bin ------include ------lib + --------plugins ------cmake On Linux, change to the install destination directory @@ -257,6 +261,7 @@ Notes: This short set of instructions is written for users who want to ------bin ------include ------lib + --------plugins ------share On Mac you will find HDF5-1.15."X"-Darwin.dmg in the build folder. Click @@ -268,6 +273,7 @@ Notes: This short set of instructions is written for users who want to ------bin ------include ------lib + --------plugins ------share @@ -411,10 +417,8 @@ IV. Further considerations Notes: CMake and HDF5 1. Using CMake for building and using HDF5 is under active development. - While we have attempted to provide error-free files, please - understand that development with CMake has not been extensively - tested outside of HDF. The CMake specific files may change - before the next release. + We have attempted to provide error-free files. The CMake specific + files may change before the next release. 2. CMake support for HDF5 development should be usable on any system where CMake is supported. Please send us any comments on @@ -587,6 +591,11 @@ These five steps are described in detail below. set (BLOSC2_TGZ_ORIGPATH "https://github.com/Blosc/c-blosc2/archive/refs/tags" CACHE STRING "Use PLUGINS from original location" FORCE) set (BLOSC2_TGZ_NAME "c-blosc2-2.14.4.tar.gz" CACHE STRING "Use BLOSC2 from compressed file" FORCE) set (BLOSC2_PACKAGE_NAME "blosc2" CACHE STRING "Name of BLOSC2 package" FORCE) + set (BLOSC2_ZLIB_GIT_URL "https://github.com/madler/zlib.git" CACHE STRING "Use ZLIB from GitHub repository" FORCE) + set (BLOSC2_ZLIB_GIT_BRANCH "develop" CACHE STRING "" FORCE) + set (BLOSC2_ZLIB_TGZ_ORIGPATH "https://github.com/madler/zlib/releases/download/v1.3" CACHE STRING "Use PLUGINS from original location" FORCE) + set (BLOSC2_ZLIB_TGZ_NAME "zlib-1.3.tar.gz" CACHE STRING "Use ZLib from compressed file" FORCE) + set (BLOSC2_ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of BLOSC2_ZLIB package" FORCE) ######## # bzip2 ######## @@ -663,7 +672,7 @@ These five steps are described in detail below. 2.1 Visual CMake users, click the Configure button. 
If this is the first time you are running cmake-gui in this directory, you will be prompted for the - generator you wish to use (for example on Windows, Visual Studio 12). + generator you wish to use (for example on Windows, Visual Studio 14). CMake will read in the CMakeLists.txt files from the source directory and display options for the HDF5 project. After the first configure you can adjust the cache settings and/or specify the locations of other programs. @@ -1124,7 +1133,6 @@ Using individual command presets (where is GNUC or MSVC or Clan ctest --preset ci-StdShar- cpack --preset ci-StdShar- - Using the workflow preset to configure, build, test and package the standard configuration: change directory to the hdf5 source folder execute "cmake --workflow --preset ci-StdShar- --fresh" diff --git a/release_docs/INSTALL_parallel b/release_docs/INSTALL_parallel index 9eb486f79d2..e2ac46a471d 100644 --- a/release_docs/INSTALL_parallel +++ b/release_docs/INSTALL_parallel @@ -90,7 +90,7 @@ nodes. They would probably work for other Cray systems but have not been verified. Obtain the HDF5 source code: - https://portal.hdfgroup.org/display/support/Downloads + https://support.hdfgroup.org/downloads/index.html The entire build process should be done on a MOM node in an interactive allocation and on a file system accessible by all compute nodes. Request an interactive allocation with qsub: diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index aa681ddf791..8d5615fcc11 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -15,16 +15,16 @@ final release. Links to HDF5 documentation can be found on: - https://portal.hdfgroup.org/documentation/ + https://support.hdfgroup.org/releases/hdf5/latest-docs.html The official HDF5 releases can be obtained from: - https://www.hdfgroup.org/downloads/hdf5/ + https://support.hdfgroup.org/downloads/index.html Changes from Release to Release and New Features in the HDF5-1.16.x release series can be found at: - https://portal.hdfgroup.org/documentation/hdf5-docs/release_specific_info.html + https://support.hdfgroup.org/releases/hdf5/documentation/release_specific_info.md If you have any questions or comments, please send them to the HDF Help Desk: @@ -47,6 +47,12 @@ New Features Configuration: ------------- + - Added signed Windows msi binary and signed Apple dmg binary files. + + The release process now provides signed Windows and Apple installation + binaries in addition to the debian and rpm installation binaries. Also + these installer files are no longer compressed into packaged archives. + - Added configuration option for internal threading/concurrency support: CMake: HDF5_ENABLE_THREADS (ON/OFF) (Default: ON) @@ -823,6 +829,37 @@ Bug Fixes since HDF5-1.14.0 release =================================== Library ------- + - Fixed a bug with large external datasets + + When performing a large I/O on an external dataset, the library would only + issue a single read or write system call. This could cause errors or cause + the data to be incorrect. These calls do not guarantee that they will + process the entire I/O request, and may need to be called multiple times + to complete the I/O, advancing the buffer and reducing the size by the + amount actually processed by read or write each time. Implemented this + algorithm for external datasets in both the read and write cases. 
+ + Fixes GitHub #4216 + Fixes h5py GitHub #2394 + + - Fixed a bug in the Subfiling VFD that could cause a buffer over-read + and memory allocation failures + + When performing vector I/O with the Subfiling VFD, making use of the + vector I/O size extension functionality could cause the VFD to read + past the end of the "I/O sizes" array that is passed in. When an entry + in the "I/O sizes" array has the value 0 and that entry is at an array + index greater than 0, this signifies that the value in the preceding + array entry should be used for the rest of the I/O vectors, effectively + extending the last valid I/O size across the remaining entries. This + allows an application to save a bit on memory by passing in a smaller + "I/O sizes" array. The Subfiling VFD didn't implement a check for this + functionality in the portion of the code that generates I/O vectors, + causing it to read past the end of the "I/O sizes" array when it was + shorter than expected. This could also result in memory allocation + failures, as the nearby memory allocations are based off the values + read from that array, which could be uninitialized. + - Fixed H5Rget_attr_name to return the length of the attribute's name without the null terminator @@ -1767,6 +1804,12 @@ Bug Fixes since HDF5-1.14.0 release Configuration ------------- + - Changed name of libhdf5hl_fortran installed by autotools to libhdf5_hl_fortran. The + new name is consistent with the name of the lib when installed by CMake and with the + other hl libs. + + Fixes GitHub issue #4811 + - Fixed usage issue with FindZLIB.cmake module When building HDF5 with CMake and relying on the FindZLIB.cmake module, diff --git a/release_docs/RELEASE_PROCESS.md b/release_docs/RELEASE_PROCESS.md index 047183b6026..c3e106d267a 100644 --- a/release_docs/RELEASE_PROCESS.md +++ b/release_docs/RELEASE_PROCESS.md @@ -18,7 +18,7 @@ Maintenance releases are always forward compatible with regards to the HDF5 file - HDF5 libraries and command line utilities can access files created by future maintenance versions of the library. Note that maintenance releases are NOT guaranteed to be interface-compatible, meaning that, on occasion, application source code will need updated and re-compiled against a new maintenance release when the interface changes. Interface changes are only made when absolutely necessary as deemed by the HDF5 product manager(s), and interface compatibility reports are published with each release to inform customers and users of any incompatibilities in the interface. -For more information on the HDF5 versioning and backward and forward compatibility issues, see the [API Compatibility Macros](https://hdfgroup.github.io/hdf5/develop/api-compat-macros.html) on the public website. +For more information on the HDF5 versioning and backward and forward compatibility issues, see the [API Compatibility Macros][u13] on the public website. ## Participants: - Product Manager — The individual responsible for the overall direction and development of a software product at The HDF Group. @@ -35,41 +35,42 @@ For more information on the HDF5 versioning and backward and forward compatibili ### 3. Prepare Release Notes (Release Manager) 1. Confirm that all non-trivial changes made to the source are reflected in the release notes. 
Verify the following: - [HDF5 Milestones Projects](https://github.com/HDFGroup/hdf5/milestones) - - Each entry in [RELEASE.txt](https://github.com/HDFGroup/hdf5/blob/develop/release_docs/RELEASE.txt) traces to one or more resolved GH issues marked with FixVersion="X.Y.Z". - - Each resolved GH milestone issue traces to an entry in [RELEASE.txt](https://github.com/HDFGroup/hdf5/blob/develop/release_docs/RELEASE.txt). + - Each entry in [RELEASE.txt][u1] traces to one or more resolved GH issues marked with FixVersion="X.Y.Z". + - Each resolved GH milestone issue traces to an entry in [RELEASE.txt][u1]. - Each resolved GH milestone issue traces to one or more revisions to the HDF5 source. - Each resolved GH milestone issue traces to one or more pull requests. -2. For each previously authored KNOWN ISSUE in the [RELEASE.txt](https://github.com/HDFGroup/hdf5/blob/develop/release_docs/RELEASE.txt), if the issue has been resolved or can no longer be confirmed, remove the issue from the [RELEASE.txt](https://github.com/HDFGroup/hdf5/blob/develop/release_docs/RELEASE.txt). +2. For each previously authored KNOWN ISSUE in the [RELEASE.txt][u1], if the issue has been resolved or can no longer be confirmed, remove the issue from the [RELEASE.txt][u1]. - Document any new known issues at the top of the list. -3. Update the TESTED CONFIGURATION FEATURES SUMMARY in [RELEASE.txt](https://github.com/HDFGroup/hdf5/blob/develop/release_docs/RELEASE.txt) to correspond to features and options that have been tested during the maintenance period by the automated daily regression tests. +3. Update the TESTED CONFIGURATION FEATURES SUMMARY in [RELEASE.txt][u1] to correspond to features and options that have been tested during the maintenance period by the automated daily regression tests. - **See: Testing/Testing Systems(this is a page in confluence)** -4. Update current compiler information for each platform in the PLATFORMS TESTED section of [RELEASE.txt](https://github.com/HDFGroup/hdf5/blob/develop/release_docs/RELEASE.txt). -5. Review the [RELEASE.txt](https://github.com/HDFGroup/hdf5/blob/develop/release_docs/RELEASE.txt) for formatting and language to verify that it corresponds to guidelines found in **[Writing Notes in a RELEASE.txt(this is missing)]()** File. -6. Review and update, if needed, the [README](https://github.com/HDFGroup/hdf5/blob/develop/README.md) and [COPYING](https://github.com/HDFGroup/hdf5/blob/develop/COPYING) files. -7. Review and update all INSTALL_* files in [release_docs](https://github.com/HDFGroup/hdf5/tree/develop/release_docs), if needed. - - [INSTALL](https://github.com/HDFGroup/hdf5/blob/develop/release_docs/INSTALL) should be general info and not require extensive changes - - [INSTALL_Autotools.txt](https://github.com/HDFGroup/hdf5/blob/develop/release_docs/INSTALL_Autotools.txt) are the instructions for building under autotools. - - [INSTALL_CMake.txt](https://github.com/HDFGroup/hdf5/blob/develop/release_docs/INSTALL_CMake.txt) are the instructions for building under CMake. +4. Update current compiler information for each platform in the PLATFORMS TESTED section of [RELEASE.txt][u1]. +5. Review the [RELEASE.txt][u1] for formatting and language to verify that it corresponds to guidelines found in **[Writing Notes in a RELEASE.txt(this is missing)]()** File. +6. Review and update, if needed, the [README][u2] and [COPYING][u3] files. +7. 
Review and update all INSTALL_* files in [release_docs][u4], if needed. + - [INSTALL][u5] should be general info and not require extensive changes + - [INSTALL_Autotools.txt][u6] are the instructions for building under autotools. + - [INSTALL_CMake.txt][u7] are the instructions for building under CMake. ### 4. Freeze Code (Release Manager | Test Automation Team) 1. Transition from performing maintenance on software to preparing for its delivery. 2. A few days before the code freeze, announce (via a product's developer mailing list and during team meetings) the pending freeze of the code for the release. On the day of the code freeze, send a "no more commits" message for the software being released and any third party software we develop that it depends on, as well as a "no more upgrades" message for other third party software the release depends on. - - Recently we haven’t announced a code freeze since it doesn’t take long to create the release branch and the support branch doesn’t need to remain frozen once the release branch is created. There are a few things that can be done on the support branch before the release branch is created, in particular updating the .so numbers. -3. Move all unresolved Milestone issues to the next release version in GitHub. -4. Verify that frozen code branch satisfies all existing regression test cases, and give the 'OK' to the release coordinator once all daily test configurations are passing as expected after the date of the code freeze. If there are failing tests after the code freeze date, coordinate with maintainers responsible for the failures to ensure that either the changes causing the failures are corrected or reverted. -5. Verify release branches for third-party software used: SZIP, ZLIB, and Plugins; and announce release versions to hdf5lib@lists.hdfgroup.org. + - Recently we haven’t announced a code freeze since it doesn’t take long to create the release branch and the support branch doesn’t need to remain frozen once the release branch is created. There are a few things that can be done on the support branch before the release branch is created, in particular updating the .so numbers. +3. Be sure to complete all four steps to update so numbers for each deployed lib file in the process described in config/lt_vers.am and check that the .so numbers for lib files in binaries correctly indicate compatibility status with the previous release. +4. Move all unresolved Milestone issues to the next release version in GitHub. +5. Verify that frozen code branch satisfies all existing regression test cases, and give the 'OK' to the release coordinator once all daily test configurations are passing as expected after the date of the code freeze. If there are failing tests after the code freeze date, coordinate with maintainers responsible for the failures to ensure that either the changes causing the failures are corrected or reverted. +6. Verify release branches for third-party software used: SZIP, ZLIB, and Plugins; and announce release versions to hdf5lib@hdfgroup.org. ### 5. Update Interface Version (Release Manager | Product Manager) 1. Verify interface additions, changes, and removals, and update the shared library interface version number. 2. Execute the CI snapshot workflow. - - Actions - “[hdf5 release build](https://github.com/HDFGroup/hdf5/blob/develop/.github/workflows/release.yml)” workflow and use the defaults. + - Actions - “[hdf5 release build][u8]” workflow and use the defaults. 3. 
Download and inspect release build source and binary files. Downloaded source files should build correctly, one or more binaries should install and run correctly. There should be nothing missing nor any extraneous files that aren’t meant for release. -4. Verify the interface compatibility reports between the current source and the previous release on the Github [Snapshots](https://github.com/HDFGroup/hdf5/releases/tag/snapshot-1.14) page. - - The compatibility reports are produced by the CI and are viewable in the Github [Releases/snapshot](https://github.com/HDFGroup/hdf5/releases/tag/snapshot) section. -5. Verify the interface compatibility reports between the current source and the previous release on the Github [Snapshots](https://github.com/HDFGroup/hdf5/releases/tag/snapshot-1.14) page. - - The compatibility reports are produced by the CI and are viewable in the Github [Releases/snapshot](https://github.com/HDFGroup/hdf5/releases/tag/snapshot) section. +4. Verify the interface compatibility reports between the current source and the previous release on the Github [Snapshots]u14] page. + - The compatibility reports are produced by the CI and are viewable in the Github [Releases/snapshot][u15] section. +5. Verify the interface compatibility reports between the current source and the previous release on the Github [Snapshots][u14] page. + - The compatibility reports are produced by the CI and are viewable in the Github [Releases/snapshot][u15] section. 6. Confirm the necessity of and approve of any interface-breaking changes. If any changes need to be reverted, task the developer who made the change to do so as soon as possible. If a change is reverted, return to the previous step and regenerate the compatibility report after the changes is made. Otherwise, continue to the next step. -7. Update the .so version numbers in the [config/lt_vers.am](https://github.com/HDFGroup/hdf5/blob/develop/config/lt_vers.am) file in the support branch according to [libtool's library interface version](https://www.gnu.org/software/libtool/manual/libtool.html#Versioning) scheme. +7. Update the .so version numbers in the [config/lt_vers.am][u9] file in the support branch according to [libtool's library interface version](https://www.gnu.org/software/libtool/manual/libtool.html#Versioning) scheme. - See [Updating version info (Libtool)](https://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html#Updating-version-info) for rules to help update library version numbers. 8. After the release branch has been created, run `./autogen.sh` to regenerate build system files on the release branch and commit the changes. @@ -83,21 +84,21 @@ For more information on the HDF5 versioning and backward and forward compatibili - or create the new branch in GitHub GUI. 4. Check that required CMake files point to the specific versions of the third-party software (szip, zlib and plugins) that they depend on. - Update as needed. -5. Change the **support** branch to X.Y.{Z+1}-1 using the [bin/h5vers](https://github.com/HDFGroup/hdf5/blob/develop/bin/h5vers) script: +5. Change the **support** branch to X.Y.{Z+1}-1 using the [bin/h5vers][u10] script: - `$ git checkout hdf5_X_Y` - `$ bin/h5vers -s X.Y.{Z+1}-1;` - `$ git commit -m "Updated support branch version number to X.Y.{Z+1}-1"` - `$ git push` -6. 
Change the **release preparation branch**'s version number to X.Y.Z-{SR+1} using the [bin/h5vers](https://github.com/HDFGroup/hdf5/blob/develop/bin/h5vers) script: +6. Change the **release preparation branch**'s version number to X.Y.Z-{SR+1} using the [bin/h5vers][u10]/bin/h5vers script: - `$ git checkout hdf5_X_Y_Z;` - `$ bin/h5vers -s X.Y.Z-{SR+1};` - `$ git commit -m "Updated release preparation branch version number to X.Y.Z-{SR+1}"` - `$ git push` 7. Update default configuration mode - `$ git checkout hdf5_X_Y_Z;` and `$ bin/switch_maint_mode -disable ./configure.ac` to disable `AM_MAINTAINER_MODE`. - - Need to set option `HDF5_GENERATE_HEADERS` to `OFF`, currently in line 996 of [src/CMakeLists.txt](https://github.com/HDFGroup/hdf5/blob/develop/src/CMakeLists.txt). - - Change the **release preparation branch**'s (i.e. hdf5_X_Y_Z) default configuration mode from development to production in [configure.ac](https://github.com/HDFGroup/hdf5/blob/develop/configure.ac). - - Find “Determine build mode” in [configure.ac](https://github.com/HDFGroup/hdf5/blob/develop/configure.ac). + - Need to set option `HDF5_GENERATE_HEADERS` to `OFF`, currently in line 996 of [src/CMakeLists.txt][u11]. + - Change the **release preparation branch**'s (i.e. hdf5_X_Y_Z) default configuration mode from development to production in [configure.ac][u12]. + - Find “Determine build mode” in [configure.ac][u12]. - Change `default=debug` to `default=production` at the bottom of the `AS_HELP_STRING` for `--enable-build-mode`. - Under `if test "X-$BUILD_MODE" = X- ; then` change `BUILD_MODE=debug` to `BUILD_MODE=production`. - Run `sh ./autogen.sh` to regenerate the UNIX build system files and commit the changes. (use `git status --ignored` to see the changes and `git add -f` to add all files. First delete any new files not to be committed, notably `src/H5public.h~` and `autom4te.cache/`.) @@ -114,19 +115,50 @@ For more information on the HDF5 versioning and backward and forward compatibili 7. Choose the release branch 8. Change ‘Release version tag’ name to 'hdf5_X.Y.Z.P' - P is some pre-release number. -9. Send a message to the HDF forum indicating that a pre-release source package is available for testing at and that feedback from the user community on their test results is being accepted. +9. Send a message to the HDF forum indicating that a pre-release source package is available for testing at /{hdf5-X.Y.Z-P}> and that feedback from the user community on their test results is being accepted. 10. Contact paying clients who are interested in testing the pre-release source package and inform them that it is available for testing and that feedback on their test results of the pre-release is appreciated. 11. This should be automated and currently github binaries are not signed. - Follow the [How to sign binaries with digital certificates(this is missing)]() work instructions to sign each Windows and Mac binary package with a digital certificate. 12. Once binaries are ready to be tested, send an e-mail notification or update the Confluence test dashboard page indicating source and binary test assignments and when results should be made available. 13. Use the pre-release source packages to build and test HDF5 on assigned platforms by hand. Build both shared and static libraries, Fortran, C++, and szip, and any additional configurations required on specific remote platforms based on customer support needs. 14. 
Use the pre-release binary packages found in /mnt/scr1/pre-release/hdf5/vXYZ/pre-\/binaries/{UNIX, Windows} to test according to the binary testing procedures for your assigned platforms. -15. Scripted Testing: - - UNIX: [Scripted Binary Testing of HDF5 on UNIX systems (this is missing)]() - - Windows: [Testing HDF5 Binaries(this is missing)]() +15. Initial Testing: + - Installation Using Installer Binary + - Execute the install package + - Follow prompts + - Uncompress Directory Image Binary + - Extract the package + - After Installation + - The examples folder, HDF5Examples, located in the HDF5 install folder, can be built and tested with CMake and the supplied + HDF5_Examples.cmake file. The HDF5_Examples.cmake expects HDF5 to have been installed in the default location with the same compilers (see the + libhdf5.settings file in the lib install folder). Also, the CMake utility should be installed. + + - To test the installation with the examples: + - Create a directory to run the examples. + - Copy HDF5Examples folder to this directory. + - Copy CTestScript.cmake to this directory. + - Copy HDF5_Examples.cmake to this directory. + - Copy HDF5_Examples_options.cmake to this directory. + - The default source folder is defined as "HDF5Examples". It can be changed with the CTEST_SOURCE_NAME script option. + - The default installation folder should be visible in the script. It can be changed with the INSTALLDIR script option. + - The default ctest configuration is defined as "Release". It can be changed + with the CTEST_CONFIGURATION_TYPE script option. Note that this must + be the same as the value used with the -C command line option. + - The default build configuration is defined to build and use static libraries. + Shared libraries can be used with the STATICONLYLIBRARIES script option set to "NO". + - Other options can be changed by editing the HDF5_Examples_options.cmake file. + - If the defaults are okay, execute from this directory: + - ctest -S HDF5_Examples.cmake -C Release -V -O test.log + - If the defaults need change, execute from this directory: + - ctest -S HDF5_Examples.cmake,CTEST_SOURCE_NAME=MyExamples,INSTALLDIR=MyLocation -C Release -V -O test.log + - When executed, the ctest script will save the results to the log file, test.log, as + indicated by the ctest command. If you wish to see more build and test information, + add "-VV" to the ctest command. The output should show: + 100% tests passed, 0 tests failed out of 206 (all options). + - For more information see USING_CMake_Examples.txt in the install folder. 16. Manual Testing (i.e. verifying correct test outcomes via visual inspection): - - Use this if UNIX test script is not reporting correct results, yet binaries look OK. - - UNIX: [Manual Binary Testing of HDF5 on Unix systems (this is missing)]() + - Inspect text documents for correct versions and names. + - Inspect the doxygen files in the share/html directory; open index.html. 17. Update the test results Confluence page with status/outcome of all test assignments. 18. If any test source (hdf-forum, clients, internal testers, automated regression suite) identifies any issues: - a) Enter the issue in JIRA summarizing the failure if it is not already there. @@ -137,7 +169,7 @@ For more information on the HDF5 versioning and backward and forward compatibili ### 8. Finalize Release Notes (Release Manager) 1.
Perform a final review of release notes and ensure that any new changes made to the source, any new known issues discovered, and any additional tests run since the code freeze have been reflected in RELEASE.txt and other appropriate in-source documentation files (INSTALL_*, etc.). (Refer to the sub-steps of step 3 for what to check). -2. Update the [RELEASE.txt](https://github.com/HDFGroup/hdf5/blob/develop/release_docs/RELEASE.txt) in the **support** branch (i.e. hdf5_X_Y) to remove entries in “Bugs fixed” and “New Features” sections and increment the version number for the following release (“Bug fixes since X.Y.Z” - occurs twice). +2. Update the [RELEASE.txt][u1] in the **support** branch (i.e. hdf5_X_Y) to remove entries in “Bugs fixed” and “New Features” sections and increment the version number for the following release (“Bug fixes since X.Y.Z” - occurs twice). - `$ git checkout hdf5_X_Y` - `$ vi RELEASE.txt # update RELEASE.txt to clear it out` - `$ git commit -m "Reset RELEASE.txt in preparation for the next release."` @@ -161,3 +193,19 @@ For more information on the HDF5 versioning and backward and forward compatibili ### 11. Conduct Release Retrospective (Release Manager) 1. Schedule time and solicit comments from retrospective 2. Identify issues and document them + +[u1]: https://github.com/HDFGroup/hdf5/blob/develop/release_docs/RELEASE.txt +[u2]: https://github.com/HDFGroup/hdf5/blob/develop/README.md +[u3]: https://github.com/HDFGroup/hdf5/blob/develop/COPYING +[u4]: https://github.com/HDFGroup/hdf5/blob/develop/release_docs +[u5]: https://github.com/HDFGroup/hdf5/blob/develop/release_docs/INSTALL +[u6]: https://github.com/HDFGroup/hdf5/blob/develop/release_docs/INSTALL_Autotools.txt +[u7]: https://github.com/HDFGroup/hdf5/blob/develop/release_docs/INSTALL_CMake.txt +[u8]: https://github.com/HDFGroup/hdf5/blob/develop/.github/workflows/release.yml +[u9]: https://github.com/HDFGroup/hdf5/blob/develop/config/lt_vers.am +[u10]: https://github.com/HDFGroup/hdf5/blob/develop/bin/h5vers +[u11]: https://github.com/HDFGroup/hdf5/blob/develop/src/CMakeLists.txt +[u12]: https://github.com/HDFGroup/hdf5/blob/develop/configure.ac +[u13]: https://hdfgroup.github.io/hdf5/develop/api-compat-macros.html +[u14]: https://github.com/HDFGroup/hdf5/releases/tag/snapshot-1.14 +[u15]: https://github.com/HDFGroup/hdf5/releases/tag/snapshot diff --git a/release_docs/USING_CMake_Examples.txt b/release_docs/USING_CMake_Examples.txt index f31a922804d..31bb4dc92c4 100644 --- a/release_docs/USING_CMake_Examples.txt +++ b/release_docs/USING_CMake_Examples.txt @@ -22,7 +22,7 @@ I. Preconditions 1. We suggest you obtain the latest CMake for your platform from the Kitware web site. The HDF5 1.15.x product requires a minimum CMake version - of 3.18. If you are using VS2022, the minimum version is 3.21. + of 3.18. If you are using VS2022, the minimum CMake version is 3.21. 2. You have installed the HDF5 library built with CMake, by executing the HDF Install Utility (the *.msi file in the binary package for @@ -56,6 +56,11 @@ Default installation process: The default ctest configuration is defined as "Release". It can be changed with the CTEST_CONFIGURATION_TYPE script option. Note that this must be the same as the value used with the -C command line option. + On Windows, you can set the CTEST_VSVERS script option to either + 64_VS2022 or 64_VS2019. 
Alternately, you can set the script + CTEST_CMAKE_GENERATOR option to "Visual Studio 16 2019" or "Visual Studio 17 2022", + and the CMAKE_GENERATOR_ARCHITECTURE script option to "x64". + The default build configuration is defined to build and use static libraries. Shared libraries and other options can be changed by editing the @@ -69,21 +74,20 @@ Default installation process: When executed, the ctest script will save the results to the log file, test.log, as indicated by the ctest command. If you wish to see more build and test information, add "-VV" to the ctest command. The output should show; - 100% tests passed, 0 tests failed out of 156. + 100% tests passed, 0 tests failed out of 206. ======================================================================== III. Defaults in the HDF5_Examples_options.cmake file ======================================================================== -#### DEFAULT: ### -#### BUILD_SHARED_LIBS:BOOL=OFF ### -#### HDF_BUILD_C:BOOL=ON ### -#### HDF_BUILD_CXX:BOOL=OFF ### -#### HDF_BUILD_FORTRAN:BOOL=OFF ### -#### HDF_BUILD_JAVA:BOOL=OFF ### -#### HDF_BUILD_FILTERS:BOOL=OFF ### -#### BUILD_TESTING:BOOL=OFF ### -#### HDF_ENABLE_PARALLEL:BOOL=OFF ### -#### HDF_ENABLE_THREADSAFE:BOOL=OFF ### +#### DEFAULT: ### +#### BUILD_SHARED_LIBS:BOOL=OFF ### +#### H5EX_BUILD_C:BOOL=ON ### +#### H5EX_BUILD_CXX:BOOL=OFF ### +#### H5EX_BUILD_FORTRAN:BOOL=OFF ### +#### H5EX_BUILD_JAVA:BOOL=OFF ### +#### H5EX_BUILD_FILTERS:BOOL=OFF ### +#### H5EX_BUILD_TESTING:BOOL=OFF ### +#### H5EX_ENABLE_PARALLEL:BOOL=OFF ### diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 2fadc0e039a..47fc1dcf751 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -650,6 +650,7 @@ set (H5TS_SOURCES ${HDF5_SRC_DIR}/H5TSonce.c ${HDF5_SRC_DIR}/H5TSpool.c ${HDF5_SRC_DIR}/H5TSpthread.c + ${HDF5_SRC_DIR}/H5TSrec_rwlock.c ${HDF5_SRC_DIR}/H5TSrwlock.c ${HDF5_SRC_DIR}/H5TSsemaphore.c ${HDF5_SRC_DIR}/H5TSthread.c diff --git a/src/H5A.c b/src/H5A.c index 651ed13c256..10475919fbe 100644 --- a/src/H5A.c +++ b/src/H5A.c @@ -983,7 +983,7 @@ H5A__read_api_common(hid_t attr_id, hid_t dtype_id, void *buf, void **token_ptr, HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "buf parameter can't be NULL"); /* Get attribute object pointer */ - if (NULL == (*vol_obj_ptr = (H5VL_object_t *)H5I_object_verify(attr_id, H5I_ATTR))) + if (NULL == (*vol_obj_ptr = H5VL_vol_object_verify(attr_id, H5I_ATTR))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an attribute"); /* Read the attribute data */ @@ -1090,7 +1090,7 @@ H5Aget_space(hid_t attr_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check arguments */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(attr_id, H5I_ATTR))) + if (NULL == (vol_obj = H5VL_vol_object_verify(attr_id, H5I_ATTR))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "not an attribute"); /* Set up VOL callback arguments */ @@ -1134,7 +1134,7 @@ H5Aget_type(hid_t attr_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check arguments */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(attr_id, H5I_ATTR))) + if (NULL == (vol_obj = H5VL_vol_object_verify(attr_id, H5I_ATTR))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "not an attribute"); /* Set up VOL callback arguments */ @@ -1183,7 +1183,7 @@ H5Aget_create_plist(hid_t attr_id) assert(H5P_LST_ATTRIBUTE_CREATE_ID_g != -1); /* Check arguments */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(attr_id, H5I_ATTR))) + if (NULL == (vol_obj = H5VL_vol_object_verify(attr_id, H5I_ATTR))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 
H5I_INVALID_HID, "not an attribute"); /* Set up VOL callback arguments */ @@ -1218,7 +1218,7 @@ H5Aget_create_plist(hid_t attr_id) DESCRIPTION This function retrieves the name of an attribute for an attribute ID. - Up to 'buf_size' characters are stored in 'buf' followed by a '\0' string + Up to 'buf_size'-1 characters are stored in 'buf' followed by a '\0' string terminator. If the name of the attribute is longer than 'buf_size'-1, the string terminator is stored in the last position of the buffer to properly terminate the string. @@ -1234,7 +1234,7 @@ H5Aget_name(hid_t attr_id, size_t buf_size, char *buf /*out*/) FUNC_ENTER_API((-1)) /* check arguments */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(attr_id, H5I_ATTR))) + if (NULL == (vol_obj = H5VL_vol_object_verify(attr_id, H5I_ATTR))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, (-1), "not an attribute"); if (!buf && buf_size) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, (-1), "buf cannot be NULL if buf_size is non-zero"); @@ -1258,20 +1258,32 @@ H5Aget_name(hid_t attr_id, size_t buf_size, char *buf /*out*/) FUNC_LEAVE_API(ret_value) } /* H5Aget_name() */ -/*------------------------------------------------------------------------- - * Function: H5Aget_name_by_idx - * - * Purpose: Retrieve the name of an attribute, according to the - * order within an index. - * - * Same pattern of behavior as H5Iget_name. - * - * Return: Success: Non-negative length of name, with information - * in NAME buffer - * Failure: Negative - * - *------------------------------------------------------------------------- - */ +/*-------------------------------------------------------------------------- + NAME + H5Aget_name_by_idx + PURPOSE + Retrieve the name of an attribute, according to the order within an index. + USAGE + ssize_t H5Aget_name_by_idx(loc_id, obj_name, idx_type, order, n, name, size, lapl_id) + hid_t loc_id; IN: Object that attribute is attached to + const char *obj_name; IN: Name of the object relative to location + H5_index_t idx_type; IN: Type of index to use + H5_iter_order_t order; IN: Order to iterate over index + hsize_t n; IN: Index (0-based) of attribute to retrieve + char *name; IN: Buffer to store the name in + size_t size; IN: The size of the buffer to store the name in. + hid_t lapl_id; IN: Link access property list + RETURNS + This function returns the length of the attribute's name (which may be + longer than 'buf_size') on success or negative for failure. + + DESCRIPTION + This function retrieves the name of an attribute given its index. Up + to 'buf_size'-1 characters are stored in 'buf' followed by a '\0' string + terminator. If the name of the attribute is longer than 'buf_size'-1, + the string terminator is stored in the last position of the buffer to + properly terminate the string. 
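A minimal sketch, assuming an open attribute identifier attr_id (the helper name and error handling are illustrative, not from the HDF5 sources), of the two-call pattern the H5Aget_name documentation above describes: query the name length with a zero-sized buffer, then retrieve the name into a buffer of length + 1; the same pattern applies to H5Aget_name_by_idx.

    #include <stdlib.h>
    #include "hdf5.h"

    /* Return a freshly allocated copy of the attribute's name, or NULL on error.
     * The caller is responsible for free()'ing the result. */
    static char *attr_name_alloc(hid_t attr_id)
    {
        char   *name = NULL;
        ssize_t len  = H5Aget_name(attr_id, 0, NULL); /* length only, nothing copied */

        if (len >= 0)
            name = (char *)malloc((size_t)len + 1);   /* room for the '\0' terminator */
        if (name != NULL && H5Aget_name(attr_id, (size_t)len + 1, name) < 0) {
            free(name);
            name = NULL;
        }
        return name;
    }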
+--------------------------------------------------------------------------*/ ssize_t H5Aget_name_by_idx(hid_t loc_id, const char *obj_name, H5_index_t idx_type, H5_iter_order_t order, hsize_t n, char *name /*out*/, size_t size, hid_t lapl_id) @@ -1352,7 +1364,7 @@ H5Aget_storage_size(hid_t attr_id) FUNC_ENTER_API(0) /* Check arguments */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(attr_id, H5I_ATTR))) + if (NULL == (vol_obj = H5VL_vol_object_verify(attr_id, H5I_ATTR))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not an attribute"); /* Set up VOL callback arguments */ @@ -1390,7 +1402,7 @@ H5Aget_info(hid_t attr_id, H5A_info_t *ainfo /*out*/) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(attr_id, H5I_ATTR))) + if (NULL == (vol_obj = H5VL_vol_object_verify(attr_id, H5I_ATTR))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an attribute"); if (!ainfo) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "attribute_info parameter cannot be NULL"); diff --git a/src/H5Amodule.h b/src/H5Amodule.h index 18fabe56f58..42715535367 100644 --- a/src/H5Amodule.h +++ b/src/H5Amodule.h @@ -59,7 +59,7 @@ * attached directly to that object * * \subsection subsec_error_H5A Attribute Function Summaries - * @see H5A reference manual + * see @ref H5A reference manual * * \subsection subsec_attribute_program Programming Model for Attributes * @@ -98,26 +98,6 @@ * \li Close the attribute * \li Close the primary data object (if appropriate) * - * - * - * - * - * - * - * - * - * - * - *
    CreateUpdate
    - * \snippet{lineno} H5A_examples.c create - * - * \snippet{lineno} H5A_examples.c update - *
    ReadDelete
    - * \snippet{lineno} H5A_examples.c read - * - * \snippet{lineno} H5A_examples.c delete - *
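A minimal sketch, assuming an already-open object identifier obj_id and an illustrative attribute name "answer" (editorial example code, not one of the H5A_examples.c snippets referenced above), of the create/write/read/close steps listed in the programming model:

    #include "hdf5.h"

    /* Create a scalar integer attribute on obj_id, write a value, and read it back. */
    static herr_t attr_round_trip(hid_t obj_id)
    {
        herr_t status = -1;
        int    out    = 42;
        int    in     = 0;
        hid_t  space  = H5Screate(H5S_SCALAR);
        hid_t  attr   = H5Acreate2(obj_id, "answer", H5T_NATIVE_INT, space,
                                   H5P_DEFAULT, H5P_DEFAULT);

        if (space >= 0 && attr >= 0 &&
            H5Awrite(attr, H5T_NATIVE_INT, &out) >= 0 &&
            H5Aread(attr, H5T_NATIVE_INT, &in) >= 0)
            status = 0;

        if (attr >= 0)
            H5Aclose(attr);  /* close the attribute */
        if (space >= 0)
            H5Sclose(space); /* close the dataspace */
        return status;
    }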
    - * * \subsection subsec_attribute_work Working with Attributes * * \subsubsection subsubsec_attribute_work_struct The Structure of an Attribute @@ -376,7 +356,7 @@ * An HDF5 attribute is a small metadata object describing the nature and/or intended usage of a primary data * object. A primary data object may be a dataset, group, or committed datatype. * - * @see sec_attribute + * @see \ref sec_attribute * */ diff --git a/src/H5Apublic.h b/src/H5Apublic.h index 256d19cf355..7c28c0a24a2 100644 --- a/src/H5Apublic.h +++ b/src/H5Apublic.h @@ -501,15 +501,9 @@ H5_DLL herr_t H5Aget_info_by_name(hid_t loc_id, const char *obj_name, const char * value. * * \details H5Aget_name() retrieves the name of an attribute specified by - * the identifier, \p attr_id. Up to \p buf_size characters are - * stored in \p buf followed by a \0 string terminator. If the - * name of the attribute is longer than (\p buf_size -1), the - * string terminator is stored in the last position of the buffer - * to properly terminate the string. + * the identifier, \p attr_id. * - * If the user only wants to retrieve the name length, the - * values 0 and NULL should be passed for the parameters - * \p bufsize and \p buf. + * \details_namelen{attribute,H5Aget_name} * * \since 1.0.0 * @@ -544,10 +538,7 @@ H5_DLL ssize_t H5Aget_name(hid_t attr_id, size_t buf_size, char *buf); * traversal order, and a position in the index, \p idx_type, * \p order and \p n, respectively. * - * If the attribute name's size is unknown, the values 0 and NULL - * can be passed in for the parameters \p size and \p name. The - * function's return value will provide the correct value for - * \p size. + * \details_namelen{attribute,H5Aget_name_by_idx} * * The link access property list, \p lapl_id, may provide * information regarding the properties of links required to access diff --git a/src/H5B.c b/src/H5B.c index 5a7a23853c5..30e39ef71a6 100644 --- a/src/H5B.c +++ b/src/H5B.c @@ -98,10 +98,10 @@ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ -#include "H5Bpkg.h" /* B-link trees */ -#include "H5CXprivate.h" /* API Contexts */ +#include "H5Bpkg.h" /* B-link trees */ +#include "H5CXprivate.h" /* API Contexts */ #include "H5Eprivate.h" /* Error handling */ -#include "H5FLprivate.h" /* Free Lists */ +#include "H5FLprivate.h" /* Free Lists */ #include "H5MFprivate.h" /* File memory management */ #include "H5MMprivate.h" /* Memory management */ @@ -109,7 +109,7 @@ /* Local Macros */ /****************/ #define H5B_SIZEOF_HDR(F) \ - (H5_SIZEOF_MAGIC + /*magic number */ \ + (H5_SIZEOF_MAGIC + /*magic number */ \ 4 + /*type, level, num entries */ \ 2 * H5F_SIZEOF_ADDR(F)) /*left and right sibling addresses */ @@ -234,7 +234,7 @@ H5B_create(H5F_t *f, const H5B_class_t *type, void *udata, haddr_t *addr_p /*out * Cache the new B-tree node. 
*/ if (H5AC_insert_entry(f, H5AC_BT, *addr_p, bt, H5AC__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "can't add B-tree root node to cache"); + HGOTO_ERROR(H5E_BTREE, H5E_CANTINS, FAIL, "can't add B-tree root node to cache"); done: if (ret_value < 0) { @@ -245,7 +245,7 @@ H5B_create(H5F_t *f, const H5B_class_t *type, void *udata, haddr_t *addr_p /*out if (bt) /* Destroy B-tree node */ if (H5B__node_dest(bt) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node"); + HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, FAIL, "unable to destroy B-tree node"); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -539,7 +539,7 @@ H5B_insert(H5F_t *f, const H5B_class_t *type, haddr_t addr, void *udata) /* Insert the object */ if ((int)(my_ins = H5B__insert_helper(f, &bt_ud, type, lt_key, <_key_changed, md_key, udata, rt_key, &rt_key_changed, &split_bt_ud /*out*/)) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to insert key"); + HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert key"); /* Check if the root node split */ if (H5B_INS_NOOP == my_ins) { @@ -585,7 +585,7 @@ H5B_insert(H5F_t *f, const H5B_class_t *type, haddr_t addr, void *udata) /* Move the location of the old root on the disk */ if (H5AC_move_entry(f, H5AC_BT, bt_ud.addr, old_root_addr) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to move B-tree root node"); + HGOTO_ERROR(H5E_BTREE, H5E_CANTMOVE, FAIL, "unable to move B-tree root node"); bt_ud.addr = old_root_addr; /* Update the split b-tree's left pointer to point to the new location */ @@ -609,7 +609,7 @@ H5B_insert(H5F_t *f, const H5B_class_t *type, haddr_t addr, void *udata) /* Insert the modified copy of the old root into the file again */ if (H5AC_insert_entry(f, H5AC_BT, addr, new_root_bt, H5AC__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTFLUSH, FAIL, "unable to add old B-tree root node to cache"); + HGOTO_ERROR(H5E_BTREE, H5E_CANTINS, FAIL, "unable to add old B-tree root node to cache"); done: if (ret_value < 0) @@ -857,8 +857,7 @@ H5B__insert_helper(H5F_t *f, H5B_ins_ud_t *bt_ud, const H5B_class_t *type, uint8 /* Since we are to the left of the leftmost key there must not be a left * sibling */ if (H5_addr_defined(bt->left)) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, H5B_INS_ERROR, - "internal error: likely corrupt key values"); + HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, H5B_INS_ERROR, "internal error: likely corrupt key values"); #endif /* H5_STRICT_FORMAT_CHECKS */ } else if (cmp > 0 && idx + 1 >= bt->nchildren) { @@ -909,8 +908,7 @@ H5B__insert_helper(H5F_t *f, H5B_ins_ud_t *bt_ud, const H5B_class_t *type, uint8 /* Since we are to the right of the rightmost key there must not be a * right sibling */ if (H5_addr_defined(bt->right)) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, H5B_INS_ERROR, - "internal error: likely corrupt key values"); + HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, H5B_INS_ERROR, "internal error: likely corrupt key values"); #endif /* H5_STRICT_FORMAT_CHECKS */ } else if (cmp) { @@ -1215,7 +1213,7 @@ H5B__remove_helper(H5F_t *f, haddr_t addr, const H5B_class_t *type, int level, u H5B__remove_helper(f, bt->child[idx], type, level + 1, H5B_NKEY(bt, shared, idx) /*out*/, lt_key_changed /*out*/, udata, H5B_NKEY(bt, shared, idx + 1) /*out*/, rt_key_changed /*out*/)) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, H5B_INS_ERROR, "key not found in subtree"); + HGOTO_ERROR(H5E_BTREE, H5E_CANTREMOVE, H5B_INS_ERROR, "key not found in subtree"); } else if (type->remove) { /* @@ -1225,7 +1223,7 @@ 
H5B__remove_helper(H5F_t *f, haddr_t addr, const H5B_class_t *type, int level, u */ if ((int)(ret_value = (type->remove)(f, bt->child[idx], H5B_NKEY(bt, shared, idx), lt_key_changed, udata, H5B_NKEY(bt, shared, idx + 1), rt_key_changed)) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, H5B_INS_ERROR, "key not found in leaf node"); + HGOTO_ERROR(H5E_BTREE, H5E_CANTREMOVE, H5B_INS_ERROR, "key not found in leaf node"); } else { /* @@ -1499,7 +1497,7 @@ H5B_remove(H5F_t *f, const H5B_class_t *type, haddr_t addr, void *udata) /* The actual removal */ if (H5B_INS_ERROR == H5B__remove_helper(f, addr, type, 0, lt_key, <_key_changed, udata, rt_key, &rt_key_changed)) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to remove entry from B-tree"); + HGOTO_ERROR(H5E_BTREE, H5E_CANTREMOVE, FAIL, "unable to remove entry from B-tree"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -1550,7 +1548,7 @@ H5B_delete(H5F_t *f, const H5B_class_t *type, haddr_t addr, void *udata) /* Iterate over all children in node, deleting them */ for (u = 0; u < bt->nchildren; u++) if (H5B_delete(f, type, bt->child[u], udata) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "unable to delete B-tree node"); + HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to delete B-tree node"); } /* end if */ else { @@ -1563,7 +1561,7 @@ H5B_delete(H5F_t *f, const H5B_class_t *type, haddr_t addr, void *udata) /* Call user's callback for each entry */ if ((type->remove)(f, bt->child[u], H5B_NKEY(bt, shared, u), <_key_changed, udata, H5B_NKEY(bt, shared, u + 1), &rt_key_changed) < H5B_INS_NOOP) - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "can't remove B-tree node"); + HGOTO_ERROR(H5E_BTREE, H5E_CANTREMOVE, FAIL, "can't remove B-tree node"); } /* end for */ } /* end if */ } /* end else */ @@ -1826,7 +1824,7 @@ H5B__get_info_helper(H5F_t *f, const H5B_class_t *type, haddr_t addr, const H5B_ if (level > 0) { /* Keep following the left-most child until we reach a leaf node. 
*/ if (H5B__get_info_helper(f, type, left_child, info_udata) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "unable to list B-tree node"); + HGOTO_ERROR(H5E_BTREE, H5E_BADITER, FAIL, "unable to list B-tree node"); } /* end if */ done: @@ -1893,13 +1891,13 @@ H5B_get_info(H5F_t *f, const H5B_class_t *type, haddr_t addr, H5B_info_t *bt_inf * *------------------------------------------------------------------------- */ -htri_t +herr_t H5B_valid(H5F_t *f, const H5B_class_t *type, haddr_t addr) { H5B_t *bt = NULL; /* The B-tree */ H5UC_t *rc_shared; /* Ref-counted shared info */ H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ - htri_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h index f93fa9c5d82..f354dea2ac0 100644 --- a/src/H5Bprivate.h +++ b/src/H5Bprivate.h @@ -142,5 +142,5 @@ H5_DLL H5B_shared_t *H5B_shared_new(const H5F_t *f, const H5B_class_t *type, siz H5_DLL herr_t H5B_shared_free(void *_shared); H5_DLL herr_t H5B_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth, const H5B_class_t *type, void *udata); -H5_DLL htri_t H5B_valid(H5F_t *f, const H5B_class_t *type, haddr_t addr); +H5_DLL herr_t H5B_valid(H5F_t *f, const H5B_class_t *type, haddr_t addr); #endif /* H5Bprivate_H */ diff --git a/src/H5Centry.c b/src/H5Centry.c index 6883e897186..1ca7479cf7e 100644 --- a/src/H5Centry.c +++ b/src/H5Centry.c @@ -3131,7 +3131,7 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign else empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - /* try to free up if necceary and if evictions are permitted. Note + /* try to free up if necessary and if evictions are permitted. Note * that if evictions are enabled, we will call H5C__make_space_in_cache() * regardless if the min_free_space requirement is not met. */ diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 208944e1053..6a636aee76c 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -1912,7 +1912,7 @@ typedef struct H5C_tag_info_t { * the hash table. Note that the index_size field (above) * is also the sum of the sizes of all entries in the cache. * Thus we should have the invariant that clean_index_size + - * dirty_index_size == index_size. + * dirty_index_size = index_size. * * WARNING: * The value of the clean_index_size must not be mistaken for @@ -1929,7 +1929,7 @@ typedef struct H5C_tag_info_t { * the hash table. Note that the index_size field (above) * is also the sum of the sizes of all entries in the cache. * Thus we should have the invariant that clean_index_size + - * dirty_index_size == index_size. + * dirty_index_size = index_size. * * dirty_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to * maintain the sum of the sizes of all dirty entries in the @@ -2025,12 +2025,12 @@ typedef struct H5C_tag_info_t { * The cost of maintaining the skip list is significant. As it is only used * on flush and close, it is maintained only when needed. * - * To do this, we add a flag to control maintenanace of the skip list. + * To do this, we add a flag to control maintenance of the skip list. * This flag is initially set to false, which disables all operations * on the skip list. * * At the beginning of either flush or close, we scan the index list, - * insert all dirtly entries in the skip list, and enable operations + * insert all dirty entries in the skip list, and enable operations * on skip list by setting above control flag to true. 
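A minimal sketch, using a hypothetical stand-in struct rather than the real H5C_t fields, of the clean/dirty index size invariant described in the cache comments above; it is illustrative only and not part of this patch.

    #include <assert.h>
    #include <stddef.h>

    /* Hypothetical stand-in for the cache bookkeeping fields discussed above. */
    struct index_sizes {
        size_t index_size;       /* total size of all entries in the index */
        size_t clean_index_size; /* total size of all clean entries        */
        size_t dirty_index_size; /* total size of all dirty entries        */
    };

    /* Every update to the index is expected to preserve: clean + dirty == total. */
    static void check_index_invariant(const struct index_sizes *s)
    {
        assert(s->clean_index_size + s->dirty_index_size == s->index_size);
    }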
* * In the case of a partial flush (i.e. flush tagged entries), we only diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 9f123123bc6..d7065799a67 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -903,7 +903,7 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr, bool was_ * * Note that flush dependencies are used to order flushes within rings. * - * Note also that at the conceptual level, rings are argueably superfluous, + * Note also that at the conceptual level, rings are arguably superfluous, * as a similar effect could be obtained via the flush dependency mechanism. * However, this would require all entries in the cache to participate in a * flush dependency -- with the implied setup and takedown overhead and diff --git a/src/H5D.c b/src/H5D.c index 7416405e7f9..a50a2aceda6 100644 --- a/src/H5D.c +++ b/src/H5D.c @@ -295,7 +295,7 @@ H5Dcreate_anon(hid_t loc_id, hid_t type_id, hid_t space_id, hid_t dcpl_id, hid_t HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, H5I_INVALID_HID, "can't set access property list info"); /* get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); /* Set location parameters */ @@ -559,7 +559,7 @@ H5D__get_space_api_common(hid_t dset_id, void **token_ptr, H5VL_object_t **_vol_ FUNC_ENTER_PACKAGE /* Check args */ - if (NULL == (*vol_obj_ptr = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (*vol_obj_ptr = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid dataset identifier"); /* Set up VOL callback arguments */ @@ -671,7 +671,7 @@ H5Dget_space_status(hid_t dset_id, H5D_space_status_t *allocation /*out*/) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataset identifier"); /* Set up VOL callback arguments */ @@ -709,7 +709,7 @@ H5Dget_type(hid_t dset_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid dataset identifier"); /* Set up VOL callback arguments */ @@ -750,7 +750,7 @@ H5Dget_create_plist(hid_t dset_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid dataset identifier"); /* Set up VOL callback arguments */ @@ -808,7 +808,7 @@ H5Dget_access_plist(hid_t dset_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid dataset identifier"); /* Set up VOL callback arguments */ @@ -852,7 +852,7 @@ H5Dget_storage_size(hid_t dset_id) FUNC_ENTER_API(0) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, 
"invalid dataset identifier"); /* Set up VOL callback arguments */ @@ -893,7 +893,7 @@ H5Dget_offset(hid_t dset_id) FUNC_ENTER_API(HADDR_UNDEF) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, HADDR_UNDEF, "invalid dataset identifier"); /* Set up VOL callback arguments */ @@ -957,7 +957,7 @@ H5D__read_api_common(size_t count, hid_t dset_id[], hid_t mem_type_id[], hid_t m HGOTO_ERROR(H5E_VOL, H5E_CANTALLOC, FAIL, "can't allocate space for object array"); /* Get vol_obj_ptr (return just the first dataset to caller if requested) */ - if (NULL == (*vol_obj_ptr = (H5VL_object_t *)H5I_object_verify(dset_id[0], H5I_DATASET))) + if (NULL == (*vol_obj_ptr = H5VL_vol_object_verify(dset_id[0], H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dset_id is not a dataset ID"); /* Save the connector of the first dataset. Unpack the connector and call @@ -969,7 +969,7 @@ H5D__read_api_common(size_t count, hid_t dset_id[], hid_t mem_type_id[], hid_t m obj[0] = (*vol_obj_ptr)->data; for (i = 1; i < count; i++) { /* Get the object */ - if (NULL == (tmp_vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id[i], H5I_DATASET))) + if (NULL == (tmp_vol_obj = H5VL_vol_object_verify(dset_id[i], H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dset_id is not a dataset ID"); obj[i] = tmp_vol_obj->data; @@ -1181,7 +1181,7 @@ H5Dread_chunk(hid_t dset_id, hid_t dxpl_id, const hsize_t *offset, uint32_t *fil FUNC_ENTER_API(FAIL) /* Check arguments */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dset_id is not a dataset ID"); if (!buf) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "buf cannot be NULL"); @@ -1487,7 +1487,7 @@ H5Dwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *of FUNC_ENTER_API(FAIL) /* Check arguments */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataset ID"); if (!buf) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "buf cannot be NULL"); @@ -1874,7 +1874,7 @@ H5Dvlen_get_buf_size(hid_t dataset_id, hid_t type_id, hid_t space_id, hsize_t *s FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dataset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dataset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataset identifier"); if (H5I_DATATYPE != H5I_get_type(type_id)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid datatype identifier"); @@ -1935,7 +1935,7 @@ H5D__set_extent_api_common(hid_t dset_id, const hsize_t size[], void **token_ptr FUNC_ENTER_PACKAGE /* Check args */ - if (NULL == (*vol_obj_ptr = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (*vol_obj_ptr = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataset identifier"); if (!size) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "size array cannot be NULL"); @@ -2040,7 +2040,7 @@ H5Dflush(hid_t dset_id) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) 
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dset_id parameter is not a valid dataset identifier"); /* Set up collective metadata if appropriate */ @@ -2081,7 +2081,7 @@ H5Drefresh(hid_t dset_id) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dset_id parameter is not a valid dataset identifier"); /* Set up collective metadata if appropriate */ @@ -2124,7 +2124,7 @@ H5Dformat_convert(hid_t dset_id) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dset_id parameter is not a valid dataset identifier"); /* Set up collective metadata if appropriate */ @@ -2163,7 +2163,7 @@ H5Dget_chunk_index_type(hid_t dset_id, H5D_chunk_index_t *idx_type /*out*/) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dset_id parameter is not a valid dataset identifier"); if (NULL == idx_type) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "idx_type parameter cannot be NULL"); @@ -2204,7 +2204,7 @@ H5Dget_chunk_storage_size(hid_t dset_id, const hsize_t *offset, hsize_t *chunk_n FUNC_ENTER_API(FAIL) /* Check arguments */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dset_id parameter is not a valid dataset identifier"); if (NULL == offset) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "offset parameter cannot be NULL"); @@ -2254,7 +2254,7 @@ H5Dget_num_chunks(hid_t dset_id, hid_t fspace_id, hsize_t *nchunks /*out*/) FUNC_ENTER_API(FAIL) /* Check arguments */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataset identifier"); if (NULL == nchunks) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid argument (null)"); @@ -2308,7 +2308,7 @@ H5Dget_chunk_info(hid_t dset_id, hid_t fspace_id, hsize_t chk_index, hsize_t *of if (NULL == offset && NULL == filter_mask && NULL == addr && NULL == size) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid arguments, must have at least one non-null output argument"); - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataset identifier"); /* Set up VOL callback arguments */ @@ -2373,7 +2373,7 @@ H5Dget_chunk_info_by_coord(hid_t dset_id, const hsize_t *offset, unsigned *filte FUNC_ENTER_API(FAIL) /* Check arguments */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataset identifier"); if (NULL == filter_mask && NULL == addr && NULL == size) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, @@ -2423,7 +2423,7 @@ H5Dchunk_iter(hid_t dset_id, hid_t dxpl_id, H5D_chunk_iter_op_t op, void *op_dat FUNC_ENTER_API(FAIL) /* Check arguments */ - if 
(NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataset identifier"); if (NULL == op) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid callback to chunk iteration"); diff --git a/src/H5Ddeprec.c b/src/H5Ddeprec.c index 5b0f4e33ede..51d6e4323c0 100644 --- a/src/H5Ddeprec.c +++ b/src/H5Ddeprec.c @@ -232,7 +232,7 @@ H5Dextend(hid_t dset_id, const hsize_t size[]) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(dset_id, H5I_DATASET))) + if (NULL == (vol_obj = H5VL_vol_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataset identifier"); if (!size) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no size specified"); diff --git a/src/H5Defl.c b/src/H5Defl.c index 22348e33fcc..34d48e690b7 100644 --- a/src/H5Defl.c +++ b/src/H5Defl.c @@ -278,12 +278,12 @@ H5D__efl_read(const H5O_efl_t *efl, const H5D_t *dset, haddr_t addr, size_t size { int fd = -1; size_t to_read; + size_t left_to_read; #ifndef NDEBUG hsize_t tempto_read; #endif /* NDEBUG */ hsize_t skip = 0; haddr_t cur; - ssize_t n; size_t u; /* Local index variable */ char *full_name = NULL; /* File name with prefix */ herr_t ret_value = SUCCEED; /* Return value */ @@ -325,15 +325,43 @@ H5D__efl_read(const H5O_efl_t *efl, const H5D_t *dset, haddr_t addr, size_t size #else /* NDEBUG */ to_read = MIN((size_t)(efl->slot[u].size - skip), (hsize_t)size); #endif /* NDEBUG */ - if ((n = HDread(fd, buf, to_read)) < 0) - HGOTO_ERROR(H5E_EFL, H5E_READERROR, FAIL, "read error in external raw data file"); - else if ((size_t)n < to_read) - memset(buf + n, 0, to_read - (size_t)n); + + /* Inner loop - read to_read bytes from a single external file */ + left_to_read = to_read; + while (left_to_read > 0) { + h5_posix_io_t bytes_in = 0; /* # of bytes to read */ + h5_posix_io_ret_t bytes_read = -1; /* # of bytes actually read */ + + /* Trying to read more bytes than the return type can handle is + * undefined behavior in POSIX. 
+ */ + if (left_to_read > H5_POSIX_MAX_IO_BYTES) + bytes_in = H5_POSIX_MAX_IO_BYTES; + else + bytes_in = (h5_posix_io_t)left_to_read; + + do { + bytes_read = HDread(fd, buf, bytes_in); + } while (-1 == bytes_read && EINTR == errno); + + if (bytes_read < 0) + HGOTO_ERROR(H5E_EFL, H5E_READERROR, FAIL, "read error in external raw data file"); + + if (0 == bytes_read) { + /* End of file on disk, fill the remaining sectors to be read from this file with 0 */ + memset(buf, 0, left_to_read); + bytes_read = (h5_posix_io_ret_t)left_to_read; + } /* end if */ + + left_to_read -= (size_t)bytes_read; + buf += bytes_read; + } + + /* Prepare to advance to next external file */ full_name = (char *)H5MM_xfree(full_name); HDclose(fd); fd = -1; size -= to_read; - buf += to_read; skip = 0; u++; } /* end while */ @@ -364,6 +392,7 @@ H5D__efl_write(const H5O_efl_t *efl, const H5D_t *dset, haddr_t addr, size_t siz { int fd = -1; size_t to_write; + size_t left_to_write; #ifndef NDEBUG hsize_t tempto_write; #endif /* NDEBUG */ @@ -414,13 +443,39 @@ H5D__efl_write(const H5O_efl_t *efl, const H5D_t *dset, haddr_t addr, size_t siz #else /* NDEBUG */ to_write = MIN((size_t)(efl->slot[u].size - skip), size); #endif /* NDEBUG */ - if ((size_t)HDwrite(fd, buf, to_write) != to_write) - HGOTO_ERROR(H5E_EFL, H5E_READERROR, FAIL, "write error in external raw data file"); + + /* Inner loop - write to_write bytes to a single external file */ + left_to_write = to_write; + while (left_to_write > 0) { + h5_posix_io_t bytes_in = 0; /* # of bytes to write */ + h5_posix_io_ret_t bytes_wrote = -1; /* # of bytes actually written */ + + /* Trying to write more bytes than the return type can handle is + * undefined behavior in POSIX. + */ + if (left_to_write > H5_POSIX_MAX_IO_BYTES) + bytes_in = H5_POSIX_MAX_IO_BYTES; + else + bytes_in = (h5_posix_io_t)left_to_write; + + do { + bytes_wrote = HDwrite(fd, buf, bytes_in); + } while (-1 == bytes_wrote && EINTR == errno); + + if (bytes_wrote < 0) + HGOTO_ERROR(H5E_EFL, H5E_WRITEERROR, FAIL, "write error in external raw data file"); + if (bytes_wrote == 0) + HGOTO_ERROR(H5E_EFL, H5E_WRITEERROR, FAIL, "wrote 0 bytes to external raw data file"); + + left_to_write -= (size_t)bytes_wrote; + buf += bytes_wrote; + } + + /* Prepare to advance to next external file */ full_name = (char *)H5MM_xfree(full_name); HDclose(fd); fd = -1; size -= to_write; - buf += to_write; skip = 0; u++; } /* end while */ diff --git a/src/H5Dmodule.h b/src/H5Dmodule.h index 26e748ce1a0..96c5b1a704e 100644 --- a/src/H5Dmodule.h +++ b/src/H5Dmodule.h @@ -887,7 +887,7 @@ filter. * It is clear that the internal HDF5 filter mechanism, while extensible, does not work well with third-party * filters. It would be a maintenance nightmare to keep adding and supporting new compression methods * in HDF5. For any set of HDF5 “internal” filters, there always will be data with which the “internal” -filters + * filters * will not achieve the optimal performance needed to address data I/O and storage problems. Thus the * internal HDF5 filter mechanism is enhanced to address the issues discussed above. * @@ -901,7 +901,7 @@ filters * * When an application reads data compressed with a third-party HDF5 filter, the HDF5 Library will search * for the required filter plugin, register the filter with the library (if the filter function is not -registered) and + * registered) and * apply it to the data on the read operation. * * For more information, @@ -1496,7 +1496,7 @@ allocated if necessary. 
* the size of the memory datatype and the number of elements in the memory selection. * * Variable-length data are organized in two or more areas of memory. For more information, - * \see \ref h4_vlen_datatype "Variable-length Datatypes". + * see \ref h4_vlen_datatype "Variable-length Datatypes". * * When writing data, the application creates an array of * vl_info_t which contains pointers to the elements. The elements might be, for example, strings. @@ -2735,7 +2735,7 @@ allocated if necessary. * See The HDF Group website for further information regarding the SZip filter. * * \subsubsection subsubsec_dataset_filters_dyn Using Dynamically-Loadable Filters - * \see \ref sec_filter_plugins for further information regarding the dynamically-loadable filters. + * see \ref sec_filter_plugins for further information regarding the dynamically-loadable filters. * * HDF has a filter plugin repository of useful third-party plugins that can used * diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index 19b9266556a..8692acdeff8 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -1842,7 +1842,7 @@ H5_DLL hid_t H5Dopen1(hid_t loc_id, const char *name); * used if the dataset dimension sizes are to be reduced. * * \version 1.8.0 Function deprecated in this release. Parameter size - * syntax changed to \Code{const hsize_t size[]} in this release. + * syntax changed to \TText{const hsize_t size[]} in this release. * \since 1.0.0 * */ diff --git a/src/H5Emodule.h b/src/H5Emodule.h index b41f70d8eb7..f46456a1369 100644 --- a/src/H5Emodule.h +++ b/src/H5Emodule.h @@ -58,7 +58,7 @@ * design for the Error Handling API. * * \subsection subsec_error_H5E Error Handling Function Summaries - * @see H5E reference manual + * see @ref H5E reference manual * * \subsection subsec_error_program Programming Model for Error Handling * This section is under construction. @@ -80,24 +80,21 @@ * an error stack ID is needed as a parameter, \ref H5E_DEFAULT can be used to indicate the library's default * stack. The first error record of the error stack, number #000, is produced by the API function itself and * is usually sufficient to indicate to the application what went wrong. - *
    - * - * - * - * - *
    Example: An Error Message
    - *

    If an application calls \ref H5Tclose on a - * predefined datatype then the following message is - * printed on the standard error stream. This is a - * simple error that has only one component, the API - * function; other errors may have many components. - *

    + *
    + * If an application calls \ref H5Tclose  on a
    + * predefined datatype then the following message is
    + * printed on the standard error stream.  This is a
    + * simple error that has only one component, the API
    + * function; other errors may have many components.
    + *
    + * An Error Message Example
    + * \code
      * HDF5-DIAG: Error detected in HDF5 (1.10.9) thread 0.
      *    #000: H5T.c line ### in H5Tclose(): predefined datatype
      *       major: Function argument
      *       minor: Bad value
    - *         
    - *
    + * \endcode + * * In the example above, we can see that an error record has a major message and a minor message. A major * message generally indicates where the error happens. The location can be a dataset or a dataspace, for * example. A minor message explains further details of the error. An example is “unable to open file”. @@ -158,15 +155,15 @@ * * Example: Turn off error messages while probing a function * \code - * *** Save old error handler *** + * // Save old error handler * H5E_auto2_t oldfunc; * void *old_client_data; * H5Eget_auto2(error_stack, &old_func, &old_client_data); - * *** Turn off error handling *** + * // Turn off error handling * H5Eset_auto2(error_stack, NULL, NULL); - * *** Probe. Likely to fail, but that's okay *** + * // Probe. Likely to fail, but that's okay * status = H5Fopen (......); - * *** Restore previous error handler *** + * // Restore previous error handler * H5Eset_auto2(error_stack, old_func, old_client_data); * \endcode * @@ -174,9 +171,9 @@ * * Example: Disable automatic printing and explicitly print error messages * \code - * *** Turn off error handling permanently *** + * // Turn off error handling permanently * H5Eset_auto2(error_stack, NULL, NULL); - * *** If failure, print error message *** + * // If failure, print error message * if (H5Fopen (....)<0) { * H5Eprint2(H5E_DEFAULT, stderr); * exit (1); @@ -243,9 +240,9 @@ * * The following example shows a user‐defined callback function. * - * Example: A user‐defined callback function + * A user‐defined callback function Example * \code - * \#define MSG_SIZE 64 + * #define MSG_SIZE 64 * herr_t * custom_print_cb(unsigned n, const H5E_error2_t *err_desc, void *client_data) * { @@ -255,7 +252,7 @@ * char cls[MSG_SIZE]; * const int indent = 4; * - * *** Get descriptions for the major and minor error numbers *** + * // Get descriptions for the major and minor error numbers * if(H5Eget_class_name(err_desc->cls_id, cls, MSG_SIZE) < 0) * TEST_ERROR; * if(H5Eget_msg(err_desc->maj_num, NULL, maj, MSG_SIZE) < 0) @@ -296,13 +293,11 @@ * to push its own error records onto the error stack once it declares an error class of its own through the * HDF5 Error API. * - * - * - * - * - * - *
    Example: An Error Report
    - *

    An error report shows both the library's error record and the application's error records. - * See the example below. - *

    + * An error report shows both the library's error record and the application's error records.
    + * See the example below.
    + *
    + * An Error Report Example
    + * \code
      * Error Test-DIAG: Error detected in Error Program (1.0)
      *         thread 8192:
      *     #000: ../../hdf5/test/error_test.c line ### in main():
    @@ -318,10 +313,8 @@
      *         not a dataset
      *       major: Invalid arguments to routine
      *       minor: Inappropriate type
    - *       
    - *
    + *\endcode + * * In the line above error record #002 in the example above, the starting phrase is HDF5. This is the error * class name of the HDF5 Library. All of the library's error messages (major and minor) are in this default * error class. The Error Test in the beginning of the line above error record #000 is the name of the @@ -334,7 +327,7 @@ * * Example: The user‐defined error handler * \code - * \#define MSG_SIZE 64 + * #define MSG_SIZE 64 * herr_t * custom_print_cb(unsigned n, const H5E_error2_t *err_desc, * void* client_data) @@ -345,7 +338,7 @@ * char cls[MSG_SIZE]; * const int indent = 4; * - * *** Get descriptions for the major and minor error numbers *** + * // Get descriptions for the major and minor error numbers * if(H5Eget_class_name(err_desc->cls_id, cls, MSG_SIZE) < 0) * TEST_ERROR; * if(H5Eget_msg(err_desc->maj_num, NULL, maj, MSG_SIZE) < 0) @@ -411,13 +404,13 @@ * * Example: Create an error class and error messages * \code - * *** Create an error class *** + * // Create an error class * class_id = H5Eregister_class(ERR_CLS_NAME, PROG_NAME, PROG_VERS); - * *** Retrieve class name *** + * // Retrieve class name * H5Eget_class_name(class_id, cls_name, cls_size); - * *** Create a major error message in the class *** + * // Create a major error message in the class * maj_id = H5Ecreate_msg(class_id, H5E_MAJOR, “... ...”); - * *** Create a minor error message in the class *** + * // Create a minor error message in the class * min_id = H5Ecreate_msg(class_id, H5E_MINOR, “... ...”); * \endcode * @@ -486,14 +479,14 @@ * * Example: Pushing an error message to an error stack * \code - * *** Make call to HDF5 I/O routine *** + * // Make call to HDF5 I/O routine * if((dset_id=H5Dopen(file_id, dset_name, access_plist)) < 0) * { - * *** Push client error onto error stack *** + * // Push client error onto error stack * H5Epush(H5E_DEFAULT,__FILE__,FUNC,__LINE__,cls_id, * CLIENT_ERR_MAJ_IO,CLIENT_ERR_MINOR_OPEN, “H5Dopen failed”); * } - * *** Indicate error occurred in function *** + * // Indicate error occurred in function * return 0; * \endcode * @@ -504,15 +497,15 @@ * \code * if (H5Dwrite(dset_id, mem_type_id, mem_space_id, file_space_id, dset_xfer_plist_id, buf) < 0) * { - * *** Push client error onto error stack *** + * // Push client error onto error stack * H5Epush2(H5E_DEFAULT,__FILE__,FUNC,__LINE__,cls_id, * CLIENT_ERR_MAJ_IO,CLIENT_ERR_MINOR_HDF5, * “H5Dwrite failed”); - * *** Preserve the error stack by assigning an object handle to it *** + * // Preserve the error stack by assigning an object handle to it * error_stack = H5Eget_current_stack(); - * *** Close dataset *** + * // Close dataset * H5Dclose(dset_id); - * *** Replace the current error stack with the preserved one *** + * // Replace the current error stack with the preserved one * H5Eset_current_stack(error_stack); * } * return 0; @@ -533,7 +526,7 @@ * do not clear the error stack. Otherwise, any function which does * not have an underscore immediately after the package name will * clear the error stack. For instance, H5Fopen() clears the error - * stack while \Code{H5F_open} does not. + * stack while \TText{H5F_open} does not. * * \internal An error stack has a fixed maximum size. If this size is exceeded * then the stack will be truncated and only the inner-most functions @@ -545,7 +538,7 @@ * error stack. The error stack is statically allocated to reduce the * complexity of handling errors within the \ref H5E package. 
* - * @see sec_error + * @see \ref sec_error * */ diff --git a/src/H5Epublic.h b/src/H5Epublic.h index 49628efb9be..9263c3c96bf 100644 --- a/src/H5Epublic.h +++ b/src/H5Epublic.h @@ -420,9 +420,11 @@ H5_DLL herr_t H5Eclose_stack(hid_t stack_id); * by the class identifier. If a non-NULL pointer is passed in for \p * name and \p size is greater than zero, the class name of \p size * long is returned. The length of the error class name is also - * returned. If NULL is passed in as \p name, only the length of class - * name is returned. If zero is returned, it means no name. The user is - * responsible for allocating sufficient buffer space for the name. + * returned. + * + * \details_namelen{error class,H5Eget_class_name} + * + * If zero is returned, it means the error class has no name. * * \since 1.8.0 */ diff --git a/src/H5F.c b/src/H5F.c index 390f667648b..5dd7bda3903 100644 --- a/src/H5F.c +++ b/src/H5F.c @@ -118,7 +118,7 @@ H5Fget_create_plist(hid_t file_id) FUNC_ENTER_API(H5I_INVALID_HID) /* check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -164,7 +164,7 @@ H5Fget_access_plist(hid_t file_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -239,7 +239,7 @@ H5Fget_obj_count(hid_t file_id, unsigned types) H5VL_file_get_args_t vol_cb_args; /* Arguments to VOL callback */ /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, (-1), "not a file id"); /* Set up VOL callback arguments */ @@ -358,7 +358,7 @@ H5Fget_obj_ids(hid_t file_id, unsigned types, size_t max_objs, hid_t *oid_list / H5VL_file_get_args_t vol_cb_args; /* Arguments to VOL callback */ /* get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, (-1), "invalid file identifier"); /* Set up VOL callback arguments */ @@ -439,7 +439,7 @@ H5Fget_vfd_handle(hid_t file_id, hid_t fapl_id, void **file_handle /*out*/) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file handle pointer"); /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1226,7 +1226,7 @@ H5Fmount(hid_t loc_id, const char *name, hid_t child_id, hid_t plist_id) H5VL_loc_params_t loc_params; /* Location parameters for object access */ /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5VL_vol_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set location parameters */ @@ -1244,12 +1244,12 @@ H5Fmount(hid_t loc_id, const char *name, hid_t child_id, hid_t plist_id) } /* end if */ else { assert(H5I_GROUP == loc_type); - if (NULL == 
(loc_vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (loc_vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "could not get location object"); } /* end else */ /* Get the child object */ - if (NULL == (child_vol_obj = (H5VL_object_t *)H5I_object(child_id))) + if (NULL == (child_vol_obj = H5VL_vol_object(child_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "could not get child object"); /* Check if both objects are associated with the same VOL connector */ @@ -1336,7 +1336,7 @@ H5Funmount(hid_t loc_id, const char *name) H5VL_loc_params_t loc_params; /* Location parameters for object access */ /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5VL_vol_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set location parameters */ @@ -1354,7 +1354,7 @@ H5Funmount(hid_t loc_id, const char *name) } /* end if */ else { assert(H5I_GROUP == loc_type); - if (NULL == (loc_vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (loc_vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "could not get location object"); } /* end else */ @@ -1404,7 +1404,7 @@ H5F__reopen_api_common(hid_t file_id, void **token_ptr) FUNC_ENTER_PACKAGE /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1555,7 +1555,7 @@ H5Fget_intent(hid_t file_id, unsigned *intent_flags /*out*/) H5VL_file_get_args_t vol_cb_args; /* Arguments to VOL callback */ /* Get the internal file structure */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1594,7 +1594,7 @@ H5Fget_fileno(hid_t file_id, unsigned long *fnumber /*out*/) H5VL_file_get_args_t vol_cb_args; /* Arguments to VOL callback */ /* Get the internal file structure */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1631,7 +1631,7 @@ H5Fget_freespace(hid_t file_id) FUNC_ENTER_API((-1)) /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, (-1), "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1674,7 +1674,7 @@ H5Fget_filesize(hid_t file_id, hsize_t *size /*out*/) /* Check args */ if (!size) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "size parameter cannot be NULL"); - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a file ID"); /* Set up VOL callback arguments */ @@ -1739,7 +1739,7 @@ H5Fget_file_image(hid_t file_id, void *buf /*out*/, size_t buf_len) FUNC_ENTER_API((-1)) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, 
H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, (-1), "not a file ID"); /* Set up VOL callback arguments */ @@ -1789,7 +1789,7 @@ H5Fget_mdc_config(hid_t file_id, H5AC_cache_config_t *config /*out*/) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Bad config ptr"); /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1827,7 +1827,7 @@ H5Fset_mdc_config(hid_t file_id, const H5AC_cache_config_t *config_ptr) FUNC_ENTER_API(FAIL) /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -1868,7 +1868,7 @@ H5Fget_mdc_hit_rate(hid_t file_id, double *hit_rate /*out*/) /* Check args */ if (NULL == hit_rate) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL hit rate pointer"); - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a file ID"); /* Set up VOL callback arguments */ @@ -1910,7 +1910,7 @@ H5Fget_mdc_size(hid_t file_id, size_t *max_size /*out*/, size_t *min_clean_size FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a file ID"); /* Set up VOL callback arguments */ @@ -1959,7 +1959,7 @@ H5Freset_mdc_hit_rate_stats(hid_t file_id) FUNC_ENTER_API(FAIL) /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -2109,7 +2109,7 @@ H5Fget_metadata_read_retry_info(hid_t file_id, H5F_retry_info_t *info /*out*/) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no info struct"); /* Get the file pointer */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a file ID"); /* Set up VOL callback arguments */ @@ -2149,7 +2149,7 @@ H5Fget_free_sections(hid_t file_id, H5F_mem_t type, size_t nsects, H5F_sect_info FUNC_ENTER_API((-1)) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, (-1), "invalid file identifier"); if (sect_info && nsects == 0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, (-1), "nsects must be > 0"); @@ -2194,7 +2194,7 @@ H5Fclear_elink_file_cache(hid_t file_id) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a file ID"); /* Set up VOL callback arguments */ @@ -2253,7 +2253,7 @@ H5Fstart_swmr_write(hid_t file_id) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == 
(vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "hid_t identifier is not a file ID"); /* Set up collective metadata if appropriate */ @@ -2292,7 +2292,7 @@ H5Fstart_mdc_logging(hid_t file_id) FUNC_ENTER_API(FAIL) /* Sanity check */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "hid_t identifier is not a file ID"); /* Set up VOL callback arguments */ @@ -2328,7 +2328,7 @@ H5Fstop_mdc_logging(hid_t file_id) FUNC_ENTER_API(FAIL) /* Sanity check */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "hid_t identifier is not a file ID"); /* Set up VOL callback arguments */ @@ -2365,7 +2365,7 @@ H5Fget_mdc_logging_status(hid_t file_id, hbool_t *is_enabled /*out*/, hbool_t *i FUNC_ENTER_API(FAIL) /* Sanity check */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "hid_t identifier is not a file ID"); /* Set up VOL callback arguments */ @@ -2405,7 +2405,7 @@ H5Fset_libver_bounds(hid_t file_id, H5F_libver_t low, H5F_libver_t high) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "not a file ID"); /* Set up collective metadata if appropriate */ @@ -2447,7 +2447,7 @@ H5Fformat_convert(hid_t file_id) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "file_id parameter is not a valid file identifier"); /* Set up collective metadata if appropriate */ @@ -2485,7 +2485,7 @@ H5Freset_page_buffering_stats(hid_t file_id) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -2522,7 +2522,7 @@ H5Fget_page_buffering_stats(hid_t file_id, unsigned accesses[2] /*out*/, unsigne FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a file ID"); if (NULL == accesses || NULL == hits || NULL == misses || NULL == evictions || NULL == bypasses) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL input parameters for stats"); @@ -2568,7 +2568,7 @@ H5Fget_mdc_image_info(hid_t file_id, haddr_t *image_addr /*out*/, hsize_t *image FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "hid_t identifier is not a file ID"); /* Set up VOL callback arguments */ @@ -2605,7 +2605,7 @@ H5Fget_eoa(hid_t file_id, haddr_t *eoa /*out*/) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t 
*)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "hid_t identifier is not a file ID"); /* Only do work if valid pointer to fill in */ @@ -2647,7 +2647,7 @@ H5Fincrement_filesize(hid_t file_id, hsize_t increment) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "hid_t identifier is not a file ID"); /* Set up VOL callback arguments */ @@ -2686,7 +2686,7 @@ H5Fget_dset_no_attrs_hint(hid_t file_id, hbool_t *minimize /*out*/) /* Check args */ if (NULL == minimize) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "out pointer 'minimize' cannot be NULL"); - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ @@ -2723,7 +2723,7 @@ H5Fset_dset_no_attrs_hint(hid_t file_id, hbool_t minimize) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Set up VOL callback arguments */ diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c index 642da8d0b66..335c63a94f1 100644 --- a/src/H5FDfamily.c +++ b/src/H5FDfamily.c @@ -30,14 +30,14 @@ #include "H5FDdrvr_module.h" /* This source code file is part of the H5FD driver module */ -#include "H5private.h" /* Generic Functions */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5Fprivate.h" /* File access */ -#include "H5FDprivate.h" /* File drivers */ -#include "H5FDfamily.h" /* Family file driver */ -#include "H5Iprivate.h" /* IDs */ -#include "H5MMprivate.h" /* Memory management */ -#include "H5Pprivate.h" /* Property lists */ +#include "H5private.h" /* Generic Functions */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5Fprivate.h" /* File access */ +#include "H5FDprivate.h" /* File drivers */ +#include "H5FDfamily.h" /* Family file driver */ +#include "H5Iprivate.h" /* IDs */ +#include "H5MMprivate.h" /* Memory management */ +#include "H5Pprivate.h" /* Property lists */ /* The size of the member name buffers */ #define H5FD_FAM_MEMB_NAME_BUF_SIZE 4096 @@ -187,10 +187,9 @@ H5FD__family_get_default_config(H5FD_family_fapl_t *fa_out) HGOTO_ERROR(H5E_VFL, H5E_CANTSET, FAIL, "can't set default driver on member FAPL"); done: - if (ret_value < 0 && fa_out->memb_fapl_id >= 0) { + if (ret_value < 0 && fa_out->memb_fapl_id >= 0) if (H5I_dec_ref(fa_out->memb_fapl_id) < 0) HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "can't decrement ref. count on member FAPL ID"); - } FUNC_LEAVE_NOAPI(ret_value) } /* end H5FD__family_get_default_config() */ @@ -237,6 +236,7 @@ H5FD__family_get_default_printf_filename(const char *old_filename) if (file_extension) { /* Insert the printf format between the filename and ".h5" extension. 
*/ intptr_t beginningLength = file_extension - old_filename; + snprintf(tmp_buffer, new_filename_len, "%.*s%s%s", (int)beginningLength, old_filename, suffix, ".h5"); } else { @@ -246,15 +246,15 @@ H5FD__family_get_default_printf_filename(const char *old_filename) file_extension = strrchr(old_filename, '.'); if (file_extension) { intptr_t beginningLength = file_extension - old_filename; + snprintf(tmp_buffer, new_filename_len, "%.*s%s%s", (int)beginningLength, old_filename, suffix, file_extension); } - else { + else /* If the filename doesn't contain an extension at all, just insert * the printf format at the end of the filename. */ snprintf(tmp_buffer, new_filename_len, "%s%s", old_filename, suffix); - } } ret_value = tmp_buffer; @@ -618,7 +618,7 @@ H5FD__family_sb_decode(H5FD_t *_file, const char H5_ATTR_UNUSED *name, const uns /* Check if member size from file access property is correct */ if (msize != file->pmem_size) - HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, + HGOTO_ERROR(H5E_VFL, H5E_BADVALUE, FAIL, "Family member size should be %lu. But the size from file access property is %lu", (unsigned long)msize, (unsigned long)file->pmem_size); @@ -724,16 +724,16 @@ H5FD__family_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxad file->pmem_size = fa->memb_size; /* Member size passed in through property */ if (default_config && H5I_dec_ref(fa->memb_fapl_id) < 0) - HGOTO_ERROR(H5E_ID, H5E_CANTDEC, NULL, "can't decrement ref. count on member FAPL"); + HGOTO_ERROR(H5E_VFL, H5E_CANTDEC, NULL, "can't decrement ref. count on member FAPL"); } /* end else */ file->name = H5MM_strdup(name); file->flags = flags; /* Allocate space for the string buffers */ if (NULL == (memb_name = (char *)H5MM_malloc(H5FD_FAM_MEMB_NAME_BUF_SIZE))) - HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, NULL, "unable to allocate member name"); + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, NULL, "unable to allocate member name"); if (NULL == (temp = (char *)H5MM_malloc(H5FD_FAM_MEMB_NAME_BUF_SIZE))) - HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, NULL, "unable to allocate temporary member name"); + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, NULL, "unable to allocate temporary member name"); /* Check that names are unique */ snprintf(memb_name, H5FD_FAM_MEMB_NAME_BUF_SIZE, name, 0); @@ -746,7 +746,7 @@ H5FD__family_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxad name = temp; } else - HGOTO_ERROR(H5E_FILE, H5E_FILEEXISTS, NULL, "file names not unique"); + HGOTO_ERROR(H5E_VFL, H5E_FILEEXISTS, NULL, "file names not unique"); } /* Open all the family members */ @@ -771,17 +771,14 @@ H5FD__family_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxad * Allow H5F_ACC_CREAT only on the first family member. */ if (0 == file->nmembs) { - if (NULL == (file->memb[file->nmembs] = H5FDopen(memb_name, (0 == file->nmembs ? flags : t_flags), - file->memb_fapl_id, HADDR_UNDEF))) + if (H5FD_open(false, &file->memb[file->nmembs], memb_name, (0 == file->nmembs ? flags : t_flags), + file->memb_fapl_id, HADDR_UNDEF) < 0) HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, NULL, "unable to open member file"); } else { - H5E_PAUSE_ERRORS - { - file->memb[file->nmembs] = H5FDopen(memb_name, (0 == file->nmembs ? flags : t_flags), - file->memb_fapl_id, HADDR_UNDEF); - } - H5E_RESUME_ERRORS + if (H5FD_open(true, &file->memb[file->nmembs], memb_name, (0 == file->nmembs ? 
flags : t_flags), + file->memb_fapl_id, HADDR_UNDEF) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, NULL, "unable to open member file"); if (!file->memb[file->nmembs]) break; @@ -794,7 +791,7 @@ H5FD__family_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxad * smaller than the size specified through H5Pset_fapl_family(). Update the actual * member size. */ - if ((eof = H5FDget_eof(file->memb[0], H5FD_MEM_DEFAULT))) + if ((eof = H5FD_get_eof(file->memb[0], H5FD_MEM_DEFAULT))) file->memb_size = eof; ret_value = (H5FD_t *)file; @@ -818,7 +815,7 @@ H5FD__family_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxad if (H5FD_close(file->memb[u]) < 0) nerrors++; if (nerrors) - HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, NULL, "unable to close member files"); + HDONE_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, NULL, "unable to close member files"); if (file->memb) H5MM_xfree(file->memb); @@ -906,7 +903,7 @@ H5FD__family_cmp(const H5FD_t *_f1, const H5FD_t *_f2) assert(f1->nmembs >= 1 && f1->memb[0]); assert(f2->nmembs >= 1 && f2->memb[0]); - ret_value = H5FDcmp(f1->memb[0], f2->memb[0]); + ret_value = H5FD_cmp(f1->memb[0], f2->memb[0]); FUNC_LEAVE_NOAPI(ret_value) } /* end H5FD__family_cmp() */ @@ -1000,7 +997,7 @@ H5FD__family_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t abs_eoa) /* Allocate space for the member name buffer */ if (NULL == (memb_name = (char *)H5MM_malloc(H5FD_FAM_MEMB_NAME_BUF_SIZE))) - HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, FAIL, "unable to allocate member name"); + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, FAIL, "unable to allocate member name"); for (u = 0; addr || u < file->nmembs; u++) { @@ -1021,9 +1018,9 @@ H5FD__family_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t abs_eoa) file->nmembs = MAX(file->nmembs, u + 1); snprintf(memb_name, H5FD_FAM_MEMB_NAME_BUF_SIZE, file->name, u); H5_CHECK_OVERFLOW(file->memb_size, hsize_t, haddr_t); - if (NULL == (file->memb[u] = H5FDopen(memb_name, file->flags | H5F_ACC_CREAT, file->memb_fapl_id, - (haddr_t)file->memb_size))) - HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to open member file"); + if (H5FD_open(false, &file->memb[u], memb_name, file->flags | H5F_ACC_CREAT, file->memb_fapl_id, + (haddr_t)file->memb_size) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, FAIL, "unable to open member file"); } /* end if */ /* Set the EOA marker for the member */ @@ -1031,12 +1028,12 @@ H5FD__family_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t abs_eoa) H5_CHECK_OVERFLOW(file->memb_size, hsize_t, haddr_t); if (addr > (haddr_t)file->memb_size) { if (H5FD_set_eoa(file->memb[u], type, ((haddr_t)file->memb_size - file->pub.base_addr)) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to set file eoa"); + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "unable to set file eoa"); addr -= file->memb_size; } /* end if */ else { if (H5FD_set_eoa(file->memb[u], type, (addr - file->pub.base_addr)) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to set file eoa"); + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "unable to set file eoa"); addr = 0; } /* end else */ } /* end for */ @@ -1126,12 +1123,12 @@ H5FD__family_get_handle(H5FD_t *_file, hid_t fapl, void **file_handle) /* Get the plist structure and family offset */ if (NULL == (plist = H5P_object_verify(fapl, H5P_FILE_ACCESS))) - HGOTO_ERROR(H5E_ID, H5E_BADID, FAIL, "can't find object for ID"); + HGOTO_ERROR(H5E_VFL, H5E_BADID, FAIL, "can't find object for ID"); if (H5P_get(plist, H5F_ACS_FAMILY_OFFSET_NAME, &offset) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get 
offset for family driver"); + HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get offset for family driver"); if (offset > (file->memb_size * file->nmembs)) - HGOTO_ERROR(H5E_ID, H5E_BADID, FAIL, "offset is bigger than file size"); + HGOTO_ERROR(H5E_VFL, H5E_BADID, FAIL, "offset is bigger than file size"); memb = (int)(offset / file->memb_size); ret_value = H5FD_get_vfd_handle(file->memb[memb], fapl, file_handle); @@ -1192,8 +1189,8 @@ H5FD__family_read(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, s assert(u < file->nmembs); - if (H5FDread(file->memb[u], type, dxpl_id, sub, req, buf) < 0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "member file read failed"); + if (H5FD_read(file->memb[u], type, sub, req, buf) < 0) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "member file read failed"); addr += req; buf += req; @@ -1254,8 +1251,8 @@ H5FD__family_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, assert(u < file->nmembs); - if (H5FDwrite(file->memb[u], type, dxpl_id, sub, req, buf) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "member file write failed"); + if (H5FD_write(file->memb[u], type, sub, req, buf) < 0) + HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "member file write failed"); addr += req; buf += req; @@ -1290,7 +1287,7 @@ H5FD__family_flush(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, bool closing) nerrors++; if (nerrors) - HGOTO_ERROR(H5E_IO, H5E_BADVALUE, FAIL, "unable to flush member files"); + HGOTO_ERROR(H5E_VFL, H5E_BADVALUE, FAIL, "unable to flush member files"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -1321,7 +1318,7 @@ H5FD__family_truncate(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, bool closing) nerrors++; if (nerrors) - HGOTO_ERROR(H5E_IO, H5E_BADVALUE, FAIL, "unable to flush member files"); + HGOTO_ERROR(H5E_VFL, H5E_BADVALUE, FAIL, "unable to flush member files"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -1360,12 +1357,12 @@ H5FD__family_lock(H5FD_t *_file, bool rw) if (u < file->nmembs) { unsigned v; /* Local index variable */ - for (v = 0; v < u; v++) { + for (v = 0; v < u; v++) if (H5FD_unlock(file->memb[v]) < 0) /* Push error, but keep going */ HDONE_ERROR(H5E_IO, H5E_CANTUNLOCKFILE, FAIL, "unable to unlock member files"); - } /* end for */ - HGOTO_ERROR(H5E_IO, H5E_CANTLOCKFILE, FAIL, "unable to lock member files"); + + HGOTO_ERROR(H5E_VFL, H5E_CANTLOCKFILE, FAIL, "unable to lock member files"); } /* end if */ done: @@ -1394,7 +1391,7 @@ H5FD__family_unlock(H5FD_t *_file) for (u = 0; u < file->nmembs; u++) if (file->memb[u]) if (H5FD_unlock(file->memb[u]) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTUNLOCKFILE, FAIL, "unable to unlock member files"); + HGOTO_ERROR(H5E_VFL, H5E_CANTUNLOCKFILE, FAIL, "unable to unlock member files"); done: FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5FDlog.h b/src/H5FDlog.h index b4af2050a62..ca431bdc691 100644 --- a/src/H5FDlog.h +++ b/src/H5FDlog.h @@ -95,7 +95,7 @@ H5_DLL hid_t H5FD_log_init(void); * table. Multiple flags can be set through the use of a logical \c OR * contained in parentheses. For example, logging read and write * locations would be specified as - * \Code{(H5FD_LOG_LOC_READ|H5FD_LOG_LOC_WRITE)}. + * \TText{(H5FD_LOG_LOC_READ|H5FD_LOG_LOC_WRITE)}. 
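Editor's note: as a usage sketch for the logging-flag documentation above, the following minimal C program shows how these flags might be combined on a file access property list. The log file name, flag choice, and buffer size are arbitrary illustrations and are not part of this patch.

    #include "hdf5.h"

    int
    main(void)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        hid_t file;

        /* Log read and write locations to "hdf5.log"; the final argument is the
         * logging buffer size, which only matters for the per-byte tracking
         * flags (not used here) */
        if (H5Pset_fapl_log(fapl, "hdf5.log", H5FD_LOG_LOC_READ | H5FD_LOG_LOC_WRITE, 4096) < 0)
            return 1;

        if ((file = H5Fcreate("logged.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
            return 1;

        H5Fclose(file);
        H5Pclose(fapl);
        return 0;
    }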
* * * @@ -115,7 +115,7 @@ H5_DLL hid_t H5FD_log_init(void); * * * * @@ -134,7 +134,7 @@ H5_DLL hid_t H5FD_log_init(void); * * * @@ -163,7 +163,7 @@ H5_DLL hid_t H5FD_log_init(void); * * * @@ -186,7 +186,7 @@ H5_DLL hid_t H5FD_log_init(void); * * @@ -204,7 +204,7 @@ H5_DLL hid_t H5FD_log_init(void); * * * @@ -234,19 +234,19 @@ H5_DLL hid_t H5FD_log_init(void); * * * * * * * * * @@ -254,19 +254,19 @@ H5_DLL hid_t H5FD_log_init(void); * * * * * * * * * @@ -274,10 +274,10 @@ H5_DLL hid_t H5FD_log_init(void); * * * * * @@ -287,7 +287,7 @@ H5_DLL hid_t H5FD_log_init(void); * Begins with:\n * Dumping read I/O information\n\n * Then, for each range of identical values, there is this line:\n - * \Code{Addr %10-%10 (%10lu bytes) read from %3d times}\n\n + * \TText{Addr %10-%10 (%10lu bytes) read from %3d times}\n\n * Start address\n * End address\n * Number of bytes\n @@ -303,7 +303,7 @@ H5_DLL hid_t H5FD_log_init(void); * Begins with:\n * Dumping read I/O information\n\n * Then, for each range of identical values, there is this line:\n - * \Code{Addr %10-%10 (%10lu bytes) written to %3d times}\n\n + * \TText{Addr %10-%10 (%10lu bytes) written to %3d times}\n\n * Start address\n * End address\n * Number of bytes\n @@ -319,7 +319,7 @@ H5_DLL hid_t H5FD_log_init(void); * Begins with:\n * Dumping I/O flavor information\n\n * Then, for each range of identical values, there is this line:\n - * \Code{Addr %10-%10 (%10lu bytes) flavor is %s}\n\n + * \TText{Addr %10-%10 (%10lu bytes) flavor is %s}\n\n * Start address\n * End address\n * Number of bytes\n @@ -332,42 +332,42 @@ H5_DLL hid_t H5FD_log_init(void); * * * * * * * * * * * * * * * * * * * * * * * * * * * * * @@ -376,7 +376,7 @@ H5_DLL hid_t H5FD_log_init(void); * * * * @@ -384,7 +384,7 @@ H5_DLL hid_t H5FD_log_init(void); * * * * @@ -392,21 +392,21 @@ H5_DLL hid_t H5FD_log_init(void); * * * * * * * * * * * * * *
    Table1: Logging Flags
    * Track all I/O locations and lengths. The logical equivalent of the following: - * \Code{(#H5FD_LOG_LOC_READ | #H5FD_LOG_LOC_WRITE | #H5FD_LOG_LOC_SEEK)} + * \TText{(#H5FD_LOG_LOC_READ | #H5FD_LOG_LOC_WRITE | #H5FD_LOG_LOC_SEEK)} *
    * Track the number of times each byte is read and written. The logical * equivalent of the following: - * \Code{(#H5FD_LOG_FILE_READ | #H5FD_LOG_FILE_WRITE)} + * \TText{(#H5FD_LOG_FILE_READ | #H5FD_LOG_FILE_WRITE)} *
    * Track the total number of all types of I/O operations. The logical equivalent * of the following: - * \Code{(#H5FD_LOG_NUM_READ | #H5FD_LOG_NUM_WRITE | #H5FD_LOG_NUM_SEEK | #H5FD_LOG_NUM_TRUNCATE)} + * \TText{(#H5FD_LOG_NUM_READ | #H5FD_LOG_NUM_WRITE | #H5FD_LOG_NUM_SEEK | #H5FD_LOG_NUM_TRUNCATE)} *
    * Track the time spent in each of the above operations. The logical equivalent * of the following: - * \Code{(#H5FD_LOG_TIME_OPEN | #H5FD_LOG_TIME_STAT | #H5FD_LOG_TIME_READ | #H5FD_LOG_TIME_WRITE | + * \TText{(#H5FD_LOG_TIME_OPEN | #H5FD_LOG_TIME_STAT | #H5FD_LOG_TIME_READ | #H5FD_LOG_TIME_WRITE | * #H5FD_LOG_TIME_SEEK | #H5FD_LOG_TIME_CLOSE)} *
    * Track everything. The logical equivalent of the following: - * \Code{(#H5FD_LOG_ALLOC | #H5FD_LOG_TIME_IO | #H5FD_LOG_NUM_IO | #H5FD_LOG_FLAVOR | #H5FD_LOG_FILE_IO | + * \TText{(#H5FD_LOG_ALLOC | #H5FD_LOG_TIME_IO | #H5FD_LOG_NUM_IO | #H5FD_LOG_FLAVOR | #H5FD_LOG_FILE_IO | * #H5FD_LOG_LOC_IO)} *
    #H5FD_LOG_LOC_READ | Read - * \Code{%10a-%10a (%10Zu bytes) (%s) Read}\n\n + * \TText{%10a-%10a (%10Zu bytes) (%s) Read}\n\n * Start position\n * End position\n * Number of bytes\n * Flavor of read\n\n - * Adds \Code{(\%f s)} and seek time if #H5FD_LOG_TIME_SEEK is also set. + * Adds \TText{(\%f s)} and seek time if #H5FD_LOG_TIME_SEEK is also set. *
    #H5FD_LOG_LOC_READ | Read Error - * \Code{Error! Reading: %10a-%10a (%10Zu bytes)}\n\n + * \TText{Error! Reading: %10a-%10a (%10Zu bytes)}\n\n * Same parameters as non-error entry. *
    #H5FD_LOG_LOC_WRITE | Write - * \Code{%10a-%10a (%10Zu bytes) (%s) Written}\n\n + * \TText{%10a-%10a (%10Zu bytes) (%s) Written}\n\n * Start position\n * End position\n * Number of bytes\n * Flavor of write\n\n - * Adds \Code{(\%f s)} and seek time if #H5FD_LOG_TIME_SEEK is also set. + * Adds \TText{(\%f s)} and seek time if #H5FD_LOG_TIME_SEEK is also set. *
    #H5FD_LOG_LOC_WRITE | Write Error - * \Code{Error! Writing: %10a-%10a (%10Zu bytes)}\n\n + * \TText{Error! Writing: %10a-%10a (%10Zu bytes)}\n\n * Same parameters as non-error entry. *
    #H5FD_LOG_LOC_SEEK | Read, Write - * \Code{Seek: From %10a-%10a}\n\n + * \TText{Seek: From %10a-%10a}\n\n * Start position\n * End position\n\n - * Adds \Code{(\%f s)} and seek time if #H5FD_LOG_TIME_SEEK is also set. + * Adds \TText{(\%f s)} and seek time if #H5FD_LOG_TIME_SEEK is also set. *
    #H5FD_LOG_NUM_READ | Close - * Total number of read operations: \Code{%11u} + * Total number of read operations: \TText{%11u} *
    #H5FD_LOG_NUM_WRITE | Close - * Total number of write operations: \Code{%11u} + * Total number of write operations: \TText{%11u} *
    #H5FD_LOG_NUM_SEEK | Close - * Total number of seek operations: \Code{%11u} + * Total number of seek operations: \TText{%11u} *
    #H5FD_LOG_NUM_TRUNCATE | Close - * Total number of truncate operations: \Code{%11u} + * Total number of truncate operations: \TText{%11u} *
    #H5FD_LOG_TIME_OPEN | Open - * Open took: \Code{(\%f s)} + * Open took: \TText{(\%f s)} *
    #H5FD_LOG_TIME_READ | Close, Read - * Total time in read operations: \Code{\%f s}\n\n + * Total time in read operations: \TText{\%f s}\n\n * See also: #H5FD_LOG_LOC_READ *
    #H5FD_LOG_TIME_WRITE | Close, Write - * Total time in write operations: \Code{\%f s}\n\n + * Total time in write operations: \TText{\%f s}\n\n * See also: #H5FD_LOG_LOC_WRITE *
    #H5FD_LOG_TIME_SEEK | Close, Read, Write - * Total time in write operations: \Code{\%f s}\n\n + * Total time in write operations: \TText{\%f s}\n\n * See also: #H5FD_LOG_LOC_SEEK or #H5FD_LOG_LOC_WRITE *
    #H5FD_LOG_TIME_CLOSE | Close - * Close took: \Code{(\%f s)} + * Close took: \TText{(\%f s)} *
    #H5FD_LOG_TIME_STAT | Open - * Stat took: \Code{(\%f s)} + * Stat took: \TText{(\%f s)} *
    #H5FD_LOG_ALLOC | Alloc - * \Code{%10-%10 (%10Hu bytes) (\%s) Allocated}\n\n + * \TText{%10-%10 (%10Hu bytes) (\%s) Allocated}\n\n * Start of address space\n * End of address space\n * Total size allocation\n @@ -462,14 +462,14 @@ H5_DLL hid_t H5FD_log_init(void); *
    * - * \version 1.8.7 The flags parameter has been changed from \Code{unsigned int} - * to \Code{unsigned long long}. + * \version 1.8.7 The flags parameter has been changed from \TText{unsigned int} + * to \TText{unsigned long long}. * The implementation of the #H5FD_LOG_TIME_OPEN, #H5FD_LOG_TIME_READ, * #H5FD_LOG_TIME_WRITE, and #H5FD_LOG_TIME_SEEK flags has been finished. * New flags were added: #H5FD_LOG_NUM_TRUNCATE and #H5FD_LOG_TIME_STAT. * \version 1.6.0 The \c verbosity parameter has been removed. - * Two new parameters have been added: \p flags of type \Code{unsigned} and - * \p buf_size of type \Code{size_t}. + * Two new parameters have been added: \p flags of type \TText{unsigned} and + * \p buf_size of type \TText{size_t}. * \since 1.4.0 * */ diff --git a/src/H5FDmulti.h b/src/H5FDmulti.h index d89a3e27cce..0bb86157f89 100644 --- a/src/H5FDmulti.h +++ b/src/H5FDmulti.h @@ -67,7 +67,7 @@ H5_DLL hid_t H5FD_multi_init(void); * usage type that will be associated with a file. * * The array \p memb_name should be a name generator (a - * \Code{printf}-style format with a \Code{%s} which will be replaced + * \TText{printf}-style format with a \TText{%s} which will be replaced * with the name passed to H5FDopen(), usually from H5Fcreate() or * H5Fopen()). * @@ -99,7 +99,7 @@ H5_DLL hid_t H5FD_multi_init(void); * \p memb_name * * - * The default string is \Code{%s-X.h5} where \c X is one of the following letters: + * The default string is \TText{%s-X.h5} where \c X is one of the following letters: * - \c s for #H5FD_MEM_SUPER * - \c b for #H5FD_MEM_BTREE * - \c r for #H5FD_MEM_DRAW @@ -115,12 +115,12 @@ H5_DLL hid_t H5FD_multi_init(void); * * The default setting is that the address space is equally divided * among all of the elements: - * - #H5FD_MEM_SUPER \Code{-> 0 * (HADDR_MAX/6)} - * - #H5FD_MEM_BTREE \Code{-> 1 * (HADDR_MAX/6)} - * - #H5FD_MEM_DRAW \Code{-> 2 * (HADDR_MAX/6)} - * - #H5FD_MEM_GHEAP \Code{-> 3 * (HADDR_MAX/6)} - * - #H5FD_MEM_LHEAP \Code{-> 4 * (HADDR_MAX/6)} - * - #H5FD_MEM_OHDR \Code{-> 5 * (HADDR_MAX/6)} + * - #H5FD_MEM_SUPER \TText{-> 0 * (HADDR_MAX/6)} + * - #H5FD_MEM_BTREE \TText{-> 1 * (HADDR_MAX/6)} + * - #H5FD_MEM_DRAW \TText{-> 2 * (HADDR_MAX/6)} + * - #H5FD_MEM_GHEAP \TText{-> 3 * (HADDR_MAX/6)} + * - #H5FD_MEM_LHEAP \TText{-> 4 * (HADDR_MAX/6)} + * - #H5FD_MEM_OHDR \TText{-> 5 * (HADDR_MAX/6)} * * * @@ -154,7 +154,7 @@ H5_DLL hid_t H5FD_multi_init(void); * memb_name, memb_addr, true); * \endcode * - * \version 1.6.3 \p memb_name parameter type changed to \Code{const char* const*}. + * \version 1.6.3 \p memb_name parameter type changed to \TText{const char* const*}. * \since 1.4.0 */ H5_DLL herr_t H5Pset_fapl_multi(hid_t fapl_id, const H5FD_mem_t *memb_map, const hid_t *memb_fapl, @@ -206,7 +206,7 @@ H5_DLL herr_t H5Pget_fapl_multi(hid_t fapl_id, H5FD_mem_t *memb_map /*out*/, hid * \p meta_ext is the filename extension for the metadata file. The * extension is appended to the name passed to H5FDopen(), usually from * H5Fcreate() or H5Fopen(), to form the name of the metadata file. If - * the string \Code{%s} is used in the extension, it works like the + * the string \TText{%s} is used in the extension, it works like the * name generator as in H5Pset_fapl_multi(). * * \p meta_plist_id is the file access property list identifier for the @@ -215,7 +215,7 @@ H5_DLL herr_t H5Pget_fapl_multi(hid_t fapl_id, H5FD_mem_t *memb_map /*out*/, hid * \p raw_ext is the filename extension for the raw data file. 
The * extension is appended to the name passed to H5FDopen(), usually from * H5Fcreate() or H5Fopen(), to form the name of the raw data file. If - * the string \Code{%s} is used in the extension, it works like the + * the string \TText{%s} is used in the extension, it works like the * name generator as in H5Pset_fapl_multi(). * * \p raw_plist_id is the file access property list identifier for the diff --git a/src/H5FDs3comms.c b/src/H5FDs3comms.c index 4b1ff0091aa..0d1cd0f868a 100644 --- a/src/H5FDs3comms.c +++ b/src/H5FDs3comms.c @@ -50,7 +50,7 @@ /* manipulate verbosity of CURL output * * 0 -> no explicit curl output - * 1 -> on error, print failure info to stderr + * 1 -> print: (1) failure info to stderr on error, (2) basic HTTP range GET info * 2 -> in addition to above, print information for all performs; sets all * curl handles with CURLOPT_VERBOSE */ @@ -810,6 +810,11 @@ H5FD_s3comms_s3r_getsize(s3r_t *handle) handle->filesize = (size_t)content_length; +#if S3COMMS_CURL_VERBOSITY > 0 + fprintf(stdout, " -- size: %ju\n", content_length); + fflush(stdout); +#endif + /********************** * UNDO HEAD SETTINGS * **********************/ @@ -1119,6 +1124,12 @@ H5FD_s3comms_s3r_read(s3r_t *handle, haddr_t offset, size_t len, void *dest) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "unable to format HTTP Range value"); } +#if S3COMMS_CURL_VERBOSITY > 0 + fprintf(stdout, "%s: Bytes %" PRIuHADDR " - %" PRIuHADDR ", Request Size: %zu\n", handle->httpverb, + offset, offset + len - 1, len); + fflush(stdout); +#endif + /******************* * COMPILE REQUEST * *******************/ @@ -1671,6 +1682,9 @@ H5FD_s3comms_HMAC_SHA256(const unsigned char *key, size_t key_len, const char *m FUNC_ENTER_NOAPI_NOINIT + if (!key) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "signing key not provided"); + if (dest == NULL) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "destination cannot be null."); @@ -1751,6 +1765,7 @@ H5FD__s3comms_load_aws_creds_from_file(FILE *file, const char *profile_name, cha unsigned setting_i = 0; int found_setting = 0; char *line_buffer = &(buffer[0]); + size_t end = 0; FUNC_ENTER_PACKAGE @@ -1761,8 +1776,7 @@ H5FD__s3comms_load_aws_creds_from_file(FILE *file, const char *profile_name, cha /* look for start of profile */ do { /* clear buffer */ - for (buffer_i = 0; buffer_i < 128; buffer_i++) - buffer[buffer_i] = 0; + memset(buffer, 0, 128); line_buffer = fgets(line_buffer, 128, file); if (line_buffer == NULL) /* reached end of file */ @@ -1771,9 +1785,9 @@ H5FD__s3comms_load_aws_creds_from_file(FILE *file, const char *profile_name, cha /* extract credentials from lines */ do { - /* clear buffer */ - for (buffer_i = 0; buffer_i < 128; buffer_i++) - buffer[buffer_i] = 0; + /* clear buffer and flag */ + memset(buffer, 0, 128); + found_setting = 0; /* collect a line from file */ line_buffer = fgets(line_buffer, 128, file); @@ -1812,10 +1826,11 @@ H5FD__s3comms_load_aws_creds_from_file(FILE *file, const char *profile_name, cha strncpy(setting_pointers[setting_i], (const char *)line_buffer, strlen(line_buffer)); /* "trim" tailing whitespace by replacing with null terminator*/ - buffer_i = 0; - while (!isspace(setting_pointers[setting_i][buffer_i])) - buffer_i++; - setting_pointers[setting_i][buffer_i] = '\0'; + end = strlen(line_buffer) - 1; + while (end > 0 && isspace((int)setting_pointers[setting_i][end])) { + setting_pointers[setting_i][end] = '\0'; + end--; + } break; /* have read setting; don't compare with others */ } /* end if possible name match */ @@ -2173,7 +2188,7 @@ 
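Editor's note: to complement the split-driver documentation above, here is a minimal sketch of setting up a metadata/raw-data file pair. The extensions and file names are arbitrary examples (not defaults mandated by the library), and H5P_DEFAULT is used for both member access property lists.

    #include "hdf5.h"

    int
    main(void)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        hid_t file;

        /* Metadata is written to "split.h5-m.h5" and raw data to "split.h5-r.h5",
         * since each extension is appended to the name given to H5Fcreate() */
        if (H5Pset_fapl_split(fapl, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT) < 0)
            return 1;

        if ((file = H5Fcreate("split.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
            return 1;

        H5Fclose(file);
        H5Pclose(fapl);
        return 0;
    }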
H5FD_s3comms_signing_key(unsigned char *md, const char *secret, const char *regi HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "`iso8601now` cannot be NULL."); AWS4_secret_len = 4 + strlen(secret) + 1; - AWS4_secret = (char *)H5MM_malloc(sizeof(char *) * AWS4_secret_len); + AWS4_secret = (char *)H5MM_malloc(AWS4_secret_len); if (AWS4_secret == NULL) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Could not allocate space."); @@ -2188,10 +2203,13 @@ H5FD_s3comms_signing_key(unsigned char *md, const char *secret, const char *regi HMAC(EVP_sha256(), (const unsigned char *)AWS4_secret, (int)strlen(AWS4_secret), (const unsigned char *)iso8601now, 8, /* 8 --> length of 8 --> "yyyyMMDD" */ datekey, NULL); + HMAC(EVP_sha256(), (const unsigned char *)datekey, SHA256_DIGEST_LENGTH, (const unsigned char *)region, strlen(region), dateregionkey, NULL); + HMAC(EVP_sha256(), (const unsigned char *)dateregionkey, SHA256_DIGEST_LENGTH, (const unsigned char *)"s3", 2, dateregionservicekey, NULL); + HMAC(EVP_sha256(), (const unsigned char *)dateregionservicekey, SHA256_DIGEST_LENGTH, (const unsigned char *)"aws4_request", 12, md, NULL); diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c index 0a623e9cb36..99ff8df6cfe 100644 --- a/src/H5FDsec2.c +++ b/src/H5FDsec2.c @@ -700,15 +700,16 @@ H5FD__sec2_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU int myerrno = errno; time_t mytime = time(NULL); +#ifndef H5_HAVE_PREADWRITE offset = HDlseek(file->fd, 0, SEEK_CUR); +#endif HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "file read failed: time = %s, filename = '%s', file descriptor = %d, errno = %d, " - "error message = '%s', buf = %p, total read size = %llu, bytes this sub-read = %llu, " - "bytes actually read = %llu, offset = %llu", - ctime(&mytime), file->filename, file->fd, myerrno, strerror(myerrno), buf, - (unsigned long long)size, (unsigned long long)bytes_in, - (unsigned long long)bytes_read, (unsigned long long)offset); + "error message = '%s', buf = %p, total read size = %zu, bytes this sub-read = %llu, " + "offset = %llu", + ctime(&mytime), file->filename, file->fd, myerrno, strerror(myerrno), buf, size, + (unsigned long long)bytes_in, (unsigned long long)offset); } /* end if */ if (0 == bytes_read) { @@ -810,15 +811,16 @@ H5FD__sec2_write(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UN int myerrno = errno; time_t mytime = time(NULL); +#ifndef H5_HAVE_PREADWRITE offset = HDlseek(file->fd, 0, SEEK_CUR); +#endif HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed: time = %s, filename = '%s', file descriptor = %d, errno = %d, " - "error message = '%s', buf = %p, total write size = %llu, bytes this sub-write = " - "%llu, bytes actually written = %llu, offset = %llu", - ctime(&mytime), file->filename, file->fd, myerrno, strerror(myerrno), buf, - (unsigned long long)size, (unsigned long long)bytes_in, - (unsigned long long)bytes_wrote, (unsigned long long)offset); + "error message = '%s', buf = %p, total write size = %zu, bytes this sub-write = " + "%llu, offset = %llu", + ctime(&mytime), file->filename, file->fd, myerrno, strerror(myerrno), buf, size, + (unsigned long long)bytes_in, (unsigned long long)offset); } /* end if */ assert(bytes_wrote > 0); diff --git a/src/H5FDsubfiling/H5FDsubfiling.c b/src/H5FDsubfiling/H5FDsubfiling.c index affcfcbe94a..796654254ad 100644 --- a/src/H5FDsubfiling/H5FDsubfiling.c +++ b/src/H5FDsubfiling/H5FDsubfiling.c @@ -196,14 +196,14 @@ static herr_t H5FD__subfiling_mirror_writes_to_stub(H5FD_subfiling_t *file, uint const void *bufs[]); static herr_t 
H5FD__subfiling_generate_io_vectors(subfiling_context_t *sf_context, size_t in_count, H5FD_mem_t types[], haddr_t file_offsets[], - size_t nelemts[], H5_flexible_const_ptr_t bufs[], - H5FD_subfiling_io_type_t io_type, size_t *ioreq_count, - uint32_t *iovec_len, H5FD_mem_t **io_types, - haddr_t **io_addrs, size_t **io_sizes, - H5_flexible_const_ptr_t **io_bufs); -static void H5FD__subfiling_get_iovec_sizes(subfiling_context_t *sf_context, size_t in_count, - haddr_t file_offsets[], size_t nelemts[], size_t *max_iovec_depth, - size_t *max_num_subfiles); + size_t io_sizes[], H5_flexible_const_ptr_t bufs[], + H5FD_subfiling_io_type_t io_type, size_t *ioreq_count_out, + uint32_t *iovec_len_out, H5FD_mem_t **io_types_out, + haddr_t **io_addrs_out, size_t **io_sizes_out, + H5_flexible_const_ptr_t **io_bufs_out); +static herr_t H5FD__subfiling_get_iovec_sizes(subfiling_context_t *sf_context, size_t in_count, + haddr_t file_offsets[], size_t io_sizes[], + size_t *max_iovec_depth, size_t *max_num_subfiles); static herr_t H5FD__subfiling_translate_io_req_to_iovec( subfiling_context_t *sf_context, size_t iovec_idx, size_t iovec_len, size_t iovec_count, H5FD_mem_t type, haddr_t addr, size_t io_size, H5_flexible_const_ptr_t io_buf, H5FD_subfiling_io_type_t io_type, @@ -2023,7 +2023,7 @@ H5FD__subfiling_io_helper(H5FD_subfiling_t *file, size_t io_count, H5FD_mem_t ty io_count, /* IN: Number of entries in `types`, `addrs`, `sizes` and `bufs` */ types, /* IN: Array of memory types */ addrs, /* IN: Array of starting file offsets */ - sizes, /* IN: Array of I/O sizes (in terms of elements) */ + sizes, /* IN: Array of I/O sizes */ bufs, /* IN: Array of I/O buffers */ io_type, /* IN: Type of I/O being performed (IO_TYPE_WRITE or IO_TYPE_READ) */ &ioreq_count, /* OUT: Number of I/O requests to be made */ @@ -2335,30 +2335,30 @@ H5FD__subfiling_mirror_writes_to_stub(H5FD_subfiling_t *file, uint32_t count, H5 * - the type of I/O being performed (IO_TYPE_WRITE or * IO_TYPE_READ) * - * ioreq_count (OUT) + * ioreq_count_out (OUT) * - the number of I/O requests needed to fully satisfy the * I/O operation * - * iovec_len (OUT) + * iovec_len_out (OUT) * - the size of each I/O vector (in terms of array elements) * for each I/O request to be made * - * io_types (OUT) + * io_types_out (OUT) * - I/O vector of memory types for the I/O operation. * Allocated by this function and must be freed by the * caller. * - * io_addrs (OUT) + * io_addrs_out (OUT) * - I/O vector of file addresses for the I/O operation. * Allocated by this function and must be freed by the * caller. * - * io_sizes (OUT) + * io_sizes_out (OUT) * - I/O vector of the I/O sizes for the I/O operation. * Allocated by this function and must be freed by the * caller. * - * io_bufs (OUT) + * io_bufs_out (OUT) * - I/O vector of the I/O buffers for the I/O operation. * Allocated by this function and must be freed by the * caller. 
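Editor's note: the io_sizes[] (and types[]) arrays handled here follow the vector-I/O shorthand used elsewhere in this patch: a zero entry at an index greater than zero means "reuse the previous value for the rest of the array". A standalone sketch of that decoding rule, mirroring the extend_sizes loop in the changes below (illustration only, not library code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Decode a sizes array that uses the "extend" shorthand: once an entry of 0
     * is seen at index > 0, every remaining element reuses the last explicit size. */
    static void
    print_effective_sizes(const size_t sizes[], size_t count)
    {
        bool   extend_sizes = false;
        size_t io_size      = 0;

        for (size_t i = 0; i < count; i++) {
            if (!extend_sizes) {
                if (i > 0 && sizes[i] == 0)
                    extend_sizes = true;
                else
                    io_size = sizes[i];
            }
            printf("entry %zu: %zu bytes\n", i, io_size);
        }
    }

    int
    main(void)
    {
        size_t sizes[] = {1024, 512, 0, 0}; /* entries 2 and 3 reuse 512 */

        print_effective_sizes(sizes, 4);
        return 0;
    }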
@@ -2368,10 +2368,11 @@ H5FD__subfiling_mirror_writes_to_stub(H5FD_subfiling_t *file, uint32_t count, H5 */ static herr_t H5FD__subfiling_generate_io_vectors(subfiling_context_t *sf_context, size_t in_count, H5FD_mem_t types[], - haddr_t file_offsets[], size_t nelemts[], H5_flexible_const_ptr_t bufs[], - H5FD_subfiling_io_type_t io_type, size_t *ioreq_count, - uint32_t *iovec_len, H5FD_mem_t **io_types, haddr_t **io_addrs, - size_t **io_sizes, H5_flexible_const_ptr_t **io_bufs) + haddr_t file_offsets[], size_t io_sizes[], H5_flexible_const_ptr_t bufs[], + H5FD_subfiling_io_type_t io_type, size_t *ioreq_count_out, + uint32_t *iovec_len_out, H5FD_mem_t **io_types_out, + haddr_t **io_addrs_out, size_t **io_sizes_out, + H5_flexible_const_ptr_t **io_bufs_out) { H5_flexible_const_ptr_t *loc_io_bufs = NULL; H5FD_mem_t *loc_io_types = NULL; @@ -2395,18 +2396,18 @@ H5FD__subfiling_generate_io_vectors(subfiling_context_t *sf_context, size_t in_c assert(sf_context->topology); assert(types || in_count == 0); assert(file_offsets || in_count == 0); - assert(nelemts || in_count == 0); + assert(io_sizes || in_count == 0); assert(bufs || in_count == 0); - assert(ioreq_count); - assert(iovec_len); - assert(io_types); - assert(io_addrs); - assert(io_sizes); - assert(io_bufs); + assert(ioreq_count_out); + assert(iovec_len_out); + assert(io_types_out); + assert(io_addrs_out); + assert(io_sizes_out); + assert(io_bufs_out); /* Set some returned values early */ - *ioreq_count = 0; - *iovec_len = 0; + *ioreq_count_out = 0; + *iovec_len_out = 0; /* Nothing to do */ if (in_count == 0) @@ -2416,11 +2417,16 @@ H5FD__subfiling_generate_io_vectors(subfiling_context_t *sf_context, size_t in_c * Do some initial pre-processing to determine how large of I/O vectors we * will need to allocate to satisfy the entire I/O request */ - H5FD__subfiling_get_iovec_sizes(sf_context, in_count, file_offsets, nelemts, &max_iovec_depth, - &max_num_subfiles_touched); + if (H5FD__subfiling_get_iovec_sizes(sf_context, in_count, file_offsets, io_sizes, &max_iovec_depth, + &max_num_subfiles_touched) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't determine maximum I/O request size"); tot_iovec_len = in_count * max_iovec_depth * max_num_subfiles_touched; + /* Nothing to do */ + if (tot_iovec_len == 0) + HGOTO_DONE(SUCCEED); + #ifdef H5_SUBFILING_DEBUG H5FD__subfiling_log( sf_context->sf_context_id, @@ -2456,10 +2462,10 @@ H5FD__subfiling_generate_io_vectors(subfiling_context_t *sf_context, size_t in_c } if (!extend_sizes) { - if (io_idx > 0 && nelemts[io_idx] == 0) + if (io_idx > 0 && io_sizes[io_idx] == 0) extend_sizes = true; else - io_size = nelemts[io_idx]; + io_size = io_sizes[io_idx]; } if (H5FD__subfiling_translate_io_req_to_iovec(sf_context, iovec_idx, max_num_subfiles_touched, @@ -2469,13 +2475,13 @@ H5FD__subfiling_generate_io_vectors(subfiling_context_t *sf_context, size_t in_c HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "can't translate I/O request to I/O vectors"); } - *ioreq_count = in_count * max_iovec_depth; + *ioreq_count_out = in_count * max_iovec_depth; H5_CHECK_OVERFLOW(max_num_subfiles_touched, size_t, uint32_t); - *iovec_len = (uint32_t)max_num_subfiles_touched; - *io_types = loc_io_types; - *io_addrs = loc_io_addrs; - *io_sizes = loc_io_sizes; - *io_bufs = loc_io_bufs; + *iovec_len_out = (uint32_t)max_num_subfiles_touched; + *io_types_out = loc_io_types; + *io_addrs_out = loc_io_addrs; + *io_sizes_out = loc_io_sizes; + *io_bufs_out = loc_io_bufs; done: if (ret_value < 0) { @@ -2497,26 +2503,28 @@ 
H5FD__subfiling_generate_io_vectors(subfiling_context_t *sf_context, size_t in_c * info is used to calculate the total size of I/O vectors we * need to allocate to satisfy an entire I/O request. * - * Return: Maximum I/O vector depth and maximum number of subfiles - * touched (can't fail) + * Return: Non-negative on success/negative on failure * *------------------------------------------------------------------------- */ -static void +static herr_t H5FD__subfiling_get_iovec_sizes(subfiling_context_t *sf_context, size_t in_count, haddr_t file_offsets[], - size_t nelemts[], size_t *max_iovec_depth, size_t *max_num_subfiles) + size_t io_sizes[], size_t *max_iovec_depth, size_t *max_num_subfiles) { int64_t stripe_size = 0; int64_t block_size = 0; size_t loc_max_iovec_depth = 0; size_t loc_max_num_subfiles = 0; + size_t io_size = 0; + bool extend_sizes = false; int num_subfiles = 0; + herr_t ret_value = SUCCEED; - FUNC_ENTER_PACKAGE_NOERR + FUNC_ENTER_PACKAGE assert(sf_context); assert(file_offsets); - assert(nelemts); + assert(io_sizes); assert(max_iovec_depth); assert(max_num_subfiles); @@ -2538,7 +2546,23 @@ H5FD__subfiling_get_iovec_sizes(subfiling_context_t *sf_context, size_t in_count size_t cur_iovec_depth; H5_CHECKED_ASSIGN(cur_file_offset, int64_t, file_offsets[io_idx], haddr_t); - H5_CHECKED_ASSIGN(data_size, int64_t, nelemts[io_idx], size_t); + + if (!extend_sizes) { + if (io_idx > 0 && io_sizes[io_idx] == 0) + extend_sizes = true; + else + io_size = io_sizes[io_idx]; + } + + H5_CHECKED_ASSIGN(data_size, int64_t, io_size, size_t); + + if (cur_file_offset < 0) + HGOTO_ERROR(H5E_VFL, H5E_BADVALUE, FAIL, + "file offset of %" PRIuHADDR " at index %zu too large; wrapped around", + file_offsets[io_idx], io_idx); + if (data_size < 0) + HGOTO_ERROR(H5E_VFL, H5E_BADVALUE, FAIL, "I/O size of %zu at index %zu too large; wrapped around", + io_size, io_idx); /* * Calculate the following from the starting file offset: @@ -2645,7 +2669,8 @@ H5FD__subfiling_get_iovec_sizes(subfiling_context_t *sf_context, size_t in_count *max_iovec_depth = loc_max_iovec_depth; *max_num_subfiles = loc_max_num_subfiles; - FUNC_LEAVE_NOAPI_VOID +done: + FUNC_LEAVE_NOAPI(ret_value) } /*------------------------------------------------------------------------- diff --git a/src/H5Fdeprec.c b/src/H5Fdeprec.c index 06141de767b..c7552840bc1 100644 --- a/src/H5Fdeprec.c +++ b/src/H5Fdeprec.c @@ -214,7 +214,7 @@ H5Fset_latest_format(hid_t file_id, hbool_t latest_format) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(file_id, H5I_FILE))) + if (NULL == (vol_obj = H5VL_vol_object_verify(file_id, H5I_FILE))) HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "not a file ID"); /* Set up collective metadata if appropriate */ diff --git a/src/H5Fint.c b/src/H5Fint.c index e9817b13048..f653e0b71f0 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -1786,7 +1786,7 @@ H5F__check_if_using_file_locks(H5P_genplist_t *fapl, bool *use_file_locking, boo * s: the open succeeds with flags combination from both the first and second opens * * NOTE: If the 'try' flag is true, not opening the file with the - * "non-tentative" VFD 'open' call is not treated an error; SUCCEED is + * "non-tentative" VFD 'open' call is not treated as an error; SUCCEED is * returned, with the file ptr set to NULL. If 'try' is false, failing * the "non-tentative" VFD 'open' call generates an error. 
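Editor's note: the "try" convention described above (hard failure versus "did not open, and that is acceptable") can be illustrated with a generic sketch. try_open() below is a hypothetical stand-in with the same calling convention, not an HDF5 routine.

    #include <stdio.h>

    /* Hypothetical stand-in with the semantics described above: return -1 only on
     * a hard error; if the tentative open simply fails, return 0 and leave *out NULL. */
    static int
    try_open(const char *name, FILE **out)
    {
        *out = fopen(name, "rb");
        return 0;
    }

    int
    main(void)
    {
        FILE *fp = NULL;

        if (try_open("maybe-missing.dat", &fp) < 0)
            return 1; /* real error */

        if (fp == NULL) {
            /* Not an error in "try" mode: fall back or report "not openable" */
            return 0;
        }

        /* ... use the file ... */
        fclose(fp);
        return 0;
    }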
* @@ -1985,7 +1985,7 @@ H5F_open(bool try, H5F_t **_file, const char *name, unsigned flags, hid_t fcpl_i if ((ci_load || ci_write) && (flags & (H5F_ACC_SWMR_READ | H5F_ACC_SWMR_WRITE))) HGOTO_ERROR(H5E_FILE, H5E_UNSUPPORTED, FAIL, "can't have both SWMR and cache image"); - /* Retain the name the file was opened with */ + /* Retain the original filename. */ file->open_name = H5MM_xstrdup(name); /* Short cuts */ diff --git a/src/H5Fmodule.h b/src/H5Fmodule.h index 5cb4a05dd7f..c9f1b31ceac 100644 --- a/src/H5Fmodule.h +++ b/src/H5Fmodule.h @@ -43,7 +43,7 @@ * \li The use of low-level file drivers * * This chapter assumes an understanding of the material presented in the data model chapter. For - * more information, @see @ref sec_data_model. + * more information, see \ref sec_data_model. * * \subsection subsec_file_access_modes File Access Modes * There are two issues regarding file access: @@ -101,7 +101,7 @@ * a user-definable data block; the size of data address parameters; properties of the B-trees that are * used to manage the data in the file; and certain HDF5 Library versioning information. * - * For more information, @see @ref subsubsec_file_property_lists_props. + * For more information, see \ref subsubsec_file_property_lists_props. * * This section has a more detailed discussion of file creation properties. If you have no special * requirements for these file characteristics, you can simply specify #H5P_DEFAULT for the default @@ -112,7 +112,7 @@ * settings, and parallel I/O. Data alignment, metadata block and cache sizes, and data sieve buffer * size are factors in improving I/O performance. * - * For more information, @see @ref subsubsec_file_property_lists_access. + * For more information, see \ref subsubsec_file_property_lists_access. * * This section has a more detailed discussion of file access properties. If you have no special * requirements for these file access characteristics, you can simply specify #H5P_DEFAULT for the @@ -466,8 +466,9 @@ * remain valid. Each of these file identifiers must be released by calling #H5Fclose when it is no * longer needed. * - * For more information, @see @ref subsubsec_file_property_lists_access. - * For more information, @see @ref subsec_file_property_lists. + * For more information, see \ref subsubsec_file_property_lists_access. + * + * For more information, see \ref subsec_file_property_lists. * * \subsection subsec_file_closes Closing an HDF5 File * #H5Fclose both closes a file and releases the file identifier returned by #H5Fopen or #H5Fcreate. @@ -512,7 +513,7 @@ * information for every property list function is provided in the \ref H5P * section of the HDF5 Reference Manual. * - * For more information, @see @ref sec_plist. + * For more information, @see \ref sec_plist. * * \subsubsection subsubsec_file_property_lists_create Creating a Property List * If you do not wish to rely on the default file creation and access properties, you must first create @@ -594,7 +595,7 @@ * \subsubsection subsubsec_file_property_lists_access File Access Properties * This section discusses file access properties that are not related to the low-level file drivers. File * drivers are discussed separately later in this chapter. - * For more information, @see @ref subsec_file_alternate_drivers. + * For more information, @see \ref subsec_file_alternate_drivers. * * File access property lists control various aspects of file I/O and structure. 
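Editor's note: a minimal sketch of the access-property-list workflow discussed above: create a FAPL, tune a couple of access properties, and then identify the driver actually used by an open file. The property values and file name are arbitrary examples; the default POSIX ("sec2") driver is assumed.

    #include "hdf5.h"

    int
    main(void)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        hid_t file, used_fapl;

        /* Example access properties: align larger objects and size the chunk cache */
        H5Pset_alignment(fapl, 4096, 4096);
        H5Pset_cache(fapl, 0, 521, 1024 * 1024, 0.75);

        if ((file = H5Fcreate("props.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
            return 1;

        /* Identify the low-level driver used by the open file */
        used_fapl = H5Fget_access_plist(file);
        if (H5Pget_driver(used_fapl) == H5FD_SEC2) {
            /* the default POSIX ("sec2") driver is in use */
        }

        H5Pclose(used_fapl);
        H5Pclose(fapl);
        H5Fclose(file);
        return 0;
    }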
* @@ -657,7 +658,7 @@ * * HDF5 employs an extremely flexible mechanism called the virtual file layer, or VFL, for file * I/O. A full understanding of the VFL is only necessary if you plan to write your own drivers - * @see \ref VFL in the HDF5 Technical Notes. + * see \ref VFL in the HDF5 Technical Notes. * * For our * purposes here, it is sufficient to know that the low-level drivers used for file I/O reside in the @@ -690,7 +691,7 @@ * * If an application requires a special-purpose low-level driver, the VFL provides a public API for * creating one. For more information on how to create a driver, - * @see @ref VFL in the HDF5 Technical Notes. + * see \ref VFL in the HDF5 Technical Notes. * * \subsubsection subsubsec_file_alternate_drivers_id Identifying the Previously‐used File Driver * When creating a new HDF5 file, no history exists, so the file driver must be specified if it is to be @@ -888,11 +889,11 @@ * * Additional parameters may be added to these functions in the future. * - * @see + * see * HDF5 File Image Operations * section for information on more advanced usage of the Memory file driver, and - * @see + * see * Modified Region Writes * section for information on how to set write operations so that only modified regions are written * to storage. @@ -1070,7 +1071,7 @@ * name is FILE. If the function does not find an existing file, it will create one. If it does find an * existing file, it will empty the file in preparation for a new set of data. The identifier for the * "new" file will be passed back to the application program. - * For more information, @see @ref subsec_file_access_modes. + * For more information, @see \ref subsec_file_access_modes. * * Creating a file with default creation and access properties * \code @@ -1182,7 +1183,7 @@ * Note: In the code example above, loc_id is the file identifier for File1, /B is the link path to the * group where File2 is mounted, child_id is the file identifier for File2, and plist_id is a property * list identifier. - * For more information, @see @ref sec_group. + * For more information, @see \ref sec_group. * * See the entries for #H5Fmount, #H5Funmount, and #H5Lcreate_external in the HDF5 Reference Manual. 
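Editor's note: to complement the mounting discussion above, a minimal sketch of H5Fmount()/H5Funmount(). It assumes File1.h5 already contains a group /B and that File2.h5 exists; both names are illustrative.

    #include "hdf5.h"

    int
    main(void)
    {
        hid_t file1 = H5Fopen("File1.h5", H5F_ACC_RDWR, H5P_DEFAULT);
        hid_t file2 = H5Fopen("File2.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

        if (file1 < 0 || file2 < 0)
            return 1;

        /* Mount File2 at /B; its objects become reachable as /B/... through file1 */
        if (H5Fmount(file1, "/B", file2, H5P_DEFAULT) < 0)
            return 1;

        /* ... access objects, e.g. H5Dopen2(file1, "/B/dataset", H5P_DEFAULT) ... */

        if (H5Funmount(file1, "/B") < 0)
            return 1;

        H5Fclose(file2);
        H5Fclose(file1);
        return 0;
    }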
* diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h index c9c23fd9aa9..1e5a84cdb09 100644 --- a/src/H5Fpublic.h +++ b/src/H5Fpublic.h @@ -874,7 +874,7 @@ H5_DLL herr_t H5Fget_vfd_handle(hid_t file_id, hid_t fapl, void **file_handle); * * \brief Mounts an HDF5 file * - * \loc_id{loc} + * \fg_loc_id{loc_id} * \param[in] name Name of the group onto which the file specified by \p child * is to be mounted * \file_id{child} @@ -898,13 +898,13 @@ H5_DLL herr_t H5Fget_vfd_handle(hid_t file_id, hid_t fapl, void **file_handle); * \since 1.0.0 * */ -H5_DLL herr_t H5Fmount(hid_t loc, const char *name, hid_t child, hid_t plist); +H5_DLL herr_t H5Fmount(hid_t loc_id, const char *name, hid_t child, hid_t plist); /** * \ingroup H5F * - * \brief Unounts an HDF5 file + * \brief Un-mounts an HDF5 file * - * \loc_id{loc} + * \fg_loc_id{loc_id} * \param[in] name Name of the mount point * * \return \herr_t @@ -922,7 +922,7 @@ H5_DLL herr_t H5Fmount(hid_t loc, const char *name, hid_t child, hid_t plist); * \since 1.0.0 * */ -H5_DLL herr_t H5Funmount(hid_t loc, const char *name); +H5_DLL herr_t H5Funmount(hid_t loc_id, const char *name); /** * \ingroup H5F * @@ -1113,7 +1113,7 @@ H5_DLL herr_t H5Fset_mdc_config(hid_t file_id, const H5AC_cache_config_t *config * \return \herr_t * * \details H5Fget_mdc_hit_rate() queries the metadata cache of the target file to obtain its hit rate - * \Code{(cache hits / (cache hits + cache misses))} since the last time hit rate statistics + * \TText{(cache hits / (cache hits + cache misses))} since the last time hit rate statistics * were reset. If the cache has not been accessed since the last time the hit rate stats were * reset, the hit rate is defined to be 0.0. * @@ -1293,22 +1293,22 @@ H5_DLL herr_t H5Fget_info2(hid_t obj_id, H5F_info2_t *file_info); * library and logarithmic base 10. * * If read retries are incurred for a metadata entry \c i, the library will - * allocate memory for \Code{retries[i] (nbins * sizeof(uint32_t)} and store + * allocate memory for \TText{retries[i] (nbins * sizeof(uint32_t)} and store * the collection of retries there. If there are no retries for a metadata entry - * \c i, \Code{retries[i]} will be NULL. After a call to this routine, users should - * free each \Code{retries[i]} that is non-NULL, otherwise resource leak will occur. + * \c i, \TText{retries[i]} will be NULL. After a call to this routine, users should + * free each \TText{retries[i]} that is non-NULL, otherwise resource leak will occur. * * For the library default read attempts of 100 for SWMR access, nbins will be 2 * as depicted below: - * \li \Code{retries[i][0]} is the number of 1 to 9 read retries. - * \li \Code{retries[i][1]} is the number of 10 to 99 read retries. + * \li \TText{retries[i][0]} is the number of 1 to 9 read retries. + * \li \TText{retries[i][1]} is the number of 10 to 99 read retries. * For the library default read attempts of 1 for non-SWMR access, \c nbins will - * be 0 and each \Code{retries[i]} will be NULL. + * be 0 and each \TText{retries[i]} will be NULL. * - * The following table lists the 21 metadata entries of \Code{retries[]}: + * The following table lists the 21 metadata entries of \TText{retries[]}: * * - * + * * * * @@ -1784,20 +1784,20 @@ H5_DLL herr_t H5Fset_dset_no_attrs_hint(hid_t file_id, hbool_t minimize); * pass the same values for \p file_id and \p flag. * * This function is available only when the HDF5 library is configured with parallel support - * (\Code{--enable-parallel | HDF5_ENABLE_PARALLEL}). 
It is useful only when used with the #H5FD_MPIO driver + * (\TText{--enable-parallel | HDF5_ENABLE_PARALLEL}). It is useful only when used with the #H5FD_MPIO driver * (see H5Pset_fapl_mpio()). * \endparblock * * \attention * \parblock - * H5Fset_mpi_atomicity() calls \Code{MPI_File_set_atomicity} underneath and is not supported - * if the execution platform does not support \Code{MPI_File_set_atomicity}. When it is + * H5Fset_mpi_atomicity() calls \TText{MPI_File_set_atomicity} underneath and is not supported + * if the execution platform does not support \TText{MPI_File_set_atomicity}. When it is * supported and used, the performance of data access operations may drop significantly. * - * In certain scenarios, even when \Code{MPI_File_set_atomicity} is supported, setting + * In certain scenarios, even when \TText{MPI_File_set_atomicity} is supported, setting * atomicity with H5Fset_mpi_atomicity() and \p flag set to 1 does not always yield * strictly atomic updates. For example, some H5Dwrite() calls translate to multiple - * \Code{MPI_File_write_at} calls. This happens in all cases where the high-level file + * \TText{MPI_File_write_at} calls. This happens in all cases where the high-level file * access routine translates to multiple lower level file access routines. * The following scenarios will raise this issue: * \li Non-contiguous file access using independent I/O diff --git a/src/H5G.c b/src/H5G.c index ede81551b9c..88d617afc3d 100644 --- a/src/H5G.c +++ b/src/H5G.c @@ -340,7 +340,7 @@ H5Gcreate_anon(hid_t loc_id, hid_t gcpl_id, hid_t gapl_id) loc_params.obj_type = H5I_get_type(loc_id); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); /* Create the group */ @@ -507,7 +507,7 @@ H5Gget_create_plist(hid_t group_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(group_id, H5I_GROUP))) + if (NULL == (vol_obj = H5VL_vol_object_verify(group_id, H5I_GROUP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "not a group ID"); /* Set up VOL callback arguments */ @@ -960,7 +960,7 @@ H5Gflush(hid_t group_id) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(group_id, H5I_GROUP))) + if (NULL == (vol_obj = H5VL_vol_object_verify(group_id, H5I_GROUP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a group ID"); /* Set up collective metadata if appropriate */ @@ -998,7 +998,7 @@ H5Grefresh(hid_t group_id) FUNC_ENTER_API(FAIL) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(group_id, H5I_GROUP))) + if (NULL == (vol_obj = H5VL_vol_object_verify(group_id, H5I_GROUP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a group ID"); /* Set up collective metadata if appropriate */ diff --git a/src/H5Gdeprec.c b/src/H5Gdeprec.c index 433748e4389..5f9ad63756e 100644 --- a/src/H5Gdeprec.c +++ b/src/H5Gdeprec.c @@ -204,7 +204,7 @@ H5Gcreate1(hid_t loc_id, const char *name, size_t size_hint) loc_params.obj_type = H5I_get_type(loc_id); /* get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); /* Create the group */ @@ -262,7 +262,7 @@ H5Gopen1(hid_t loc_id, const char *name) loc_params.obj_type = H5I_get_type(loc_id); /* get the 
location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); /* Open the group */ @@ -320,7 +320,7 @@ H5Glink(hid_t cur_loc_id, H5G_link_t type, const char *cur_name, const char *new new_loc_params.loc_data.loc_by_name.lapl_id = H5P_LINK_ACCESS_DEFAULT; /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(cur_loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(cur_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Construct a temporary VOL object */ @@ -351,7 +351,7 @@ H5Glink(hid_t cur_loc_id, H5G_link_t type, const char *cur_name, const char *new loc_params.obj_type = H5I_get_type(cur_loc_id); /* get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(cur_loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(cur_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set up VOL callback arguments */ @@ -409,9 +409,9 @@ H5Glink2(hid_t cur_loc_id, const char *cur_name, H5G_link_t type, hid_t new_loc_ new_loc_params.loc_data.loc_by_name.lapl_id = H5P_LINK_ACCESS_DEFAULT; /* Get the location objects */ - if (NULL == (vol_obj1 = (H5VL_object_t *)H5I_object(cur_loc_id))) + if (NULL == (vol_obj1 = H5VL_vol_object(cur_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); - if (NULL == (vol_obj2 = (H5VL_object_t *)H5I_object(new_loc_id))) + if (NULL == (vol_obj2 = H5VL_vol_object(new_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set up VOL callback arguments */ @@ -443,7 +443,7 @@ H5Glink2(hid_t cur_loc_id, const char *cur_name, H5G_link_t type, hid_t new_loc_ loc_params.obj_type = H5I_get_type(new_loc_id); /* get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(new_loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(new_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set up VOL callback arguments */ @@ -493,7 +493,7 @@ H5Gmove(hid_t src_loc_id, const char *src_name, const char *dst_name) loc_params2.loc_data.loc_by_name.lapl_id = H5P_LINK_ACCESS_DEFAULT; /* get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(src_loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(src_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Move the link */ @@ -519,10 +519,35 @@ H5Gmove2(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *d H5VL_loc_params_t loc_params1; H5VL_object_t *vol_obj2 = NULL; /* Object of dst_id */ H5VL_loc_params_t loc_params2; + H5I_type_t src_id_type = H5I_BADID, dst_id_type = H5I_BADID; herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) + /* Check arguments */ + if (!src_name || !*src_name) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no current name specified"); + if (!dst_name || !*dst_name) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no destination name specified"); + + /* src and dst location IDs cannot both have the value of H5L_SAME_LOC */ + if (src_loc_id == H5L_SAME_LOC && dst_loc_id == H5L_SAME_LOC) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "current and destination should not both be H5L_SAME_LOC"); + + /* reset an ID in the case of H5L_SAME_LOC */ + if (src_loc_id == H5L_SAME_LOC) + src_loc_id = dst_loc_id; + else if (dst_loc_id == H5L_SAME_LOC) + dst_loc_id = src_loc_id; + + 
src_id_type = H5I_get_type(src_loc_id); + if (!(H5I_GROUP == src_id_type || H5I_FILE == src_id_type)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid group (or file) ID, src_loc_id"); + + dst_id_type = H5I_get_type(dst_loc_id); + if (!(H5I_GROUP == dst_id_type || H5I_FILE == dst_id_type)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid group (or file) ID, dst_loc_id"); + /* Set up collective metadata if appropriate */ if (H5CX_set_loc(dst_loc_id) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTSET, FAIL, "can't set collective metadata read info"); @@ -531,22 +556,20 @@ H5Gmove2(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *d loc_params1.type = H5VL_OBJECT_BY_NAME; loc_params1.loc_data.loc_by_name.name = src_name; loc_params1.loc_data.loc_by_name.lapl_id = H5P_LINK_ACCESS_DEFAULT; - loc_params1.obj_type = H5I_get_type(src_loc_id); + loc_params1.obj_type = src_id_type; /* Set location parameter for destination object */ loc_params2.type = H5VL_OBJECT_BY_NAME; loc_params2.loc_data.loc_by_name.name = dst_name; loc_params2.loc_data.loc_by_name.lapl_id = H5P_LINK_ACCESS_DEFAULT; - loc_params2.obj_type = H5I_get_type(dst_loc_id); + loc_params2.obj_type = dst_id_type; - if (H5L_SAME_LOC != src_loc_id) - /* get the location object */ - if (NULL == (vol_obj1 = (H5VL_object_t *)H5I_object(src_loc_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); - if (H5L_SAME_LOC != dst_loc_id) - /* get the location object */ - if (NULL == (vol_obj2 = (H5VL_object_t *)H5I_object(dst_loc_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* get the location object */ + if (NULL == (vol_obj1 = H5VL_vol_object(src_loc_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* get the location object */ + if (NULL == (vol_obj2 = H5VL_vol_object(dst_loc_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Move the link */ if (H5VL_link_move(vol_obj1, &loc_params1, vol_obj2, &loc_params2, H5P_LINK_CREATE_DEFAULT, @@ -588,7 +611,7 @@ H5Gunlink(hid_t loc_id, const char *name) loc_params.loc_data.loc_by_name.lapl_id = H5P_LINK_ACCESS_DEFAULT; /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set up VOL callback arguments */ @@ -635,7 +658,7 @@ H5Gget_linkval(hid_t loc_id, const char *name, size_t size, char *buf /*out*/) loc_params.loc_data.loc_by_name.lapl_id = H5P_LINK_ACCESS_DEFAULT; /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set up VOL callback arguments */ @@ -719,11 +742,11 @@ H5Gset_comment(hid_t loc_id, const char *name, const char *comment) * * Note: Deprecated in favor of H5Oget_comment/H5Oget_comment_by_name * - * Return: Success: Number of characters in the comment counting - * the null terminator. The value returned may - * be larger than the BUFSIZE argument. + * Return: Success: Number of characters in the comment, excluding the + * NULL terminator character. The value returned may be + * larger than the BUFSIZE argument. 
* - * Failure: Negative + * Failure: Negative * *------------------------------------------------------------------------- */ @@ -1167,7 +1190,7 @@ H5Gget_objname_by_idx(hid_t loc_id, hsize_t idx, char *name /*out*/, size_t size loc_params.obj_type = H5I_get_type(loc_id); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, (-1), "invalid location identifier"); /* Set up VOL callback arguments */ diff --git a/src/H5Gmodule.h b/src/H5Gmodule.h index a06d44cea75..49fc9ed9472 100644 --- a/src/H5Gmodule.h +++ b/src/H5Gmodule.h @@ -722,7 +722,7 @@ * *

    Mounting a File

    * An external link is a permanent connection between two files. A temporary connection can be set - * up with the #H5Fmount function. For more information, @see sec_file. + * up with the #H5Fmount function. For more information, @see \ref sec_file. * For more information, see the #H5Fmount function in the \ref RM. * * \subsubsection subsubsec_group_program_info Discovering Information about Objects diff --git a/src/H5Gnode.c b/src/H5Gnode.c index cc45f39b1d6..c89417750e2 100644 --- a/src/H5Gnode.c +++ b/src/H5Gnode.c @@ -288,7 +288,7 @@ H5G__node_create(H5F_t *f, H5B_ins_t H5_ATTR_UNUSED op, void *_lt_key, void H5_A assert(H5B_INS_FIRST == op); if (NULL == (sym = H5FL_CALLOC(H5G_node_t))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); + HGOTO_ERROR(H5E_SYM, H5E_CANTALLOC, FAIL, "memory allocation failed"); sym->node_size = H5G_NODE_SIZE(f); if (HADDR_UNDEF == (*addr_p = H5MF_alloc(f, H5FD_MEM_BTREE, (hsize_t)sym->node_size))) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to allocate file space"); @@ -309,13 +309,12 @@ H5G__node_create(H5F_t *f, H5B_ins_t H5_ATTR_UNUSED op, void *_lt_key, void H5_A rt_key->offset = 0; done: - if (ret_value < 0) { + if (ret_value < 0) if (sym != NULL) { if (sym->entry != NULL) sym->entry = H5FL_SEQ_FREE(H5G_entry_t, sym->entry); sym = H5FL_FREE(H5G_node_t, sym); } /* end if */ - } /* end if */ FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__node_create() */ @@ -472,7 +471,7 @@ H5G__node_found(H5F_t *f, haddr_t addr, const void H5_ATTR_UNUSED *_lt_key, bool * Load the symbol table node for exclusive access. */ if (NULL == (sn = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, FAIL, "unable to protect symbol table node"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table node"); /* * Binary search. @@ -504,7 +503,7 @@ H5G__node_found(H5F_t *f, haddr_t addr, const void H5_ATTR_UNUSED *_lt_key, bool done: if (sn && H5AC_unprotect(f, H5AC_SNODE, addr, sn, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to release symbol table node"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to release symbol table node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__node_found() */ @@ -569,7 +568,7 @@ H5G__node_insert(H5F_t *f, haddr_t addr, void H5_ATTR_UNUSED *_lt_key, bool H5_A * Load the symbol node. */ if (NULL == (sn = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, addr, f, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to protect symbol table node"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to protect symbol table node"); /* * Where does the new symbol get inserted? We use a binary search. 
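
The H5Fmount()/H5Funmount() documentation touched earlier in this diff describes a temporary connection: one HDF5 file mounted onto a group of another. The following minimal C sketch is illustrative only and not part of the diff; the file names and the mount-point group "/mnt" (assumed to already exist in parent.h5) are hypothetical.

#include "hdf5.h"

int main(void)
{
    /* Open the parent file read-write and the child file read-only */
    hid_t parent = H5Fopen("parent.h5", H5F_ACC_RDWR, H5P_DEFAULT);
    hid_t child  = H5Fopen("child.h5", H5F_ACC_RDONLY, H5P_DEFAULT);

    if (parent < 0 || child < 0)
        return 1;

    /* Mount child.h5 onto the existing group "/mnt" in parent.h5 */
    if (H5Fmount(parent, "/mnt", child, H5P_DEFAULT) < 0)
        return 1;

    /* Objects in child.h5 are now reachable through parent as "/mnt/..." */

    /* Undo the temporary connection and close both files */
    H5Funmount(parent, "/mnt");
    H5Fclose(child);
    H5Fclose(parent);

    return 0;
}
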
@@ -609,7 +608,7 @@ H5G__node_insert(H5F_t *f, haddr_t addr, void H5_ATTR_UNUSED *_lt_key, bool H5_A HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, H5B_INS_ERROR, "unable to split symbol table node"); if (NULL == (snrt = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, *new_node_p, f, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to split symbol table node"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to split symbol table node"); H5MM_memcpy(snrt->entry, sn->entry + H5F_SYM_LEAF_K(f), H5F_SYM_LEAF_K(f) * sizeof(H5G_entry_t)); snrt->nsyms = H5F_SYM_LEAF_K(f); @@ -662,9 +661,9 @@ H5G__node_insert(H5F_t *f, haddr_t addr, void H5_ATTR_UNUSED *_lt_key, bool H5_A done: if (snrt && H5AC_unprotect(f, H5AC_SNODE, *new_node_p, snrt, snrt_flags) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, H5B_INS_ERROR, "unable to release symbol table node"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to release symbol table node"); if (sn && H5AC_unprotect(f, H5AC_SNODE, addr, sn, sn_flags) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, H5B_INS_ERROR, "unable to release symbol table node"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to release symbol table node"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__node_insert() */ @@ -718,7 +717,7 @@ H5G__node_remove(H5F_t *f, haddr_t addr, void H5_ATTR_NDEBUG_UNUSED *_lt_key /*i /* Load the symbol table */ if (NULL == (sn = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, addr, f, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to protect symbol table node"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to protect symbol table node"); /* "Normal" removal of a single entry from the symbol table node */ if (udata->common.name != NULL) { @@ -860,7 +859,7 @@ H5G__node_remove(H5F_t *f, haddr_t addr, void H5_ATTR_NDEBUG_UNUSED *_lt_key /*i tmp_oloc.file = f; /* Reduce the link count for all entries in this node */ - for (idx = 0; idx < sn->nsyms; idx++) { + for (idx = 0; idx < sn->nsyms; idx++) if (!(H5G_CACHED_SLINK == sn->entry[idx].type)) { /* Decrement the reference count */ assert(H5_addr_defined(sn->entry[idx].header)); @@ -870,7 +869,6 @@ H5G__node_remove(H5F_t *f, haddr_t addr, void H5_ATTR_NDEBUG_UNUSED *_lt_key /*i HGOTO_ERROR(H5E_SYM, H5E_CANTDELETE, H5B_INS_ERROR, "unable to decrement object link count"); } /* end if */ - } /* end for */ /* * We are about to remove all the symbols in this node. Free this @@ -919,7 +917,7 @@ H5G__node_iterate(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, /* Protect the symbol table node & local heap while we iterate over entries */ if (NULL == (sn = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to load symbol table node"); /* * Iterate over the symbol table node entries. 
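
Many of the H5Gnode.c and H5Gstab.c hunks in this diff only adjust the minor error code passed to HGOTO_ERROR/HDONE_ERROR (for example, H5E_CANTLOAD and H5E_PROTECT become H5E_CANTPROTECT and H5E_CANTUNPROTECT around the H5AC_protect/H5AC_unprotect pairs). For readers unfamiliar with the idiom those macros implement, here is a simplified, self-contained C sketch using stand-in macros and stub functions, not HDF5 source: a failure before the done: label jumps to cleanup, while a failure during cleanup is reported without jumping, so a protected resource is released exactly once.

#include <stdio.h>

/* Simplified stand-ins for the library's HGOTO_ERROR / HDONE_ERROR macros */
#define GOTO_ERROR(msg)  do { fprintf(stderr, "%s\n", msg); ret_value = -1; goto done; } while (0)
#define DONE_ERROR(msg)  do { fprintf(stderr, "%s\n", msg); ret_value = -1; } while (0)

/* Stubs standing in for H5AC_protect / H5AC_unprotect */
static void *node_protect(void)      { static int node; return &node; }
static int   node_unprotect(void *n) { (void)n; return 0; }

static int process_node(void)
{
    void *sn        = NULL;
    int   ret_value = 0;

    /* Failure here jumps straight to cleanup (cf. H5E_CANTPROTECT) */
    if (NULL == (sn = node_protect()))
        GOTO_ERROR("unable to protect symbol table node");

    /* ... operate on the protected node ... */

done:
    /* Failure here is reported but must not jump (cf. H5E_CANTUNPROTECT) */
    if (sn && node_unprotect(sn) < 0)
        DONE_ERROR("unable to release symbol table node");

    return ret_value;
}

int main(void) { return process_node() < 0; }
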
@@ -954,7 +952,7 @@ H5G__node_iterate(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, done: /* Release resources */ if (sn && H5AC_unprotect(f, H5AC_SNODE, addr, sn, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, H5_ITER_ERROR, "unable to release object header"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release object header"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__node_iterate() */ @@ -988,13 +986,13 @@ H5G__node_sumup(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, /* Find the object node and add the number of symbol entries. */ if (NULL == (sn = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to load symbol table node"); *num_objs += sn->nsyms; done: if (sn && H5AC_unprotect(f, H5AC_SNODE, addr, sn, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, H5_ITER_ERROR, "unable to release object header"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release object header"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__node_sumup() */ @@ -1029,7 +1027,7 @@ H5G__node_by_idx(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, /* Get a pointer to the symbol table node */ if (NULL == (sn = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to load symbol table node"); /* Find the node, locate the object symbol table entry and retrieve the name */ if (udata->idx >= udata->num_objs && udata->idx < (udata->num_objs + sn->nsyms)) { @@ -1051,7 +1049,7 @@ H5G__node_by_idx(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, done: if (sn && H5AC_unprotect(f, H5AC_SNODE, addr, sn, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, H5_ITER_ERROR, "unable to release object header"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release object header"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__node_by_idx() */ @@ -1084,14 +1082,14 @@ H5G__node_init(H5F_t *f) /* Allocate & initialize global info for the shared structure */ if (NULL == (shared = H5B_shared_new(f, H5B_SNODE, sizeof_rkey))) - HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info"); + HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "memory allocation failed for shared B-tree info"); /* Set up the "local" information for this file's groups */ /* */ /* Make shared B-tree info reference counted */ if (H5F_SET_GRP_BTREE_SHARED(f, H5UC_create(shared, H5B_shared_free)) < 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't create ref-count wrapper for shared B-tree info"); + HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "can't create ref-count wrapper for shared B-tree info"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -1155,7 +1153,7 @@ H5G__node_copy(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, const /* load the symbol table into memory from the source file */ if (NULL == (sn = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to load symbol table node"); /* copy object in this node one by one */ for (i = 0; i < sn->nsyms; i++) { @@ -1223,7 
+1221,7 @@ H5G__node_copy(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, const /* Copy the shared object from source to destination */ if (H5O_copy_header_map(&tmp_src_oloc, &new_dst_oloc, cpy_info, true, &obj_type, (void **)&cpy_udata) < 0) - HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, H5_ITER_ERROR, "unable to copy object"); + HGOTO_ERROR(H5E_SYM, H5E_CANTCOPY, H5_ITER_ERROR, "unable to copy object"); /* Set up object creation info for symbol table insertion. Only * case so far is for inserting old-style groups (for caching stab @@ -1247,7 +1245,7 @@ H5G__node_copy(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, const lnk.type = H5L_TYPE_SOFT; if ((lnk.u.soft.name = (char *)H5HL_offset_into(udata->src_heap, src_ent->cache.slink.lval_offset)) == NULL) - HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, H5_ITER_ERROR, "unable to get link name"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, H5_ITER_ERROR, "unable to get link name"); /* Sanity check soft link name, to detect running off the end of the heap block */ max_link_len = udata->src_block_size - src_ent->cache.slink.lval_offset; @@ -1288,7 +1286,7 @@ H5G__node_copy(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr, const done: if (sn && H5AC_unprotect(f, H5AC_SNODE, addr, sn, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, H5_ITER_ERROR, "unable to release object header"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release object header"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__node_copy() */ @@ -1325,7 +1323,7 @@ H5G__node_build_table(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr * because we're about to call an application function. */ if (NULL == (sn = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to load symbol table node"); /* Check if the link table needs to be extended */ if ((udata->ltable->nlinks + sn->nsyms) >= udata->alloc_nlinks) { @@ -1335,7 +1333,7 @@ H5G__node_build_table(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr /* Re-allocate the link table */ if (NULL == (x = (H5O_link_t *)H5MM_realloc(udata->ltable->lnks, sizeof(H5O_link_t) * na))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed"); + HGOTO_ERROR(H5E_SYM, H5E_CANTALLOC, H5_ITER_ERROR, "memory allocation failed"); udata->ltable->lnks = x; } /* end if */ @@ -1355,7 +1353,7 @@ H5G__node_build_table(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t addr done: /* Release the locked items */ if (sn && H5AC_unprotect(f, H5AC_SNODE, addr, sn, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, H5_ITER_ERROR, "unable to release object header"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release object header"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__node_build_table() */ @@ -1400,9 +1398,8 @@ H5G__node_iterate_size(H5F_t *f, const void H5_ATTR_UNUSED *_lt_key, haddr_t H5_ herr_t H5G_node_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth, haddr_t heap_addr) { - H5G_node_t *sn = NULL; - H5HL_t *heap = NULL; - unsigned u; /* Local index variable */ + H5G_node_t *sn = NULL; + H5HL_t *heap = NULL; herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -1419,22 +1416,17 @@ H5G_node_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth, had /* Pin the heap down in memory */ if (heap_addr > 0 && 
H5_addr_defined(heap_addr)) if (NULL == (heap = H5HL_protect(f, heap_addr, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, FAIL, "unable to protect symbol table heap"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table heap"); - /* - * If we couldn't load the symbol table node, then try loading the - * B-tree node. - */ - if (NULL == (sn = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG))) { - H5G_bt_common_t udata; /*data to pass through B-tree */ + /* Try loading symbol table node */ + H5E_PAUSE_ERRORS + { + sn = (H5G_node_t *)H5AC_protect(f, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG); + } + H5E_RESUME_ERRORS + if (sn) { + unsigned u; /* Local index variable */ - H5E_clear_stack(); /* discard that error */ - udata.heap = heap; - udata.block_size = H5HL_heap_get_size(heap); - if (H5B_debug(f, addr, stream, indent, fwidth, H5B_SNODE, &udata) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, FAIL, "unable to debug B-tree node"); - } /* end if */ - else { fprintf(stream, "%*sSymbol Table Node...\n", indent, ""); fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth, "Dirty:", sn->cache_info.is_dirty ? "Yes" : "No"); fprintf(stream, "%*s%-*s %u\n", indent, "", fwidth, @@ -1460,12 +1452,24 @@ H5G_node_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth, had H5G__ent_debug(sn->entry + u, stream, indent, fwidth, heap); } /* end for */ } /* end if */ + /* + * If we couldn't load the symbol table node, then try loading the + * B-tree node. + */ + else { + H5G_bt_common_t udata; /*data to pass through B-tree */ + + udata.heap = heap; + udata.block_size = H5HL_heap_get_size(heap); + if (H5B_debug(f, addr, stream, indent, fwidth, H5B_SNODE, &udata) < 0) + HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, FAIL, "unable to debug B-tree node"); + } /* end else */ done: if (sn && H5AC_unprotect(f, H5AC_SNODE, addr, sn, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to release symbol table node"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to release symbol table node"); if (heap && H5HL_unprotect(heap) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G_node_debug() */ diff --git a/src/H5Gpublic.h b/src/H5Gpublic.h index 318098b9081..38880e787b7 100644 --- a/src/H5Gpublic.h +++ b/src/H5Gpublic.h @@ -692,9 +692,9 @@ H5_DLL hid_t H5Gopen1(hid_t loc_id, const char *name); * If \p link_type is #H5G_LINK_SOFT, then \p cur_name can be anything * and is interpreted at lookup time relative to the group which * contains the final component of \p new_name. For instance, if \p - * cur_name is \Code{./foo}, \p new_name is \Code{./x/y/bar}, and a - * request is made for \Code{./x/y/bar}, then the actual object looked - * up is \Code{./x/y/./foo}. + * cur_name is \TText{./foo}, \p new_name is \TText{./x/y/bar}, and a + * request is made for \TText{./x/y/bar}, then the actual object looked + * up is \TText{./x/y/./foo}. * \version 1.8.0 Function deprecated in this release. * @@ -730,9 +730,9 @@ H5_DLL herr_t H5Glink(hid_t cur_loc_id, H5G_link_t type, const char *cur_name, c * If \p link_type is #H5G_LINK_SOFT, then \p cur_name can be anything * and is interpreted at lookup time relative to the group which * contains the final component of \p new_name. 
For instance, if \p - * current_name is \Code{./foo}, \p new_name is \Code{./x/y/bar}, and a - * request is made for \Code{./x/y/bar}, then the actual object looked - * up is \Code{./x/y/./foo}. + * current_name is \TText{./foo}, \p new_name is \TText{./x/y/bar}, and a + * request is made for \TText{./x/y/bar}, then the actual object looked + * up is \TText{./x/y/./foo}. * * \version 1.8.0 Function deprecated in this release. * @@ -895,7 +895,7 @@ H5_DLL herr_t H5Gget_linkval(hid_t loc_id, const char *name, size_t size, char * * * \fgdt_loc_id * \param[in] name Name of the object whose comment is to be set or reset - * name must be \Code{'.'} (dot) if \p loc_id fully specifies + * name must be \TText{'.'} (dot) if \p loc_id fully specifies * the object for which the comment is to be set. * \param[in] comment The new comment * @@ -937,7 +937,7 @@ H5_DLL herr_t H5Gset_comment(hid_t loc_id, const char *name, const char *comment * * \fgdt_loc_id * \param[in] name Name of the object whose comment is to be set or reset - * name must be \Code{'.'} (dot) if \p loc_id fully specifies + * name must be \TText{'.'} (dot) if \p loc_id fully specifies * the object for which the comment is to be set. * \param[in] bufsize Maximum number of comment characters to be returned in \p buf. * \param[in] buf The comment @@ -960,12 +960,7 @@ H5_DLL herr_t H5Gset_comment(hid_t loc_id, const char *name, const char *comment * root group * \li A dot (\c .), if \p loc_id fully specifies the object * - * At most bufsize characters, including a null-terminator, are - * returned in \p buf. The returned value is not null-terminated if the - * comment is longer than the supplied buffer. If the size of the - * comment is unknown, a preliminary \p H5Gget_comment() call will - * return the size of the comment, including space for the - * null-terminator. + * \details_namelen{comment,H5Gget_comment} * * If an object does not have a comment, the empty string is returned * in comment. @@ -1160,7 +1155,7 @@ H5_DLL herr_t H5Gget_objinfo(hid_t loc_id, const char *name, hbool_t follow_link * * If the size of the provided buffer \p name is less or equal the * actual object name length, the object name is truncated to - * \Code{max_size - 1} characters. + * \TText{max_size - 1} characters. 
* * Note that if the size of the object's name is unknown, a preliminary * call to H5Gget_objname_by_idx() with \p name set to \c NULL will diff --git a/src/H5Gstab.c b/src/H5Gstab.c index 6c7e6db8824..594842830c4 100644 --- a/src/H5Gstab.c +++ b/src/H5Gstab.c @@ -138,7 +138,7 @@ H5G__stab_create_components(H5F_t *f, H5O_stab_t *stab, size_t size_hint) /* Pin the heap down in memory */ if (NULL == (heap = H5HL_protect(f, stab->heap_addr, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table heap"); /* Insert name into the heap */ if (H5HL_insert(f, heap, (size_t)1, "", &name_offset) < 0) @@ -152,7 +152,7 @@ H5G__stab_create_components(H5F_t *f, H5O_stab_t *stab, size_t size_hint) done: /* Release resources */ if (heap && FAIL == H5HL_unprotect(heap)) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__stab_create_components() */ @@ -243,7 +243,7 @@ H5G__stab_insert_real(H5F_t *f, const H5O_stab_t *stab, H5O_link_t *obj_lnk, H5O /* Pin the heap down in memory */ if (NULL == (heap = H5HL_protect(f, stab->heap_addr, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table heap"); /* Initialize data to pass through B-tree */ udata.common.name = obj_lnk->name; @@ -260,7 +260,7 @@ H5G__stab_insert_real(H5F_t *f, const H5O_stab_t *stab, H5O_link_t *obj_lnk, H5O done: /* Release resources */ if (heap && H5HL_unprotect(heap) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__stab_insert_real() */ @@ -288,10 +288,10 @@ H5G__stab_insert(const H5O_loc_t *grp_oloc, H5O_link_t *obj_lnk, H5O_type_t obj_ /* Retrieve symbol table message */ if (NULL == H5O_msg_read(grp_oloc, H5O_STAB_ID, &stab)) - HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "not a symbol table"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "not a symbol table"); if (H5G__stab_insert_real(grp_oloc->file, &stab, obj_lnk, obj_type, crt_info) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5_ITER_ERROR, "unable to insert the link"); + HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, H5_ITER_ERROR, "unable to insert the link"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -321,11 +321,11 @@ H5G__stab_remove(const H5O_loc_t *loc, H5RS_str_t *grp_full_path_r, const char * /* Read in symbol table message */ if (NULL == H5O_msg_read(loc, H5O_STAB_ID, &stab)) - HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "not a symbol table"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "not a symbol table"); /* Pin the heap down in memory */ if (NULL == (heap = H5HL_protect(loc->file, stab.heap_addr, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table heap"); /* Initialize data to pass through B-tree */ udata.common.name = name; @@ -335,12 +335,12 @@ H5G__stab_remove(const H5O_loc_t *loc, H5RS_str_t *grp_full_path_r, const char * /* Remove from symbol table */ if (H5B_remove(loc->file, H5B_SNODE, stab.btree_addr, &udata) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to remove 
entry"); + HGOTO_ERROR(H5E_SYM, H5E_CANTREMOVE, FAIL, "unable to remove entry"); done: /* Release resources */ if (heap && H5HL_unprotect(heap) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__stab_remove() */ @@ -376,11 +376,11 @@ H5G__stab_remove_by_idx(const H5O_loc_t *grp_oloc, H5RS_str_t *grp_full_path_r, /* Read in symbol table message */ if (NULL == H5O_msg_read(grp_oloc, H5O_STAB_ID, &stab)) - HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "not a symbol table"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "not a symbol table"); /* Pin the heap down in memory */ if (NULL == (heap = H5HL_protect(grp_oloc->file, stab.heap_addr, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table heap"); /* Initialize data to pass through B-tree */ udata.common.name = obj_lnk.name; @@ -390,12 +390,12 @@ H5G__stab_remove_by_idx(const H5O_loc_t *grp_oloc, H5RS_str_t *grp_full_path_r, /* Remove link from symbol table */ if (H5B_remove(grp_oloc->file, H5B_SNODE, stab.btree_addr, &udata) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to remove entry"); + HGOTO_ERROR(H5E_SYM, H5E_CANTREMOVE, FAIL, "unable to remove entry"); done: /* Release resources */ if (heap && H5HL_unprotect(heap) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); /* Reset the link information, if we have a copy */ if (lnk_copied) @@ -429,7 +429,7 @@ H5G__stab_delete(H5F_t *f, const H5O_stab_t *stab) /* Pin the heap down in memory */ if (NULL == (heap = H5HL_protect(f, stab->heap_addr, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table heap"); /* Set up user data for B-tree deletion */ udata.common.name = NULL; @@ -441,7 +441,7 @@ H5G__stab_delete(H5F_t *f, const H5O_stab_t *stab) /* Release resources */ if (H5HL_unprotect(heap) < 0) - HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HGOTO_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); heap = NULL; /* Delete local heap for names */ @@ -451,7 +451,7 @@ H5G__stab_delete(H5F_t *f, const H5O_stab_t *stab) done: /* Release resources */ if (heap && H5HL_unprotect(heap) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__stab_delete() */ @@ -482,11 +482,11 @@ H5G__stab_iterate(const H5O_loc_t *oloc, H5_iter_order_t order, hsize_t skip, hs /* Get the B-tree info */ if (NULL == H5O_msg_read(oloc, H5O_STAB_ID, &stab)) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to determine local heap address"); /* Pin the heap down in memory */ if (NULL == (heap = H5HL_protect(oloc->file, stab.heap_addr, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table heap"); /* Check on iteration order */ /* ("native" 
iteration order is increasing for this link storage mechanism) */ @@ -519,7 +519,7 @@ H5G__stab_iterate(const H5O_loc_t *oloc, H5_iter_order_t order, hsize_t skip, hs /* Iterate over the group members */ if (H5B_iterate(oloc->file, H5B_SNODE, stab.btree_addr, H5G__node_build_table, &udata) < 0) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to build link table"); + HGOTO_ERROR(H5E_SYM, H5E_BADITER, FAIL, "unable to build link table"); /* Check for skipping out of bounds */ if (skip > 0 && (size_t)skip >= ltable.nlinks) @@ -537,9 +537,9 @@ H5G__stab_iterate(const H5O_loc_t *oloc, H5_iter_order_t order, hsize_t skip, hs done: /* Release resources */ if (heap && H5HL_unprotect(heap) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); if (ltable.lnks && H5G__link_release_table(<able) < 0) - HDONE_ERROR(H5E_SYM, H5E_CANTFREE, FAIL, "unable to release link table"); + HDONE_ERROR(H5E_SYM, H5E_CANTRELEASE, FAIL, "unable to release link table"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__stab_iterate() */ @@ -570,11 +570,11 @@ H5G__stab_count(const H5O_loc_t *oloc, hsize_t *num_objs) /* Get the B-tree info */ if (NULL == H5O_msg_read(oloc, H5O_STAB_ID, &stab)) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to determine local heap address"); /* Iterate over the group members */ if (H5B_iterate(oloc->file, H5B_SNODE, stab.btree_addr, H5G__node_sumup, num_objs) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed"); + HGOTO_ERROR(H5E_SYM, H5E_BADITER, FAIL, "iteration operator failed"); done: FUNC_LEAVE_NOAPI_TAG(ret_value) @@ -608,14 +608,14 @@ H5G__stab_bh_size(H5F_t *f, const H5O_stab_t *stab, H5_ih_info_t *bh_info) /* Get the B-tree & symbol table node size info */ if (H5B_get_info(f, H5B_SNODE, stab->btree_addr, &bt_info, H5G__node_iterate_size, &snode_size) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "iteration operator failed"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "iteration operator failed"); /* Add symbol table & B-tree node sizes to index info */ bh_info->index_size += snode_size + bt_info.size; /* Get the size of the local heap for the group */ if (H5HL_heapsize(f, stab->heap_addr, &(bh_info->heap_size)) < 0) - HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "iteration operator failed"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "iteration operator failed"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -657,7 +657,7 @@ H5G__stab_get_name_by_idx_cb(const H5G_entry_t *ent, void *_udata) HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to get symbol table link name"); if (NULL == (udata->name = H5MM_strndup(name, (block_size - name_off)))) - HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to duplicate symbol table link name"); + HGOTO_ERROR(H5E_SYM, H5E_CANTCOPY, FAIL, "unable to duplicate symbol table link name"); done: FUNC_LEAVE_NOAPI(ret_value) @@ -692,11 +692,11 @@ H5G__stab_get_name_by_idx(const H5O_loc_t *oloc, H5_iter_order_t order, hsize_t /* Get the B-tree & local heap info */ if (NULL == H5O_msg_read(oloc, H5O_STAB_ID, &stab)) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to determine local heap address"); /* Pin the heap down in memory */ if (NULL == (heap = H5HL_protect(oloc->file, stab.heap_addr, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_PROTECT, 
FAIL, "unable to protect symbol table heap"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table heap"); /* Remap index for decreasing iteration order */ if (order == H5_ITER_DEC) { @@ -704,7 +704,7 @@ H5G__stab_get_name_by_idx(const H5O_loc_t *oloc, H5_iter_order_t order, hsize_t /* Iterate over the symbol table nodes, to count the links */ if (H5B_iterate(oloc->file, H5B_SNODE, stab.btree_addr, H5G__node_sumup, &nlinks) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed"); + HGOTO_ERROR(H5E_SYM, H5E_BADITER, FAIL, "iteration operator failed"); /* Map decreasing iteration order index to increasing iteration order index */ n = nlinks - (n + 1); @@ -720,11 +720,11 @@ H5G__stab_get_name_by_idx(const H5O_loc_t *oloc, H5_iter_order_t order, hsize_t /* Iterate over the group members */ if (H5B_iterate(oloc->file, H5B_SNODE, stab.btree_addr, H5G__node_by_idx, &udata) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "iteration operator failed"); + HGOTO_ERROR(H5E_SYM, H5E_BADITER, FAIL, "iteration operator failed"); /* If we don't know the name now, we almost certainly went out of bounds */ if (udata.name == NULL) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "index out of bound"); + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "index out of bound"); /* Get the length of the name */ *name_len = strlen(udata.name); @@ -739,7 +739,7 @@ H5G__stab_get_name_by_idx(const H5O_loc_t *oloc, H5_iter_order_t order, hsize_t done: /* Release resources */ if (heap && H5HL_unprotect(heap) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); /* Free the duplicated name */ if (udata_valid && udata.name != NULL) @@ -805,11 +805,11 @@ H5G__stab_lookup(const H5O_loc_t *grp_oloc, const char *name, bool *found, H5O_l /* Retrieve the symbol table message for the group */ if (NULL == H5O_msg_read(grp_oloc, H5O_STAB_ID, &stab)) - HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "can't read message"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "can't read message"); /* Pin the heap down in memory */ if (NULL == (heap = H5HL_protect(grp_oloc->file, stab.heap_addr, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table heap"); /* Set up user data to pass to 'find' operation callback */ udata.name = name; @@ -830,7 +830,7 @@ H5G__stab_lookup(const H5O_loc_t *grp_oloc, const char *name, bool *found, H5O_l done: /* Release resources */ if (heap && H5HL_unprotect(heap) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__stab_lookup() */ @@ -892,11 +892,11 @@ H5G__stab_lookup_by_idx(const H5O_loc_t *grp_oloc, H5_iter_order_t order, hsize_ /* Get the B-tree & local heap info */ if (NULL == H5O_msg_read(grp_oloc, H5O_STAB_ID, &stab)) - HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to determine local heap address"); /* Pin the heap down in memory */ if (NULL == (heap = H5HL_protect(grp_oloc->file, stab.heap_addr, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap"); + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table 
heap"); /* Remap index for decreasing iteration order */ if (order == H5_ITER_DEC) { @@ -904,7 +904,7 @@ H5G__stab_lookup_by_idx(const H5O_loc_t *grp_oloc, H5_iter_order_t order, hsize_ /* Iterate over the symbol table nodes, to count the links */ if (H5B_iterate(grp_oloc->file, H5B_SNODE, stab.btree_addr, H5G__node_sumup, &nlinks) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed"); + HGOTO_ERROR(H5E_SYM, H5E_BADITER, FAIL, "iteration operator failed"); /* Map decreasing iteration order index to increasing iteration order index */ n = nlinks - (n + 1); @@ -920,16 +920,16 @@ H5G__stab_lookup_by_idx(const H5O_loc_t *grp_oloc, H5_iter_order_t order, hsize_ /* Iterate over the group members */ if (H5B_iterate(grp_oloc->file, H5B_SNODE, stab.btree_addr, H5G__node_by_idx, &udata) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "iteration operator failed"); + HGOTO_ERROR(H5E_SYM, H5E_BADITER, FAIL, "iteration operator failed"); /* If we didn't find the link, we almost certainly went out of bounds */ if (!udata.found) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "index out of bound"); + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "index out of bound"); done: /* Release resources */ if (heap && H5HL_unprotect(heap) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5G__stab_lookup_by_idx() */ @@ -939,8 +939,8 @@ H5G__stab_lookup_by_idx(const H5O_loc_t *grp_oloc, H5_iter_order_t order, hsize_ /*------------------------------------------------------------------------- * Function: H5G__stab_valid * - * Purpose: Verify that a group's symbol table message is valid. If - * provided, the addresses in alt_stab will be tried if the + * Purpose: Verify that a group's symbol table message is valid. + * The addresses in alt_stab will be tried if the * addresses in the group's stab message are invalid, and * the stab message will be updated if necessary. 
* @@ -958,22 +958,32 @@ herr_t H5G__stab_valid(H5O_loc_t *grp_oloc, H5O_stab_t *alt_stab) { H5O_stab_t stab; /* Current symbol table */ - H5HL_t *heap = NULL; /* Pointer to local heap */ - bool changed = false; /* Whether stab has been modified */ + H5HL_t *heap = NULL; /* Pointer to local heap */ + bool changed = false; /* Whether stab has been modified */ + herr_t bt_status; /* B-tree status */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_TAG(grp_oloc->addr) + /* Sanity check */ + assert(grp_oloc); + assert(alt_stab); + /* Read the symbol table message */ if (NULL == H5O_msg_read(grp_oloc, H5O_STAB_ID, &stab)) - HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "unable to read symbol table message"); + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "unable to read symbol table message"); /* Check if the symbol table message's b-tree address is valid */ - if (H5B_valid(grp_oloc->file, H5B_SNODE, stab.btree_addr) < 0) { - /* Address is invalid, try the b-tree address in the alternate symbol - * table message */ - if (!alt_stab || H5B_valid(grp_oloc->file, H5B_SNODE, alt_stab->btree_addr) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to locate b-tree"); + H5E_PAUSE_ERRORS + { + bt_status = H5B_valid(grp_oloc->file, H5B_SNODE, stab.btree_addr); + } + H5E_RESUME_ERRORS + + if (bt_status < 0) { + /* Address is invalid, try the b-tree address in the alternate symbol table message */ + if (H5B_valid(grp_oloc->file, H5B_SNODE, alt_stab->btree_addr) < 0) + HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, FAIL, "unable to locate b-tree"); else { /* The alternate symbol table's b-tree address is valid. Adjust the * symbol table message in the group. */ @@ -983,12 +993,16 @@ H5G__stab_valid(H5O_loc_t *grp_oloc, H5O_stab_t *alt_stab) } /* end if */ /* Check if the symbol table message's heap address is valid */ - if (NULL == (heap = H5HL_protect(grp_oloc->file, stab.heap_addr, H5AC__READ_ONLY_FLAG))) { - /* Address is invalid, try the heap address in the alternate symbol - * table message */ - if (!alt_stab || - NULL == (heap = H5HL_protect(grp_oloc->file, alt_stab->heap_addr, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "unable to locate heap"); + H5E_PAUSE_ERRORS + { + heap = H5HL_protect(grp_oloc->file, stab.heap_addr, H5AC__READ_ONLY_FLAG); + } + H5E_RESUME_ERRORS + + if (NULL == heap) { + /* Address is invalid, try the heap address in the alternate symbol table message */ + if (NULL == (heap = H5HL_protect(grp_oloc->file, alt_stab->heap_addr, H5AC__READ_ONLY_FLAG))) + HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to locate heap"); else { /* The alternate symbol table's heap address is valid. Adjust the * symbol table message in the group. 
*/ @@ -998,16 +1012,14 @@ H5G__stab_valid(H5O_loc_t *grp_oloc, H5O_stab_t *alt_stab) } /* end if */ /* Update the symbol table message and clear errors if necessary */ - if (changed) { - H5E_clear_stack(); + if (changed) if (H5O_msg_write(grp_oloc, H5O_STAB_ID, 0, H5O_UPDATE_TIME | H5O_UPDATE_FORCE, &stab) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to correct symbol table message"); - } /* end if */ + HGOTO_ERROR(H5E_SYM, H5E_CANTSET, FAIL, "unable to correct symbol table message"); done: /* Release resources */ if (heap && H5HL_unprotect(heap) < 0) - HDONE_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to unprotect symbol table heap"); + HDONE_ERROR(H5E_SYM, H5E_CANTUNPROTECT, FAIL, "unable to unprotect symbol table heap"); FUNC_LEAVE_NOAPI_TAG(ret_value) } /* end H5G__stab_valid */ diff --git a/src/H5L.c b/src/H5L.c index 091296240b2..3616cb75a59 100644 --- a/src/H5L.c +++ b/src/H5L.c @@ -94,7 +94,8 @@ H5Lmove(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *ds H5VL_object_t *vol_obj2 = NULL; /* Object of dst_id */ H5VL_loc_params_t loc_params1; H5VL_loc_params_t loc_params2; - H5VL_object_t tmp_vol_obj; /* Temporary object */ + H5VL_object_t tmp_vol_obj; /* Temporary object */ + H5I_type_t src_id_type = H5I_BADID, dst_id_type = H5I_BADID; herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) @@ -106,6 +107,21 @@ H5Lmove(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *ds HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no current name specified"); if (!dst_name || !*dst_name) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no destination name specified"); + + /* reset an ID in the case of H5L_SAME_LOC */ + if (src_loc_id == H5L_SAME_LOC) + src_loc_id = dst_loc_id; + else if (dst_loc_id == H5L_SAME_LOC) + dst_loc_id = src_loc_id; + + /* verify that src and dst IDs are either a file or a group ID */ + src_id_type = H5I_get_type(src_loc_id); + if (!(H5I_GROUP == src_id_type || H5I_FILE == src_id_type)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid group (or file) ID, src_loc_id"); + dst_id_type = H5I_get_type(dst_loc_id); + if (!(H5I_GROUP == dst_id_type || H5I_FILE == dst_id_type)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid group (or file) ID, dst_loc_id"); + if (lcpl_id != H5P_DEFAULT && (true != H5P_isa_class(lcpl_id, H5P_LINK_CREATE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a link creation property list"); @@ -117,30 +133,27 @@ H5Lmove(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *ds H5CX_set_lcpl(lcpl_id); /* Verify access property list and set up collective metadata if appropriate */ - if (H5CX_set_apl(&lapl_id, H5P_CLS_LACC, ((src_loc_id != H5L_SAME_LOC) ? 
src_loc_id : dst_loc_id), true) < - 0) + if (H5CX_set_apl(&lapl_id, H5P_CLS_LACC, dst_loc_id, true) < 0) HGOTO_ERROR(H5E_LINK, H5E_CANTSET, FAIL, "can't set access property list info"); /* Set location parameter for source object */ loc_params1.type = H5VL_OBJECT_BY_NAME; loc_params1.loc_data.loc_by_name.name = src_name; loc_params1.loc_data.loc_by_name.lapl_id = lapl_id; - loc_params1.obj_type = H5I_get_type(src_loc_id); + loc_params1.obj_type = src_id_type; /* Set location parameter for destination object */ loc_params2.type = H5VL_OBJECT_BY_NAME; loc_params2.loc_data.loc_by_name.name = dst_name; loc_params2.loc_data.loc_by_name.lapl_id = lapl_id; - loc_params2.obj_type = H5I_get_type(dst_loc_id); + loc_params2.obj_type = dst_id_type; - if (H5L_SAME_LOC != src_loc_id) - /* Get the location object */ - if (NULL == (vol_obj1 = (H5VL_object_t *)H5I_object(src_loc_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); - if (H5L_SAME_LOC != dst_loc_id) - /* Get the location object */ - if (NULL == (vol_obj2 = (H5VL_object_t *)H5I_object(dst_loc_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Get the location object */ + if (NULL == (vol_obj1 = H5VL_vol_object(src_loc_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Get the location object */ + if (NULL == (vol_obj2 = H5VL_vol_object(dst_loc_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Make sure that the VOL connectors are the same */ if (vol_obj1 && vol_obj2) { @@ -195,7 +208,8 @@ H5Lcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *ds H5VL_loc_params_t loc_params1; H5VL_object_t *vol_obj2 = NULL; /* Object of dst_id */ H5VL_loc_params_t loc_params2; - H5VL_object_t tmp_vol_obj; /* Temporary object */ + H5VL_object_t tmp_vol_obj; /* Temporary object */ + H5I_type_t src_id_type = H5I_BADID, dst_id_type = H5I_BADID; herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) @@ -210,6 +224,20 @@ H5Lcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *ds if (lcpl_id != H5P_DEFAULT && (true != H5P_isa_class(lcpl_id, H5P_LINK_CREATE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a link creation property list"); + /* reset an ID in the case of H5L_SAME_LOC */ + if (src_loc_id == H5L_SAME_LOC) + src_loc_id = dst_loc_id; + else if (dst_loc_id == H5L_SAME_LOC) + dst_loc_id = src_loc_id; + + /* verify that src and dst IDs are either a file or a group ID */ + src_id_type = H5I_get_type(src_loc_id); + if (!(H5I_GROUP == src_id_type || H5I_FILE == src_id_type) && src_loc_id != H5L_SAME_LOC) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid group (or file) ID, src_loc_id"); + dst_id_type = H5I_get_type(dst_loc_id); + if (!(H5I_GROUP == dst_id_type || H5I_FILE == dst_id_type) && dst_loc_id != H5L_SAME_LOC) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid group (or file) ID, dst_loc_id"); + /* Check the link create property list */ if (H5P_DEFAULT == lcpl_id) lcpl_id = H5P_LINK_CREATE_DEFAULT; @@ -226,22 +254,20 @@ H5Lcopy(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, const char *ds loc_params1.type = H5VL_OBJECT_BY_NAME; loc_params1.loc_data.loc_by_name.name = src_name; loc_params1.loc_data.loc_by_name.lapl_id = lapl_id; - loc_params1.obj_type = H5I_get_type(src_loc_id); + loc_params1.obj_type = src_id_type; /* Set location parameter for destination object */ loc_params2.type = H5VL_OBJECT_BY_NAME; loc_params2.loc_data.loc_by_name.name = 
dst_name; loc_params2.loc_data.loc_by_name.lapl_id = lapl_id; - loc_params2.obj_type = H5I_get_type(dst_loc_id); + loc_params2.obj_type = dst_id_type; - if (H5L_SAME_LOC != src_loc_id) - /* Get the location object */ - if (NULL == (vol_obj1 = (H5VL_object_t *)H5I_object(src_loc_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); - if (H5L_SAME_LOC != dst_loc_id) - /* Get the location object */ - if (NULL == (vol_obj2 = (H5VL_object_t *)H5I_object(dst_loc_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Get the location object */ + if (NULL == (vol_obj1 = H5VL_vol_object(src_loc_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); + /* Get the location object */ + if (NULL == (vol_obj2 = H5VL_vol_object(dst_loc_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Make sure that the VOL connectors are the same */ if (vol_obj1 && vol_obj2) { @@ -468,11 +494,11 @@ H5L__create_hard_api_common(hid_t cur_loc_id, const char *cur_name, hid_t link_l if (H5L_SAME_LOC != cur_loc_id) /* Get the current location object */ - if (NULL == (curr_vol_obj = (H5VL_object_t *)H5VL_vol_object(cur_loc_id))) + if (NULL == (curr_vol_obj = H5VL_vol_object(cur_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); if (H5L_SAME_LOC != link_loc_id) /* Get the new location object */ - if (NULL == (link_vol_obj = (H5VL_object_t *)H5VL_vol_object(link_loc_id))) + if (NULL == (link_vol_obj = H5VL_vol_object(link_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Make sure that the VOL connectors are the same */ @@ -675,7 +701,7 @@ H5Lcreate_external(const char *file_name, const char *obj_name, hid_t link_loc_i loc_params.obj_type = H5I_get_type(link_loc_id); /* get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(link_loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(link_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid object identifier"); /* Set up VOL callback arguments */ @@ -753,7 +779,7 @@ H5Lcreate_ud(hid_t link_loc_id, const char *link_name, H5L_type_t link_type, con loc_params.obj_type = H5I_get_type(link_loc_id); /* get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(link_loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(link_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set up VOL callback arguments */ @@ -1043,7 +1069,7 @@ H5Lget_val(hid_t loc_id, const char *name, void *buf /*out*/, size_t size, hid_t loc_params.loc_data.loc_by_name.lapl_id = lapl_id; /* Get the VOL object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set up VOL callback arguments */ @@ -1107,7 +1133,7 @@ H5Lget_val_by_idx(hid_t loc_id, const char *group_name, H5_index_t idx_type, H5_ loc_params.obj_type = H5I_get_type(loc_id); /* Get the VOL object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set up VOL callback arguments */ @@ -1269,7 +1295,7 @@ H5Lget_info2(hid_t loc_id, const char *name, H5L_info2_t *linfo /*out*/, hid_t l loc_params.loc_data.loc_by_name.lapl_id = lapl_id; /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t 
*)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set up VOL callback arguments */ @@ -1328,7 +1354,7 @@ H5Lget_info_by_idx2(hid_t loc_id, const char *group_name, H5_index_t idx_type, H loc_params.obj_type = H5I_get_type(loc_id); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set up VOL callback arguments */ @@ -1514,7 +1540,7 @@ H5Lget_name_by_idx(hid_t loc_id, const char *group_name, H5_index_t idx_type, H5 loc_params.obj_type = H5I_get_type(loc_id); /* Get the VOL object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, (-1), "invalid location identifier"); /* Set up VOL callback arguments */ @@ -1720,7 +1746,7 @@ H5Literate_by_name2(hid_t loc_id, const char *group_name, H5_index_t idx_type, H HGOTO_ERROR(H5E_LINK, H5E_CANTSET, FAIL, "can't set access property list info"); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set location struct fields */ @@ -1799,7 +1825,7 @@ H5Lvisit2(hid_t group_id, H5_index_t idx_type, H5_iter_order_t order, H5L_iterat loc_params.obj_type = H5I_get_type(group_id); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(group_id))) + if (NULL == (vol_obj = H5VL_vol_object(group_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set up VOL callback arguments */ @@ -1873,7 +1899,7 @@ H5Lvisit_by_name2(hid_t loc_id, const char *group_name, H5_index_t idx_type, H5_ HGOTO_ERROR(H5E_LINK, H5E_CANTSET, FAIL, "can't set access property list info"); /* get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Set location struct fields */ diff --git a/src/H5Ldeprec.c b/src/H5Ldeprec.c index c5f8470c912..819aea87f10 100644 --- a/src/H5Ldeprec.c +++ b/src/H5Ldeprec.c @@ -162,7 +162,7 @@ H5Literate1(hid_t group_id, H5_index_t idx_type, H5_iter_order_t order, hsize_t HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no operator specified"); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(group_id))) + if (NULL == (vol_obj = H5VL_vol_object(group_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Check if the VOL object is a native VOL connector object */ @@ -248,7 +248,7 @@ H5Literate_by_name1(hid_t loc_id, const char *group_name, H5_index_t idx_type, H HGOTO_ERROR(H5E_LINK, H5E_CANTSET, FAIL, "can't set access property list info"); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Check if the VOL object is a native VOL connector object */ @@ -325,7 +325,7 @@ H5Lget_info1(hid_t loc_id, const char *name, H5L_info1_t *linfo /*out*/, hid_t l loc_params.loc_data.loc_by_name.lapl_id = lapl_id; /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == 
(vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Check if the VOL object is a native VOL connector object */ @@ -416,7 +416,7 @@ H5Lget_info_by_idx1(hid_t loc_id, const char *group_name, H5_index_t idx_type, H loc_params.obj_type = H5I_get_type(loc_id); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Check if the VOL object is a native VOL connector object */ @@ -515,7 +515,7 @@ H5Lvisit1(hid_t group_id, H5_index_t idx_type, H5_iter_order_t order, H5L_iterat loc_params.obj_type = H5I_get_type(group_id); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(group_id))) + if (NULL == (vol_obj = H5VL_vol_object(group_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Check if the VOL object is a native VOL connector object */ @@ -604,7 +604,7 @@ H5Lvisit_by_name1(hid_t loc_id, const char *group_name, H5_index_t idx_type, H5_ HGOTO_ERROR(H5E_LINK, H5E_CANTSET, FAIL, "can't set access property list info"); /* get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Check if the VOL object is a native VOL connector object */ diff --git a/src/H5Lpublic.h b/src/H5Lpublic.h index 2bf3c53b83e..ec325b8ad77 100644 --- a/src/H5Lpublic.h +++ b/src/H5Lpublic.h @@ -640,13 +640,13 @@ H5_DLL herr_t H5Lget_val_by_idx(hid_t loc_id, const char *group_name, H5_index_t * denote a valid link access property list identifier. A call to * H5Lexists() with arguments \c file, \c "/", and \c lapl * returns a positive value; in other words, - * \Code{H5Lexists(file, "/", lapl)} returns a positive value. + * \TText{H5Lexists(file, "/", lapl)} returns a positive value. * In the HDF5 1.8 release, this function returns 0. *
• Let \c root denote a valid HDF5 group identifier that refers to the * root group of an HDF5 file, and let \c lapl denote a valid link * access property list identifier. A call to H5Lexists() with * arguments \c root, \c "/", and \c lapl returns a positive value; - * in other words, \Code{H5Lexists(root, "/", lapl)} returns a positive + * in other words, \TText{H5Lexists(root, "/", lapl)} returns a positive * value. In the HDF5 1.8 release, this function returns 0.
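A minimal sketch of the root-path behavior described above, assuming an already existing file whose name ("example.h5") is a placeholder:

```c
#include <stdio.h>
#include "hdf5.h"

int
main(void)
{
    hid_t file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT); /* placeholder file name */
    if (file < 0)
        return 1;

    /* Both documented cases reduce to probing the root path; positive on 1.10+ */
    htri_t exists = H5Lexists(file, "/", H5P_DEFAULT);
    printf("H5Lexists(file, \"/\", H5P_DEFAULT) = %d\n", (int)exists);

    H5Fclose(file);
    return 0;
}
```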
  • * * Note that the function accepts link names and path names. This is @@ -764,7 +764,7 @@ H5_DLL herr_t H5Lget_info2(hid_t loc_id, const char *name, H5L_info2_t *linfo, h * * \return \herr_t * - * \details H5get_info_by_idx2() returns the metadata for a link in a group + * \details H5Lget_info_by_idx2() returns the metadata for a link in a group * according to a specified field or index and a specified order. The * link for which information is to be returned is specified by \p * idx_type, \p order, and \p n as follows: @@ -819,7 +819,7 @@ H5_DLL herr_t H5Lget_info_by_idx2(hid_t loc_id, const char *group_name, H5_index * \return Returns the size of the link name if successful; otherwise returns a * negative value. * - * \details H5get_name_by_idx() retrieves the name of the \Emph{n}-th link in a + * \details H5Lget_name_by_idx() retrieves the name of the \Emph{n}-th link in a * group, according to the specified order, \p order, within a specified * field or index, \p idx_type. * @@ -835,10 +835,7 @@ H5_DLL herr_t H5Lget_info_by_idx2(hid_t loc_id, const char *group_name, H5_index * If \p loc_id specifies the group in which the link resides, * \p group_name can be a dot (\c .). * - * The size in bytes of name is specified in \p size. If \p size is - * unknown, it can be determined via an initial H5Lget_name_by_idx() - * call with name set to NULL; the function's return value will be the - * size of the name. + * \details_namelen{link,H5Lget_name_by_idx} * * \note Please note that in order for the specified index to correspond to the * creation order index, \p order must be set to #H5_ITER_INC or @@ -1578,7 +1575,7 @@ H5_DLL herr_t H5Lget_info1(hid_t loc_id, const char *name, H5L_info1_t *linfo /* * the function H5Lget_info_by_idx2() and the macro * H5Lget_info_by_idx(). * - * \details H5get_info_by_idx1() returns the metadata for a link in a group + * \details H5Lget_info_by_idx1() returns the metadata for a link in a group * according to a specified field or index and a specified order. * * The link for which information is to be returned is specified by \p diff --git a/src/H5M.c b/src/H5M.c index b196f0930b8..bb8b4d9882b 100644 --- a/src/H5M.c +++ b/src/H5M.c @@ -359,8 +359,7 @@ H5Mcreate_async(const char *app_file, const char *app_func, unsigned app_line, h * the in-file datatype for values is defined by VAL_TYPE_ID. * LOC_ID specifies the file to create the map object, but no * link to the object is created. Other options can be - * specified through the property lists LCPL_ID, MCPL_ID, and - * MAPL_ID. + * specified through the property lists MCPL_ID and MAPL_ID. * * The resulting ID should be linked into the file with * H5Olink or it will be deleted when closed. 
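The \details_namelen substitution above refers to the usual two-call sizing pattern for name queries; a sketch, assuming group_id is a valid group identifier and <stdio.h>/<stdlib.h> are included:

```c
/* First call with a NULL buffer to learn the name length, then fetch the name */
ssize_t len = H5Lget_name_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC,
                                 0, NULL, 0, H5P_DEFAULT);
if (len > 0) {
    char *name = (char *)malloc((size_t)len + 1); /* +1 for the NUL terminator */

    H5Lget_name_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC,
                       0, name, (size_t)len + 1, H5P_DEFAULT);
    printf("first link in name order: %s\n", name);
    free(name);
}
```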
@@ -397,7 +396,7 @@ H5Mcreate_anon(hid_t loc_id, hid_t key_type_id, hid_t val_type_id, hid_t mcpl_id HGOTO_ERROR(H5E_MAP, H5E_CANTSET, H5I_INVALID_HID, "can't set access property list info"); /* get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); /* Set location parameters */ @@ -693,7 +692,7 @@ H5Mget_key_type(hid_t map_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(map_id, H5I_MAP))) + if (NULL == (vol_obj = H5VL_vol_object_verify(map_id, H5I_MAP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid map identifier"); /* Set up VOL callback arguments */ @@ -737,7 +736,7 @@ H5Mget_val_type(hid_t map_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(map_id, H5I_MAP))) + if (NULL == (vol_obj = H5VL_vol_object_verify(map_id, H5I_MAP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid map identifier"); /* Set up VOL callback arguments */ @@ -781,7 +780,7 @@ H5Mget_create_plist(hid_t map_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(map_id, H5I_MAP))) + if (NULL == (vol_obj = H5VL_vol_object_verify(map_id, H5I_MAP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid map identifier"); /* Set up VOL callback arguments */ @@ -828,7 +827,7 @@ H5Mget_access_plist(hid_t map_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(map_id, H5I_MAP))) + if (NULL == (vol_obj = H5VL_vol_object_verify(map_id, H5I_MAP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid map identifier"); /* Set up VOL callback arguments */ @@ -871,7 +870,7 @@ H5Mget_count(hid_t map_id, hsize_t *count /*out*/, hid_t dxpl_id) FUNC_ENTER_API(H5I_INVALID_HID) /* Check args */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(map_id, H5I_MAP))) + if (NULL == (vol_obj = H5VL_vol_object_verify(map_id, H5I_MAP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid map identifier"); /* Get the default dataset transfer property list if the user didn't provide one */ @@ -927,7 +926,7 @@ H5M__put_api_common(hid_t map_id, hid_t key_mem_type_id, const void *key, hid_t HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid value memory datatype ID"); /* Get map pointer */ - if (NULL == (*vol_obj_ptr = (H5VL_object_t *)H5I_object_verify(map_id, H5I_MAP))) + if (NULL == (*vol_obj_ptr = H5VL_vol_object_verify(map_id, H5I_MAP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "map_id is not a map ID"); /* Get the default dataset transfer property list if the user didn't provide one */ @@ -1056,7 +1055,7 @@ H5M__get_api_common(hid_t map_id, hid_t key_mem_type_id, const void *key, hid_t HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid value memory datatype ID"); /* Get map pointer */ - if (NULL == (*vol_obj_ptr = (H5VL_object_t *)H5I_object_verify(map_id, H5I_MAP))) + if (NULL == (*vol_obj_ptr = H5VL_vol_object_verify(map_id, H5I_MAP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "map_id is not a map ID"); /* Get the default dataset transfer property list if the user didn't provide one */ @@ -1187,7 +1186,7 @@ H5Mexists(hid_t map_id, hid_t key_mem_type_id, const void *key, hbool_t *exists, HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid key memory datatype ID"); /* Get map 
pointer */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(map_id, H5I_MAP))) + if (NULL == (vol_obj = H5VL_vol_object_verify(map_id, H5I_MAP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "map_id is not a map ID"); /* Get the default dataset transfer property list if the user didn't provide one */ @@ -1263,7 +1262,7 @@ H5Miterate(hid_t map_id, hsize_t *idx, hid_t key_mem_type_id, H5M_iterate_t op, HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no operator specified"); /* Get map pointer */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(map_id, H5I_MAP))) + if (NULL == (vol_obj = H5VL_vol_object_verify(map_id, H5I_MAP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "map_id is not a map ID"); /* Get the default dataset transfer property list if the user didn't provide one */ @@ -1348,7 +1347,7 @@ H5Miterate_by_name(hid_t loc_id, const char *map_name, hsize_t *idx, hid_t key_m HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no operator specified"); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Get the default dataset transfer property list if the user didn't provide one */ @@ -1412,7 +1411,7 @@ H5Mdelete(hid_t map_id, hid_t key_mem_type_id, const void *key, hid_t dxpl_id) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid key memory datatype ID"); /* Get map pointer */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object_verify(map_id, H5I_MAP))) + if (NULL == (vol_obj = H5VL_vol_object_verify(map_id, H5I_MAP))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "map_id is not a map ID"); /* Get the default dataset transfer property list if the user didn't provide one */ diff --git a/src/H5Mpublic.h b/src/H5Mpublic.h index 44c78fe064f..3d8a8c7f7c8 100644 --- a/src/H5Mpublic.h +++ b/src/H5Mpublic.h @@ -245,9 +245,27 @@ H5_DLL hid_t H5Mcreate_async(hid_t loc_id, const char *name, hid_t key_type_id, /** * \ingroup H5M * - * \brief + * \brief Creates a map object without linking it into a file * - * \details + * \fgdta_loc_id + * \type_id{key_type_id} + * \type_id{val_type_id} + * \mcpl_id + * \mapl_id + * \return \hid_t{map object} + * The resulting ID should be linked into the file with H5Olink or it + * will be deleted when closed. + * + * \details H5Mcreate_anon() creates a new map object for storing key-value + * pairs. The in-file datatype for keys is defined by \p key_type_id + * and the in-file datatype for values is defined by \p val_type_id. \p + * loc_id specifies the file to create the map object, but no link to + * the object is created. Other options can be specified through the + * property lists \p mcpl_id and \p mapl_id. + * + * The new map should be linked into the group hierarchy before being + * closed or it will be deleted. The map should be closed when the + * caller no longer requires it. 
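A hedged sketch of the create-anonymous-then-link pattern described for H5Mcreate_anon(); the map (H5M) API only functions with a VOL connector that implements it (e.g. DAOS), and file_id is assumed to be a file opened through such a connector:

```c
/* Create an unlinked map object, then give it a name so it persists */
hid_t map_id = H5Mcreate_anon(file_id, H5T_NATIVE_INT, H5T_NATIVE_DOUBLE,
                              H5P_DEFAULT, H5P_DEFAULT);
if (map_id >= 0) {
    /* Without this link, the map is deleted when the last ID to it is closed */
    H5Olink(map_id, file_id, "my_map", H5P_DEFAULT, H5P_DEFAULT);
    H5Mclose(map_id);
}
```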
* * \since 1.12.0 * diff --git a/src/H5O.c b/src/H5O.c index 26340aa567a..39887b51a29 100644 --- a/src/H5O.c +++ b/src/H5O.c @@ -359,7 +359,7 @@ H5Oopen_by_token(hid_t loc_id, H5O_token_t token) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, H5I_INVALID_HID, "can't open H5O_TOKEN_UNDEF"); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); /* Get object type */ @@ -436,7 +436,7 @@ H5O__copy_api_common(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, c HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't set object access arguments"); /* get the object */ - if (NULL == (*vol_obj_ptr = (H5VL_object_t *)H5I_object(dst_loc_id))) + if (NULL == (*vol_obj_ptr = H5VL_vol_object(dst_loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); loc_params2.type = H5VL_OBJECT_BY_SELF; loc_params2.obj_type = H5I_get_type(dst_loc_id); diff --git a/src/H5Odeprec.c b/src/H5Odeprec.c index f74ec542d6f..37a3996c1e6 100644 --- a/src/H5Odeprec.c +++ b/src/H5Odeprec.c @@ -347,7 +347,7 @@ H5Oopen_by_addr(hid_t loc_id, haddr_t addr) FUNC_ENTER_API(H5I_INVALID_HID) /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); /* Get object type */ diff --git a/src/H5Odtype.c b/src/H5Odtype.c index 24671b02107..b2e6c8f65be 100644 --- a/src/H5Odtype.c +++ b/src/H5Odtype.c @@ -135,7 +135,7 @@ H5O__dtype_decode_helper(unsigned *ioflags /*in,out*/, const uint8_t **pp, H5T_t * that case is impossible. * * Instead of using our normal H5_IS_BUFFER_OVERFLOW macro, use - * H5_IS_KNOWN_BUFFER_OVERFLOW, which will skip the check when the + * H5_IS_KNOWN_BUFFER_OVERFLOW, which will skip the check when * we're decoding a buffer from H5Tconvert(). * * Even if this is fixed at some point in the future, as long as we diff --git a/src/H5PLmodule.h b/src/H5PLmodule.h index f034e7c6631..1aedc2783fe 100644 --- a/src/H5PLmodule.h +++ b/src/H5PLmodule.h @@ -276,10 +276,12 @@ * \endcode * * See the documentation at - * hdf5_plugins/docs folder. In + * hdf5_plugins/docs folder. In * particular: - * INSTALL_With_CMake - * USING_HDF5_AND_CMake + * INSTALL_With_CMake + * USING_HDF5_AND_CMake */ /** diff --git a/src/H5Pmodule.h b/src/H5Pmodule.h index ef300f9312a..8ac6f86eed9 100644 --- a/src/H5Pmodule.h +++ b/src/H5Pmodule.h @@ -979,7 +979,7 @@ *
    * \snippet{doc} tables/propertyLists.dox lcpl_table *
    - * @see STRCPL + * @see @ref STRCPL * * \defgroup ACPL Attribute Creation Properties * \ingroup STRCPL @@ -988,7 +988,7 @@ * \snippet{doc} tables/propertyLists.dox acpl_table * * - * @see STRCPL + * @see @ref STRCPL * * \defgroup LAPL Link Access Properties * \ingroup H5P diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index 724893484a2..ff46407a717 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -480,7 +480,7 @@ typedef enum H5D_mpio_no_collective_cause_t { H5D_MPIO_DATA_TRANSFORMS = 0x04, /**< Collective I/O was not performed because data transforms needed to be applied */ H5D_MPIO_MPI_OPT_TYPES_ENV_VAR_DISABLED = 0x08, - /**< Collective I/O was disabled by environment variable (\Code{HDF5_MPI_OPT_TYPES}) */ + /**< Collective I/O was disabled by environment variable (\TText{HDF5_MPI_OPT_TYPES}) */ H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES = 0x10, /**< Collective I/O was not performed because one of the dataspaces was neither simple nor scalar */ H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET = 0x20, @@ -1350,15 +1350,15 @@ H5_DLL herr_t H5Pget_size(hid_t id, const char *name, size_t *size); * modified * *
    - * + * * * * - * + * * * * - * + * * * @@ -1393,15 +1393,15 @@ H5_DLL herr_t H5Pget_size(hid_t id, const char *name, size_t *size); * * * - * + * * * * - * + * * * * - * + * * * *
    Index for \Code{retries[]}Index for \TText{retries[]}Metadata entries*
    0Object header (version 2)
    \Code{const char * name}\TText{const char * name}IN: The name of the property being modified
    \Code{size_t size}\TText{size_t size}IN: The size of the property in bytes
    \Code{void * value}\TText{void * value}IN: Pointer to new value pointer for the property * being modified
    IN: The identifier of the property list being queried
    \Code{const char * name}\TText{const char * name}IN: The name of the property being queried
    \Code{size_t size}\TText{size_t size}IN: The size of the property in bytes
    \Code{void * value}\TText{void * value}IN: The value of the property being returned
    @@ -1426,15 +1426,15 @@ H5_DLL herr_t H5Pget_size(hid_t id, const char *name, size_t *size); * being deleted from * * - * \Code{const char * name} + * \TText{const char * name} * IN: The name of the property in the list * * - * \Code{size_t size} + * \TText{size_t size} * IN: The size of the property in bytes * * - * \Code{void * value} + * \TText{void * value} * IN: The value for the property being deleted * * @@ -1455,15 +1455,15 @@ H5_DLL herr_t H5Pget_size(hid_t id, const char *name, size_t *size); * The parameters to the above callback function are: * * - * + * * * * - * + * * * * - * + * * * *
    \Code{const char * name}\TText{const char * name}IN: The name of the property being copied
    \Code{size_t size}\TText{size_t size}IN: The size of the property in bytes
    \Code{void * value}\TText{void * value}IN/OUT: The value for the property being copied
    @@ -1487,15 +1487,15 @@ H5_DLL herr_t H5Pget_size(hid_t id, const char *name, size_t *size); * * * - * + * * * * - * + * * * * - * + * * * *
    \Code{const void * value1}\TText{const void * value1}IN: The value of the first property to compare
    \Code{const void * value2}\TText{const void * value2}IN: The value of the second property to compare
    \Code{size_t size}\TText{size_t size}IN: The size of the property in bytes
    @@ -1515,15 +1515,15 @@ H5_DLL herr_t H5Pget_size(hid_t id, const char *name, size_t *size); * * * - * + * * * * - * + * * * * - * + * * * *
    \Code{const char * name}\TText{const char * name}IN: The name of the property in the list
    \Code{size_t size}\TText{size_t size}IN: The size of the property in bytes
    \Code{void * value}\TText{void * value}IN: The value for the property being closed
    @@ -1667,15 +1667,15 @@ H5_DLL int H5Piterate(hid_t id, int *idx, H5P_iterate_t iter_func, void *iter_da * * * - * + * * * * - * + * * * * - * + * * * @@ -1701,15 +1701,15 @@ H5_DLL int H5Piterate(hid_t id, int *idx, H5P_iterate_t iter_func, void *iter_da * * * - * + * * * * - * + * * * * - * + * * * @@ -1745,15 +1745,15 @@ H5_DLL int H5Piterate(hid_t id, int *idx, H5P_iterate_t iter_func, void *iter_da * queried * * - * + * * * * - * + * * * * - * + * * * *
    \Code{const char * name}\TText{const char * name}IN: The name of the property being modified
    \Code{size_t size}\TText{size_t size}IN: The size of the property in bytes
    \Code{void * value}\TText{void * value}IN/OUT: The default value for the property being created, * which will be passed to H5Pregister2()
    IN: The identifier of the property list being modified
    \Code{const char * name}\TText{const char * name}IN: The name of the property being modified
    \Code{size_t size}\TText{size_t size}IN: The size of the property in bytes
    \Code{void *value}\TText{void *value}IN/OUT: Pointer to new value pointer for the property * being modified
    \Code{const char * name}\TText{const char * name}IN: The name of the property being queried
    \Code{size_t size}\TText{size_t size}IN: The size of the property in bytes
    \Code{void * value}\TText{void * value}IN/OUT: The value of the property being returned
    @@ -1778,15 +1778,15 @@ H5_DLL int H5Piterate(hid_t id, int *idx, H5P_iterate_t iter_func, void *iter_da * being deleted from * * - * \Code{const char * name} + * \TText{const char * name} * IN: The name of the property in the list * * - * \Code{size_t size} + * \TText{size_t size} * IN: The size of the property in bytes * * - * \Code{void * value} + * \TText{void * value} * IN: The value for the property being deleted * * @@ -1807,15 +1807,15 @@ H5_DLL int H5Piterate(hid_t id, int *idx, H5P_iterate_t iter_func, void *iter_da * * * - * + * * * * - * + * * * * - * + * * * *
    \Code{const char * name}\TText{const char * name}IN: The name of the property being copied
    \Code{size_t size}\TText{size_t size}IN: The size of the property in bytes
    \Code{void * value}\TText{void * value}IN/OUT: The value for the property being copied
    @@ -1837,15 +1837,15 @@ H5_DLL int H5Piterate(hid_t id, int *idx, H5P_iterate_t iter_func, void *iter_da * * * - * + * * * * - * + * * * * - * + * * * *
    \Code{const void * value1}\TText{const void * value1}IN: The value of the first property to compare
    \Code{const void * value2}\TText{const void * value2}IN: The value of the second property to compare
    \Code{size_t size}\TText{size_t size}IN: The size of the property in bytes
    @@ -1865,15 +1865,15 @@ H5_DLL int H5Piterate(hid_t id, int *idx, H5P_iterate_t iter_func, void *iter_da * * * - * + * * * * - * + * * * * - * + * * * *
    \Code{const char * name}\TText{const char * name}IN: The name of the property in the list
    \Code{size_t size}\TText{size_t size}IN: The size of the property in bytes
    \Code{void * value}\TText{void * value}IN: The value for the property being closed
    @@ -2204,7 +2204,7 @@ H5_DLL herr_t H5Pget_filter_by_id2(hid_t plist_id, H5Z_filter_t filter_id, unsig * \details H5Pget_nfilters() returns the number of filters defined in the * filter pipeline associated with the property list \p plist_id. * - * In each pipeline, the filters are numbered from 0 through \Code{N-1}, + * In each pipeline, the filters are numbered from 0 through \TText{N-1}, * where \c N is the value returned by this function. During output to * the file, the filters are applied in increasing order; during * input from the file, they are applied in decreasing order. @@ -2821,7 +2821,7 @@ H5_DLL herr_t H5Pset_fletcher32(hid_t plist_id); * return it in the #H5O_info_t struct. * * If times are not tracked, they will be reported as follows when queried: - * \Code{ 12:00 AM UDT, Jan. 1, 1970} + * \TText{ 12:00 AM UDT, Jan. 1, 1970} * * That date and time are commonly used to represent the beginning of the UNIX epoch. * @@ -3674,17 +3674,17 @@ H5_DLL herr_t H5Pget_fclose_degree(hid_t fapl_id, H5F_close_degree_t *degree); * \param[in,out] buf_ptr_ptr On input, \c NULL or a pointer to a * pointer to a buffer that contains the * file image.\n On successful return, if \p buf_ptr_ptr is not - * \c NULL, \Code{*buf_ptr_ptr} will contain a pointer to a copy + * \c NULL, \TText{*buf_ptr_ptr} will contain a pointer to a copy * of the initial image provided in the last call to * H5Pset_file_image() for the supplied \p fapl_id. If no initial - * image has been set, \Code{*buf_ptr_ptr} will be \c NULL. + * image has been set, \TText{*buf_ptr_ptr} will be \c NULL. * \param[in,out] buf_len_ptr On input, \c NULL or a pointer to a buffer * specifying the required size of the buffer to hold the file * image.\n On successful return, if \p buf_len_ptr was not * passed in as \c NULL, \p buf_len_ptr will return the required * size in bytes of the buffer to hold the initial file image in * the supplied file access property list, \p fapl_id. If no - * initial image is set, the value of \Code{*buf_len_ptr} will be + * initial image is set, the value of \TText{*buf_len_ptr} will be * set to 0 (zero) * \return \herr_t * @@ -3708,7 +3708,7 @@ H5_DLL herr_t H5Pget_fclose_degree(hid_t fapl_id, H5F_close_degree_t *degree); * \see H5LTopen_file_image(), H5Fget_file_image(), H5Pset_file_image(), * H5Pset_file_image_callbacks(), H5Pget_file_image_callbacks(), * \ref H5FD_file_image_callbacks_t, \ref H5FD_file_image_op_t, - * + * * HDF5 File Image Operations. * * @@ -3748,7 +3748,7 @@ H5_DLL herr_t H5Pget_file_image(hid_t fapl_id, void **buf_ptr_ptr, size_t *buf_l * \see H5LTopen_file_image(), H5Fget_file_image(), H5Pset_file_image(), * H5Pset_file_image_callbacks(), H5Pget_file_image_callbacks(), * \ref H5FD_file_image_callbacks_t, \ref H5FD_file_image_op_t, - * + * * HDF5 File Image Operations. * * \since 1.8.9 @@ -3850,7 +3850,7 @@ H5_DLL herr_t H5Pget_libver_bounds(hid_t plist_id, H5F_libver_t *low, H5F_libver * instance of #H5AC_cache_config_t pointed to by the \p config_ptr * parameter. This configuration is used when the file is opened. * - * Note that the version field of \Code{*config_ptr} must be + * Note that the version field of \TText{*config_ptr} must be * initialized; this allows the library to support earlier versions of * the #H5AC_cache_config_t structure. * @@ -4692,7 +4692,7 @@ H5_DLL herr_t H5Pset_fclose_degree(hid_t fapl_id, H5F_close_degree_t degree); * This function is part of the file image * operations feature set. 
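A small sketch of the size-then-copy retrieval pattern documented for H5Pget_file_image(), assuming the default malloc/free image callbacks and a hypothetical buffer (initial_image, initial_image_size) previously installed with H5Pset_file_image():

```c
size_t image_len  = 0;
void  *image_copy = NULL;

H5Pset_file_image(fapl, initial_image, initial_image_size); /* hypothetical in-memory image */

H5Pget_file_image(fapl, NULL, &image_len);                  /* query the required size only */
if (image_len > 0) {
    H5Pget_file_image(fapl, &image_copy, &image_len);       /* library-allocated copy */
    /* ... use the copy ... */
    free(image_copy);                                       /* caller releases the copy */
}
```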
It is highly recommended to study the guide * [HDF5 File Image Operations] - * (https://\DOCURL/advanced_topics/file_image_ops.html + * (https://\DOCURL/advanced_topics/file_image_ops.md * ) before using this feature set. See the “See Also” section below * for links to other elements of HDF5 file image operations. * @@ -4704,9 +4704,9 @@ H5_DLL herr_t H5Pset_fclose_degree(hid_t fapl_id, H5F_close_degree_t degree); * \li H5Pget_file_image_callbacks() * * \li [HDF5 File Image Operations] - * (https://\DOCURL/advanced_topics/file_image_ops.html) + * (https://\DOCURL/advanced_topics/file_image_ops.md) * in [Advanced Topics in HDF5] - * (https://\DOCURL/advanced_topics_list.html) + * (https://\DOCURL/advanced_topics_list.md) * * \li Within H5Pset_file_image_callbacks(): * \li Callback #H5FD_file_image_callbacks_t @@ -4729,7 +4729,7 @@ H5_DLL herr_t H5Pset_file_image(hid_t fapl_id, void *buf_ptr, size_t buf_len); * **Recommended Reading:** This function is part of the file * image operations feature set. It is highly recommended to study * the guide [HDF5 File Image Operations] - * (https://\DOCURL/advanced_topics/file_image_ops.html + * (https://\DOCURL/advanced_topics/file_image_ops.md * ) before using this feature set. See the “See Also” section below * for links to other elements of HDF5 file image operations. * @@ -5205,7 +5205,7 @@ H5_DLL herr_t H5Pset_mdc_config(hid_t plist_id, H5AC_cache_config_t *config_ptr) * current state of the logging flags. * * The log format is described in [Metadata Cache Logging] - * (https://\DSPURL/Fine-tuning+the+Metadata+Cache). + * (https://\DOCURL/advanced_topics/FineTuningMetadataCache.md). * * \since 1.10.0 * @@ -5232,7 +5232,7 @@ H5_DLL herr_t H5Pset_mdc_log_options(hid_t plist_id, hbool_t is_enabled, const c * * The default setting is 2048 bytes, meaning that the library will * attempt to aggregate metadata in at least 2K blocks in the file. - * Setting the value to zero (\Code{0}) with this function will turn + * Setting the value to zero (\TText{0}) with this function will turn * off metadata aggregation, even if the VFL driver attempts to use the * metadata aggregation strategy. * @@ -5250,12 +5250,12 @@ H5_DLL herr_t H5Pset_meta_block_size(hid_t fapl_id, hsize_t size); * \brief Sets the number of read attempts in a file access property list * * \fapl_id{plist_id} - * \param[in] attempts The number of read attempts. Must be a value greater than \Code{0} + * \param[in] attempts The number of read attempts. Must be a value greater than \TText{0} * * \return \herr_t * * \return Failure Modes: - * - When the user sets the number of read attempts to \Code{0}. + * - When the user sets the number of read attempts to \TText{0}. * - When the input property list is not a file access property list. * - When the library is unable to set the number of read attempts in the file access property list. * @@ -5273,11 +5273,11 @@ H5_DLL herr_t H5Pset_meta_block_size(hid_t fapl_id, hsize_t size); * opened and whether the user sets the number of read attempts via this routine: * - For a file opened with SWMR access: - * - If the user sets the number of attempts to \Code{N}, the library will use \Code{N}. + * - If the user sets the number of attempts to \TText{N}, the library will use \TText{N}. * - If the user does not set the number of attempts, the library will use the - * default for SWMR access (\Code{100}). + * default for SWMR access (\TText{100}). 
* - For a file opened with non-SWMR access, the library will always use the default - * for non-SWMR access (\Code{1}). The value set via this routine does not have any effect + * for non-SWMR access (\TText{1}). The value set via this routine does not have any effect * during non-SWMR access. * * \b Example: The first example illustrates the case in setting the number of read attempts for a file @@ -5304,7 +5304,7 @@ H5_DLL herr_t H5Pset_metadata_read_attempts(hid_t plist_id, unsigned attempts); /** * \ingroup FAPL * - * \brief Specifies type of data to be accessed via the \Code{MULTI} driver, + * \brief Specifies type of data to be accessed via the \TText{MULTI} driver, * enabling more direct access * * \fapl_id{fapl_id} @@ -5316,7 +5316,7 @@ H5_DLL herr_t H5Pset_metadata_read_attempts(hid_t plist_id, unsigned attempts); * access property list \p fapl_id. This setting enables a user * application to specify the type of data the application wishes to * access so that the application can retrieve a file handle for - * low-level access to the particular member of a set of \Code{MULTI} + * low-level access to the particular member of a set of \TText{MULTI} * files in which that type of data is stored. The file handle is * retrieved with a separate call to H5Fget_vfd_handle() (or, in special * circumstances, to H5FDget_vfd_handle(); see \ref VFL. @@ -5345,7 +5345,7 @@ H5_DLL herr_t H5Pset_metadata_read_attempts(hid_t plist_id, unsigned attempts); * * * This function is for use only when accessing an HDF5 file written as a set of - * files with the \Code{MULTI} file driver. + * files with the \TText{MULTI} file driver. * * \since 1.6.0 */ @@ -5372,8 +5372,8 @@ H5_DLL herr_t H5Pset_multi_type(hid_t fapl_id, H5FD_mem_t type); * \endcode * * The parameters of the callback function, per the above prototypes, are defined as follows: - * - \Code{object_id} is the identifier of the object which has just been flushed. - * - \Code{user_data} is the user-defined input data for the callback function. + * - \TText{object_id} is the identifier of the object which has just been flushed. + * - \TText{user_data} is the user-defined input data for the callback function. * * \b Example: The example below illustrates the usage of this routine to set * the callback function to invoke when an object flush occurs. @@ -5404,7 +5404,7 @@ H5_DLL herr_t H5Pset_object_flush_cb(hid_t plist_id, H5F_flush_cb_t func, void * * * The default value is set to 64KB, indicating that file I/O for raw * data reads and writes will occur in at least 64KB blocks. Setting - * the value to zero (\Code{0}) with this API function will turn off + * the value to zero (\TText{0}) with this API function will turn off * the data sieving, even if the VFL driver attempts to use that * strategy. * @@ -5413,7 +5413,7 @@ H5_DLL herr_t H5Pset_object_flush_cb(hid_t plist_id, H5F_flush_cb_t func, void * * access property and the size of the dataset to allocate the sieve * buffer for the dataset in order to save memory usage. * - * \version 1.6.0 The \p size parameter has changed from type \Code{hsize_t} to \Code{size_t}. + * \version 1.6.0 The \p size parameter has changed from type \TText{hsize_t} to \TText{size_t}. * * \since 1.4.0 */ @@ -5425,7 +5425,7 @@ H5_DLL herr_t H5Pset_sieve_buf_size(hid_t fapl_id, size_t size); * * \fapl_id{fapl_id} * \param[in] size Maximum size, in bytes, of the small data block. - The default size is \Code{2048}. + The default size is \TText{2048}. 
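A sketch of the flush-callback prototype described above; count_flushes and flush_count are illustrative names, and the counter reaches the callback through the user_data argument:

```c
/* Called by the library after each object flush */
static herr_t
count_flushes(hid_t object_id, void *user_data)
{
    (void)object_id;              /* identifier of the object that was just flushed */
    (*(unsigned *)user_data)++;   /* caller-owned counter */
    return 0;
}

/* ... */
unsigned flush_count = 0;
hid_t    fapl        = H5Pcreate(H5P_FILE_ACCESS);

H5Pset_object_flush_cb(fapl, count_flushes, &flush_count);
/* open or create a file with this fapl; flush_count grows as objects are flushed */
```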
* * \return \herr_t * @@ -5451,7 +5451,7 @@ H5_DLL herr_t H5Pset_sieve_buf_size(hid_t fapl_id, size_t size); * The small data block size is set as an allocation property in the * file access property list identified by \p fapl_id. * - * Setting \p size to zero (\Code{0}) disables the small data block mechanism. + * Setting \p size to zero (\TText{0}) disables the small data block mechanism. * * \since 1.4.4 */ @@ -5512,8 +5512,8 @@ H5_DLL herr_t H5Pget_vol_cap_flags(hid_t plist_id, uint64_t *cap_flags); * * \gacpl_id * \param[in] is_collective Boolean value indicating whether metadata reads are collective - * (\Code{1}) or independent (\Code{0}). - * Default mode: Independent (\Code{0}) + * (\TText{1}) or independent (\TText{0}). + * Default mode: Independent (\TText{0}) * * \return \herr_t * @@ -5521,9 +5521,9 @@ H5_DLL herr_t H5Pget_vol_cap_flags(hid_t plist_id, uint64_t *cap_flags); * operations in the access property list \p plist_id. * * When engaging in parallel I/O, all metadata write operations must be - * collective. If \p is_collective is \Code{1}, this property specifies + * collective. If \p is_collective is \TText{1}, this property specifies * that the HDF5 library will perform all metadata read operations - * collectively; if \p is_collective is \Code{0}, such operations may + * collectively; if \p is_collective is \TText{0}, such operations may * be performed independently. * * Users must be aware that several HDF5 operations can potentially @@ -5563,7 +5563,7 @@ H5_DLL herr_t H5Pget_vol_cap_flags(hid_t plist_id, uint64_t *cap_flags); * cache and HDF5 library behavior will be undefined when both of the following * conditions exist: * - A file is created or opened with a file access property list in which the - * collective metadata I/O property is set to \Code{1}. + * collective metadata I/O property is set to \TText{1}. * - Any function is called that triggers an independent metadata read while the * file remains open with that file access property list. * @@ -5581,8 +5581,8 @@ H5_DLL herr_t H5Pset_all_coll_metadata_ops(hid_t plist_id, hbool_t is_collective * * \gacpl_id * \param[out] is_collective Pointer to a buffer containing the Boolean value indicating whether metadata - * reads are collective (\Code{>0}) or independent (\Code{0}). - * Default mode: Independent (\Code{0}) + * reads are collective (\TText{>0}) or independent (\TText{0}). + * Default mode: Independent (\TText{0}) * * \return \herr_t * @@ -5601,8 +5601,8 @@ H5_DLL herr_t H5Pget_all_coll_metadata_ops(hid_t plist_id, hbool_t *is_collectiv * * \fapl_id{plist_id} * \param[out] is_collective Boolean value indicating whether metadata - * writes are collective (\Code{>0}) or independent (\Code{0}). - * \Emph{Default mode:} Independent (\Code{0}) + * writes are collective (\TText{>0}) or independent (\TText{0}). + * \Emph{Default mode:} Independent (\TText{0}) * \return \herr_t * * \details H5Pset_coll_metadata_write() tells the HDF5 library whether to @@ -5630,8 +5630,8 @@ H5_DLL herr_t H5Pset_coll_metadata_write(hid_t plist_id, hbool_t is_collective); * * \fapl_id{plist_id} * \param[out] is_collective Pointer to a boolean value indicating whether - * metadata writes are collective (\Code{>0}) or independent (\Code{0}). - * \Emph{Default mode:} Independent (\Code{0}) + * metadata writes are collective (\TText{>0}) or independent (\TText{0}). 
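A hedged parallel-HDF5 sketch combining the two collective-metadata properties discussed above (requires an MPI-enabled build; the file name is illustrative):

```c
hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
H5Pset_all_coll_metadata_ops(fapl, 1); /* collective metadata reads */
H5Pset_coll_metadata_write(fapl, 1);   /* collective metadata writes */

hid_t file = H5Fcreate("coll_md.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
/* ... all ranks must now issue metadata-reading calls collectively ... */
H5Fclose(file);
H5Pclose(fapl);
```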
+ * \Emph{Default mode:} Independent (\TText{0}) * \return \herr_t * * \details H5Pget_coll_metadata_write() retrieves the collective metadata write @@ -5721,7 +5721,7 @@ H5_DLL herr_t H5Pset_mpi_params(hid_t fapl_id, MPI_Comm comm, MPI_Info info); * #H5AC_cache_image_config_t::entry_ageout should address this problem. In * the interim, not requesting a cache image every n file close/open cycles * may be an acceptable work around. The choice of \c n will be driven by - * application behavior, but \Code{n = 10} seems a good starting point. + * application behavior, but \TText{n = 10} seems a good starting point. * * \since 1.10.1 */ @@ -7073,7 +7073,7 @@ H5_DLL herr_t H5Pset_szip(hid_t plist_id, unsigned options_mask, unsigned pixels * \param[in] vspace_id The dataspace identifier with the selection within the * virtual dataset applied, possibly an unlimited selection * \param[in] src_file_name The name of the HDF5 file where the source dataset is - * located or a \Code{"."} (period) for a source dataset in the same + * located or a \TText{"."} (period) for a source dataset in the same * file. The file might not exist yet. The name can be specified using * a C-style \c printf statement as described below. * \param[in] src_dset_name The path to the HDF5 dataset in the file specified by @@ -7096,14 +7096,14 @@ H5_DLL herr_t H5Pset_szip(hid_t plist_id, unsigned options_mask, unsigned pixels * treated as literals except for the following substitutions: * * - * - * + * + * * * * * * * * @@ -971,7 +971,7 @@ * translated to and from standard types of the same class, as described above. * * \subsection subsec_datatype_function Datatype Function Summaries - * @see H5T reference manual provides a reference list of datatype functions, the H5T APIs. + * see \ref H5T reference manual provides a reference list of datatype functions, the H5T APIs. * * \subsection subsec_datatype_program Programming Model for Datatypes * The HDF5 Library implements an object-oriented model of datatypes. HDF5 datatypes are @@ -2019,7 +2019,7 @@ filled according to the value of this property. The padding can be: * *
    \Code{"%%"}Replaced with a single \Code{"%"} (percent) character.\TText{"%%"}Replaced with a single \TText{"%"} (percent) character.
    "%b"Where "" is the virtual dataset dimension axis (0-based) - * and \Code{"b"} indicates that the block count of the selection in that - * dimension should be used. The full expression (for example, \Code{"%0b"}) + * and \TText{"b"} indicates that the block count of the selection in that + * dimension should be used. The full expression (for example, \TText{"%0b"}) * is replaced with a single numeric value when the mapping is evaluated at * VDS access time. Example code for many source and virtual dataset mappings * is available in the "Examples of Source to Virtual Dataset Mapping" @@ -7120,7 +7120,7 @@ H5_DLL herr_t H5Pset_szip(hid_t plist_id, unsigned options_mask, unsigned pixels * When a source dataset residing in a different file is accessed, the * library will search for the source file \p src_file_name as described * below: - * \li If \p src_file_name is a \Code{"."} (period) then it refers to the + * \li If \p src_file_name is a \TText{"."} (period) then it refers to the * file containing the virtual dataset. * \li If \p src_file_name is a relative pathname, the following steps are * performed: @@ -7149,37 +7149,37 @@ H5_DLL herr_t H5Pset_szip(hid_t plist_id, unsigned options_mask, unsigned pixels * Note that \p src_file_name is considered to be an absolute pathname when * the following condition is true: * \li For Unix, the first character of \p src_file_name is a slash - * (\Code{/}).\n For example, consider a \p src_file_name of - * \Code{/tmp/A.h5}. If that source file does not exist, the new - * \p src_file_name after stripping will be \Code{A.h5}. + * (\TText{/}).\n For example, consider a \p src_file_name of + * \TText{/tmp/A.h5}. If that source file does not exist, the new + * \p src_file_name after stripping will be \TText{A.h5}. * \li For Windows, there are 6 cases: * 1. \p src_file_name is an absolute drive with absolute pathname.\n - * For example, consider a \p src_file_name of \Code{/tmp/A.h5}. + * For example, consider a \p src_file_name of \TText{/tmp/A.h5}. * If that source file does not exist, the new \p src_file_name - * after stripping will be \Code{A.h5}. + * after stripping will be \TText{A.h5}. * 2. \p src_file_name is an absolute pathname without specifying * drive name.\n For example, consider a \p src_file_name of - * \Code{/tmp/A.h5}. If that source file does not exist, the new - * \p src_file_name after stripping will be \Code{A.h5}. + * \TText{/tmp/A.h5}. If that source file does not exist, the new + * \p src_file_name after stripping will be \TText{A.h5}. * 3. \p src_file_name is an absolute drive with relative pathname.\n - * For example, consider a \p src_file_name of \Code{/tmp/A.h5}. + * For example, consider a \p src_file_name of \TText{/tmp/A.h5}. * If that source file does not exist, the new \p src_file_name - * after stripping will be \Code{tmp/A.h5}. + * after stripping will be \TText{tmp/A.h5}. * 4. \p src_file_name is in UNC (Uniform Naming Convention) format * with server name, share name, and pathname.\n - * For example, consider a \p src_file_name of \Code{/tmp/A.h5}. + * For example, consider a \p src_file_name of \TText{/tmp/A.h5}. * If that source file does not exist, the new \p src_file_name - * after stripping will be \Code{A.h5}. + * after stripping will be \TText{A.h5}. * 5. \p src_file_name is in Long UNC (Uniform Naming Convention) * format with server name, share name, and pathname.\n - * For example, consider a \p src_file_name of \Code{/tmp/A.h5}. + * For example, consider a \p src_file_name of \TText{/tmp/A.h5}. 
* If that source file does not exist, the new \p src_file_name - * after stripping will be \Code{A.h5}. + * after stripping will be \TText{A.h5}. * 6. \p src_file_name is in Long UNC (Uniform Naming Convention) * format with an absolute drive and an absolute pathname.\n - * For example, consider a \p src_file_name of \Code{/tmp/A.h5}. + * For example, consider a \p src_file_name of \TText{/tmp/A.h5}. * If that source file does not exist, the new \p src_file_name - * after stripping will be \Code{A.h5} + * after stripping will be \TText{A.h5} * * \see * Virtual Dataset Overview @@ -7538,7 +7538,7 @@ H5_DLL herr_t H5Pset_append_flush(hid_t dapl_id, unsigned ndims, const hsize_t b * use a hash table with 12421 elements and a maximum size of * 16 MB, while using the preemption policy specified for the * entire file: - * \Code{ + * \TText{ * H5Pset_chunk_cache(dapl_id, 12421, 16*1024*1024, * H5D_CHUNK_CACHE_W0_DEFAULT);} * @@ -8012,11 +8012,11 @@ H5_DLL herr_t H5Pset_btree_ratios(hid_t plist_id, double left, double middle, do * mining can only break the data up along the first dimension, so the * buffer must be large enough to accommodate a complete slice that * encompasses all of the remaining dimensions. For example, when strip - * mining a \Code{100x200x300} hyperslab of a simple data space, the - * buffer must be large enough to hold \Code{1x200x300} data - * elements. When strip mining a \Code{100x200x300x150} hyperslab of a + * mining a \TText{100x200x300} hyperslab of a simple data space, the + * buffer must be large enough to hold \TText{1x200x300} data + * elements. When strip mining a \TText{100x200x300x150} hyperslab of a * simple data space, the buffer must be large enough to hold - * \Code{1x200x300x150} data elements. + * \TText{1x200x300x150} data elements. * * If \p tconv and/or \p bkg are null pointers, then buffers will be * allocated and freed during the data transfer. @@ -8046,7 +8046,7 @@ H5_DLL herr_t H5Pset_buffer(hid_t plist_id, size_t size, void *tconv, void *bkg) * transfer property list \p plist_id. * * The \p expression parameter is a string containing an algebraic - * expression, such as \Code{(5/9.0)*(x-32)} or \Code{x*(x-5)}. When a + * expression, such as \TText{(5/9.0)*(x-32)} or \TText{x*(x-5)}. When a * dataset is read or written with this property list, the transform * expression is applied with the \c x being replaced by the values in * the dataset. 
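A minimal sketch of the transform-expression property described above, assuming dset_id and buf already exist; the expression is applied element-wise with x standing for the stored values:

```c
hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

H5Pset_data_transform(dxpl, "(5/9.0)*(x-32)");                    /* Fahrenheit -> Celsius */
H5Dread(dset_id, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, buf); /* values transformed on read */
H5Pclose(dxpl);
```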
When reading data, the values in the file are not diff --git a/src/H5Rdeprec.c b/src/H5Rdeprec.c index 988f3292722..154d47cb508 100644 --- a/src/H5Rdeprec.c +++ b/src/H5Rdeprec.c @@ -267,12 +267,12 @@ H5Rget_obj_type1(hid_t id, H5R_type_t ref_type, const void *ref) /* Check if using native VOL connector */ if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) - HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, H5G_UNKNOWN, "can't determine if VOL object is native connector object"); /* Must use native VOL connector for this operation */ if (!is_native_vol_obj) - HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, H5G_UNKNOWN, "H5Rget_obj_type1 is only meant to be used with the native VOL connector"); /* Get object type */ @@ -341,12 +341,12 @@ H5Rdereference1(hid_t obj_id, H5R_type_t ref_type, const void *ref) /* Check if using native VOL connector */ if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) - HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, H5I_INVALID_HID, "can't determine if VOL object is native connector object"); /* Must use native VOL connector for this operation */ if (!is_native_vol_obj) - HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, H5I_INVALID_HID, "H5Rdereference1 is only meant to be used with the native VOL connector"); /* Get object type */ @@ -614,12 +614,12 @@ H5Rdereference2(hid_t obj_id, hid_t oapl_id, H5R_type_t ref_type, const void *re /* Check if using native VOL connector */ if (H5VL_object_is_native(vol_obj, &is_native_vol_obj) < 0) - HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, FAIL, + HGOTO_ERROR(H5E_REFERENCE, H5E_CANTGET, H5I_INVALID_HID, "can't determine if VOL object is native connector object"); /* Must use native VOL connector for this operation */ if (!is_native_vol_obj) - HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, H5I_INVALID_HID, "H5Rdereference2 is only meant to be used with the native VOL connector"); /* Get object type */ @@ -694,7 +694,7 @@ H5Rget_region(hid_t id, H5R_type_t ref_type, const void *ref) "can't query if file uses native VOL connector"); if (!is_native_vol_obj) - HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, FAIL, + HGOTO_ERROR(H5E_REFERENCE, H5E_VOL, H5I_INVALID_HID, "H5Rget_region is only meant to be used with the native VOL connector"); /* Get object type */ diff --git a/src/H5Smodule.h b/src/H5Smodule.h index 2dc8fe127d6..b9897485405 100644 --- a/src/H5Smodule.h +++ b/src/H5Smodule.h @@ -53,7 +53,7 @@ * sub‐sampling, and scatter‐gather access to datasets. * * \subsection subsec_dataspace_function Dataspace Function Summaries - * @see H5S reference manual provides a reference list of dataspace functions, the H5S APIs. + * see \ref H5S reference manual provides a reference list of dataspace functions, the H5S APIs. * * \subsection subsec_dataspace_program Definition of Dataspace Objects and the Dataspace Programming Model * @@ -977,9 +977,9 @@ * \subsection subsec_dataspace_refer References * * Another use of selections is to store a reference to a region of a dataset in the file or an external file. - An HDF5 object reference + * An HDF5 object reference * object is a pointer to an object (attribute, dataset, group, or committed datatype) in the file or an - external file. A selection can + * external file. A selection can * be used to create a pointer to a set of selected elements of a dataset, called a region reference. 
The * selection can be either a point selection or a hyperslab selection. * @@ -990,13 +990,179 @@ * To discover the elements and/or read the data, the region reference can be dereferenced to obtain the * identifiers for the dataset and dataspace. * - * For more information, \see subsubsec_datatype_other_refs. + * For more information, \see \ref subsubsec_datatype_other_refs. * * \subsubsection subsubsec_dataspace_refer_use Example Uses for Region References + * Region references are used to implement stored pointers to data within a dataset. For example, features + * in a large dataset might be indexed by a table. See the figure below. This table could be stored as an + * HDF5 dataset with a compound datatype, for example, with a field for the name of the feature and a region + * reference to point to the feature in the dataset. See the second figure below. + * + * + * + * + * + *
    + * \image html Dspace_features.gif " Features indexed by a table" + *
    + * + * + * + * + * + *
    + * \image html Dspace_features_cmpd.gif "Storing the table with a compound datatype" + *
    * * \subsubsection subsubsec_dataspace_refer_create Creating References to Regions + * To create a region reference: + * \li 1. Create or open the dataset that contains the region + * \li 2. Get the dataspace for the dataset + * \li 3. Define a selection that specifies the region + * \li 4. Create a region reference using the dataset and dataspace with selection + * \li 5. Write the region reference(s) to the desired dataset or attribute + * \li 6. Release the region reference(s) + * + * The figure below shows a diagram of a file with three datasets. Dataset D1 and D2 are two dimensional + * arrays of integers. Dataset R1 is a one dimensional array of references to regions in D1 and D2. The + * regions can be any valid selection of the dataspace of the target dataset. + * + * + * + * + *
    + * \image html Dspace_three_datasets.gif "A file with three datasets" + *
+ * Note: In the figure above, R1 is a 1-D array of region pointers; each pointer refers to a selection + * in one dataset. + * + * The example below shows code to create the array of region references. The references are created in an + * array of type #H5R_ref_t. Each region is defined as a selection on the dataspace of the dataset, + * and a reference is created using \ref H5Rcreate_region(). The call to \ref H5Rcreate_region() specifies the + file, + * dataset, and the dataspace with selection. + * + * Create an array of region references + * \code + * // create an array of 4 region references + * H5R_ref_t ref[4]; + * + * // Create a reference to the first hyperslab in the first Dataset. + * offset[0] = 1; offset[1] = 1; + * count[0] = 3; count[1] = 2; + * status = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, offset, NULL, count, NULL); + * status = H5Rcreate_region(file_id, "D1", space_id, H5P_DEFAULT, &ref[0]); + * + * // The second reference is to a union of hyperslabs in the first Dataset + * offset[0] = 5; offset[1] = 3; + * count[0] = 1; count[1] = 4; + * status = H5Sselect_none(space_id); + * status = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, offset, NULL, count, NULL); + * offset[0] = 6; offset[1] = 5; + * count[0] = 1; count[1] = 2; + * status = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, offset, NULL, count, NULL); + * status = H5Rcreate_region(file_id, "D1", space_id, H5P_DEFAULT, &ref[1]); + * + * // the fourth reference is to a selection of points in the first Dataset + * status = H5Sselect_none(space_id); + * coord[0][0] = 4; coord[0][1] = 4; + * coord[1][0] = 2; coord[1][1] = 6; + * coord[2][0] = 3; coord[2][1] = 7; + * coord[3][0] = 1; coord[3][1] = 5; + * coord[4][0] = 5; coord[4][1] = 8; + * + * status = H5Sselect_elements(space_id, H5S_SELECT_SET, num_points, (const hsize_t *)coord); + * status = H5Rcreate_region(file_id, "D1", space_id, H5P_DEFAULT, &ref[3]); + * + * // the third reference is to a hyperslab in the second Dataset + * offset[0] = 0; offset[1] = 0; + * count[0] = 4; count[1] = 6; + * status = H5Sselect_hyperslab(space_id2, H5S_SELECT_SET, offset, NULL, count, NULL); + * status = H5Rcreate_region(file_id, "D2", space_id2, H5P_DEFAULT, &ref[2]); + * \endcode + * + * When all the references are created, the array of references is written to the dataset R1. The + * dataset is declared to have datatype #H5T_STD_REF. See the example below. Also, note the release + * of the references afterwards. + * + * Write the array of references to a dataset + * \code + * hsize_t dimsr[1]; + * dimsr[0] = 4; + * + * // Dataset with references. + * spacer_id = H5Screate_simple(1, dimsr, NULL); + * dsetr_id = H5Dcreate(file_id, "R1", H5T_STD_REF, spacer_id, H5P_DEFAULT, H5P_DEFAULT, + * H5P_DEFAULT); + * + * // Write dataset with the references. + * status = H5Dwrite(dsetr_id, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref); + * + * status = H5Rdestroy(&ref[0]); + * status = H5Rdestroy(&ref[1]); + * status = H5Rdestroy(&ref[2]); + * status = H5Rdestroy(&ref[3]); + * \endcode + * + * When creating region references, the following rules are enforced.
+ * \li The selection must be a valid selection for the target dataset, just as when transferring data + * \li The dataset must exist in the file when the reference is created with #H5Rcreate_region + * \li The target dataset must be in the same file as the stored reference * * \subsubsection subsubsec_dataspace_refer_read Reading References to Regions + * To retrieve data from a region reference, the reference must be read from the file, and then the data can + * be retrieved. The steps are: + * \li 1. Open the dataset or attribute containing the reference objects + * \li 2. Read the reference object(s) + * \li 3. For each region reference, get the dataset (#H5Ropen_object) and dataspace (#H5Ropen_region) + * \li 4. Use the dataspace and datatype to discover what space is needed to store the data, allocate the + * correct storage and create a dataspace and datatype to define the memory data layout + * \li 5. Release the region reference(s) + * + * The example below shows code to read an array of region references from a dataset, and then read the + * data from the first selected region. Note that the region reference has information that records the + * dataset (within the file) and the selection on the dataspace of the dataset. After dereferencing the + * region reference, the datatype, number of points, and some aspects of the selection can be discovered. + * (For a union of hyperslabs, it may not be possible to determine the exact set of hyperslabs that has been + * combined.) + * The table below the code example shows the inquiry functions. + * + * When reading data from a region reference, the following rules are enforced: + * \li The target dataset must be present and accessible in the file + * \li The selection must be a valid selection for the dataset + * + * Read an array of region references; read from the first selection + * \code + * dsetr_id = H5Dopen(file_id, "R1", H5P_DEFAULT); + * status = H5Dread(dsetr_id, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_out); + * + * // Dereference the first reference. + * // 1) get the dataset (H5Ropen_object) + * // 2) get the selected dataspace (H5Ropen_region) + * + * dsetv_id = H5Ropen_object(&ref_out[0], H5P_DEFAULT, H5P_DEFAULT); + * space_id = H5Ropen_region(&ref_out[0], H5P_DEFAULT, H5P_DEFAULT); + * + * // Discover how many points and shape of the data + * ndims = H5Sget_simple_extent_ndims(space_id); + * H5Sget_simple_extent_dims(space_id,dimsx,NULL); + * + * // Read and display hyperslab selection from the dataset. + * dimsy[0] = H5Sget_select_npoints(space_id); + * spacex_id = H5Screate_simple(1, dimsy, NULL); + * + * status = H5Dread(dsetv_id, H5T_NATIVE_INT, H5S_ALL, space_id, H5P_DEFAULT, data_out); + * printf("Selected hyperslab: "); + * for (i = 0; i < 8; i++) { + * printf("\n"); + * for (j = 0; j < 10; j++) + * printf("%d ", data_out[i][j]); + * } + * printf("\n"); + * + * status = H5Rdestroy(&ref_out[0]); + * \endcode + * * * \subsection subsec_dataspace_deprecated_refer Deprecated References to Dataset Regions * The API described in this section was deprecated since HDF5 1.12.0. Shown are * @@ -1016,34 +1182,7 @@ * retrieved with a call to #H5Rget_region(). The selected dataspace can be used to read the selected data * elements. * - * For more information, \see subsubsec_datatype_other_refs. - * - * \subsubsection subsubsec_dataspace_deprecated_refer_use Deprecated Example Uses for Region References - * - * Region references are used to implement stored pointers to data within a dataset. 
For example, features - * in a large dataset might be indexed by a table. See the figure below. This table could be stored as an - * HDF5 dataset with a compound datatype, for example, with a field for the name of the feature and a region - * reference to point to the feature in the dataset. See the second figure below. - * - * - * - * - * - *
    - * \image html Dspace_features.gif " Features indexed by a table" - *
    - * - * - * - * - * - *
    - * \image html Dspace_features_cmpd.gif "Storing the table with a compound datatype" - *
    - * - * * \subsubsection subsubsec_dataspace_deprecated_refer_create Deprecated Creating References to Regions - * * To create a region reference: * \li 1. Create or open the dataset that contains the region * \li 2. Get the dataspace for the dataset @@ -1183,6 +1322,7 @@ * printf("\n"); * \endcode * + * \subsection subsec_dataspace_funcs Functions * * * @@ -1243,7 +1383,6 @@ * *
    The inquiry functions
    * - * * \subsection subsec_dataspace_sample Sample Programs * * This section contains the full programs from which several of the code examples in this chapter were diff --git a/src/H5TS.c b/src/H5TS.c index 590e20b70ff..05d700c4652 100644 --- a/src/H5TS.c +++ b/src/H5TS.c @@ -67,10 +67,12 @@ H5TS_api_info_t H5TS_api_info_p; /*-------------------------------------------------------------------------- * Function: H5TSmutex_acquire * - * Purpose: Attempts to acquire the HDF5 library global lock + * Purpose: Attempts to acquire the HDF5 library global lock. Should be preceded by a call to + * H5TSmutex_release(). * - * Note: On success, the 'acquired' flag indicates if the HDF5 library - * global lock was acquired. + * Parameters: + * lock_count; IN: The lock count that was held on the mutex before its release + * acquired; OUT: Whether the HDF5 library global lock was acquired * * Return: Non-negative on success / Negative on failure * @@ -118,10 +120,17 @@ H5TSmutex_get_attempt_count(unsigned *count) /*-------------------------------------------------------------------------- * Function: H5TSmutex_release * - * Purpose: Releases the HDF5 library global lock + * Purpose: Releases the HDF5 library global lock. Should be followed by a call to H5TSmutex_acquire(). * - * Return: Non-negative on success / Negative on failure + * This should be used by applications to temporarily release the lock in order to either perform + * multi-threaded work of their own or yield control to another thread using HDF5. The value + * returned in lock_count should be provided to H5TSmutex_acquire() in order to resume a + * consistent library state. + * + * Parameters: + * lock_count; OUT: The current lock count for the calling thread. * + * Return: Non-negative on success / Negative on failure *-------------------------------------------------------------------------- */ herr_t diff --git a/src/H5TSatomic.c b/src/H5TSatomic.c index 1e3798cd026..e765a5f491a 100644 --- a/src/H5TSatomic.c +++ b/src/H5TSatomic.c @@ -77,8 +77,7 @@ H5TS_atomic_init_int(H5TS_atomic_int_t *obj, int desired) { FUNC_ENTER_NOAPI_NAMECHECK_ONLY - /* Initialize mutex that protects the "atomic" value */ - (void) + /* Initialize mutex that protects the "atomic" value */ H5TS_mutex_init(&obj->mutex, H5TS_MUTEX_TYPE_PLAIN); /* Set the value */ @@ -104,8 +103,7 @@ H5TS_atomic_destroy_int(H5TS_atomic_int_t *obj) { FUNC_ENTER_NOAPI_NAMECHECK_ONLY - /* Destroy mutex that protects the "atomic" value */ - (void) + /* Destroy mutex that protects the "atomic" value */ H5TS_mutex_destroy(&obj->mutex); FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY @@ -128,8 +126,7 @@ H5TS_atomic_init_uint(H5TS_atomic_uint_t *obj, unsigned desired) { FUNC_ENTER_NOAPI_NAMECHECK_ONLY - /* Initialize mutex that protects the "atomic" value */ - (void) + /* Initialize mutex that protects the "atomic" value */ H5TS_mutex_init(&obj->mutex, H5TS_MUTEX_TYPE_PLAIN); /* Set the value */ @@ -155,13 +152,61 @@ H5TS_atomic_destroy_uint(H5TS_atomic_uint_t *obj) { FUNC_ENTER_NOAPI_NAMECHECK_ONLY - /* Destroy mutex that protects the "atomic" value */ - (void) + /* Destroy mutex that protects the "atomic" value */ H5TS_mutex_destroy(&obj->mutex); FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY } /* end H5TS_atomic_destroy_uint() */ +/*-------------------------------------------------------------------------- + * Function: H5TS_atomic_init_voidp + * + * Purpose: Initializes an atomic 'void *' variable object with a value. 
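A hedged sketch of the release/re-acquire pairing documented above (thread-safe builds only); the loop simply retries until the global lock is re-acquired at the saved depth:

```c
unsigned lock_count = 0;
hbool_t  acquired   = 0;

H5TSmutex_release(&lock_count);        /* give up the global lock, remember the held count */

/* ... do non-HDF5 work, or let another thread use the library ... */

do {
    H5TSmutex_acquire(lock_count, &acquired); /* pass the saved count back in */
} while (!acquired);
```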
+ * + * Note: Per the C11 standard, this function is not atomic and + * concurrent execution from multiple threads is a data race. + * + * Return: None + * + *-------------------------------------------------------------------------- + */ +void +H5TS_atomic_init_voidp(H5TS_atomic_voidp_t *obj, void *desired) +{ + FUNC_ENTER_NOAPI_NAMECHECK_ONLY + + /* Initialize mutex that protects the "atomic" value */ + H5TS_mutex_init(&obj->mutex, H5TS_MUTEX_TYPE_PLAIN); + + /* Set the value */ + obj->value = desired; + + FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY +} /* end H5TS_atomic_init_voidp() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS_atomic_destroy_voidp + * + * Purpose: Destroys / releases resources for an atomic 'void *' variable + * + * Note: No equivalent in the C11 atomics, but needed here, to destroy + * the mutex used to protect the atomic value. + * + * Return: None + * + *-------------------------------------------------------------------------- + */ +void +H5TS_atomic_destroy_voidp(H5TS_atomic_voidp_t *obj) +{ + FUNC_ENTER_NOAPI_NAMECHECK_ONLY + + /* Destroy mutex that protects the "atomic" value */ + H5TS_mutex_destroy(&obj->mutex); + + FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY +} /* end H5TS_atomic_destroy_voidp() */ + #endif /* H5_HAVE_STDATOMIC_H */ #endif /* H5_HAVE_THREADS */ diff --git a/src/H5TSatomic.h b/src/H5TSatomic.h index dc6e5e4ca0e..478760e8c4d 100644 --- a/src/H5TSatomic.h +++ b/src/H5TSatomic.h @@ -277,4 +277,71 @@ H5TS_atomic_fetch_sub_uint(H5TS_atomic_uint_t *obj, unsigned arg) return ret_value; } /* end H5TS_atomic_fetch_sub_uint() */ -#endif /* H5_HAVE_THREADS */ +/*-------------------------------------------------------------------------- + * Function: H5TS_atomic_exchange_voidp + * + * Purpose: Atomically replaces the value of an atomic 'void *' variable + * and returns the value held previously. + * + * Return: Returns the value of the atomic variable held previously + * + *-------------------------------------------------------------------------- + */ +static inline void * +H5TS_atomic_exchange_voidp(H5TS_atomic_voidp_t *obj, void *desired) +{ + void *ret_value; + + /* Lock mutex that protects the "atomic" value */ + H5TS_mutex_lock(&obj->mutex); + + /* Get the current value */ + ret_value = obj->value; + + /* Set the value */ + obj->value = desired; + + /* Release the object's mutex */ + H5TS_mutex_unlock(&obj->mutex); + + return ret_value; +} /* end H5TS_atomic_exchange_voidp() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS_atomic_compare_exchange_strong_voidp + * + * Purpose: Atomically compares the contents of 'obj' with 'expected', and + * if those are bitwise equal, replaces the former with 'desired' + * (performs read-modify-write operation). Otherwise, loads the + * actual contents of 'obj' into '*expected' (performs load + * operation). + * + * Return: The result of the comparison: true if 'obj' was equal to + * 'expected', false otherwise. 
+ * + *-------------------------------------------------------------------------- + */ +static inline bool +H5TS_atomic_compare_exchange_strong_voidp(H5TS_atomic_voidp_t *obj, void **expected, void *desired) +{ + bool ret_value; + + /* Lock mutex that protects the "atomic" value */ + H5TS_mutex_lock(&obj->mutex); + + /* Compare 'obj' w/'expected' */ + if (obj->value == *expected) { + obj->value = desired; + ret_value = true; + } + else { + *expected = obj->value; + ret_value = false; + } + /* Release the object's mutex */ + H5TS_mutex_unlock(&obj->mutex); + + return ret_value; +} /* end H5TS_atomic_compare_exchange_strong_voidp() */ + +#endif /* H5_HAVE_STDATOMIC_H */ diff --git a/src/H5TSpkg.h b/src/H5TSpkg.h index c2ff919f2f3..b90f4110d6b 100644 --- a/src/H5TSpkg.h +++ b/src/H5TSpkg.h @@ -34,9 +34,9 @@ /* Enable statistics for recursive R/W lock when H5TS debugging is enabled */ #ifdef H5TS_DEBUG -#define H5TS_ENABLE_REC_RW_LOCK_STATS 1 +#define H5TS_ENABLE_REC_RWLOCK_STATS 1 #else -#define H5TS_ENABLE_REC_RW_LOCK_STATS 0 +#define H5TS_ENABLE_REC_RWLOCK_STATS 0 #endif /****************************/ @@ -57,10 +57,10 @@ typedef struct H5TS_api_info_t { } H5TS_api_info_t; #endif /* H5_HAVE_THREADSAFE */ -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /****************************************************************************** * - * Structure H5TS_rw_lock_stats_t + * Structure H5TS_rec_rwlock_stats_t * * Statistics for the recursive R/W lock. * @@ -118,7 +118,7 @@ typedef struct H5TS_api_info_t { * ******************************************************************************/ -typedef struct H5TS_rw_lock_stats_t { +typedef struct H5TS_rec_rwlock_stats_t { int64_t read_locks_granted; int64_t read_locks_released; int64_t real_read_locks_granted; @@ -134,36 +134,35 @@ typedef struct H5TS_rw_lock_stats_t { int64_t max_write_lock_recursion_depth; int64_t write_locks_delayed; int64_t max_write_locks_pending; -} H5TS_rw_lock_stats_t; +} H5TS_rec_rwlock_stats_t; #endif /****************************************************************************** * - * Structure H5TS_rw_lock_t + * Structure H5TS_rec_rwlock_t * * A recursive readers / writer (R/W) lock. * - * This structure holds the fields needed to implement a recursive R/W lock - * that allows recursive write locks, and for the associated statistics - * collection fields. + * This structure holds the fields needed to implement a recursive R/W lock that + * allows recursive write locks, and the associated statistics collection fields. * * Note that we can't use the pthreads or Win32 R/W locks: they permit * recursive read locks, but disallow recursive write locks. * * Individual fields are: * - * mutex: Mutex used to maintain mutual exclusion on the fields of - * of this structure. + * mutex: Mutex used to maintain mutual exclusion on the fields of this + * structure. * * lock_type: Whether the lock is unused, a reader, or a writer. * * writers_cv: Condition variable used for waiting writers. * - * write_thread: The thread that owns a write lock, which is recursive - * for that thread. + * write_thread: The thread that owns a write lock, which is recursive for + * that thread. * - * rec_write_lock_count: The # of recursive write locks outstanding - * for the thread that owns the write lock. + * rec_write_lock_count: The # of recursive write locks outstanding for the + * thread that owns the write lock. * * waiting_writers_count: The count of waiting writers. 
* @@ -171,28 +170,27 @@ typedef struct H5TS_rw_lock_stats_t { * * reader_thread_count: The # of threads holding a read lock. * - * rec_read_lock_count_key: Instance of thread-local key used to maintain - * a thread-specific recursive lock count for each thread - * holding a read lock. + * rec_read_lock_count_key: Instance of thread-local key used to maintain a + * recursive lock count for each thread holding a read lock. * - * is_key_registered: Flag to track if the rec_read_lock_count_key has been + * is_key_registered: Flag to track if the read_lock_count_key has been * registered yet for a lock. * - * stats: Instance of H5TS_rw_lock_stats_t used to track - * statistics on the recursive R/W lock. + * stats: Instance of H5TS_rec_rwlock_stats_t used to track statistics + * on the lock. * ******************************************************************************/ typedef enum { - H5TS_RW_LOCK_UNUSED = 0, /* Lock is currently unused */ - H5TS_RW_LOCK_WRITE, /* Lock is a recursive write lock */ - H5TS_RW_LOCK_READ /* Lock is a recursive read lock */ -} H5TS_rw_lock_type_t; + H5TS_REC_RWLOCK_UNUSED = 0, /* Lock is currently unused */ + H5TS_REC_RWLOCK_WRITE, /* Lock is a recursive write lock */ + H5TS_REC_RWLOCK_READ /* Lock is a recursive read lock */ +} H5TS_rec_rwlock_type_t; -typedef struct H5TS_rw_lock_t { +typedef struct H5TS_rec_rwlock_t { /* General fields */ - H5TS_mutex_t mutex; - H5TS_rw_lock_type_t lock_type; + H5TS_mutex_t mutex; + H5TS_rec_rwlock_type_t lock_type; /* Writer fields */ H5TS_cond_t writers_cv; @@ -206,11 +204,11 @@ typedef struct H5TS_rw_lock_t { H5TS_key_t rec_read_lock_count_key; bool is_key_registered; -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* Stats */ - H5TS_rw_lock_stats_t stats; + H5TS_rec_rwlock_stats_t stats; #endif -} H5TS_rw_lock_t; +} H5TS_rec_rwlock_t; /*****************************/ /* Package Private Variables */ @@ -237,11 +235,12 @@ H5_DLL herr_t H5TS__tinfo_term(void); #endif /* H5_HAVE_THREADSAFE */ /* Recursive R/W lock related function declarations */ -H5_DLL herr_t H5TS__rw_lock_init(H5TS_rw_lock_t *rw_lock); -H5_DLL herr_t H5TS__rw_rdlock(H5TS_rw_lock_t *rw_lock); -H5_DLL herr_t H5TS__rw_wrlock(H5TS_rw_lock_t *rw_lock); -H5_DLL herr_t H5TS__rw_unlock(H5TS_rw_lock_t *rw_lock); -H5_DLL herr_t H5TS__rw_lock_destroy(H5TS_rw_lock_t *rw_lock); +H5_DLL herr_t H5TS__rec_rwlock_init(H5TS_rec_rwlock_t *lock); +H5_DLL herr_t H5TS__rec_rwlock_rdlock(H5TS_rec_rwlock_t *lock); +H5_DLL herr_t H5TS__rec_rwlock_wrlock(H5TS_rec_rwlock_t *lock); +H5_DLL herr_t H5TS__rec_rwlock_rdunlock(H5TS_rec_rwlock_t *lock); +H5_DLL herr_t H5TS__rec_rwlock_wrunlock(H5TS_rec_rwlock_t *lock); +H5_DLL herr_t H5TS__rec_rwlock_destroy(H5TS_rec_rwlock_t *lock); /* 'once' callbacks */ #ifdef H5_HAVE_THREADSAFE @@ -256,10 +255,10 @@ H5_DLL void H5TS__pthread_first_thread_init(void); #endif #endif /* H5_HAVE_THREADSAFE */ -#if H5TS_ENABLE_REC_RW_LOCK_STATS -H5_DLL herr_t H5TS__rw_lock_get_stats(H5TS_rw_lock_t *rw_lock, H5TS_rw_lock_stats_t *stats); -H5_DLL herr_t H5TS__rw_lock_reset_stats(H5TS_rw_lock_t *rw_lock); -H5_DLL herr_t H5TS__rw_lock_print_stats(const char *header_str, H5TS_rw_lock_stats_t *stats); +#if H5TS_ENABLE_REC_RWLOCK_STATS +H5_DLL herr_t H5TS__rec_rwlock_get_stats(H5TS_rec_rwlock_t *lock, H5TS_rec_rwlock_stats_t *stats); +H5_DLL herr_t H5TS__rec_rwlock_reset_stats(H5TS_rec_rwlock_t *lock); +H5_DLL herr_t H5TS__rec_rwlock_print_stats(const char *header_str, H5TS_rec_rwlock_stats_t *stats); #endif #endif /* H5_HAVE_THREADS */ diff --git 
a/src/H5TSprivate.h b/src/H5TSprivate.h index ccba5b8c390..d2be12414d6 100644 --- a/src/H5TSprivate.h +++ b/src/H5TSprivate.h @@ -99,7 +99,44 @@ #define H5TS_atomic_fetch_add_uint(obj, arg) atomic_fetch_add((obj), (arg)) #define H5TS_atomic_fetch_sub_uint(obj, arg) atomic_fetch_sub((obj), (arg)) #define H5TS_atomic_destroy_uint(obj) /* void */ -#endif /* H5_HAVE_STDATOMIC_H */ + +/* atomic_voidp */ +#define H5TS_atomic_init_voidp(obj, desired) atomic_init((obj), (desired)) +#define H5TS_atomic_exchange_voidp(obj, desired) atomic_exchange((obj), (desired)) +#define H5TS_atomic_compare_exchange_strong_voidp(obj, expected, desired) \ + atomic_compare_exchange_strong((obj), (expected), (desired)) +#define H5TS_atomic_destroy_voidp(obj) /* void */ +#endif /* H5_HAVE_STDATOMIC_H */ + +#if defined(H5_HAVE_STDATOMIC_H) +/* Spinlock operations, built from C11 atomics. Generally follows the example + * here: http://en.cppreference.com/w/cpp/atomic/atomic_flag with some memory + * order improvements. + * + * Note: Pass a pointer to a H5TS_spinlock_t to all the spinlock macros. + * + */ + +/* Initialize the lock */ +#define H5TS_SPINLOCK_INIT(lock) \ + do { \ + *(lock) = ATOMIC_FLAG_INIT; \ + } while (0) + +/* Acquire the lock */ +#define H5TS_SPINLOCK_LOCK(lock) \ + do { \ + while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire)) \ + ; \ + } while (0) + +/* Release the lock */ +#define H5TS_SPINLOCK_UNLOCK(lock) \ + do { \ + atomic_flag_clear_explicit(lock, memory_order_release); \ + } while (0) + +#endif /****************************/ /* Library Private Typedefs */ @@ -113,6 +150,14 @@ typedef struct H5TS_pool_t H5TS_pool_t; /* Portability aliases */ #ifdef H5_HAVE_C11_THREADS + +/* Non-recursive readers/writer lock */ +typedef struct H5TS_rwlock_t { + mtx_t mutex; + cnd_t read_cv, write_cv; + unsigned readers, writers, read_waiters, write_waiters; +} H5TS_rwlock_t; + typedef thrd_t H5TS_thread_t; typedef int (*H5TS_thread_start_func_t)(void *); typedef int H5TS_thread_ret_t; @@ -128,17 +173,19 @@ typedef LPTHREAD_START_ROUTINE H5TS_thread_start_func_t; typedef DWORD H5TS_thread_ret_t; typedef DWORD H5TS_key_t; typedef CRITICAL_SECTION H5TS_CAPABILITY("mutex") H5TS_mutex_t; +typedef SRWLOCK H5TS_rwlock_t; typedef CONDITION_VARIABLE H5TS_cond_t; typedef INIT_ONCE H5TS_once_t; typedef PINIT_ONCE_FN H5TS_once_init_func_t; #else typedef pthread_t H5TS_thread_t; typedef void *(*H5TS_thread_start_func_t)(void *); -typedef void *H5TS_thread_ret_t; -typedef pthread_key_t H5TS_key_t; -typedef pthread_mutex_t H5TS_CAPABILITY("mutex") H5TS_mutex_t; -typedef pthread_cond_t H5TS_cond_t; -typedef pthread_once_t H5TS_once_t; +typedef void *H5TS_thread_ret_t; +typedef pthread_key_t H5TS_key_t; +typedef pthread_mutex_t H5TS_CAPABILITY("mutex") H5TS_mutex_t; +typedef pthread_rwlock_t H5TS_rwlock_t; +typedef pthread_cond_t H5TS_cond_t; +typedef pthread_once_t H5TS_once_t; typedef void (*H5TS_once_init_func_t)(void); #endif #endif @@ -147,6 +194,12 @@ typedef void (*H5TS_once_init_func_t)(void); #if defined(H5_HAVE_STDATOMIC_H) && !defined(__cplusplus) typedef atomic_int H5TS_atomic_int_t; typedef atomic_uint H5TS_atomic_uint_t; +/* Suppress warning about _Atomic keyword not supported in C99 */ +H5_GCC_DIAG_OFF("c99-c11-compat") +H5_CLANG_DIAG_OFF("c11-extensions") +typedef void *_Atomic H5TS_atomic_voidp_t; +H5_GCC_DIAG_ON("c99-c11-compat") +H5_CLANG_DIAG_ON("c11-extensions") #else typedef struct { H5TS_mutex_t mutex; @@ -156,6 +209,10 @@ typedef struct { H5TS_mutex_t mutex; unsigned value; } 
H5TS_atomic_uint_t; +typedef struct { + H5TS_mutex_t mutex; + void *value; +} H5TS_atomic_voidp_t; #endif /* Thread Barrier */ @@ -203,6 +260,11 @@ typedef struct H5TS_semaphore_t { } H5TS_semaphore_t; #endif +#if defined(H5_HAVE_STDATOMIC_H) && !defined(__cplusplus) +/* Spinlock, built from C11 atomic_flag */ +typedef atomic_flag H5TS_spinlock_t; +#endif + /*****************************/ /* Library-private Variables */ /*****************************/ @@ -234,6 +296,15 @@ H5_DLL herr_t H5TS_mutex_init(H5TS_mutex_t *mutex, int type); H5_DLL herr_t H5TS_mutex_trylock(H5TS_mutex_t *mutex, bool *acquired) H5TS_TRY_ACQUIRE(SUCCEED, *mutex); H5_DLL herr_t H5TS_mutex_destroy(H5TS_mutex_t *mutex); +/* R/W locks */ +H5_DLL herr_t H5TS_rwlock_init(H5TS_rwlock_t *lock); +/* R/W lock & unlock calls are defined in H5TSrwlock.h */ +static inline herr_t H5TS_rwlock_rdlock(H5TS_rwlock_t *lock); +static inline herr_t H5TS_rwlock_rdunlock(H5TS_rwlock_t *lock); +static inline herr_t H5TS_rwlock_wrlock(H5TS_rwlock_t *lock); +static inline herr_t H5TS_rwlock_wrunlock(H5TS_rwlock_t *lock); +H5_DLL herr_t H5TS_rwlock_destroy(H5TS_rwlock_t *lock); + /* Condition variable operations */ H5_DLL herr_t H5TS_cond_init(H5TS_cond_t *cond); /* Condition variable wait, signal, broadcast calls are defined in H5TScond.h */ @@ -275,6 +346,14 @@ static inline void H5TS_atomic_store_uint(H5TS_atomic_uint_t *obj, unsigned static inline unsigned H5TS_atomic_fetch_add_uint(H5TS_atomic_uint_t *obj, unsigned arg); static inline unsigned H5TS_atomic_fetch_sub_uint(H5TS_atomic_uint_t *obj, unsigned arg); H5_DLL void H5TS_atomic_destroy_uint(H5TS_atomic_uint_t *obj); + +/* void * _Atomic (atomic void pointer) */ +H5_DLL void H5TS_atomic_init_voidp(H5TS_atomic_voidp_t *obj, void *desired); +/* Atomic 'void *' load, store, etc. calls are defined in H5TSatomic.h */ +static inline void *H5TS_atomic_exchange_voidp(H5TS_atomic_voidp_t *obj, void *desired); +static inline bool H5TS_atomic_compare_exchange_strong_voidp(H5TS_atomic_voidp_t *obj, void **expected, + void *desired); +H5_DLL void H5TS_atomic_destroy_voidp(H5TS_atomic_voidp_t *obj); #endif /* H5_HAVE_STDATOMIC_H */ /* Barrier related function declarations */ @@ -297,6 +376,7 @@ H5_DLL herr_t H5TS_semaphore_destroy(H5TS_semaphore_t *sem); #include "H5TSatomic.h" #endif /* H5_HAVE_STDATOMIC_H */ #include "H5TSbarrier.h" +#include "H5TSrwlock.h" #include "H5TSsemaphore.h" #include "H5TSpool.h" diff --git a/src/H5TSrec_rwlock.c b/src/H5TSrec_rwlock.c new file mode 100644 index 00000000000..e191aa3a290 --- /dev/null +++ b/src/H5TSrec_rwlock.c @@ -0,0 +1,745 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Purpose: This file contains support for recursive R/W locks, equivalent to + * the pthread 'pthread_rwlock_t' type and capabilities, except that + * threads that hold write access for the lock are allowed to acquire + * write access again (and must match each lock with an unlock operation). 
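A minimal sketch of the recursive write-lock behavior this new file provides, using the package-private H5TS__rec_rwlock_* API declared in H5TSpkg.h above (internal code only; error checking omitted). Each recursive lock call must be matched by an unlock:

    H5TS_rec_rwlock_t lock;

    H5TS__rec_rwlock_init(&lock);

    H5TS__rec_rwlock_wrlock(&lock);     /* first write lock                 */
    H5TS__rec_rwlock_wrlock(&lock);     /* same thread: allowed, depth == 2 */
    H5TS__rec_rwlock_wrunlock(&lock);   /* depth back to 1                  */
    H5TS__rec_rwlock_wrunlock(&lock);   /* lock fully released              */

    H5TS__rec_rwlock_destroy(&lock);
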
+ * + * Note: Because this threadsafety framework operates outside the library, + * it does not use the error stack (although it does use error macros + * that don't push errors on a stack) and only uses the "namecheck only" + * FUNC_ENTER_* / FUNC_LEAVE_* macros. + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5TSmodule.h" /* This source code file is part of the H5TS module */ + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5TSpkg.h" /* Threadsafety */ + +#ifdef H5_HAVE_THREADS + +/****************/ +/* Local Macros */ +/****************/ + +/******************/ +/* Local Typedefs */ +/******************/ + +/* + * Count of the number of active [recursive] read lock calls for a given thread. + * The # of readers for the lock in question is decremented when the recursive + * read lock count drops to zero. + */ +typedef int64_t H5TS_rec_entry_count_t; + +/********************/ +/* Local Prototypes */ +/********************/ + +/*********************/ +/* Package Variables */ +/*********************/ + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/*******************/ +/* Local Variables */ +/*******************/ + +#if H5TS_ENABLE_REC_RWLOCK_STATS +/*-------------------------------------------------------------------------- + * Function: H5TS__update_stats_rdlock + * + * Purpose: Update stats for acquiring a read lock + * + * Return: none + * + *-------------------------------------------------------------------------- + */ +static void +H5TS__update_stats_rdlock(H5TS_rec_rwlock_t *lock, const H5TS_rec_entry_count_t *count) +{ + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + assert(lock); + assert(H5TS_REC_RWLOCK_READ == lock->lock_type); + assert(count); + assert(*count >= 1); + + lock->stats.read_locks_granted++; + + if (*count == 1) { + lock->stats.real_read_locks_granted++; + if (lock->reader_thread_count > lock->stats.max_read_locks) + lock->stats.max_read_locks = lock->reader_thread_count; + } + + if (*count > lock->stats.max_read_lock_recursion_depth) + lock->stats.max_read_lock_recursion_depth = *count; + + FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY +} /* end H5TS__update_stats_rdlock() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__update_stats_rd_lock_delay + * + * Purpose: Update stats for delay in acquiring a read lock + * + * Return: none + * + *-------------------------------------------------------------------------- + */ +static void +H5TS__update_stats_rd_lock_delay(H5TS_rec_rwlock_t *lock) +{ + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + assert(lock); + + lock->stats.read_locks_delayed++; + + FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY +} /* end H5TS__update_stats_rd_lock_delay() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__update_stats_rd_unlock + * + * Purpose: Update stats for releasing a read lock + * + * Return: none + * + *-------------------------------------------------------------------------- + */ +static void +H5TS__update_stats_rd_unlock(H5TS_rec_rwlock_t *lock, const H5TS_rec_entry_count_t *count) +{ + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + assert(lock); + assert(H5TS_REC_RWLOCK_READ == lock->lock_type); + assert(count); + assert(*count >= 0); + + lock->stats.read_locks_released++; + + if (*count == 0) + lock->stats.real_read_locks_released++; + + FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY 
+} /* end H5TS__update_stats_rd_unlock() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__update_stats_wr_lock + * + * Purpose: Update stats for acquiring a write lock + * + * Return: none + * + *-------------------------------------------------------------------------- + */ +static void +H5TS__update_stats_wr_lock(H5TS_rec_rwlock_t *lock) +{ + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + assert(lock); + assert(H5TS_REC_RWLOCK_WRITE == lock->lock_type); + assert(lock->rec_write_lock_count >= 1); + + lock->stats.write_locks_granted++; + + if (lock->rec_write_lock_count == 1) { + lock->stats.real_write_locks_granted++; + if (lock->rec_write_lock_count > lock->stats.max_write_locks) + lock->stats.max_write_locks = lock->rec_write_lock_count; + } + + if (lock->rec_write_lock_count > lock->stats.max_write_lock_recursion_depth) + lock->stats.max_write_lock_recursion_depth = lock->rec_write_lock_count; + + FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY +} /* end H5TS__update_stats_wr_lock() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__update_stats_wr_lock_delay + * + * Purpose: Update stats for delay in acquiring a write lock + * + * Return: none + * + *-------------------------------------------------------------------------- + */ +static void +H5TS__update_stats_wr_lock_delay(H5TS_rec_rwlock_t *lock) +{ + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + assert(lock); + + lock->stats.write_locks_delayed++; + + if (lock->stats.max_write_locks_pending <= lock->waiting_writers_count) + lock->stats.max_write_locks_pending = lock->waiting_writers_count + 1; + + FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY +} /* end H5TS__update_stats_wr_lock_delay() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__update_stats_wr_unlock + * + * Purpose: Update stats for releasing a write lock + * + * Return: none + * + *-------------------------------------------------------------------------- + */ +static void +H5TS__update_stats_wr_unlock(H5TS_rec_rwlock_t *lock) +{ + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + assert(lock); + assert(H5TS_REC_RWLOCK_WRITE == lock->lock_type); + assert(lock->rec_write_lock_count >= 0); + + lock->stats.write_locks_released++; + + if (lock->rec_write_lock_count == 0) + lock->stats.real_write_locks_released++; + + FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY +} /* end H5TS__update_stats_wr_unlock() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__rec_rwlock_get_stats + * + * Purpose: Obtain a copy of the current statistics for a recursive + * read / write lock. + * + * Note: To obtain a consistent set of statistics, the function must + * obtain the lock mutex. 
+ * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +herr_t +H5TS__rec_rwlock_get_stats(H5TS_rec_rwlock_t *lock, H5TS_rec_rwlock_stats_t *stats) +{ + bool have_mutex = false; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + if (H5_UNLIKELY(NULL == lock || NULL == stats)) + HGOTO_DONE(FAIL); + + /* Acquire the mutex */ + if (H5_UNLIKELY(H5TS_mutex_lock(&lock->mutex))) + HGOTO_DONE(FAIL); + have_mutex = true; + + /* Copy R/W lock stats */ + *stats = lock->stats; + +done: + if (H5_LIKELY(have_mutex)) + if (H5_UNLIKELY(H5TS_mutex_unlock(&lock->mutex) < 0)) + ret_value = FAIL; + + FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) +} /* end H5TS__rec_rwlock_get_stats() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__rec_rwlock_reset_stats + * + * Purpose: Reset the statistics for the supplied recursive read / write + * lock. + * + * Note: To obtain a consistent set of statistics, the function must + * obtain the lock mutex. + * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +herr_t +H5TS__rec_rwlock_reset_stats(H5TS_rec_rwlock_t *lock) +{ + bool have_mutex = false; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + if (H5_UNLIKELY(NULL == lock)) + HGOTO_DONE(FAIL); + + /* Acquire the mutex */ + if (H5_UNLIKELY(H5TS_mutex_lock(&lock->mutex) < 0)) + HGOTO_DONE(FAIL); + have_mutex = true; + + /* Reset stats */ + memset(&lock->stats, 0, sizeof(lock->stats)); + +done: + if (H5_LIKELY(have_mutex)) + if (H5_UNLIKELY(H5TS_mutex_unlock(&lock->mutex) < 0)) + ret_value = FAIL; + + FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) +} /* end H5TS__rec_rwlock_reset_stats() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__rec_rwlock_print_stats + * + * Purpose: Print statistics for the supplied recursive R/W lock. 
+ * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +herr_t +H5TS__rec_rwlock_print_stats(const char *header_str, H5TS_rec_rwlock_stats_t *stats) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + if (H5_UNLIKELY(NULL == header_str || NULL == stats)) + HGOTO_DONE(FAIL); + + fprintf(stdout, "\n\n%s\n\n", header_str); + fprintf(stdout, " read_locks_granted = %" PRId64 "\n", stats->read_locks_granted); + fprintf(stdout, " read_locks_released = %" PRId64 "\n", stats->read_locks_released); + fprintf(stdout, " real_read_locks_granted = %" PRId64 "\n", stats->real_read_locks_granted); + fprintf(stdout, " real_read_locks_released = %" PRId64 "\n", stats->real_read_locks_released); + fprintf(stdout, " max_read_locks = %" PRId64 "\n", stats->max_read_locks); + fprintf(stdout, " max_read_lock_recursion_depth = %" PRId64 "\n", stats->max_read_lock_recursion_depth); + fprintf(stdout, " read_locks_delayed = %" PRId64 "\n", stats->read_locks_delayed); + fprintf(stdout, " write_locks_granted = %" PRId64 "\n", stats->write_locks_granted); + fprintf(stdout, " write_locks_released = %" PRId64 "\n", stats->write_locks_released); + fprintf(stdout, " real_write_locks_granted = %" PRId64 "\n", stats->real_write_locks_granted); + fprintf(stdout, " real_write_locks_released = %" PRId64 "\n", stats->real_write_locks_released); + fprintf(stdout, " max_write_locks = %" PRId64 "\n", stats->max_write_locks); + fprintf(stdout, " max_write_lock_recursion_depth = %" PRId64 "\n", + stats->max_write_lock_recursion_depth); + fprintf(stdout, " write_locks_delayed = %" PRId64 "\n", stats->write_locks_delayed); + fprintf(stdout, " max_write_locks_pending = %" PRId64 "\n\n", stats->max_write_locks_pending); + +done: + FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) +} /* end H5TS__rec_rwlock_print_stats() */ +#endif /* H5TS_ENABLE_REC_RWLOCK_STATS */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__rec_rwlock_init + * + * Purpose: Initialize the supplied instance of H5TS_rec_rwlock_t. + * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +herr_t +H5TS__rec_rwlock_init(H5TS_rec_rwlock_t *lock) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + if (H5_UNLIKELY(NULL == lock)) + HGOTO_DONE(FAIL); + +#ifdef H5_HAVE_WIN_THREADS + /* The current H5TS_rec_rwlock_t implementation uses H5TS_key_create() with a + * key destructor callback, which is not [currently] supported by Windows. + */ + HGOTO_DONE(FAIL); +#else + /* Initialize the lock */ + memset(lock, 0, sizeof(*lock)); + HDcompile_assert(H5TS_REC_RWLOCK_UNUSED == 0); + if (H5_UNLIKELY(H5TS_mutex_init(&lock->mutex, H5TS_MUTEX_TYPE_PLAIN) < 0)) + HGOTO_DONE(FAIL); + if (H5_UNLIKELY(H5TS_cond_init(&lock->writers_cv) < 0)) + HGOTO_DONE(FAIL); + if (H5_UNLIKELY(H5TS_cond_init(&lock->readers_cv) < 0)) + HGOTO_DONE(FAIL); +#endif + +done: + FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) +} /* end H5TS__rec_rwlock_init() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__rec_rwlock_destroy + * + * Purpose: Take down an instance of H5TS_rec_rwlock_t. All mutex, condition + * variables, and keys are destroyed. However, the instance of + * H5TS_rec_rwlock_t is not freed. 
+ * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +herr_t +H5TS__rec_rwlock_destroy(H5TS_rec_rwlock_t *lock) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + if (H5_UNLIKELY(NULL == lock)) + HGOTO_DONE(FAIL); + + /* Call the appropriate destroy routines. We are committed + * to the destroy at this point, so call them all, even if one fails + * along the way. + */ + if (H5_UNLIKELY(H5TS_mutex_destroy(&lock->mutex) < 0)) + ret_value = FAIL; + if (H5_UNLIKELY(H5TS_cond_destroy(&lock->readers_cv) < 0)) + ret_value = FAIL; + if (H5_UNLIKELY(H5TS_cond_destroy(&lock->writers_cv) < 0)) + ret_value = FAIL; + if (lock->is_key_registered) + if (H5_UNLIKELY(H5TS_key_delete(lock->rec_read_lock_count_key) < 0)) + ret_value = FAIL; + +done: + FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) +} /* end H5TS__rec_rwlock_destroy() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__rec_rwlock_rdlock + * + * Purpose: Attempt to obtain a read lock on the associated recursive + * read / write lock. + * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +herr_t +H5TS__rec_rwlock_rdlock(H5TS_rec_rwlock_t *lock) +{ + H5TS_rec_entry_count_t *count; + H5TS_thread_t my_thread = H5TS_thread_self(); + bool have_mutex = false; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + + if (H5_UNLIKELY(NULL == lock)) + HGOTO_DONE(FAIL); + + /* Acquire the mutex */ + if (H5_UNLIKELY(H5TS_mutex_lock(&lock->mutex) < 0)) + HGOTO_DONE(FAIL); + have_mutex = true; + + /* Fail if attempting to acquire a read lock on a thread that holds + * a write lock + */ + if (H5_UNLIKELY(H5TS_REC_RWLOCK_WRITE == lock->lock_type && + H5TS_thread_equal(my_thread, lock->write_thread))) + HGOTO_DONE(FAIL); + + /* If there is no thread-specific data for this thread, set it up */ + if (!lock->is_key_registered) { + if (H5_UNLIKELY(H5TS_key_create(&lock->rec_read_lock_count_key, free) < 0)) + HGOTO_DONE(FAIL); + lock->is_key_registered = true; + count = NULL; + } + else if (H5_UNLIKELY(H5TS_key_get_value(lock->rec_read_lock_count_key, (void **)&count) < 0)) + HGOTO_DONE(FAIL); + if (NULL == count) { + if (H5_UNLIKELY(NULL == (count = calloc(1, sizeof(*count))))) + HGOTO_DONE(FAIL); + if (H5_UNLIKELY(H5TS_key_set_value(lock->rec_read_lock_count_key, (void *)count) < 0)) + HGOTO_DONE(FAIL); + } + + if (*count > 0) { /* This is a recursive lock */ + assert(H5TS_REC_RWLOCK_READ == lock->lock_type); + assert(lock->reader_thread_count > 0 && lock->rec_write_lock_count == 0); + } + else { /* This is an initial read lock request, on this thread */ + /* Readers defer to current or pending writers */ + if (H5TS_REC_RWLOCK_WRITE == lock->lock_type) { +#if H5TS_ENABLE_REC_RWLOCK_STATS + H5TS__update_stats_rd_lock_delay(lock); +#endif + + do { + if (H5_UNLIKELY(H5TS_cond_wait(&lock->readers_cv, &lock->mutex) < 0)) + HGOTO_DONE(FAIL); + } while (H5TS_REC_RWLOCK_WRITE == lock->lock_type); + } + + /* Set counter's lock type (which might already be set) & increment + * number of reader threads + */ + lock->lock_type = H5TS_REC_RWLOCK_READ; + lock->reader_thread_count++; + } + + /* Increment read lock count for this thread */ + (*count)++; +#if H5TS_ENABLE_REC_RWLOCK_STATS + H5TS__update_stats_rdlock(lock, count); +#endif + +done: + if (H5_LIKELY(have_mutex)) + if 
(H5_UNLIKELY(H5TS_mutex_unlock(&lock->mutex) < 0)) + ret_value = FAIL; + + FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) +} /* end H5TS__rec_rwlock_rdlock() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__rec_rwlock_wrlock + * + * Purpose: Attempt to obtain a write lock on the associated recursive + * read / write lock. + * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +herr_t +H5TS__rec_rwlock_wrlock(H5TS_rec_rwlock_t *lock) +{ + H5TS_thread_t my_thread = H5TS_thread_self(); + bool have_mutex = false; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NAMECHECK_ONLY + + if (H5_UNLIKELY(NULL == lock)) + HGOTO_DONE(FAIL); + + /* Acquire the mutex */ + if (H5_UNLIKELY(H5TS_mutex_lock(&lock->mutex) < 0)) + HGOTO_DONE(FAIL); + have_mutex = true; + + /* Check for initial write lock request on this thread */ + if (H5TS_REC_RWLOCK_WRITE != lock->lock_type || !H5TS_thread_equal(my_thread, lock->write_thread)) { + /* Fail if attempting to acquire a write lock on a thread that holds + * a read lock + */ + if (H5TS_REC_RWLOCK_READ == lock->lock_type) { + H5TS_rec_entry_count_t *count; + + /* Sanity check */ + assert(lock->is_key_registered); + + /* Fail if read lock count for this thread is > 0 */ + if (H5_UNLIKELY(H5TS_key_get_value(lock->rec_read_lock_count_key, (void **)&count) < 0)) + HGOTO_DONE(FAIL); + if (H5_UNLIKELY(NULL != count && *count > 0)) + HGOTO_DONE(FAIL); + } + + /* If lock is already held, wait to acquire it */ + if (H5TS_REC_RWLOCK_UNUSED != lock->lock_type) { +#if H5TS_ENABLE_REC_RWLOCK_STATS + H5TS__update_stats_wr_lock_delay(lock); +#endif + + do { + int result; + + lock->waiting_writers_count++; + result = H5TS_cond_wait(&lock->writers_cv, &lock->mutex); + lock->waiting_writers_count--; + if (H5_UNLIKELY(result != 0)) + HGOTO_DONE(FAIL); + } while (H5TS_REC_RWLOCK_UNUSED != lock->lock_type); + } + + /* Set lock type & owner thread */ + lock->lock_type = H5TS_REC_RWLOCK_WRITE; + lock->write_thread = my_thread; + } + + /* Increment write lock count for this thread */ + lock->rec_write_lock_count++; +#if H5TS_ENABLE_REC_RWLOCK_STATS + H5TS__update_stats_wr_lock(lock); +#endif + +done: + if (H5_LIKELY(have_mutex)) + if (H5_UNLIKELY(H5TS_mutex_unlock(&lock->mutex) < 0)) + ret_value = FAIL; + + FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) +} /* end H5TS__rec_rwlock_wrlock() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__rec_rwlock_rdunlock + * + * Purpose: Attempt to unlock a read lock. 
+ * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +herr_t +H5TS__rec_rwlock_rdunlock(H5TS_rec_rwlock_t *lock) +{ + H5TS_rec_entry_count_t *count; + bool have_mutex = false; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NAMECHECK_ONLY + + if (H5_UNLIKELY(NULL == lock)) + HGOTO_DONE(FAIL); + + /* Acquire the mutex */ + if (H5_UNLIKELY(H5TS_mutex_lock(&lock->mutex) < 0)) + HGOTO_DONE(FAIL); + have_mutex = true; + + /* Error check */ + if (H5_UNLIKELY(H5TS_REC_RWLOCK_READ != lock->lock_type)) + HGOTO_DONE(FAIL); + + /* Sanity and error checks */ + assert(lock->is_key_registered); + assert(lock->reader_thread_count > 0); + assert(0 == lock->rec_write_lock_count); + if (H5_UNLIKELY(H5TS_key_get_value(lock->rec_read_lock_count_key, (void **)&count) < 0)) + HGOTO_DONE(FAIL); + if (H5_UNLIKELY(NULL == count)) + HGOTO_DONE(FAIL); + assert(*count > 0); + + /* Decrement recursive lock count for this thread */ + (*count)--; +#if H5TS_ENABLE_REC_RWLOCK_STATS + H5TS__update_stats_rd_unlock(lock, count); +#endif + + /* Check if this thread is releasing its last read lock */ + if (0 == *count) { + /* Decrement the # of threads with a read lock */ + lock->reader_thread_count--; + + /* Check if lock is unused now */ + if (0 == lock->reader_thread_count) { + lock->lock_type = H5TS_REC_RWLOCK_UNUSED; + + /* Indicate that lock is unused now */ + /* Prioritize pending writers if there are any */ + if (lock->waiting_writers_count > 0) { + if (H5_UNLIKELY(H5TS_cond_signal(&lock->writers_cv) < 0)) + HGOTO_DONE(FAIL); + } + else { + if (H5_UNLIKELY(H5TS_cond_broadcast(&lock->readers_cv) < 0)) + HGOTO_DONE(FAIL); + } + } + } + +done: + if (H5_LIKELY(have_mutex)) + if (H5_UNLIKELY(H5TS_mutex_unlock(&lock->mutex) < 0)) + ret_value = FAIL; + + FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) +} /* end H5TS__rec_rwlock_rdunlock() */ + +/*-------------------------------------------------------------------------- + * Function: H5TS__rec_rwlock_wrunlock + * + * Purpose: Attempt to unlock a write lock + * + * Return: Non-negative on success / Negative on failure + * + *-------------------------------------------------------------------------- + */ +herr_t +H5TS__rec_rwlock_wrunlock(H5TS_rec_rwlock_t *lock) +{ + bool have_mutex = false; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NAMECHECK_ONLY + + if (H5_UNLIKELY(NULL == lock)) + HGOTO_DONE(FAIL); + + /* Acquire the mutex */ + if (H5_UNLIKELY(H5TS_mutex_lock(&lock->mutex) < 0)) + HGOTO_DONE(FAIL); + have_mutex = true; + + /* Error check */ + if (H5_UNLIKELY(H5TS_REC_RWLOCK_WRITE != lock->lock_type)) + HGOTO_DONE(FAIL); + + /* Sanity checks */ + assert(0 == lock->reader_thread_count); + assert(lock->rec_write_lock_count > 0); + + /* Decrement recursive lock count */ + lock->rec_write_lock_count--; +#if H5TS_ENABLE_REC_RWLOCK_STATS + H5TS__update_stats_wr_unlock(lock); +#endif + + /* Check if lock is unused now */ + if (0 == lock->rec_write_lock_count) { + lock->lock_type = H5TS_REC_RWLOCK_UNUSED; + + /* Indicate that lock is unused now */ + /* Prioritize pending writers if there are any */ + if (lock->waiting_writers_count > 0) { + if (H5_UNLIKELY(H5TS_cond_signal(&lock->writers_cv) < 0)) + HGOTO_DONE(FAIL); + } + else { + if (H5_UNLIKELY(H5TS_cond_broadcast(&lock->readers_cv) < 0)) + HGOTO_DONE(FAIL); + } + } + +done: + if (H5_LIKELY(have_mutex)) + if (H5_UNLIKELY(H5TS_mutex_unlock(&lock->mutex) < 0)) + ret_value = FAIL; + + FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) +} 
/* end H5TS__rec_rwlock_wrunlock() */ + +#endif /* H5_HAVE_THREADS */ diff --git a/src/H5TSrwlock.c b/src/H5TSrwlock.c index 43bac524c17..d8f772318a5 100644 --- a/src/H5TSrwlock.c +++ b/src/H5TSrwlock.c @@ -11,10 +11,8 @@ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - * Purpose: This file contains support for recursive R/W locks, equivalent to - * the pthread 'pthread_rwlock_t' type and capabilities, except that - * threads that hold write access for the lock are allowed to acquire - * write access again (and must match each lock with an unlock operation). + * Purpose: This file contains support for non-recursive R/W locks, equivalent + * to the pthread 'pthread_rwlock_t' type and capabilities. * * Note: Because this threadsafety framework operates outside the library, * it does not use the error stack (although it does use error macros @@ -45,28 +43,6 @@ /* Local Typedefs */ /******************/ -/****************************************************************************** - * - * Structure H5TS_rec_entry_count_t; - * - * Structure associated with the rec_read_lock_count_key defined in - * H5TS_rw_lock_t. - * - * This structure maintains a count of recursive read locks so that the lock can - * be decremented when the thread-specific count drops to zero. - * - * Individual fields are: - * - * rec_lock_count: Count of the number of active [recursive] read lock calls - * for a given thread. The # of readers for the lock in question - * is decremented when the recursive read lock count drops to zero. - * - ******************************************************************************/ - -typedef struct H5TS_rec_entry_count { - int64_t rec_lock_count; -} H5TS_rec_entry_count_t; - /********************/ /* Local Prototypes */ /********************/ @@ -83,635 +59,183 @@ typedef struct H5TS_rec_entry_count { /* Local Variables */ /*******************/ -#if H5TS_ENABLE_REC_RW_LOCK_STATS -/*-------------------------------------------------------------------------- - * Function: H5TS__update_stats_rd_lock - * - * Purpose: Update stats for acquiring a read lock - * - * Return: none - * - *-------------------------------------------------------------------------- - */ -static void -H5TS__update_stats_rd_lock(H5TS_rw_lock_t *rw_lock, const H5TS_rec_entry_count_t *count) -{ - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY - - assert(rw_lock); - assert(H5TS_RW_LOCK_READ == rw_lock->lock_type); - assert(count); - assert(count->rec_lock_count >= 1); - - rw_lock->stats.read_locks_granted++; - - if (count->rec_lock_count == 1) { - rw_lock->stats.real_read_locks_granted++; - if (rw_lock->reader_thread_count > rw_lock->stats.max_read_locks) - rw_lock->stats.max_read_locks = rw_lock->reader_thread_count; - } - - if (count->rec_lock_count > rw_lock->stats.max_read_lock_recursion_depth) - rw_lock->stats.max_read_lock_recursion_depth = count->rec_lock_count; - - FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY -} /* end H5TS__update_stats_rd_lock() */ - -/*-------------------------------------------------------------------------- - * Function: H5TS__update_stats_rd_lock_delay - * - * Purpose: Update stats for delay in acquiring a read lock - * - * Return: none - * - *-------------------------------------------------------------------------- - */ -static void -H5TS__update_stats_rd_lock_delay(H5TS_rw_lock_t *rw_lock) -{ - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY - - assert(rw_lock); - - rw_lock->stats.read_locks_delayed++; - - FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY -} /* end H5TS__update_stats_rd_lock_delay() 
*/ - -/*-------------------------------------------------------------------------- - * Function: H5TS__update_stats_rd_unlock +#ifdef H5_HAVE_C11_THREADS +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_init * - * Purpose: Update stats for releasing a read lock + * Purpose: Initialize a H5TS_rwlock_t (does not allocate it) * - * Return: none + * Return: Non-negative on success / Negative on failure * - *-------------------------------------------------------------------------- - */ -static void -H5TS__update_stats_rd_unlock(H5TS_rw_lock_t *rw_lock, const H5TS_rec_entry_count_t *count) -{ - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY - - assert(rw_lock); - assert(H5TS_RW_LOCK_READ == rw_lock->lock_type); - assert(count); - assert(count->rec_lock_count >= 0); - - rw_lock->stats.read_locks_released++; - - if (count->rec_lock_count == 0) - rw_lock->stats.real_read_locks_released++; - - FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY -} /* end H5TS__update_stats_rd_unlock() */ - -/*-------------------------------------------------------------------------- - * Function: H5TS__update_stats_wr_lock - * - * Purpose: Update stats for acquiring a write lock - * - * Return: none - * - *-------------------------------------------------------------------------- - */ -static void -H5TS__update_stats_wr_lock(H5TS_rw_lock_t *rw_lock) -{ - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY - - assert(rw_lock); - assert(H5TS_RW_LOCK_WRITE == rw_lock->lock_type); - assert(rw_lock->rec_write_lock_count >= 1); - - rw_lock->stats.write_locks_granted++; - - if (rw_lock->rec_write_lock_count == 1) { - rw_lock->stats.real_write_locks_granted++; - if (rw_lock->rec_write_lock_count > rw_lock->stats.max_write_locks) - rw_lock->stats.max_write_locks = rw_lock->rec_write_lock_count; - } - - if (rw_lock->rec_write_lock_count > rw_lock->stats.max_write_lock_recursion_depth) - rw_lock->stats.max_write_lock_recursion_depth = rw_lock->rec_write_lock_count; - - FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY -} /* end H5TS__update_stats_wr_lock() */ - -/*-------------------------------------------------------------------------- - * Function: H5TS__update_stats_wr_lock_delay - * - * Purpose: Update stats for delay in acquiring a write lock - * - * Return: none - * - *-------------------------------------------------------------------------- - */ -static void -H5TS__update_stats_wr_lock_delay(H5TS_rw_lock_t *rw_lock) -{ - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY - - assert(rw_lock); - - rw_lock->stats.write_locks_delayed++; - - if (rw_lock->stats.max_write_locks_pending <= rw_lock->waiting_writers_count) - rw_lock->stats.max_write_locks_pending = rw_lock->waiting_writers_count + 1; - - FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY -} /* end H5TS__update_stats_wr_lock_delay() */ - -/*-------------------------------------------------------------------------- - * Function: H5TS__update_stats_wr_unlock - * - * Purpose: Update stats for releasing a write lock - * - * Return: none - * - *-------------------------------------------------------------------------- - */ -static void -H5TS__update_stats_wr_unlock(H5TS_rw_lock_t *rw_lock) -{ - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY - - assert(rw_lock); - assert(H5TS_RW_LOCK_WRITE == rw_lock->lock_type); - assert(rw_lock->rec_write_lock_count >= 0); - - rw_lock->stats.write_locks_released++; - - if (rw_lock->rec_write_lock_count == 0) - rw_lock->stats.real_write_locks_released++; - - FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY -} /* end H5TS__update_stats_wr_unlock() */ - 
-/*-------------------------------------------------------------------------- - * Function: H5TS__rw_lock_get_stats - * - * Purpose: Obtain a copy of the current statistics for a recursive - * read / write lock. - * - * Note: To obtain a consistent set of statistics, the function must - * obtain the lock mutex. - * - * Return: Non-negative on success / Negative on failure - * - *-------------------------------------------------------------------------- + *------------------------------------------------------------------------- */ herr_t -H5TS__rw_lock_get_stats(H5TS_rw_lock_t *rw_lock, H5TS_rw_lock_stats_t *stats) +H5TS_rwlock_init(H5TS_rwlock_t *lock) { - bool have_mutex = false; - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + FUNC_ENTER_NOAPI_NAMECHECK_ONLY - if (H5_UNLIKELY(NULL == rw_lock || NULL == stats)) + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) HGOTO_DONE(FAIL); - /* Acquire the mutex */ - if (H5_UNLIKELY(H5TS_mutex_lock(&rw_lock->mutex))) + /* Initialize synchronization primitives */ + if (H5_UNLIKELY(mtx_init(&lock->mutex, mtx_plain) != thrd_success)) HGOTO_DONE(FAIL); - have_mutex = true; - - /* Copy R/W lock stats */ - *stats = rw_lock->stats; - -done: - if (H5_LIKELY(have_mutex)) - if (H5_UNLIKELY(H5TS_mutex_unlock(&rw_lock->mutex) < 0)) - ret_value = FAIL; - - FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) -} /* end H5TS__rw_lock_get_stats() */ - -/*-------------------------------------------------------------------------- - * Function: H5TS__rw_lock_reset_stats - * - * Purpose: Reset the statistics for the supplied recursive read / write - * lock. - * - * Note: To obtain a consistent set of statistics, the function must - * obtain the lock mutex. - * - * Return: Non-negative on success / Negative on failure - * - *-------------------------------------------------------------------------- - */ -herr_t -H5TS__rw_lock_reset_stats(H5TS_rw_lock_t *rw_lock) -{ - bool have_mutex = false; - herr_t ret_value = SUCCEED; - - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY - - if (H5_UNLIKELY(NULL == rw_lock)) + if (H5_UNLIKELY(cnd_init(&lock->read_cv) != thrd_success)) HGOTO_DONE(FAIL); - - /* Acquire the mutex */ - if (H5_UNLIKELY(H5TS_mutex_lock(&rw_lock->mutex) < 0)) + if (H5_UNLIKELY(cnd_init(&lock->write_cv) != thrd_success)) HGOTO_DONE(FAIL); - have_mutex = true; - /* Reset stats */ - memset(&rw_lock->stats, 0, sizeof(rw_lock->stats)); + /* Initialize scalar fields */ + lock->readers = 0; + lock->writers = 0; + lock->read_waiters = 0; + lock->write_waiters = 0; done: - if (H5_LIKELY(have_mutex)) - if (H5_UNLIKELY(H5TS_mutex_unlock(&rw_lock->mutex) < 0)) - ret_value = FAIL; - FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) -} /* end H5TS__rw_lock_reset_stats() */ +} /* end H5TS_rwlock_init() */ -/*-------------------------------------------------------------------------- - * Function: H5TS__rw_lock_print_stats +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_destroy * - * Purpose: Print the supplied pthresds recursive R/W lock statistics. 
+ * Purpose: Destroy a H5TS_rwlock_t (does not free it) * - * Return: Non-negative on success / Negative on failure + * Return: Non-negative on success / Negative on failure * - *-------------------------------------------------------------------------- + *------------------------------------------------------------------------- */ herr_t -H5TS__rw_lock_print_stats(const char *header_str, H5TS_rw_lock_stats_t *stats) +H5TS_rwlock_destroy(H5TS_rwlock_t *lock) { herr_t ret_value = SUCCEED; - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + FUNC_ENTER_NOAPI_NAMECHECK_ONLY - if (H5_UNLIKELY(NULL == header_str || NULL == stats)) + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) HGOTO_DONE(FAIL); - fprintf(stdout, "\n\n%s\n\n", header_str); - fprintf(stdout, " read_locks_granted = %" PRId64 "\n", stats->read_locks_granted); - fprintf(stdout, " read_locks_released = %" PRId64 "\n", stats->read_locks_released); - fprintf(stdout, " real_read_locks_granted = %" PRId64 "\n", stats->real_read_locks_granted); - fprintf(stdout, " real_read_locks_released = %" PRId64 "\n", stats->real_read_locks_released); - fprintf(stdout, " max_read_locks = %" PRId64 "\n", stats->max_read_locks); - fprintf(stdout, " max_read_lock_recursion_depth = %" PRId64 "\n", stats->max_read_lock_recursion_depth); - fprintf(stdout, " read_locks_delayed = %" PRId64 "\n", stats->read_locks_delayed); - fprintf(stdout, " write_locks_granted = %" PRId64 "\n", stats->write_locks_granted); - fprintf(stdout, " write_locks_released = %" PRId64 "\n", stats->write_locks_released); - fprintf(stdout, " real_write_locks_granted = %" PRId64 "\n", stats->real_write_locks_granted); - fprintf(stdout, " real_write_locks_released = %" PRId64 "\n", stats->real_write_locks_released); - fprintf(stdout, " max_write_locks = %" PRId64 "\n", stats->max_write_locks); - fprintf(stdout, " max_write_lock_recursion_depth = %" PRId64 "\n", - stats->max_write_lock_recursion_depth); - fprintf(stdout, " write_locks_delayed = %" PRId64 "\n", stats->write_locks_delayed); - fprintf(stdout, " max_write_locks_pending = %" PRId64 "\n\n", stats->max_write_locks_pending); + /* Destroy synchronization primitives */ + /* NOTE: mtx_destroy() & cnd_destroy() can't fail */ + mtx_destroy(&lock->mutex); + cnd_destroy(&lock->read_cv); + cnd_destroy(&lock->write_cv); done: FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) -} /* end H5TS__rw_lock_print_stats() */ -#endif /* H5TS_ENABLE_REC_RW_LOCK_STATS */ - -/*-------------------------------------------------------------------------- - * Function: H5TS__rw_lock_init +} /* end H5TS_rwlock_destroy() */ +#else +#ifdef H5_HAVE_WIN_THREADS +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_init * - * Purpose: Initialize the supplied instance of H5TS_rw_lock_t. 
+ * Purpose: Initialize a H5TS_rwlock_t (does not allocate it) * - * Return: Non-negative on success / Negative on failure + * Return: Non-negative on success / Negative on failure * - *-------------------------------------------------------------------------- + *------------------------------------------------------------------------- */ herr_t -H5TS__rw_lock_init(H5TS_rw_lock_t *rw_lock) +H5TS_rwlock_init(H5TS_rwlock_t *lock) { herr_t ret_value = SUCCEED; - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + FUNC_ENTER_NOAPI_NAMECHECK_ONLY - if (H5_UNLIKELY(NULL == rw_lock)) + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) HGOTO_DONE(FAIL); -#ifdef H5_HAVE_WIN_THREADS - /* The current H5TS_rw_lock_t implementation uses H5TS_key_create() with a - * key destructor callback, which is not [currently] supported by Windows. - */ - HGOTO_DONE(FAIL); -#else - /* Initialize the lock */ - memset(rw_lock, 0, sizeof(*rw_lock)); - HDcompile_assert(H5TS_RW_LOCK_UNUSED == 0); - if (H5_UNLIKELY(H5TS_mutex_init(&rw_lock->mutex, H5TS_MUTEX_TYPE_PLAIN) < 0)) - HGOTO_DONE(FAIL); - if (H5_UNLIKELY(H5TS_cond_init(&rw_lock->writers_cv) < 0)) - HGOTO_DONE(FAIL); - if (H5_UNLIKELY(H5TS_cond_init(&rw_lock->readers_cv) < 0)) - HGOTO_DONE(FAIL); -#endif + InitializeSRWLock(lock); done: FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) -} /* end H5TS__rw_lock_init() */ +} /* end H5TS_rwlock_init() */ -/*-------------------------------------------------------------------------- - * Function: H5TS__rw_lock_destroy +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_destroy * - * Purpose: Take down an instance of H5TS_rw_lock_t. All mutex, condition - * variables, and keys are destroyed. However, the instance of - * H5TS_rw_lock_t is not freed. + * Purpose: Destroy a H5TS_rwlock_t (does not free it) * - * Return: Non-negative on success / Negative on failure + * Return: Non-negative on success / Negative on failure * - *-------------------------------------------------------------------------- + *------------------------------------------------------------------------- */ herr_t -H5TS__rw_lock_destroy(H5TS_rw_lock_t *rw_lock) +H5TS_rwlock_destroy(H5TS_rwlock_t *lock) { herr_t ret_value = SUCCEED; - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY + FUNC_ENTER_NOAPI_NAMECHECK_ONLY - if (H5_UNLIKELY(NULL == rw_lock)) + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) HGOTO_DONE(FAIL); - /* Call the appropriate destroy routines. We are committed - * to the destroy at this point, so call them all, even if one fails - * along the way. - */ - if (H5_UNLIKELY(H5TS_mutex_destroy(&rw_lock->mutex) < 0)) - ret_value = FAIL; - if (H5_UNLIKELY(H5TS_cond_destroy(&rw_lock->readers_cv) < 0)) - ret_value = FAIL; - if (H5_UNLIKELY(H5TS_cond_destroy(&rw_lock->writers_cv) < 0)) - ret_value = FAIL; - if (rw_lock->is_key_registered) - if (H5_UNLIKELY(H5TS_key_delete(rw_lock->rec_read_lock_count_key) < 0)) - ret_value = FAIL; + /* Destroy synchronization primitives */ + /* SRWLOCKs don't have to be destroyed */ done: FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) -} /* end H5TS__rw_lock_destroy() */ - -/*-------------------------------------------------------------------------- - * Function: H5TS__rw_rdlock +} /* end H5TS_rwlock_destroy() */ +#else +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_init * - * Purpose: Attempt to obtain a read lock on the associated recursive - * read / write lock. 
+ * Purpose: Initialize a H5TS_rwlock_t (does not allocate it) * - * Return: Non-negative on success / Negative on failure + * Return: Non-negative on success / Negative on failure * - *-------------------------------------------------------------------------- + *------------------------------------------------------------------------- */ herr_t -H5TS__rw_rdlock(H5TS_rw_lock_t *rw_lock) +H5TS_rwlock_init(H5TS_rwlock_t *lock) { - H5TS_rec_entry_count_t *count; - H5TS_thread_t my_thread = H5TS_thread_self(); - bool have_mutex = false; - herr_t ret_value = SUCCEED; - - FUNC_ENTER_PACKAGE_NAMECHECK_ONLY - - if (H5_UNLIKELY(NULL == rw_lock)) - HGOTO_DONE(FAIL); + herr_t ret_value = SUCCEED; - /* Acquire the mutex */ - if (H5_UNLIKELY(H5TS_mutex_lock(&rw_lock->mutex) < 0)) - HGOTO_DONE(FAIL); - have_mutex = true; + FUNC_ENTER_NOAPI_NAMECHECK_ONLY - /* Fail if attempting to acquire a read lock on a thread that holds - * a write lock - */ - if (H5_UNLIKELY(H5TS_RW_LOCK_WRITE == rw_lock->lock_type && - H5TS_thread_equal(my_thread, rw_lock->write_thread))) + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) HGOTO_DONE(FAIL); - /* If there is no thread-specific data for this thread, set it up */ - if (!rw_lock->is_key_registered) { - if (H5_UNLIKELY(H5TS_key_create(&rw_lock->rec_read_lock_count_key, free) < 0)) - HGOTO_DONE(FAIL); - rw_lock->is_key_registered = true; - count = NULL; - } - else if (H5_UNLIKELY(H5TS_key_get_value(rw_lock->rec_read_lock_count_key, (void **)&count) < 0)) + if (H5_UNLIKELY(pthread_rwlock_init(lock, NULL))) HGOTO_DONE(FAIL); - if (NULL == count) { - if (H5_UNLIKELY(NULL == (count = calloc(1, sizeof(*count))))) - HGOTO_DONE(FAIL); - if (H5_UNLIKELY(H5TS_key_set_value(rw_lock->rec_read_lock_count_key, (void *)count) < 0)) - HGOTO_DONE(FAIL); - } - - if (count->rec_lock_count > 0) { /* This is a recursive lock */ - assert(H5TS_RW_LOCK_READ == rw_lock->lock_type); - assert(rw_lock->reader_thread_count > 0 && rw_lock->rec_write_lock_count == 0); - } - else { /* This is an initial read lock request, on this thread */ - /* Readers defer to current or pending writers */ - if (H5TS_RW_LOCK_WRITE == rw_lock->lock_type) { -#if H5TS_ENABLE_REC_RW_LOCK_STATS - H5TS__update_stats_rd_lock_delay(rw_lock); -#endif - - do { - if (H5_UNLIKELY(H5TS_cond_wait(&rw_lock->readers_cv, &rw_lock->mutex) < 0)) - HGOTO_DONE(FAIL); - } while (H5TS_RW_LOCK_WRITE == rw_lock->lock_type); - } - - /* Set counter's lock type (which might already be set) & increment - * number of reader threads - */ - rw_lock->lock_type = H5TS_RW_LOCK_READ; - rw_lock->reader_thread_count++; - } - - /* Increment read lock count for this thread */ - count->rec_lock_count++; -#if H5TS_ENABLE_REC_RW_LOCK_STATS - H5TS__update_stats_rd_lock(rw_lock, count); -#endif done: - if (H5_LIKELY(have_mutex)) - if (H5_UNLIKELY(H5TS_mutex_unlock(&rw_lock->mutex) < 0)) - ret_value = FAIL; - FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) -} /* end H5TS__rw_rdlock() */ +} /* end H5TS_rwlock_init() */ -/*-------------------------------------------------------------------------- - * Function: H5TS__rw_wrlock +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_destroy * - * Purpose: Attempt to obtain a write lock on the associated recursive - * read / write lock. 
+ * Purpose: Destroy a H5TS_rwlock_t (does not free it) * - * Return: Non-negative on success / Negative on failure + * Return: Non-negative on success / Negative on failure * - *-------------------------------------------------------------------------- + *------------------------------------------------------------------------- */ herr_t -H5TS__rw_wrlock(H5TS_rw_lock_t *rw_lock) +H5TS_rwlock_destroy(H5TS_rwlock_t *lock) { - H5TS_thread_t my_thread = H5TS_thread_self(); - bool have_mutex = false; - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; FUNC_ENTER_NOAPI_NAMECHECK_ONLY - if (H5_UNLIKELY(NULL == rw_lock)) + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) HGOTO_DONE(FAIL); - /* Acquire the mutex */ - if (H5_UNLIKELY(H5TS_mutex_lock(&rw_lock->mutex) < 0)) + if (H5_UNLIKELY(pthread_rwlock_destroy(lock))) HGOTO_DONE(FAIL); - have_mutex = true; - - /* Check for initial write lock request on this thread */ - if (H5TS_RW_LOCK_WRITE != rw_lock->lock_type || !H5TS_thread_equal(my_thread, rw_lock->write_thread)) { - /* Fail if attempting to acquire a write lock on a thread that holds - * a read lock - */ - if (H5TS_RW_LOCK_READ == rw_lock->lock_type) { - H5TS_rec_entry_count_t *count; - - /* Sanity check */ - assert(rw_lock->is_key_registered); - - /* Fail if read lock count for this thread is > 0 */ - if (H5_UNLIKELY(H5TS_key_get_value(rw_lock->rec_read_lock_count_key, (void **)&count) < 0)) - HGOTO_DONE(FAIL); - if (H5_UNLIKELY(NULL != count && count->rec_lock_count > 0)) - HGOTO_DONE(FAIL); - } - - /* If lock is already held, wait to acquire it */ - if (H5TS_RW_LOCK_UNUSED != rw_lock->lock_type) { -#if H5TS_ENABLE_REC_RW_LOCK_STATS - H5TS__update_stats_wr_lock_delay(rw_lock); -#endif - - do { - int result; - - rw_lock->waiting_writers_count++; - result = H5TS_cond_wait(&rw_lock->writers_cv, &rw_lock->mutex); - rw_lock->waiting_writers_count--; - if (H5_UNLIKELY(result != 0)) - HGOTO_DONE(FAIL); - } while (H5TS_RW_LOCK_UNUSED != rw_lock->lock_type); - } - - /* Set lock type & owner thread */ - rw_lock->lock_type = H5TS_RW_LOCK_WRITE; - rw_lock->write_thread = my_thread; - } - - /* Increment write lock count for this thread */ - rw_lock->rec_write_lock_count++; -#if H5TS_ENABLE_REC_RW_LOCK_STATS - H5TS__update_stats_wr_lock(rw_lock); -#endif done: - if (H5_LIKELY(have_mutex)) - if (H5_UNLIKELY(H5TS_mutex_unlock(&rw_lock->mutex) < 0)) - ret_value = FAIL; - FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) -} /* end H5TS__rw_wrlock() */ - -/*-------------------------------------------------------------------------- - * Function: H5TS__rw_unlock - * - * Purpose: Attempt to unlock either a read or a write lock on a - * recursive read / write lock. - * - * Return: Non-negative on success / Negative on failure - * - *-------------------------------------------------------------------------- - */ -herr_t -H5TS__rw_unlock(H5TS_rw_lock_t *rw_lock) -{ - bool have_mutex = false; - herr_t ret_value = SUCCEED; - - FUNC_ENTER_NOAPI_NAMECHECK_ONLY - - if (H5_UNLIKELY(NULL == rw_lock)) - HGOTO_DONE(FAIL); - - /* Acquire the mutex */ - if (H5_UNLIKELY(H5TS_mutex_lock(&rw_lock->mutex) < 0)) - HGOTO_DONE(FAIL); - have_mutex = true; - - /* Error check */ - if (H5_UNLIKELY(H5TS_RW_LOCK_UNUSED == rw_lock->lock_type)) /* Unlocking an unused lock? 
*/ - HGOTO_DONE(FAIL); - - if (H5TS_RW_LOCK_WRITE == rw_lock->lock_type) { /* Drop a write lock */ - /* Sanity checks */ - assert(0 == rw_lock->reader_thread_count); - assert(rw_lock->rec_write_lock_count > 0); - - /* Decrement recursive lock count */ - rw_lock->rec_write_lock_count--; -#if H5TS_ENABLE_REC_RW_LOCK_STATS - H5TS__update_stats_wr_unlock(rw_lock); +} /* end H5TS_rwlock_destroy() */ #endif - - /* Check if lock is unused now */ - if (0 == rw_lock->rec_write_lock_count) - rw_lock->lock_type = H5TS_RW_LOCK_UNUSED; - } - else { /* Drop a read lock */ - H5TS_rec_entry_count_t *count; - - /* Sanity and error checks */ - assert(rw_lock->is_key_registered); - assert(rw_lock->reader_thread_count > 0); - assert(0 == rw_lock->rec_write_lock_count); - if (H5_UNLIKELY(H5TS_key_get_value(rw_lock->rec_read_lock_count_key, (void **)&count) < 0)) - HGOTO_DONE(FAIL); - if (H5_UNLIKELY(NULL == count)) - HGOTO_DONE(FAIL); - assert(count->rec_lock_count > 0); - - /* Decrement recursive lock count for this thread */ - count->rec_lock_count--; -#if H5TS_ENABLE_REC_RW_LOCK_STATS - H5TS__update_stats_rd_unlock(rw_lock, count); #endif - /* Check if this thread is releasing its last read lock */ - if (0 == count->rec_lock_count) { - /* Decrement the # of threads with a read lock */ - rw_lock->reader_thread_count--; - - /* Check if lock is unused now */ - if (0 == rw_lock->reader_thread_count) - rw_lock->lock_type = H5TS_RW_LOCK_UNUSED; - } - } - - /* Signal condition variable if lock is unused now */ - if (H5TS_RW_LOCK_UNUSED == rw_lock->lock_type) { - /* Prioritize pending writers if there are any */ - if (rw_lock->waiting_writers_count > 0) { - if (H5_UNLIKELY(H5TS_cond_signal(&rw_lock->writers_cv) < 0)) - HGOTO_DONE(FAIL); - } - else { - if (H5_UNLIKELY(H5TS_cond_broadcast(&rw_lock->readers_cv) < 0)) - HGOTO_DONE(FAIL); - } - } - -done: - if (H5_LIKELY(have_mutex)) - if (H5_UNLIKELY(H5TS_mutex_unlock(&rw_lock->mutex) < 0)) - ret_value = FAIL; - - FUNC_LEAVE_NOAPI_NAMECHECK_ONLY(ret_value) -} /* end H5TS__rw_unlock() */ - #endif /* H5_HAVE_THREADS */ diff --git a/src/H5TSrwlock.h b/src/H5TSrwlock.h new file mode 100644 index 00000000000..16c36d46e5e --- /dev/null +++ b/src/H5TSrwlock.h @@ -0,0 +1,405 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Purpose: This file contains support for non-recursive R/W locks, equivalent + * to the pthread 'pthread_rwlock_t' type and capabilities. + * + * Note: Because this threadsafety framework operates outside the library, + * it does not use the error stack (although it does use error macros + * that don't push errors on a stack) and only uses the "namecheck only" + * FUNC_ENTER_* / FUNC_LEAVE_* macros. 
+ */ + +/****************/ +/* Module Setup */ +/****************/ + +/***********/ +/* Headers */ +/***********/ + +/****************/ +/* Local Macros */ +/****************/ + +/******************/ +/* Local Typedefs */ +/******************/ + +/********************/ +/* Local Prototypes */ +/********************/ + +/*********************/ +/* Package Variables */ +/*********************/ + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/*******************/ +/* Local Variables */ +/*******************/ + +#ifdef H5_HAVE_C11_THREADS +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_rdlock + * + * Purpose: Acquire a read lock + * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +static inline herr_t +H5TS_rwlock_rdlock(H5TS_rwlock_t *lock) +{ + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) + return FAIL; + + /* Acquire the lock's mutex */ + if (H5_UNLIKELY(mtx_lock(&lock->mutex) != thrd_success)) + return FAIL; + + /* Check for writers */ + if (lock->writers || lock->write_waiters) { + /* Read waiting */ + lock->read_waiters++; + + /* Wait for writers */ + do { + if (H5_UNLIKELY(thrd_success != cnd_wait(&lock->read_cv, &lock->mutex))) { + mtx_unlock(&lock->mutex); + return FAIL; + } + } while (lock->writers || lock->write_waiters); + + /* Read not waiting any longer */ + lock->read_waiters--; + } + + /* Increment # of readers */ + lock->readers++; + + /* Release mutex */ + if (H5_UNLIKELY(mtx_unlock(&lock->mutex) != thrd_success)) + return FAIL; + + return SUCCEED; +} /* end H5TS_rwlock_rdlock() */ + +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_rdunlock + * + * Purpose: Release a read lock + * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +static inline herr_t +H5TS_rwlock_rdunlock(H5TS_rwlock_t *lock) +{ + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) + return FAIL; + + /* Acquire the lock's mutex */ + if (H5_UNLIKELY(mtx_lock(&lock->mutex) != thrd_success)) + return FAIL; + + /* Decrement # of readers */ + lock->readers--; + + /* Check for waiting writers when last readers */ + if (lock->write_waiters && 0 == lock->readers) + if (H5_UNLIKELY(cnd_signal(&lock->write_cv) != thrd_success)) { + mtx_unlock(&lock->mutex); + return FAIL; + } + + /* Release mutex */ + if (H5_UNLIKELY(mtx_unlock(&lock->mutex) != thrd_success)) + return FAIL; + + return SUCCEED; +} /* end H5TS_rwlock_rdunlock() */ + +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_wrlock + * + * Purpose: Acquire a write lock + * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +static inline herr_t +H5TS_rwlock_wrlock(H5TS_rwlock_t *lock) +{ + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) + return FAIL; + + /* Acquire the lock's mutex */ + if (H5_UNLIKELY(mtx_lock(&lock->mutex) != thrd_success)) + return FAIL; + + /* Check for readers or other writers */ + if (lock->readers || lock->writers) { + /* Write waiting */ + lock->write_waiters++; + + /* Wait for mutex */ + do { + if (H5_UNLIKELY(thrd_success != cnd_wait(&lock->write_cv, &lock->mutex))) { + mtx_unlock(&lock->mutex); + return FAIL; + } + } while 
(lock->readers || lock->writers); + + /* Write not waiting any longer */ + lock->write_waiters--; + } + + /* Increment # of writers */ + lock->writers++; + + /* Release mutex */ + if (H5_UNLIKELY(mtx_unlock(&lock->mutex) != thrd_success)) + return FAIL; + + return SUCCEED; +} /* end H5TS_rwlock_wrlock() */ + +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_wrunlock + * + * Purpose: Release a write lock + * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +static inline herr_t +H5TS_rwlock_wrunlock(H5TS_rwlock_t *lock) +{ + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) + return FAIL; + + /* Acquire the lock's mutex */ + if (H5_UNLIKELY(mtx_lock(&lock->mutex) != thrd_success)) + return FAIL; + + /* Decrement # of writers */ + lock->writers--; + + /* Check for waiting writers */ + if (lock->write_waiters) { + if (H5_UNLIKELY(cnd_signal(&lock->write_cv) != thrd_success)) { + mtx_unlock(&lock->mutex); + return FAIL; + } + } + else if (lock->read_waiters) + if (H5_UNLIKELY(cnd_broadcast(&lock->read_cv) != thrd_success)) { + mtx_unlock(&lock->mutex); + return FAIL; + } + + /* Release mutex */ + if (H5_UNLIKELY(mtx_unlock(&lock->mutex) != thrd_success)) + return FAIL; + + return SUCCEED; +} /* end H5TS_rwlock_wrunlock() */ + +#else +#ifdef H5_HAVE_WIN_THREADS +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_rdlock + * + * Purpose: Acquire a read lock + * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +static inline herr_t +H5TS_rwlock_rdlock(H5TS_rwlock_t *lock) +{ + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) + return FAIL; + + AcquireSRWLockShared(lock); + + return SUCCEED; +} /* end H5TS_rwlock_rdlock() */ + +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_rdunlock + * + * Purpose: Release a read lock + * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +static inline herr_t +H5TS_rwlock_rdunlock(H5TS_rwlock_t *lock) +{ + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) + return FAIL; + + ReleaseSRWLockShared(lock); + + return SUCCEED; +} /* end H5TS_rwlock_rdunlock() */ + +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_wrlock + * + * Purpose: Acquire a write lock + * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +static inline herr_t +H5TS_rwlock_wrlock(H5TS_rwlock_t *lock) +{ + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) + return FAIL; + + AcquireSRWLockExclusive(lock); + + return SUCCEED; +} /* end H5TS_rwlock_wrlock() */ + +/*------------------------------------------------------------------------- + * Function: H5TS_rwlock_wrunlock + * + * Purpose: Release a write lock + * + * Return: Non-negative on success / Negative on failure + * + *------------------------------------------------------------------------- + */ +static inline herr_t +H5TS_rwlock_wrunlock(H5TS_rwlock_t *lock) +{ + /* Check argument */ + if (H5_UNLIKELY(NULL == lock)) + return FAIL; + + ReleaseSRWLockExclusive(lock); + + return SUCCEED; +} /* end H5TS_rwlock_wrunlock() */ + +#else 
+/*-------------------------------------------------------------------------
+ * Function: H5TS_rwlock_rdlock
+ *
+ * Purpose: Acquire a read lock
+ *
+ * Return: Non-negative on success / Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static inline herr_t
+H5TS_rwlock_rdlock(H5TS_rwlock_t *lock)
+{
+ /* Check argument */
+ if (H5_UNLIKELY(NULL == lock))
+ return FAIL;
+
+ if (H5_UNLIKELY(pthread_rwlock_rdlock(lock)))
+ return FAIL;
+
+ return SUCCEED;
+} /* end H5TS_rwlock_rdlock() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5TS_rwlock_rdunlock
+ *
+ * Purpose: Release a read lock
+ *
+ * Return: Non-negative on success / Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static inline herr_t
+H5TS_rwlock_rdunlock(H5TS_rwlock_t *lock)
+{
+ /* Check argument */
+ if (H5_UNLIKELY(NULL == lock))
+ return FAIL;
+
+ if (H5_UNLIKELY(pthread_rwlock_unlock(lock)))
+ return FAIL;
+
+ return SUCCEED;
+} /* end H5TS_rwlock_rdunlock() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5TS_rwlock_wrlock
+ *
+ * Purpose: Acquire a write lock
+ *
+ * Return: Non-negative on success / Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static inline herr_t
+H5TS_rwlock_wrlock(H5TS_rwlock_t *lock)
+{
+ /* Check argument */
+ if (H5_UNLIKELY(NULL == lock))
+ return FAIL;
+
+ if (H5_UNLIKELY(pthread_rwlock_wrlock(lock)))
+ return FAIL;
+
+ return SUCCEED;
+} /* end H5TS_rwlock_wrlock() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5TS_rwlock_wrunlock
+ *
+ * Purpose: Release a write lock
+ *
+ * Return: Non-negative on success / Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static inline herr_t
+H5TS_rwlock_wrunlock(H5TS_rwlock_t *lock)
+{
+ /* Check argument */
+ if (H5_UNLIKELY(NULL == lock))
+ return FAIL;
+
+ if (H5_UNLIKELY(pthread_rwlock_unlock(lock)))
+ return FAIL;
+
+ return SUCCEED;
+} /* end H5TS_rwlock_wrunlock() */
+#endif
+#endif
diff --git a/src/H5TSsemaphore.h b/src/H5TSsemaphore.h
index 0db1a888809..340b1cf9a11 100644
--- a/src/H5TSsemaphore.h
+++ b/src/H5TSsemaphore.h
@@ -179,7 +179,7 @@ H5TS_semaphore_wait(H5TS_semaphore_t *sem)
 *
 * Purpose: Increments (unlocks) the semaphore. If the semaphore's value
 * becomes greater than zero, then another thread blocked in a wait
- * call will be woken up and proceed to lock the semaphore.
+ * call will proceed to lock the semaphore.
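As an illustrative sketch (not part of the patch): one way the non-recursive H5TS_rwlock_* API collected in H5TSrwlock.h above might be used. The lock variable and the two wrapper functions are hypothetical, and the sketch assumes the declarations are visible through HDF5's internal H5TS headers, which are not shown here.

/* Illustrative sketch only; assumes the H5TS_rwlock_* declarations above are
 * in scope and that SUCCEED / FAIL follow the usual library conventions.
 */
static H5TS_rwlock_t counters_lock_g; /* hypothetical lock shared by the two routines */

static herr_t
read_counters(void)
{
    if (H5TS_rwlock_rdlock(&counters_lock_g) < 0)
        return FAIL;
    /* ... read shared state; any number of readers may hold the lock at once ... */
    return H5TS_rwlock_rdunlock(&counters_lock_g);
}

static herr_t
update_counters(void)
{
    if (H5TS_rwlock_wrlock(&counters_lock_g) < 0)
        return FAIL;
    /* ... modify shared state; a writer holds the lock exclusively ... */
    return H5TS_rwlock_wrunlock(&counters_lock_g);
}

/* The lock itself is neither allocated nor freed by the API:
 *     H5TS_rwlock_init(&counters_lock_g);      ... threads call the routines above ...
 *     H5TS_rwlock_destroy(&counters_lock_g);
 */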
* * Return: Non-negative on success / Negative on failure * diff --git a/src/H5TSwin.c b/src/H5TSwin.c index 4eca38e34db..913dbff0099 100644 --- a/src/H5TSwin.c +++ b/src/H5TSwin.c @@ -45,10 +45,12 @@ /********************/ /* Local Prototypes */ /********************/ +#ifdef H5_HAVE_THREADSAFE #if defined(H5_BUILT_AS_DYNAMIC_LIB) && defined(H5_HAVE_WIN32_API) static herr_t H5TS__win32_thread_enter(void); static herr_t H5TS__win32_thread_exit(void); #endif +#endif /*********************/ /* Package Variables */ diff --git a/src/H5Tcommit.c b/src/H5Tcommit.c index d64c4e82439..92853c63058 100644 --- a/src/H5Tcommit.c +++ b/src/H5Tcommit.c @@ -349,7 +349,7 @@ H5Tcommit_anon(hid_t loc_id, hid_t type_id, hid_t tcpl_id, hid_t tapl_id) loc_params.obj_type = H5I_get_type(loc_id); /* Get the file object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier"); /* Commit the datatype */ diff --git a/src/H5Tdeprec.c b/src/H5Tdeprec.c index cc998346cb4..3483597346e 100644 --- a/src/H5Tdeprec.c +++ b/src/H5Tdeprec.c @@ -116,7 +116,7 @@ H5Tcommit1(hid_t loc_id, const char *name, hid_t type_id) loc_params.obj_type = H5I_get_type(loc_id); /* get the object from the loc_id */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid object identifier"); /* Commit the datatype */ @@ -167,7 +167,7 @@ H5Topen1(hid_t loc_id, const char *name) loc_params.obj_type = H5I_get_type(loc_id); /* Get the location object */ - if (NULL == (vol_obj = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid location identifier"); /* Open the datatype */ diff --git a/src/H5Tmodule.h b/src/H5Tmodule.h index fd2a278fd22..3e121469108 100644 --- a/src/H5Tmodule.h +++ b/src/H5Tmodule.h @@ -304,7 +304,7 @@ * *
    - * @see H5R + * @see @ref H5R *
    * - * Code for a compound datatype nested in a compound datatype + * TText for a compound datatype nested in a compound datatype * \code * typedef struct { * complex_t x; @@ -2164,6 +2164,7 @@ filled according to the value of this property. The padding can be: * \endcode * * The example below shows the content of the file written on a little-endian machine. + * * Create and write a little-endian dataset with a compound datatype in C * \code * HDF5 “SDScompound.h5” { @@ -2248,6 +2249,7 @@ filled according to the value of this property. The padding can be: * * The figure below shows the content of the file written on a little-endian machine. Only float and * double fields are written. The default fill value is used to initialize the unwritten integer field. + * * Writing floats and doubles to a dataset on a little-endian system * \code * HDF5 “SDScompound.h5” { @@ -2285,6 +2287,7 @@ filled according to the value of this property. The padding can be: * compound datatype. As this example illustrates, writing and reading compound datatypes in * Fortran is always done by fields. The content of the written file is the same as shown in the * example above. + * * Create and write a dataset with a compound datatype in Fortran * \code * ! One cannot write an array of a derived datatype in @@ -2921,6 +2924,7 @@ filled according to the value of this property. The padding can be: * declaration of a datatype of type #H5T_C_S1 which is set to #H5T_VARIABLE. The HDF5 * Library automatically translates between this and the vl_t structure. Note: the #H5T_VARIABLE * size can only be used with string datatypes. + * * Set the string datatype size to H5T_VARIABLE * \code * tid1 = H5Tcopy (H5T_C_S1); @@ -2929,6 +2933,7 @@ filled according to the value of this property. The padding can be: * * Variable-length strings can be read into C strings (in other words, pointers to zero terminated * arrays of char). See the example below. + * * Read variable-length strings into C strings * \code * char *rdata[SPACE1_DIM1]; @@ -3053,6 +3058,7 @@ filled according to the value of this property. The padding can be: * would be as an array of integers. The example below shows an example of how to create an * enumeration with five elements. The elements map symbolic names to 2-byte integers. See the * table below. + * * Create an enumeration with five elements * \code * hid_t hdf_en_colors; @@ -3582,6 +3588,7 @@ filled according to the value of this property. The padding can be: * * To create two or more datasets that share a common datatype, first commit the datatype, and then * use that datatype to create the datasets. See the example below. + * * Create a shareable datatype * \code * hid_t t1 = ...some transient type...; @@ -3697,6 +3704,7 @@ filled according to the value of this property. The padding can be: * memory. The destination datatype must be specified in the #H5Dread call. The example below * shows an example of reading a dataset of 32-bit integers. The figure below the example shows * the data transformation that is performed. + * * Specify the destination datatype with H5Dread * \code * // Stored as H5T_STD_BE32 @@ -3797,6 +3805,7 @@ filled according to the value of this property. The padding can be: * The currently supported text format used by #H5LTtext_to_dtype and #H5LTdtype_to_text is the * data description language (DDL) and conforms to the \ref DDLBNF114. The portion of the * \ref DDLBNF114 that defines HDF5 datatypes appears below. 
+ * * The definition of HDF5 datatypes from the HDF5 DDL * \code * ::= | | | @@ -4006,8 +4015,8 @@ filled according to the value of this property. The padding can be: * component, they have a C-like type name. * \li If the type begins with \c U then it is the unsigned version of * the integer type; other integer types are signed. - * \li The datatype \c LLONG corresponds C's \Code{long long} and - * \c LDOUBLE is \Code{long double}. These types might be the same + * \li The datatype \c LLONG corresponds to C's \TText{long long} and + * \c LDOUBLE is \TText{long double}. These types might be the same * as \c LONG and \c DOUBLE, respectively. *
    * \snippet{doc} tables/predefinedDatatypes.dox predefined_native_datatypes_table diff --git a/src/H5Tpublic.h b/src/H5Tpublic.h index 83761af2f58..57a5b6047c3 100644 --- a/src/H5Tpublic.h +++ b/src/H5Tpublic.h @@ -52,7 +52,7 @@ typedef enum H5T_class_t { typedef enum H5T_order_t { H5T_ORDER_ERROR = -1, /**< error */ H5T_ORDER_LE = 0, /**< little endian */ - H5T_ORDER_BE = 1, /**< bit endian */ + H5T_ORDER_BE = 1, /**< big endian */ H5T_ORDER_VAX = 2, /**< VAX mixed endian */ H5T_ORDER_MIXED = 3, /**< Compound type with mixed member orders */ H5T_ORDER_NONE = 4 /**< no particular order (strings, bits,..) */ @@ -755,72 +755,72 @@ H5_DLLVAR hid_t H5T_VAX_F64_g; #define H5T_NATIVE_CHAR (CHAR_MIN ? H5T_NATIVE_SCHAR : H5T_NATIVE_UCHAR) /** * \ingroup PDTNAT - * C-style \Code{signed char} + * C-style \TText{signed char} */ #define H5T_NATIVE_SCHAR (H5OPEN H5T_NATIVE_SCHAR_g) /** * \ingroup PDTNAT - * C-style \Code{unsigned char} + * C-style \TText{unsigned char} */ #define H5T_NATIVE_UCHAR (H5OPEN H5T_NATIVE_UCHAR_g) /** * \ingroup PDTNAT - * C-style \Code{short} + * C-style \TText{short} */ #define H5T_NATIVE_SHORT (H5OPEN H5T_NATIVE_SHORT_g) /** * \ingroup PDTNAT - * C-style \Code{unsigned short} + * C-style \TText{unsigned short} */ #define H5T_NATIVE_USHORT (H5OPEN H5T_NATIVE_USHORT_g) /** * \ingroup PDTNAT - * C-style \Code{int} + * C-style \TText{int} */ #define H5T_NATIVE_INT (H5OPEN H5T_NATIVE_INT_g) /** * \ingroup PDTNAT - * C-style \Code{unsigned int} + * C-style \TText{unsigned int} */ #define H5T_NATIVE_UINT (H5OPEN H5T_NATIVE_UINT_g) /** * \ingroup PDTNAT - * C-style \Code{long} + * C-style \TText{long} */ #define H5T_NATIVE_LONG (H5OPEN H5T_NATIVE_LONG_g) /** * \ingroup PDTNAT - * C-style \Code{unsigned long} + * C-style \TText{unsigned long} */ #define H5T_NATIVE_ULONG (H5OPEN H5T_NATIVE_ULONG_g) /** * \ingroup PDTNAT - * C-style \Code{long long} + * C-style \TText{long long} */ #define H5T_NATIVE_LLONG (H5OPEN H5T_NATIVE_LLONG_g) /** * \ingroup PDTNAT - * C-style \Code{unsigned long long} + * C-style \TText{unsigned long long} */ #define H5T_NATIVE_ULLONG (H5OPEN H5T_NATIVE_ULLONG_g) /** * \ingroup PDTNAT - * C-style \Code{_Float16} + * C-style \TText{_Float16} */ #define H5T_NATIVE_FLOAT16 (H5OPEN H5T_NATIVE_FLOAT16_g) /** * \ingroup PDTNAT - * C-style \Code{float} + * C-style \TText{float} */ #define H5T_NATIVE_FLOAT (H5OPEN H5T_NATIVE_FLOAT_g) /** * \ingroup PDTNAT - * C-style \Code{double} + * C-style \TText{double} */ #define H5T_NATIVE_DOUBLE (H5OPEN H5T_NATIVE_DOUBLE_g) /** * \ingroup PDTNAT - * C-style \Code{long double} + * C-style \TText{long double} */ #define H5T_NATIVE_LDOUBLE (H5OPEN H5T_NATIVE_LDOUBLE_g) /** diff --git a/src/H5VLcallback.c b/src/H5VLcallback.c index 1662776ef9c..0e696088ebf 100644 --- a/src/H5VLcallback.c +++ b/src/H5VLcallback.c @@ -1744,18 +1744,28 @@ H5VL__attr_close(void *obj, const H5VL_class_t *cls, hid_t dxpl_id, void **req) herr_t H5VL_attr_close(const H5VL_object_t *vol_obj, hid_t dxpl_id, void **req) { - herr_t ret_value = SUCCEED; /* Return value */ + bool vol_wrapper_set = false; /* Whether the VOL object wrapping context was set up */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) /* Sanity check */ assert(vol_obj); + /* Set wrapper info in API context */ + if (H5VL_set_vol_wrapper(vol_obj) < 0) + HGOTO_ERROR(H5E_VOL, H5E_CANTSET, FAIL, "can't set VOL wrapper info"); + vol_wrapper_set = true; + /* Call the corresponding internal VOL routine */ if (H5VL__attr_close(vol_obj->data, vol_obj->connector->cls, 
dxpl_id, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTCLOSEOBJ, FAIL, "attribute close failed"); done: + /* Reset object wrapping info in API context */ + if (vol_wrapper_set && H5VL_reset_vol_wrapper() < 0) + HDONE_ERROR(H5E_VOL, H5E_CANTRESET, FAIL, "can't reset VOL wrapper info"); + FUNC_LEAVE_NOAPI(ret_value) } /* end H5VL_attr_close() */ @@ -5090,6 +5100,10 @@ H5VL_link_move(const H5VL_object_t *src_vol_obj, const H5VL_loc_params_t *loc_pa FUNC_ENTER_NOAPI(FAIL) + /* Sanity check */ + assert(src_vol_obj); + assert(src_vol_obj->data); + /* Set wrapper info in API context */ vol_obj = (src_vol_obj->data ? src_vol_obj : dst_vol_obj); if (H5VL_set_vol_wrapper(vol_obj) < 0) diff --git a/src/H5VLint.c b/src/H5VLint.c index a40d64e3ac4..19a11e9b6b8 100644 --- a/src/H5VLint.c +++ b/src/H5VLint.c @@ -1761,6 +1761,41 @@ H5VL_vol_object(hid_t id) FUNC_LEAVE_NOAPI(ret_value) } /* end H5VL_vol_object() */ +/*------------------------------------------------------------------------- + * Function: H5VL_vol_object_verify + * + * Purpose: Utility function to return the object pointer associated with + * an ID of the specified type. This routine is the same as + * H5VL_vol_object except it takes the additional argument + * obj_type to verify the ID's type against. + * + * Return: Success: object pointer + * Failure: NULL + * + *------------------------------------------------------------------------- + */ +H5VL_object_t * +H5VL_vol_object_verify(hid_t id, H5I_type_t obj_type) +{ + void *obj = NULL; + H5VL_object_t *ret_value = NULL; + + FUNC_ENTER_NOAPI(NULL) + + if (NULL == (obj = H5I_object_verify(id, obj_type))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "identifier is not of specified type"); + + /* If this is a datatype, get the VOL object attached to the H5T_t struct */ + if (H5I_DATATYPE == obj_type) + if (NULL == (obj = H5T_get_named_type((H5T_t *)obj))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a named datatype"); + + ret_value = (H5VL_object_t *)obj; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} + /*------------------------------------------------------------------------- * Function: H5VL_object_data * diff --git a/src/H5VLmodule.h b/src/H5VLmodule.h index 4e998a1783c..535027325fe 100644 --- a/src/H5VLmodule.h +++ b/src/H5VLmodule.h @@ -83,7 +83,7 @@ * to be much more common than internal implementations. 
* * A list of VOL connectors can be found here: - * + * * Registered VOL Connectors * * This list is incomplete and only includes the VOL connectors that have been registered with diff --git a/src/H5VLnative.c b/src/H5VLnative.c index ceee7f16c43..6f6b2d0768d 100644 --- a/src/H5VLnative.c +++ b/src/H5VLnative.c @@ -393,7 +393,7 @@ H5VLnative_addr_to_token(hid_t loc_id, haddr_t addr, H5O_token_t *token) bool is_native_vol_obj; /* Get the location object */ - if (NULL == (vol_obj_container = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj_container = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Make sure that the VOL object is a native connector object */ @@ -486,7 +486,7 @@ H5VLnative_token_to_addr(hid_t loc_id, H5O_token_t token, haddr_t *addr) bool is_native_vol_obj; /* Get the location object */ - if (NULL == (vol_obj_container = (H5VL_object_t *)H5I_object(loc_id))) + if (NULL == (vol_obj_container = H5VL_vol_object(loc_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid location identifier"); /* Make sure that the VOL object is a native connector object */ diff --git a/src/H5VLpassthru.c b/src/H5VLpassthru.c index 09ac6617e09..df13afc2513 100644 --- a/src/H5VLpassthru.c +++ b/src/H5VLpassthru.c @@ -2686,7 +2686,7 @@ H5VL_pass_through_request_wait(void *obj, uint64_t timeout, H5VL_request_status_ ret_value = H5VLrequest_wait(o->under_object, o->under_vol_id, timeout, status); - if (ret_value >= 0 && *status != H5ES_STATUS_IN_PROGRESS) + if (ret_value >= 0 && *status != H5VL_REQUEST_STATUS_IN_PROGRESS) H5VL_pass_through_free_obj(o); return ret_value; diff --git a/src/H5VLprivate.h b/src/H5VLprivate.h index 79ed93c03ba..2904a5b1c15 100644 --- a/src/H5VLprivate.h +++ b/src/H5VLprivate.h @@ -96,6 +96,7 @@ H5_DLL void *H5VL_object_data(const H5VL_object_t *vol_obj); H5_DLL void *H5VL_object_unwrap(const H5VL_object_t *vol_obj); H5_DLL void *H5VL_object_verify(hid_t id, H5I_type_t obj_type); H5_DLL H5VL_object_t *H5VL_vol_object(hid_t id); +H5_DLL H5VL_object_t *H5VL_vol_object_verify(hid_t id, H5I_type_t obj_type); H5_DLL H5VL_object_t *H5VL_create_object(void *object, H5VL_t *vol_connector); H5_DLL H5VL_object_t *H5VL_create_object_using_vol_id(H5I_type_t type, void *obj, hid_t connector_id); H5_DLL hsize_t H5VL_object_inc_rc(H5VL_object_t *obj); diff --git a/src/H5module.h b/src/H5module.h index a7aa05a0644..a0f1af77363 100644 --- a/src/H5module.h +++ b/src/H5module.h @@ -28,6 +28,7 @@ /** \page H5DM_UG HDF5 Data Model and File Structure * * \section sec_data_model The HDF5 Data Model and File Structure + * * \subsection subsec_data_model_intro Introduction * The Hierarchical Data Format (HDF) implements a model for managing and storing data. The * model includes an abstract data model and an abstract storage model (the data format), and @@ -100,8 +101,11 @@ * model, and stored in a storage medium. The stored objects include header blocks, free lists, data * blocks, B-trees, and other objects. Each group or dataset is stored as one or more header and data * blocks. - * @see HDF5 File Format Specification - * for more information on how these objects are organized. The HDF5 library can also use other + * + * For more information on how these objects are organized; + * see HDF5 File Format Specification + * + * The HDF5 library can also use other * libraries and modules such as compression. * * @@ -778,7 +782,7 @@ * item must be closed separately. 
* * For more information, - * @see Using Identifiers + * @see Using Identifiers * in the HDF5 Application Developer's Guide under General Topics in HDF5. * *

    How Closing a File Affects Other Open Structural Elements
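As an illustrative sketch of the behavior this table describes (not part of the patch): with the library's default file close degree, closing a file identifier does not invalidate other identifiers that are still open in that file, and each identifier must be released separately. The file name, object name, and variable names below are hypothetical, and error checking is omitted for brevity.

hid_t file, space, dset;
int   value = 42;

file  = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
space = H5Screate(H5S_SCALAR);
dset  = H5Dcreate2(file, "dset", H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

/* Closing the file identifier does not close the dataset or dataspace identifiers */
H5Fclose(file);

/* The dataset identifier remains usable; the file is only truly closed once the
 * last identifier referring to an object in it has been released.
 */
H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &value);
H5Dclose(dset);
H5Sclose(space);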

    diff --git a/src/H5private.h b/src/H5private.h index ab005067668..9950d0ccadc 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -1129,34 +1129,34 @@ H5_DLL herr_t H5_trace_args(struct H5RS_str_t *rs, const char *type, va_list ap) * Handles H5XY_. */ #define H5_IS_API(S) \ - ('_' != ((const char *)S)[2] /* underscore at position 2 */ \ - && '_' != ((const char *)S)[3] /* underscore at position 3 */ \ - && !( /* NOT */ \ - ((const char *)S)[4] /* pos 4 exists */ \ - && (isupper(S[3]) || isdigit(S[3])) /* pos 3 dig | uc */ \ - && '_' == ((const char *)S)[4] /* pos 4 underscore */ \ + ('_' != ((const char *)S)[2] /* underscore at position 2 */ \ + && '_' != ((const char *)S)[3] /* underscore at position 3 */ \ + && !( /* NOT */ \ + ((const char *)S)[4] /* pos 4 exists */ \ + && (isupper((int)S[3]) || isdigit((int)S[3])) /* pos 3 dig | uc */ \ + && '_' == ((const char *)S)[4] /* pos 4 underscore */ \ )) /* `S' is the name of a function which is being tested to check if it's */ /* a public API function */ #define H5_IS_PUB(S) \ - (((isdigit(S[1]) || isupper(S[1])) && islower(S[2])) || \ - ((isdigit(S[2]) || isupper(S[2])) && islower(S[3])) || \ - (!S[4] || ((isdigit(S[3]) || isupper(S[3])) && islower(S[4])))) + (((isdigit((int)S[1]) || isupper((int)S[1])) && islower((int)S[2])) || \ + ((isdigit((int)S[2]) || isupper((int)S[2])) && islower((int)S[3])) || \ + (!S[4] || ((isdigit((int)S[3]) || isupper((int)S[3])) && islower((int)S[4])))) /* `S' is the name of a function which is being tested to check if it's */ /* a private library function */ #define H5_IS_PRIV(S) \ - (((isdigit(S[1]) || isupper(S[1])) && '_' == S[2] && islower(S[3])) || \ - ((isdigit(S[2]) || isupper(S[2])) && '_' == S[3] && islower(S[4])) || \ - ((isdigit(S[3]) || isupper(S[3])) && '_' == S[4] && islower(S[5]))) + (((isdigit((int)S[1]) || isupper((int)S[1])) && '_' == S[2] && islower((int)S[3])) || \ + ((isdigit((int)S[2]) || isupper((int)S[2])) && '_' == S[3] && islower((int)S[4])) || \ + ((isdigit((int)S[3]) || isupper((int)S[3])) && '_' == S[4] && islower((int)S[5]))) /* `S' is the name of a function which is being tested to check if it's */ /* a package private function */ #define H5_IS_PKG(S) \ - (((isdigit(S[1]) || isupper(S[1])) && '_' == S[2] && '_' == S[3] && islower(S[4])) || \ - ((isdigit(S[2]) || isupper(S[2])) && '_' == S[3] && '_' == S[4] && islower(S[5])) || \ - ((isdigit(S[3]) || isupper(S[3])) && '_' == S[4] && '_' == S[5] && islower(S[6]))) + (((isdigit((int)S[1]) || isupper((int)S[1])) && '_' == S[2] && '_' == S[3] && islower((int)S[4])) || \ + ((isdigit((int)S[2]) || isupper((int)S[2])) && '_' == S[3] && '_' == S[4] && islower((int)S[5])) || \ + ((isdigit((int)S[3]) || isupper((int)S[3])) && '_' == S[4] && '_' == S[5] && islower((int)S[6]))) /* global library version information string */ extern char H5_lib_vers_info_g[]; diff --git a/src/H5public.h b/src/H5public.h index 7551e088ffe..9dd18cd5ff7 100644 --- a/src/H5public.h +++ b/src/H5public.h @@ -443,7 +443,7 @@ extern "C" { * \details H5open() initializes the HDF5 library. * * \details When the HDF5 library is used in a C application, the library is - * automatically initialized when the first HDf5 function call is + * automatically initialized when the first HDF5 function call is * issued. If one finds that an HDF5 library function is failing * inexplicably, H5open() can be called first. 
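A minimal sketch of the explicit-initialization pattern described here (illustrative only; error handling is abbreviated):

#include "hdf5.h"

int
main(void)
{
    /* The first HDF5 call normally initializes the library automatically;
     * calling H5open() up front makes the initialization point explicit
     * when diagnosing an otherwise inexplicable early failure.
     */
    if (H5open() < 0)
        return 1;

    /* ... other HDF5 API calls ... */

    return (H5close() < 0) ? 1 : 0;
}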
It is safe to call * H5open() before an application issues any other function calls to @@ -651,7 +651,7 @@ H5_DLL herr_t H5get_libversion(unsigned *majnum, unsigned *minnum, unsigned *rel * example: * * An official HDF5 release is labelled as follows: - * HDF5 Release \Code{\.\.\}\n + * HDF5 Release \TText{\.\.\}\n * For example, in HDF5 Release 1.8.5: * \li 1 is the major version number, \p majnum. * \li 8 is the minor version number, \p minnum. @@ -835,15 +835,15 @@ H5_DLL void *H5allocate_memory(size_t size, hbool_t clear); * This function is intended to have the semantics of realloc(): * *
 *          <table>
- *            <tr><td>\Code{H5resize_memory(buffer, size)}</td>
+ *            <tr><td>\TText{H5resize_memory(buffer, size)}</td>
 *                <td>Resizes buffer. Returns pointer to resized buffer.</td></tr>
- *            <tr><td>\Code{H5resize_memory(NULL, size)}</td>
+ *            <tr><td>\TText{H5resize_memory(NULL, size)}</td>
 *                <td>Allocates memory using HDF5 Library allocator.
 *                    Returns pointer to new buffer</td></tr>
- *            <tr><td>\Code{H5resize_memory(buffer, 0)}</td>
+ *            <tr><td>\TText{H5resize_memory(buffer, 0)}</td>
 *                <td>Frees memory using HDF5 Library allocator.
 *                    Returns NULL.</td></tr>
- *            <tr><td>\Code{H5resize_memory(NULL, 0)}</td>
+ *            <tr><td>\TText{H5resize_memory(NULL, 0)}</td>
 *                <td>Returns NULL (undefined in C standard).</td></tr>
 *          </table>
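As a companion to the table above, a small sketch of the realloc()-style semantics of H5resize_memory() (illustrative only; the sizes are arbitrary):

#include "hdf5.h"

void
resize_demo(void)
{
    /* Allocate 100 zeroed bytes with the HDF5 library allocator */
    void *buf = H5allocate_memory(100, 1);

    /* H5resize_memory(buffer, size): resizes the buffer (may move it) */
    buf = H5resize_memory(buf, 200);

    /* H5resize_memory(buffer, 0): frees the buffer and returns NULL */
    buf = H5resize_memory(buf, 0);

    /* H5resize_memory(NULL, size): allocates a fresh buffer */
    buf = H5resize_memory(NULL, 50);
    H5free_memory(buf);
}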
    * diff --git a/src/Makefile.am b/src/Makefile.am index c887a9aa73a..87b12d08b06 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -99,7 +99,7 @@ libhdf5_la_SOURCES= H5.c H5build_settings.c H5checksum.c H5dbg.c H5system.c \ H5Tvlen.c \ H5TS.c H5TSatomic.c H5TSbarrier.c H5TSc11.c H5TScond.c \ H5TSint.c H5TSkey.c H5TSmutex.c H5TSonce.c H5TSpool.c H5TSpthread.c \ - H5TSrwlock.c H5TSsemaphore.c H5TSthread.c H5TSwin.c \ + H5TSrec_rwlock.c H5TSrwlock.c H5TSsemaphore.c H5TSthread.c H5TSwin.c \ H5VL.c H5VLcallback.c H5VLdyn_ops.c H5VLint.c H5VLnative.c \ H5VLnative_attr.c H5VLnative_blob.c H5VLnative_dataset.c \ H5VLnative_datatype.c H5VLnative_file.c H5VLnative_group.c \ diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 5a7e483210a..02a9891d512 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -350,7 +350,8 @@ set (ttsafe_SOURCES ${HDF5_TEST_SOURCE_DIR}/ttsafe_dcreate.c ${HDF5_TEST_SOURCE_DIR}/ttsafe_develop.c ${HDF5_TEST_SOURCE_DIR}/ttsafe_error.c - ${HDF5_TEST_SOURCE_DIR}/ttsafe_rec_rw_lock.c + ${HDF5_TEST_SOURCE_DIR}/ttsafe_rwlock.c + ${HDF5_TEST_SOURCE_DIR}/ttsafe_rec_rwlock.c ${HDF5_TEST_SOURCE_DIR}/ttsafe_semaphore.c ${HDF5_TEST_SOURCE_DIR}/ttsafe_thread_id.c ${HDF5_TEST_SOURCE_DIR}/ttsafe_thread_pool.c diff --git a/test/Makefile.am b/test/Makefile.am index 3625ac3dcce..061bfcf0df5 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -162,7 +162,8 @@ LDADD=libh5test.la $(LIBHDF5) # List the source files for tests that have more than one ttsafe_SOURCES=ttsafe.c ttsafe_acreate.c ttsafe_atomic.c ttsafe_attr_vlen.c \ ttsafe_cancel.c ttsafe_dcreate.c ttsafe_develop.c ttsafe_error.c \ - ttsafe_rec_rw_lock.c ttsafe_semaphore.c ttsafe_thread_id.c ttsafe_thread_pool.c + ttsafe_rwlock.c ttsafe_rec_rwlock.c ttsafe_semaphore.c \ + ttsafe_thread_id.c ttsafe_thread_pool.c cache_image_SOURCES=cache_image.c genall5.c mirror_vfd_SOURCES=mirror_vfd.c genall5.c diff --git a/test/error_test.c b/test/error_test.c index fe27300f431..204055097b8 100644 --- a/test/error_test.c +++ b/test/error_test.c @@ -175,12 +175,13 @@ test_error(hid_t file) static herr_t init_error(void) { - ssize_t cls_size = (ssize_t)strlen(ERR_CLS_NAME) + 1; + ssize_t cls_size = (ssize_t)strlen(ERR_CLS_NAME); ssize_t msg_size = (ssize_t)strlen(ERR_MIN_SUBROUTINE_MSG) + 1; char *cls_name = NULL; char *msg = NULL; H5E_type_t msg_type; + /* Account for null terminator */ if (NULL == (cls_name = (char *)malloc(strlen(ERR_CLS_NAME) + 1))) TEST_ERROR; if (NULL == (msg = (char *)malloc(strlen(ERR_MIN_SUBROUTINE_MSG) + 1))) @@ -189,7 +190,8 @@ init_error(void) if ((ERR_CLS = H5Eregister_class(ERR_CLS_NAME, PROG_NAME, PROG_VERS)) < 0) TEST_ERROR; - if (cls_size != H5Eget_class_name(ERR_CLS, cls_name, (size_t)cls_size) + 1) + /* Account for null terminator */ + if (cls_size != H5Eget_class_name(ERR_CLS, cls_name, (size_t)cls_size + 1)) TEST_ERROR; if (strcmp(ERR_CLS_NAME, cls_name) != 0) TEST_ERROR; diff --git a/test/h5test.c b/test/h5test.c index 01216ca1059..61c01da529c 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -2563,3 +2563,24 @@ h5_driver_uses_multiple_files(const char *drv_name, unsigned flags) return ret_val; } + +/* Deterministic random number functions that don't modify the underlying + * C/POSIX library rand/random state, as this can cause spurious test failures. + * + * Adapted from the example code in the POSIX.1-2001 standard. 
+ */ + +static unsigned int next_g = 1; + +int +h5_local_rand(void) +{ + next_g = next_g * 1103515245 + 12345; + return next_g & RAND_MAX; +} + +void +h5_local_srand(unsigned int seed) +{ + next_g = seed; +} diff --git a/test/h5test.h b/test/h5test.h index 238bd38acd6..1ec537c62e3 100644 --- a/test/h5test.h +++ b/test/h5test.h @@ -319,6 +319,14 @@ H5TEST_DLL herr_t h5_using_parallel_driver(hid_t fapl_id, bool *driver_i H5TEST_DLL herr_t h5_driver_is_default_vfd_compatible(hid_t fapl_id, bool *default_vfd_compatible); H5TEST_DLL bool h5_driver_uses_multiple_files(const char *drv_name, unsigned flags); +/* Random number functions that don't modify the underlying rand/random state. + * These use rand_r with a state pointer under the hood. The state is always + * initialized to the same value so that each process in the parallel tests + * always gets the same sequence. + */ +H5TEST_DLL int h5_local_rand(void); +H5TEST_DLL void h5_local_srand(unsigned int seed); + /* Functions that will replace components of a FAPL */ H5TEST_DLL herr_t h5_get_vfd_fapl(hid_t fapl_id); H5TEST_DLL herr_t h5_get_libver_fapl(hid_t fapl_id); diff --git a/test/links.c b/test/links.c index 222b3b66039..ad9948af04d 100644 --- a/test/links.c +++ b/test/links.c @@ -1938,17 +1938,20 @@ test_move_preserves(hid_t fapl_id, bool new_format) *------------------------------------------------------------------------- */ #ifndef H5_NO_DEPRECATED_SYMBOLS +#define NUM_OBJS 3 /* number of groups in FILENAME[0] file */ static int test_deprec(hid_t fapl, bool new_format) { hid_t file_id = H5I_INVALID_HID; hid_t group1_id = H5I_INVALID_HID; hid_t group2_id = H5I_INVALID_HID; + hid_t group3_id = H5I_INVALID_HID; H5G_stat_t sb_hard1, sb_hard2, sb_soft1, sb_soft2; H5G_obj_t obj_type; /* Object type */ hsize_t num_objs; /* Number of objects in a group */ char filename[1024]; char tmpstr[1024]; + int len = 0; /* Length of comment */ if (new_format) TESTING("backwards compatibility (w/new group format)"); @@ -1966,14 +1969,25 @@ test_deprec(hid_t fapl, bool new_format) FAIL_STACK_ERROR; if ((group2_id = H5Gcreate2(file_id, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR; + if ((group3_id = H5Gcreate2(file_id, "group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; /* Test H5Gset and get comment */ + if (H5Gset_comment(file_id, "group1", "comment") < 0) FAIL_STACK_ERROR; - if (H5Gget_comment(file_id, "group1", sizeof(tmpstr), tmpstr) < 0) + if ((len = H5Gget_comment(file_id, "group1", 0, NULL)) < 0) + FAIL_STACK_ERROR; + + /* Returned length should be the same as strlen of the comment */ + if ((size_t)len != strlen("comment")) + FAIL_STACK_ERROR; + + /* Get and verify the comment */ + if (H5Gget_comment(file_id, "group1", (size_t)len + 1, tmpstr) < 0) FAIL_STACK_ERROR; if (strcmp(tmpstr, "comment") != 0) - TEST_ERROR; + FAIL_STACK_ERROR; /* Create links using H5Glink and H5Glink2 */ if (H5Glink(file_id, H5G_LINK_HARD, "group2", "group1/link_to_group2") < 0) @@ -2012,7 +2026,7 @@ test_deprec(hid_t fapl, bool new_format) /* Test getting the number of objects in a group */ if (H5Gget_num_objs(file_id, &num_objs) < 0) FAIL_STACK_ERROR; - if (num_objs != 2) + if (num_objs != NUM_OBJS) TEST_ERROR; if (H5Gget_num_objs(group1_id, &num_objs) < 0) FAIL_STACK_ERROR; @@ -2103,9 +2117,43 @@ test_deprec(hid_t fapl, bool new_format) /* Test H5Gmove and H5Gmove2 */ if (H5Gmove(file_id, "group1", "moved_group1") < 0) - FAIL_STACK_ERROR; + TEST_ERROR; if (H5Gmove2(file_id, "group2", group1_id, "moved_group2") < 0) - 
FAIL_STACK_ERROR; + TEST_ERROR; + if (H5Gmove2(file_id, "group3", H5L_SAME_LOC, "moved_group3") < 0) + TEST_ERROR; + if (H5Gmove2(file_id, "moved_group3", group2_id, "moved_group3_to_group2") < 0) + TEST_ERROR; + + /* Test H5Gmove2 with H5L_SAME_LOC */ + if (H5Gmove2(group2_id, "moved_group3_to_group2", H5L_SAME_LOC, "group3_same_loc") < 0) + TEST_ERROR; + + /* Test H5Gmove2 with H5L_SAME_LOC */ + if (H5Gmove2(H5L_SAME_LOC, "moved_group1/moved_group2", file_id, "moved_group2_again") < 0) + TEST_ERROR; + + /* Put back moved_group2 for subsequent tests */ + if (H5Gmove2(file_id, "moved_group2_again", file_id, "moved_group1/moved_group2") < 0) + TEST_ERROR; + + /* Test passing in invalid ID */ + H5E_BEGIN_TRY + { + hid_t bad_id = H5I_BADID; + if (H5Gmove2(bad_id, "group2", group1_id, "moved_group2") >= 0) + TEST_ERROR; + } + H5E_END_TRY + + /* Test passing in invalid ID */ + H5E_BEGIN_TRY + { + hid_t bad_id = H5I_BADID; + if (H5Gmove2(file_id, "group2", bad_id, "moved_group2") >= 0) + TEST_ERROR; + } + H5E_END_TRY /* Ensure that both groups can be opened */ if (H5Gclose(group2_id) < 0) @@ -2119,6 +2167,8 @@ test_deprec(hid_t fapl, bool new_format) FAIL_STACK_ERROR; /* Close open IDs */ + if (H5Gclose(group3_id) < 0) + FAIL_STACK_ERROR; if (H5Gclose(group2_id) < 0) FAIL_STACK_ERROR; if (H5Gclose(group1_id) < 0) @@ -2144,6 +2194,7 @@ test_deprec(hid_t fapl, bool new_format) error: H5E_BEGIN_TRY { + H5Gclose(group3_id); H5Gclose(group2_id); H5Gclose(group1_id); H5Fclose(file_id); @@ -3283,7 +3334,8 @@ external_link_closing_deprec(hid_t fapl, bool new_format) /* Test copy (as of this test, it uses the same code as move) */ if (H5Lcopy(fid1, "elink/elink/elink", fid1, "elink/elink/elink_copied", H5P_DEFAULT, H5P_DEFAULT) < 0) FAIL_STACK_ERROR; - if (H5Lcopy(fid1, "elink/elink/elink", fid1, "elink/elink/elink/elink_copied2", H5P_DEFAULT, + /* Also exercise H5L_SAME_LOC */ + if (H5Lcopy(H5L_SAME_LOC, "elink/elink/elink", fid1, "elink/elink/elink/elink_copied2", H5P_DEFAULT, H5P_DEFAULT) < 0) FAIL_STACK_ERROR; @@ -4315,7 +4367,8 @@ lapl_nlinks_deprec(hid_t fapl, bool new_format) */ if (H5Lcopy(fid, "soft17", fid, "soft17/newer_soft", H5P_DEFAULT, plist) < 0) TEST_ERROR; - if (H5Lmove(fid, "soft17/newer_soft", fid, "soft17/newest_soft", H5P_DEFAULT, plist) < 0) + /* Also exercise H5L_SAME_LOC */ + if (H5Lmove(fid, "soft17/newer_soft", H5L_SAME_LOC, "soft17/newest_soft", H5P_DEFAULT, plist) < 0) TEST_ERROR; /* H5Olink */ diff --git a/test/mirror_vfd.c b/test/mirror_vfd.c index 124fc6a6b0c..fe6695b9a06 100644 --- a/test/mirror_vfd.c +++ b/test/mirror_vfd.c @@ -1236,6 +1236,7 @@ create_mirroring_split_fapl(const char *basename, struct mirrortest_filenames *n mirror_conf.handshake_port = opts->portno; if (strncpy(mirror_conf.remote_ip, opts->ip, H5FD_MIRROR_MAX_IP_LEN) == NULL) TEST_ERROR; + mirror_conf.remote_ip[H5FD_MIRROR_MAX_IP_LEN] = '\0'; if ((splitter_config->wo_fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) TEST_ERROR; if (H5Pset_fapl_mirror(splitter_config->wo_fapl_id, &mirror_conf) < 0) @@ -1248,8 +1249,10 @@ create_mirroring_split_fapl(const char *basename, struct mirrortest_filenames *n /* Set file paths for w/o and logfile */ if (strncpy(splitter_config->wo_path, (const char *)names->wo, H5FD_SPLITTER_PATH_MAX) == NULL) TEST_ERROR; + splitter_config->wo_path[H5FD_SPLITTER_PATH_MAX] = '\0'; if (strncpy(splitter_config->log_file_path, (const char *)names->log, H5FD_SPLITTER_PATH_MAX) == NULL) TEST_ERROR; + splitter_config->log_file_path[H5FD_SPLITTER_PATH_MAX] = '\0'; /* Create Splitter FAPL */ if 
((ret_value = H5Pcreate(H5P_FILE_ACCESS)) < 0) diff --git a/test/mount.c b/test/mount.c index 92a0c13d84c..2d833fbf102 100644 --- a/test/mount.c +++ b/test/mount.c @@ -164,6 +164,7 @@ test_illegal(hid_t fapl) { hid_t file1 = H5I_INVALID_HID, file1b = H5I_INVALID_HID, file2 = H5I_INVALID_HID, file3 = H5I_INVALID_HID, file3b = H5I_INVALID_HID, mnt = H5I_INVALID_HID; + hid_t dtype = H5I_INVALID_HID; /* To test invalid ID */ char filename1[1024], filename2[1024], filename3[1024]; herr_t status; @@ -259,6 +260,30 @@ test_illegal(hid_t fapl) if (H5Funmount(file1, "/mnt1") < 0) FAIL_STACK_ERROR; + /* Try passing in IDs that are not a file or group ID */ + if ((dtype = H5Tcopy(H5T_C_S1)) < 0) + FAIL_STACK_ERROR; + H5E_BEGIN_TRY + { + status = H5Fmount(dtype, "/mnt1", file1b, H5P_DEFAULT); + } + H5E_END_TRY + if (status >= 0) { + H5_FAILED(); + puts(" Passing in an ID other than file or group ID should have failed."); + TEST_ERROR; + } /* end if */ + H5E_BEGIN_TRY + { + status = H5Funmount(dtype, "/mnt1"); + } + H5E_END_TRY + if (status >= 0) { + H5_FAILED(); + puts(" Passing in an ID other than file or group ID should have failed."); + TEST_ERROR; + } /* end if */ + /* Close everything and return */ if (H5Fclose(file1) < 0) FAIL_STACK_ERROR; diff --git a/test/tfile.c b/test/tfile.c index 62881264441..02f996f66a3 100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -2420,10 +2420,11 @@ test_file_getname(void) file_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(file_id, FAIL, "H5Fcreate"); - /* Get and verify file name */ + /* Get and verify file name and its length */ name_len = H5Fget_name(file_id, name, (size_t)TESTA_NAME_BUF_SIZE); CHECK(name_len, FAIL, "H5Fget_name"); VERIFY_STR(name, FILE1, "H5Fget_name"); + VERIFY(name_len, strlen(FILE1), "H5Fget_name"); /* Create a group in the root group */ group_id = H5Gcreate2(file_id, TESTA_GROUPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -8100,6 +8101,42 @@ test_min_dset_ohdr(void) CHECK(ret, FAIL, "H5Fclose"); } /* end test_min_dset_ohdr() */ +/**************************************************************** +** +** test_unseekable_file(): +** Test that attempting to create/open an unseekable file fails gracefully +** without a segfault (see hdf5#1498) +****************************************************************/ +static void +test_unseekable_file(void) +{ + /* Output message about test being performed */ + MESSAGE(5, ("Testing creating/opening an unseekable file\n")); + + /* Flush message in case this test segfaults */ + fflush(stdout); + + /* Creation */ +#ifdef H5_HAVE_WIN32_API + H5Fcreate("NUL", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); +#else + H5Fcreate("/dev/null", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); +#endif + + /* Should fail without segfault */ + /* TODO - Does not properly fail on all systems */ + /* VERIFY(file_id, H5I_INVALID_HID, "H5Fcreate"); */ + + /* Opening */ +#ifdef H5_HAVE_WIN32_API + H5Fopen("NUL", H5F_ACC_RDWR, H5P_DEFAULT); +#else + H5Fopen("/dev/null", H5F_ACC_RDWR, H5P_DEFAULT); +#endif + + /* TODO - Does not properly fail on all systems */ + /* VERIFY(file_id, H5I_INVALID_HID, "H5Fopen"); */ +} /**************************************************************** ** ** test_deprec(): @@ -8154,6 +8191,14 @@ test_deprec(const char *driver_name) fcpl = H5Fget_create_plist(file); CHECK(fcpl, FAIL, "H5Fget_create_plist"); + /* Test passing in an ID that is not a file ID, should fail */ + H5E_BEGIN_TRY + { + ret = H5Fset_latest_format(fcpl, true); + } + H5E_END_TRY + VERIFY(ret, FAIL, 
"H5Fset_latest_format"); + /* Get the file's version information */ ret = H5Pget_version(fcpl, &super, &freelist, &stab, &shhdr); CHECK(ret, FAIL, "H5Pget_version"); @@ -8419,10 +8464,11 @@ test_file(void) test_libver_bounds(); /* Test compatibility for file space management */ test_libver_bounds_low_high(driver_name); - test_libver_macros(); /* Test the macros for library version comparison */ - test_libver_macros2(); /* Test the macros for library version comparison */ - test_incr_filesize(); /* Test H5Fincrement_filesize() and H5Fget_eoa() */ - test_min_dset_ohdr(); /* Test dataset object header minimization */ + test_libver_macros(); /* Test the macros for library version comparison */ + test_libver_macros2(); /* Test the macros for library version comparison */ + test_incr_filesize(); /* Test H5Fincrement_filesize() and H5Fget_eoa() */ + test_min_dset_ohdr(); /* Test dataset object header minimization */ + test_unseekable_file(); /* Test attempting to open/create an unseekable file */ #ifndef H5_NO_DEPRECATED_SYMBOLS test_file_ishdf5(driver_name); /* Test detecting HDF5 files correctly */ test_deprec(driver_name); /* Test deprecated routines */ diff --git a/test/th5o.c b/test/th5o.c index 801091f6b9f..815b5648e4d 100644 --- a/test/th5o.c +++ b/test/th5o.c @@ -545,6 +545,7 @@ test_h5o_refcount(void) hid_t grp, dset, dtype, dspace; /* Object identifiers */ char filename[1024]; H5O_info2_t oinfo; /* Object info struct */ + H5L_info2_t linfo; /* Buffer for H5Lget_info */ hsize_t dims[RANK]; herr_t ret; /* Value returned from API calls */ @@ -568,6 +569,10 @@ test_h5o_refcount(void) ret = H5Tcommit2(fid, "datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); CHECK(ret, FAIL, "H5Tcommit2"); + /* Test passing a datatype ID to H5Lget_info2, it should not fail */ + ret = H5Lget_info2(dtype, "/datatype", &linfo, H5P_DEFAULT); + CHECK(ret, FAIL, "H5Lget_info2"); + /* Create the data space for the dataset. 
*/ dims[0] = DIM0; dims[1] = DIM1; @@ -747,6 +752,7 @@ test_h5o_plist(void) hid_t grp, dset, dtype, dspace; /* Object identifiers */ hid_t fapl; /* File access property list */ hid_t gcpl, dcpl, tcpl; /* Object creation properties */ + hid_t bad_pl = H5I_INVALID_HID; /* Invalid property list dues to invalid arg */ char filename[1024]; unsigned def_max_compact, def_min_dense; /* Default phase change parameters */ unsigned max_compact, min_dense; /* Actual phase change parameters */ @@ -849,6 +855,14 @@ test_h5o_plist(void) dcpl = H5Dget_create_plist(dset); CHECK(dcpl, FAIL, "H5Dget_create_plist"); + /* Test passing in a non-group identifier to the H5G API */ + H5E_BEGIN_TRY + { + bad_pl = H5Gget_create_plist(dtype); + } + H5E_END_TRY + VERIFY(bad_pl, H5I_INVALID_HID, "H5Gget_create_plist"); + /* Retrieve attribute phase change values on each creation property list and verify */ ret = H5Pget_attr_phase_change(gcpl, &max_compact, &min_dense); CHECK(ret, FAIL, "H5Pget_attr_phase_change"); @@ -1215,6 +1229,7 @@ test_h5o_comment(void) /* Getting the comment on the file and verify it */ comment_len = H5Oget_comment(fid, NULL, (size_t)0); CHECK(comment_len, FAIL, "H5Oget_comment"); + VERIFY(comment_len, strlen(file_comment), "H5Oget_comment"); len = H5Oget_comment(fid, check_comment, (size_t)comment_len + 1); CHECK(len, FAIL, "H5Oget_comment"); @@ -1232,6 +1247,7 @@ test_h5o_comment(void) len = H5Oget_comment(grp, check_comment, (size_t)comment_len + 1); CHECK(len, FAIL, "H5Oget_comment"); + VERIFY(len, strlen(grp_comment), "H5Oget_comment"); ret_value = strcmp(grp_comment, check_comment); VERIFY(ret_value, 0, "H5Oget_comment"); @@ -1243,6 +1259,7 @@ test_h5o_comment(void) /* Getting the comment on the datatype and verify it */ comment_len = H5Oget_comment(dtype, NULL, (size_t)0); CHECK(comment_len, FAIL, "H5Oget_comment"); + VERIFY(comment_len, strlen(dtype_comment), "H5Oget_comment"); len = H5Oget_comment(dtype, check_comment, (size_t)comment_len + 1); CHECK(len, FAIL, "H5Oget_comment"); @@ -1260,6 +1277,7 @@ test_h5o_comment(void) len = H5Oget_comment(dset, check_comment, (size_t)comment_len + 1); CHECK(ret, len, "H5Oget_comment"); + VERIFY(len, strlen(dset_comment), "H5Oget_comment"); ret_value = strcmp(dset_comment, check_comment); VERIFY(ret_value, 0, "H5Oget_comment"); @@ -1401,6 +1419,7 @@ test_h5o_comment_by_name(void) len = H5Oget_comment_by_name(fid, ".", check_comment, (size_t)comment_len + 1, H5P_DEFAULT); CHECK(len, FAIL, "H5Oget_comment_by_name"); + VERIFY(len, strlen(file_comment), "H5Oget_comment"); ret_value = strcmp(file_comment, check_comment); VERIFY(ret_value, 0, "H5Oget_comment_by_name"); diff --git a/test/tmisc.c b/test/tmisc.c index a9d94a5ec97..63bf5d8edcb 100644 --- a/test/tmisc.c +++ b/test/tmisc.c @@ -4296,6 +4296,7 @@ test_misc23(void) namelen = H5Iget_name(tmp_id, objname, (size_t)MISC23_NAME_BUF_SIZE); CHECK(namelen, FAIL, "H5Iget_name"); VERIFY_STR(objname, "/A/B01/grp", "H5Iget_name"); + VERIFY(namelen, strlen("/A/B01/grp"), "H5Iget_name"); status = H5Gclose(tmp_id); CHECK(status, FAIL, "H5Gclose"); diff --git a/test/trefer.c b/test/trefer.c index fcd0a21484a..b1e4a3854b4 100644 --- a/test/trefer.c +++ b/test/trefer.c @@ -618,21 +618,22 @@ test_reference_obj(void) /* Check file name for reference */ namelen = H5Rget_file_name(&rbuf[0], NULL, 0); - CHECK(namelen, FAIL, "H5Dget_file_name"); - VERIFY(namelen, strlen(FILE_REF_OBJ), "H5Dget_file_name"); + CHECK(namelen, FAIL, "H5Rget_file_name"); + VERIFY(namelen, strlen(FILE_REF_OBJ), "H5Rget_file_name"); /* Make sure 
size parameter is ignored */ namelen = H5Rget_file_name(&rbuf[0], NULL, 200); - CHECK(namelen, FAIL, "H5Dget_file_name"); - VERIFY(namelen, strlen(FILE_REF_OBJ), "H5Dget_file_name"); + CHECK(namelen, FAIL, "H5Rget_file_name"); + VERIFY(namelen, strlen(FILE_REF_OBJ), "H5Rget_file_name"); /* Get the file name for the reference */ namebuf = (char *)malloc((size_t)namelen + 1); - namelen = H5Rget_file_name(&rbuf[0], (char *)namebuf, (size_t)namelen + 1); - CHECK(namelen, FAIL, "H5Dget_file_name"); + namelen = H5Rget_file_name(&rbuf[0], namebuf, (size_t)namelen + 1); + CHECK(namelen, FAIL, "H5Rget_file_name"); + VERIFY(strcmp(namebuf, FILE_REF_OBJ), 0, "namebuf vs FILE_REF_OBJ"); + VERIFY(namelen, strlen(FILE_REF_OBJ), "H5Rget_file_name"); - ret = !((strcmp(namebuf, FILE_REF_OBJ) == 0) && (namelen == strlen(FILE_REF_OBJ))); - CHECK(ret, FAIL, "H5Literate"); + free(namebuf); /* Testing Dataset1 */ @@ -644,7 +645,8 @@ test_reference_obj(void) namebuf = (char *)malloc((size_t)namelen + 1); namelen = H5Rget_obj_name(&rbuf[0], H5P_DEFAULT, namebuf, (size_t)namelen + 1); CHECK(namelen, FAIL, "H5Rget_obj_name"); - VERIFY(strcmp(namebuf, DS1_REF_OBJ), 0, "strcmp namebuf vs DS1_REF_OBJ"); + VERIFY(strcmp(namebuf, DS1_REF_OBJ), 0, "namebuf vs DS1_REF_OBJ"); + VERIFY(namelen, strlen(DS1_REF_OBJ), "H5Rget_obj_name"); /* Open dataset object */ ref_ds1 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id); @@ -697,13 +699,12 @@ test_reference_obj(void) /* Getting the name of the referenced object and verify it */ namelen = H5Rget_obj_name(&rbuf[1], H5P_DEFAULT, NULL, 0); - CHECK(namelen, FAIL, "H5Rget_obj_name"); VERIFY(namelen, strlen(DS2_REF_OBJ), "H5Rget_obj_name"); namebuf = (char *)malloc((size_t)namelen + 1); namelen = H5Rget_obj_name(&rbuf[1], H5P_DEFAULT, namebuf, (size_t)namelen + 1); - CHECK(namelen, FAIL, "H5Rget_obj_name"); - VERIFY(strcmp(namebuf, DS2_REF_OBJ), 0, "strcmp namebuf vs DS2_REF_OBJ"); + VERIFY(namelen, strlen(DS2_REF_OBJ), "H5Rget_obj_name"); + VERIFY(strcmp(namebuf, DS2_REF_OBJ), 0, "namebuf vs DS2_REF_OBJ"); /* Open dataset object */ ref_ds2 = H5Ropen_object(&rbuf[1], H5P_DEFAULT, dapl_id); @@ -2416,6 +2417,7 @@ test_reference_group(void) H5P_DEFAULT); CHECK(size, (-1), "H5Lget_name_by_idx"); VERIFY_STR(objname, DSETNAME2, "H5Lget_name_by_idx"); + VERIFY(size, strlen(DSETNAME2), "H5Lget_name_by_idx"); ret = H5Oget_info_by_idx3(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT); diff --git a/test/ttsafe.c b/test/ttsafe.c index 08022eb1992..2f5f26db331 100644 --- a/test/ttsafe.c +++ b/test/ttsafe.c @@ -127,20 +127,19 @@ main(int argc, char *argv[]) /* C11 atomics only tested when emulated */ AddTest("atomics", tts_atomics, NULL, "emulation of C11 atomics", NULL); #endif /* H5_HAVE_STDATOMIC_H */ + AddTest("rwlock", tts_rwlock, NULL, "simple R/W locks", NULL); #ifndef H5_HAVE_WIN_THREADS /* Recursive R/W locks */ - AddTest("rec_rwlock_1", tts_rec_rw_lock_smoke_check_1, NULL, "recursive R/W lock smoke check 1 -- basic", + AddTest("rec_rwlock_1", tts_rec_rwlock_smoke_check_1, NULL, "recursive R/W lock smoke check 1 -- basic", NULL); - AddTest("rec_rwlock_2", tts_rec_rw_lock_smoke_check_2, NULL, + AddTest("rec_rwlock_2", tts_rec_rwlock_smoke_check_2, NULL, "recursive R/W lock smoke check 2 -- mob of readers", NULL); - AddTest("rec_rwlock_3", tts_rec_rw_lock_smoke_check_3, NULL, + AddTest("rec_rwlock_3", tts_rec_rwlock_smoke_check_3, NULL, "recursive R/W lock smoke check 3 -- mob of writers", NULL); - AddTest("rec_rwlock_4", tts_rec_rw_lock_smoke_check_4, 
NULL, + AddTest("rec_rwlock_4", tts_rec_rwlock_smoke_check_4, NULL, "recursive R/W lock smoke check 4 -- mixed mob", NULL); #endif /* !H5_HAVE_WIN_THREADS */ -#ifdef H5_HAVE_STDATOMIC_H AddTest("semaphore", tts_semaphore, NULL, "lightweight system semaphores", NULL); -#endif /* H5_HAVE_STDATOMIC_H */ #ifdef H5_HAVE_THREADSAFE AddTest("thread_id", tts_thread_id, NULL, "thread IDs", NULL); diff --git a/test/ttsafe.h b/test/ttsafe.h index d2066782b6b..7544b3ff32d 100644 --- a/test/ttsafe.h +++ b/test/ttsafe.h @@ -38,13 +38,14 @@ void tts_is_threadsafe(void); #ifdef H5_HAVE_THREADS void tts_thread_pool(void); void tts_atomics(void); -#ifdef H5_HAVE_STDATOMIC_H +void tts_rwlock(void); void tts_semaphore(void); -#endif /* H5_HAVE_STDATOMIC_H */ -void tts_rec_rw_lock_smoke_check_1(void); -void tts_rec_rw_lock_smoke_check_2(void); -void tts_rec_rw_lock_smoke_check_3(void); -void tts_rec_rw_lock_smoke_check_4(void); +#ifndef H5_HAVE_WIN_THREADS +void tts_rec_rwlock_smoke_check_1(void); +void tts_rec_rwlock_smoke_check_2(void); +void tts_rec_rwlock_smoke_check_3(void); +void tts_rec_rwlock_smoke_check_4(void); +#endif /* !H5_HAVE_WIN_THREADS */ #ifdef H5_HAVE_THREADSAFE void tts_dcreate(void); void tts_error(void); diff --git a/test/ttsafe_acreate.c b/test/ttsafe_acreate.c index b3a83e5690c..6ee12d03987 100644 --- a/test/ttsafe_acreate.c +++ b/test/ttsafe_acreate.c @@ -63,7 +63,11 @@ tts_acreate(void) int buffer, i; herr_t status; - ttsafe_name_data_t *attrib_data; + ttsafe_name_data_t *attrib_data[NUM_THREADS]; + + char *attribute_name = NULL; + + memset(attrib_data, 0, sizeof(attrib_data)); /* * Create an HDF5 file using H5F_ACC_TRUNC access, default file @@ -97,12 +101,12 @@ tts_acreate(void) * with the dataset */ for (i = 0; i < NUM_THREADS; i++) { - attrib_data = (ttsafe_name_data_t *)malloc(sizeof(ttsafe_name_data_t)); - attrib_data->dataset = dataset; - attrib_data->datatype = datatype; - attrib_data->dataspace = dataspace; - attrib_data->current_index = i; - if (H5TS_thread_create(&threads[i], tts_acreate_thread, attrib_data) < 0) + attrib_data[i] = (ttsafe_name_data_t *)malloc(sizeof(ttsafe_name_data_t)); + attrib_data[i]->dataset = dataset; + attrib_data[i]->datatype = datatype; + attrib_data[i]->dataspace = dataspace; + attrib_data[i]->current_index = i; + if (H5TS_thread_create(&threads[i], tts_acreate_thread, attrib_data[i]) < 0) TestErrPrintf("thread # %d did not start", i); } @@ -112,7 +116,9 @@ tts_acreate(void) /* verify the correctness of the test */ for (i = 0; i < NUM_THREADS; i++) { - attribute = H5Aopen(dataset, gen_name(i), H5P_DEFAULT); + attribute_name = gen_name(i); + + attribute = H5Aopen(dataset, attribute_name, H5P_DEFAULT); CHECK(attribute, H5I_INVALID_HID, "H5Aopen"); if (attribute < 0) @@ -125,6 +131,8 @@ tts_acreate(void) status = H5Aclose(attribute); CHECK(status, FAIL, "H5Aclose"); } + + free(attribute_name); } /* close remaining resources */ @@ -136,6 +144,10 @@ tts_acreate(void) CHECK(status, FAIL, "H5Dclose"); status = H5Fclose(file); CHECK(status, FAIL, "H5Fclose"); + + for (i = 0; i < NUM_THREADS; i++) + if (attrib_data[i]) + free(attrib_data[i]); } /* end tts_acreate() */ H5TS_THREAD_RETURN_TYPE @@ -164,6 +176,8 @@ tts_acreate_thread(void *client_data) status = H5Aclose(attribute); CHECK(status, FAIL, "H5Aclose"); + free(attribute_data); + free(attribute_name); return (H5TS_thread_ret_t)0; } /* end tts_acreate_thread() */ diff --git a/test/ttsafe_atomic.c b/test/ttsafe_atomic.c index 71046be44af..0150ccecade 100644 --- a/test/ttsafe_atomic.c +++ 
b/test/ttsafe_atomic.c @@ -5,7 +5,7 @@ * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * distribution tree, or in https://www.hdfgroup.org/licenses. * * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ diff --git a/test/ttsafe_attr_vlen.c b/test/ttsafe_attr_vlen.c index 913ad0011a9..10ce4c63d78 100644 --- a/test/ttsafe_attr_vlen.c +++ b/test/ttsafe_attr_vlen.c @@ -123,6 +123,7 @@ tts_attr_vlen_thread(void H5_ATTR_UNUSED *client_data) hid_t fid = H5I_INVALID_HID; /* File ID */ hid_t gid = H5I_INVALID_HID; /* Group ID */ hid_t aid = H5I_INVALID_HID; /* Attribute ID */ + hid_t asid = H5I_INVALID_HID; /* Dataspace ID for the attribute */ hid_t atid = H5I_INVALID_HID; /* Datatype ID for the attribute */ char *string_attr_check; /* The attribute data being read */ const char *string_attr = "2.0"; /* The expected attribute data */ @@ -144,6 +145,10 @@ tts_attr_vlen_thread(void H5_ATTR_UNUSED *client_data) atid = H5Aget_type(aid); CHECK(atid, H5I_INVALID_HID, "H5Aget_type"); + /* Get the dataspace for the attribute */ + asid = H5Aget_space(aid); + CHECK(asid, H5I_INVALID_HID, "H5Aget_space"); + /* Read the attribute */ ret = H5Aread(aid, atid, &string_attr_check); CHECK(ret, FAIL, "H5Aclose"); @@ -151,10 +156,17 @@ tts_attr_vlen_thread(void H5_ATTR_UNUSED *client_data) /* Verify the attribute data is as expected */ VERIFY_STR(string_attr_check, string_attr, "H5Aread"); + /* Free the attribute data */ + ret = H5Dvlen_reclaim(atid, asid, H5P_DEFAULT, &string_attr_check); + CHECK(ret, FAIL, "H5Dvlen_reclaim"); + /* Close IDs */ ret = H5Aclose(aid); CHECK(ret, FAIL, "H5Aclose"); + ret = H5Sclose(asid); + CHECK(ret, FAIL, "H5Aclose"); + ret = H5Gclose(gid); CHECK(ret, FAIL, "H5Aclose"); diff --git a/test/ttsafe_cancel.c b/test/ttsafe_cancel.c index f2bb4c517a1..7aaf5aaaa6b 100644 --- a/test/ttsafe_cancel.c +++ b/test/ttsafe_cancel.c @@ -47,6 +47,10 @@ typedef struct cleanup_struct { hid_t dataspace; } cancel_cleanup_t; +/* Used by tts_cancel_thread. 
+ * Global because the thread gets cancelled and can't clean up its allocations */ +cancel_cleanup_t cleanup_structure = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID}; + pthread_t childthread; static H5TS_barrier_t barrier; @@ -94,14 +98,13 @@ tts_cancel(void) void * tts_cancel_thread(void H5_ATTR_UNUSED *arg) { - hid_t dataspace = H5I_INVALID_HID; - hid_t datatype = H5I_INVALID_HID; - hid_t dataset = H5I_INVALID_HID; - int datavalue; - int buffer; - hsize_t dimsf[1]; /* dataset dimensions */ - cancel_cleanup_t *cleanup_structure; - herr_t status; + hid_t dataspace = H5I_INVALID_HID; + hid_t datatype = H5I_INVALID_HID; + hid_t dataset = H5I_INVALID_HID; + int datavalue; + int buffer; + hsize_t dimsf[1]; /* dataset dimensions */ + herr_t status; /* define dataspace for dataset */ dimsf[0] = 1; @@ -120,11 +123,10 @@ tts_cancel_thread(void H5_ATTR_UNUSED *arg) CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); /* If thread is cancelled, make cleanup call */ - cleanup_structure = (cancel_cleanup_t *)malloc(sizeof(cancel_cleanup_t)); - cleanup_structure->dataset = dataset; - cleanup_structure->datatype = datatype; - cleanup_structure->dataspace = dataspace; - pthread_cleanup_push(cancellation_cleanup, cleanup_structure); + cleanup_structure.dataset = dataset; + cleanup_structure.datatype = datatype; + cleanup_structure.dataspace = dataspace; + pthread_cleanup_push(cancellation_cleanup, &cleanup_structure); datavalue = 1; status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &datavalue); @@ -189,14 +191,14 @@ tts_cancel_callback(void *elem, hid_t H5_ATTR_UNUSED type_id, unsigned H5_ATTR_U void cancellation_cleanup(void *arg) { - cancel_cleanup_t *cleanup_structure = (cancel_cleanup_t *)arg; + cancel_cleanup_t *_cleanup_structure = (cancel_cleanup_t *)arg; herr_t status; - status = H5Dclose(cleanup_structure->dataset); + status = H5Dclose(_cleanup_structure->dataset); CHECK(status, FAIL, "H5Dclose"); - status = H5Tclose(cleanup_structure->datatype); + status = H5Tclose(_cleanup_structure->datatype); CHECK(status, FAIL, "H5Tclose"); - status = H5Sclose(cleanup_structure->dataspace); + status = H5Sclose(_cleanup_structure->dataspace); CHECK(status, FAIL, "H5Sclose"); } /* end cancellation_cleanup() */ diff --git a/test/ttsafe_develop.c b/test/ttsafe_develop.c index 82ee8640ca1..1ecf1757618 100644 --- a/test/ttsafe_develop.c +++ b/test/ttsafe_develop.c @@ -5,7 +5,7 @@ * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * distribution tree, or in https://www.hdfgroup.org/licenses. * * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ diff --git a/test/ttsafe_rec_rw_lock.c b/test/ttsafe_rec_rwlock.c similarity index 77% rename from test/ttsafe_rec_rw_lock.c rename to test/ttsafe_rec_rwlock.c index f7230af3d9d..a09dc6ea9c5 100644 --- a/test/ttsafe_rec_rw_lock.c +++ b/test/ttsafe_rec_rwlock.c @@ -5,7 +5,7 @@ * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. 
* + * distribution tree, or in https://www.hdfgroup.org/licenses. * * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -40,9 +40,9 @@ */ /*********************************************************************** * - * Structure rec_rw_lock_test_udata_t + * Structure rec_rwlock_test_udata_t * - * Arrays of instances of rec_rw_lock_test_udata_t are used to configure + * Arrays of instances of rec_rwlock_test_udata_t are used to configure * the threads used to test the recursive R/W lock, and to collect * statistics on their behaviour. These statistics are aggregated and * used to cross-check the statistics collected by the recursive R/W @@ -50,7 +50,7 @@ * * The fields of the structure are discussed below: * - * rw_lock: Pointer to the recursive R/W under test. + * lock: Pointer to the recursive R/W lock under test. * * target_rd_lock_cycles: The number of times the test thread is * required to obtain and drop the read lock. Note @@ -73,17 +73,17 @@ * * The remaining fields are used for statistics collection. They are * thread specific versions of the fields of the same name in - * H5TS_rw_lock_stats_t. See the header comment for that + * H5TS_rec_rwlock_stats_t. See the header comment for that * structure (in H5TSprivate.h) for further details. * ***********************************************************************/ -typedef struct rec_rw_lock_test_udata_t { +typedef struct rec_rwlock_test_udata_t { /* thread control fields */ - H5TS_rw_lock_t *rw_lock; - int32_t target_rd_lock_cycles; - int32_t target_wr_lock_cycles; - int32_t max_recursive_lock_depth; + H5TS_rec_rwlock_t *lock; + int32_t target_rd_lock_cycles; + int32_t target_wr_lock_cycles; + int32_t max_recursive_lock_depth; /* thread stats fields */ int64_t read_locks_granted; @@ -95,11 +95,11 @@ typedef struct rec_rw_lock_test_udata_t { int64_t real_write_locks_granted; int64_t real_write_locks_released; -} rec_rw_lock_test_udata_t; +} rec_rwlock_test_udata_t; /* ********************************************************************** - * tts_rw_lock_smoke_check_test_thread + * tts_rec_rwlock_smoke_check_test_thread * * Perform a sequence of recursive read and/or write locks on the * target recursive R/W lock as directed by the supplied user data. 
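For readers tracking the rename from the old H5TS__rw_* names, here is a minimal sketch of the lock cycle this thread routine drives, using the new H5TS__rec_rwlock_* calls exactly as they are exercised below. These routines are library-internal (their declarations are assumed to come from H5TSprivate.h, as the structure comment above notes), each returns herr_t, and error checking is elided for brevity:

    H5TS_rec_rwlock_t lock;

    H5TS__rec_rwlock_init(&lock);     /* set up the lock                                  */

    H5TS__rec_rwlock_rdlock(&lock);   /* take the read lock ...                           */
    H5TS__rec_rwlock_rdlock(&lock);   /* ... and again, recursively, from the same thread */
    H5TS__rec_rwlock_rdunlock(&lock); /* every acquisition needs a matching read unlock   */
    H5TS__rec_rwlock_rdunlock(&lock);

    H5TS__rec_rwlock_wrlock(&lock);   /* write locks recurse the same way ...             */
    H5TS__rec_rwlock_wrunlock(&lock); /* ... but are dropped with the wr-specific call    */

    H5TS__rec_rwlock_destroy(&lock);  /* tear the lock down when done                     */

Note that the old API used a single H5TS__rw_unlock() for both modes; the renamed API splits it into rdunlock/wrunlock variants, which is why each unlock call in the hunks below changes according to the mode being released.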
@@ -113,22 +113,22 @@ typedef struct rec_rw_lock_test_udata_t { ********************************************************************** */ static H5TS_THREAD_RETURN_TYPE -tts_rw_lock_smoke_check_test_thread(void *_udata) +tts_rec_rwlock_smoke_check_test_thread(void *_udata) { - hbool_t read; - int32_t rec_lock_depth = 0; - int32_t max_rec_lock_depth; - int32_t rd_locks_remaining; - int32_t wr_locks_remaining; - herr_t result; - H5TS_rw_lock_t *rw_lock; - rec_rw_lock_test_udata_t *udata = (rec_rw_lock_test_udata_t *)_udata; + hbool_t read; + int32_t rec_lock_depth = 0; + int32_t max_rec_lock_depth; + int32_t rd_locks_remaining; + int32_t wr_locks_remaining; + herr_t result; + H5TS_rec_rwlock_t *lock; + rec_rwlock_test_udata_t *udata = (rec_rwlock_test_udata_t *)_udata; assert(_udata); rd_locks_remaining = udata->target_rd_lock_cycles; wr_locks_remaining = udata->target_wr_lock_cycles; max_rec_lock_depth = udata->max_recursive_lock_depth; - rw_lock = udata->rw_lock; + lock = udata->lock; while (rd_locks_remaining > 0 || wr_locks_remaining > 0) { if (wr_locks_remaining == 0) @@ -143,8 +143,8 @@ tts_rw_lock_smoke_check_test_thread(void *_udata) } if (read) { - result = H5TS__rw_rdlock(rw_lock); - CHECK_I(result, "H5TS__rw_rdlock"); + result = H5TS__rec_rwlock_rdlock(lock); + CHECK_I(result, "H5TS__rec_rwlock_rdlock"); udata->read_locks_granted++; udata->real_read_locks_granted++; @@ -153,15 +153,15 @@ tts_rw_lock_smoke_check_test_thread(void *_udata) while (rec_lock_depth > 0) { if (rec_lock_depth >= max_rec_lock_depth || (rand() % 2) == 0) { - result = H5TS__rw_unlock(rw_lock); - CHECK_I(result, "H5TS__rw_unlock"); + result = H5TS__rec_rwlock_rdunlock(lock); + CHECK_I(result, "H5TS__rec_rwlock_rdunlock"); rec_lock_depth--; udata->read_locks_released++; } else { - result = H5TS__rw_rdlock(rw_lock); - CHECK_I(result, "H5TS__rw_rdlock"); + result = H5TS__rec_rwlock_rdlock(lock); + CHECK_I(result, "H5TS__rec_rwlock_rdlock"); rec_lock_depth++; udata->read_locks_granted++; @@ -171,8 +171,8 @@ tts_rw_lock_smoke_check_test_thread(void *_udata) udata->real_read_locks_released++; } else { - result = H5TS__rw_wrlock(rw_lock); - CHECK_I(result, "H5TS__rw_wrlock"); + result = H5TS__rec_rwlock_wrlock(lock); + CHECK_I(result, "H5TS__rec_rwlock_wrlock"); udata->write_locks_granted++; udata->real_write_locks_granted++; @@ -181,15 +181,15 @@ tts_rw_lock_smoke_check_test_thread(void *_udata) while (rec_lock_depth > 0) { if (rec_lock_depth >= max_rec_lock_depth || (rand() % 2) == 0) { - result = H5TS__rw_unlock(rw_lock); - CHECK_I(result, "H5TS__rw_unlock"); + result = H5TS__rec_rwlock_wrunlock(lock); + CHECK_I(result, "H5TS__rec_rwlock_wrunlock"); rec_lock_depth--; udata->write_locks_released++; } else { - result = H5TS__rw_wrlock(rw_lock); - CHECK_I(result, "H5TS__rw_wrlock"); + result = H5TS__rec_rwlock_wrlock(lock); + CHECK_I(result, "H5TS__rec_rwlock_wrlock"); rec_lock_depth++; udata->write_locks_granted++; @@ -201,11 +201,11 @@ tts_rw_lock_smoke_check_test_thread(void *_udata) } return (H5TS_thread_ret_t)0; -} /* end tts_rw_lock_smoke_check_test_thread() */ +} /* end tts_rec_rwlock_smoke_check_test_thread() */ /* ********************************************************************** - * tts_rec_rw_lock_smoke_check_1 + * tts_rec_rwlock_smoke_check_1 * * Single thread test to verify basic functionality and error * rejection of the recursive R/W lock. 
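The "error rejection" half of this smoke check reduces to one rule: a thread that already holds the lock in one mode is refused the other mode. The following is a compressed, illustrative rendering of steps 18-23 from the hunks that follow, reusing the test's VERIFY macro and assuming an initialized H5TS_rec_rwlock_t lock and an herr_t result are in scope, as in the sketch above:

    result = H5TS__rec_rwlock_wrlock(&lock);   /* hold the write lock            */
    result = H5TS__rec_rwlock_rdlock(&lock);   /* read attempt must be rejected  */
    VERIFY(result, FAIL, "H5TS__rec_rwlock_rdlock");
    result = H5TS__rec_rwlock_wrunlock(&lock); /* release the write lock         */

    result = H5TS__rec_rwlock_rdlock(&lock);   /* now hold the read lock         */
    result = H5TS__rec_rwlock_wrlock(&lock);   /* write attempt must be rejected */
    VERIFY(result, FAIL, "H5TS__rec_rwlock_wrlock");
    result = H5TS__rec_rwlock_rdunlock(&lock); /* release the read lock          */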
@@ -263,33 +263,33 @@ tts_rw_lock_smoke_check_test_thread(void *_udata) ********************************************************************** */ void -tts_rec_rw_lock_smoke_check_1(void) +tts_rec_rwlock_smoke_check_1(void) { herr_t result; -#if H5TS_ENABLE_REC_RW_LOCK_STATS - H5TS_rw_lock_stats_t stats; +#if H5TS_ENABLE_REC_RWLOCK_STATS + H5TS_rec_rwlock_stats_t stats; #endif - H5TS_rw_lock_t rec_rw_lock; + H5TS_rec_rwlock_t lock; /* 1) Initialize an instance of the recursive R/W lock. */ - result = H5TS__rw_lock_init(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_init"); + result = H5TS__rec_rwlock_init(&lock); + CHECK_I(result, "H5TS__rec_rwlock_init"); /* 2) Obtain a read lock. */ - result = H5TS__rw_rdlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_rdlock"); + result = H5TS__rec_rwlock_rdlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_rdlock"); /* 3) Drop the read lock. */ - result = H5TS__rw_unlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_unlock"); + result = H5TS__rec_rwlock_rdunlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_rdunlock"); -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* 4) Verify the expected stats, and then reset them. */ - result = H5TS__rw_lock_get_stats(&rec_rw_lock, &stats); - CHECK_I(result, "H5TS__rw_lock_get_stats"); + result = H5TS__rec_rwlock_get_stats(&lock, &stats); + CHECK_I(result, "H5TS__rec_rwlock_get_stats"); - result = H5TS__rw_lock_reset_stats(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_reset_stats"); + result = H5TS__rec_rwlock_reset_stats(&lock); + CHECK_I(result, "H5TS__rec_rwlock_reset_stats"); /* clang-format makes this conditional unreadable, so turn it off. */ /* clang-format off */ @@ -310,34 +310,34 @@ tts_rec_rw_lock_smoke_check_1(void) stats.max_write_locks_pending != 0 ) { TestErrPrintf("Unexpected recursive R/W lock stats -- 1"); - H5TS__rw_lock_print_stats("Actual stats", &stats); + H5TS__rec_rwlock_print_stats("Actual stats", &stats); } /* clang-format on */ #endif /* 5) Obtain a read lock. */ - result = H5TS__rw_rdlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_rdlock"); + result = H5TS__rec_rwlock_rdlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_rdlock"); /* 6) Obtain the read lock a second time. */ - result = H5TS__rw_rdlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_rdlock"); + result = H5TS__rec_rwlock_rdlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_rdlock"); /* 7) Drop the read lock. */ - result = H5TS__rw_unlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_unlock"); + result = H5TS__rec_rwlock_rdunlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_rdunlock"); /* 8) Drop the read lock a second time. */ - result = H5TS__rw_unlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_unlock"); + result = H5TS__rec_rwlock_rdunlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_rdunlock"); -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* 9) Verify the expected stats, and then reset them. */ - result = H5TS__rw_lock_get_stats(&rec_rw_lock, &stats); - CHECK_I(result, "H5TS__rw_lock_get_stats"); + result = H5TS__rec_rwlock_get_stats(&lock, &stats); + CHECK_I(result, "H5TS__rec_rwlock_get_stats"); - result = H5TS__rw_lock_reset_stats(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_reset_stats"); + result = H5TS__rec_rwlock_reset_stats(&lock); + CHECK_I(result, "H5TS__rec_rwlock_reset_stats"); /* clang-format makes this conditional unreadable, so turn it off. 
*/ /* clang-format off */ @@ -358,26 +358,26 @@ tts_rec_rw_lock_smoke_check_1(void) stats.max_write_locks_pending != 0 ) { TestErrPrintf("Unexpected recursive R/W lock stats -- 2"); - H5TS__rw_lock_print_stats("Actual stats", &stats); + H5TS__rec_rwlock_print_stats("Actual stats", &stats); } /* clang-format on */ #endif /* 10) Obtain a write lock. */ - result = H5TS__rw_wrlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_wrlock"); + result = H5TS__rec_rwlock_wrlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_wrlock"); /* 11) Drop the write lock. */ - result = H5TS__rw_unlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_unlock"); + result = H5TS__rec_rwlock_wrunlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_wrunlock"); -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* 12) Verify the expected stats, and then reset them. */ - result = H5TS__rw_lock_get_stats(&rec_rw_lock, &stats); - CHECK_I(result, "H5TS__rw_lock_get_stats"); + result = H5TS__rec_rwlock_get_stats(&lock, &stats); + CHECK_I(result, "H5TS__rec_rwlock_get_stats"); - result = H5TS__rw_lock_reset_stats(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_reset_stats"); + result = H5TS__rec_rwlock_reset_stats(&lock); + CHECK_I(result, "H5TS__rec_rwlock_reset_stats"); /* clang-format makes this conditional unreadable, so turn it off. */ /* clang-format off */ @@ -398,34 +398,34 @@ tts_rec_rw_lock_smoke_check_1(void) stats.max_write_locks_pending != 0 ) { TestErrPrintf("Unexpected recursive R/W lock stats -- 3"); - H5TS__rw_lock_print_stats("Actual stats", &stats); + H5TS__rec_rwlock_print_stats("Actual stats", &stats); } /* clang-format on */ #endif /* 13) Obtain a write lock. */ - result = H5TS__rw_wrlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_wrlock"); + result = H5TS__rec_rwlock_wrlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_wrlock"); /* 14) Obtain the write lock a second time. */ - result = H5TS__rw_wrlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_wrlock"); + result = H5TS__rec_rwlock_wrlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_wrlock"); /* 15) Drop the write lock. */ - result = H5TS__rw_unlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_unlock"); + result = H5TS__rec_rwlock_wrunlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_wrunlock"); /* 16) Drop the write lock a second time. */ - result = H5TS__rw_unlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_unlock"); + result = H5TS__rec_rwlock_wrunlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_wrunlock"); -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* 17) Verify the expected stats, and then reset them. */ - result = H5TS__rw_lock_get_stats(&rec_rw_lock, &stats); - CHECK_I(result, "H5TS__rw_lock_get_stats"); + result = H5TS__rec_rwlock_get_stats(&lock, &stats); + CHECK_I(result, "H5TS__rec_rwlock_get_stats"); - result = H5TS__rw_lock_reset_stats(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_reset_stats"); + result = H5TS__rec_rwlock_reset_stats(&lock); + CHECK_I(result, "H5TS__rec_rwlock_reset_stats"); /* clang-format makes this conditional unreadable, so turn it off. */ /* clang-format off */ @@ -446,42 +446,42 @@ tts_rec_rw_lock_smoke_check_1(void) stats.max_write_locks_pending != 0 ) { TestErrPrintf("Unexpected recursive R/W lock stats -- 4"); - H5TS__rw_lock_print_stats("Actual stats", &stats); + H5TS__rec_rwlock_print_stats("Actual stats", &stats); } /* clang-format on */ #endif /* 18) Obtain a write lock. 
*/ - result = H5TS__rw_wrlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_wrlock"); + result = H5TS__rec_rwlock_wrlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_wrlock"); /* 19) Attempt to obtain a read lock -- should fail. */ - result = H5TS__rw_rdlock(&rec_rw_lock); - VERIFY(result, FAIL, "H5TS__rw_rdlock"); + result = H5TS__rec_rwlock_rdlock(&lock); + VERIFY(result, FAIL, "H5TS__rec_rwlock_rdlock"); /* 20) Drop the write lock. */ - result = H5TS__rw_unlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_unlock"); + result = H5TS__rec_rwlock_wrunlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_wrunlock"); /* 21) Obtain a read lock. */ - result = H5TS__rw_rdlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_rdlock"); + result = H5TS__rec_rwlock_rdlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_rdlock"); /* 22) Attempt to obtain a write lock -- should fail. */ - result = H5TS__rw_wrlock(&rec_rw_lock); - VERIFY(result, FAIL, "H5TS__rw_wrlock"); + result = H5TS__rec_rwlock_wrlock(&lock); + VERIFY(result, FAIL, "H5TS__rec_rwlock_wrlock"); /* 23) Drop the read lock. */ - result = H5TS__rw_unlock(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_unlock"); + result = H5TS__rec_rwlock_rdunlock(&lock); + CHECK_I(result, "H5TS__rec_rwlock_rdunlock"); -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* 24) Verify the expected stats, and then reset them. */ - result = H5TS__rw_lock_get_stats(&rec_rw_lock, &stats); - CHECK_I(result, "H5TS__rw_lock_get_stats"); + result = H5TS__rec_rwlock_get_stats(&lock, &stats); + CHECK_I(result, "H5TS__rec_rwlock_get_stats"); - result = H5TS__rw_lock_reset_stats(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_reset_stats"); + result = H5TS__rec_rwlock_reset_stats(&lock); + CHECK_I(result, "H5TS__rec_rwlock_reset_stats"); /* clang-format makes this conditional unreadable, so turn it off. */ /* clang-format off */ @@ -502,19 +502,19 @@ tts_rec_rw_lock_smoke_check_1(void) stats.max_write_locks_pending != 0 ) { TestErrPrintf("Unexpected recursive R/W lock stats"); - H5TS__rw_lock_print_stats("Actual stats", &stats); + H5TS__rec_rwlock_print_stats("Actual stats", &stats); } /* clang-format on */ #endif /* 25) Shut down the recursive R/W lock. */ - result = H5TS__rw_lock_destroy(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_destroy"); -} /* end tts_rec_rw_lock_smoke_check_1() */ + result = H5TS__rec_rwlock_destroy(&lock); + CHECK_I(result, "H5TS__rec_rwlock_destroy"); +} /* end tts_rec_rwlock_smoke_check_1() */ /* ********************************************************************** - * tts_rec_rw_lock_smoke_check_2 -- mob of readers + * tts_rec_rwlock_smoke_check_2 -- mob of readers * * Multi-threaded test to check management of multiple readers ONLY by * the recursive R/W lock. Test proceeds as follows: @@ -525,7 +525,7 @@ tts_rec_rw_lock_smoke_check_1(void) * * 3) Create the reader threads, each with its own user data. * Activities of the reader threads is discussed in the header - * comment to tts_rw_lock_smoke_check_test_thread(). + * comment to tts_rec_rwlock_smoke_check_test_thread(). * * 4) Wait for all threads to complete. 
* @@ -546,25 +546,25 @@ tts_rec_rw_lock_smoke_check_1(void) ********************************************************************** */ void -tts_rec_rw_lock_smoke_check_2(void) +tts_rec_rwlock_smoke_check_2(void) { - herr_t result; - int express_test; - int i; - int num_threads = MAX_NUM_THREADS; - int lock_cycles = MAX_LOCK_CYCLES; - H5TS_thread_t threads[MAX_NUM_THREADS]; - rec_rw_lock_test_udata_t *udata = NULL; -#if H5TS_ENABLE_REC_RW_LOCK_STATS - hbool_t verbose = FALSE; - int32_t total_target_rd_lock_cycles = 0; - int32_t total_target_wr_lock_cycles = 0; - H5TS_rw_lock_stats_t stats; - H5TS_rw_lock_stats_t expected; + herr_t result; + int express_test; + int i; + int num_threads = MAX_NUM_THREADS; + int lock_cycles = MAX_LOCK_CYCLES; + H5TS_thread_t threads[MAX_NUM_THREADS]; + rec_rwlock_test_udata_t *udata = NULL; +#if H5TS_ENABLE_REC_RWLOCK_STATS + hbool_t verbose = FALSE; + int32_t total_target_rd_lock_cycles = 0; + int32_t total_target_wr_lock_cycles = 0; + H5TS_rec_rwlock_stats_t stats; + H5TS_rec_rwlock_stats_t expected; #endif - H5TS_rw_lock_t rec_rw_lock; + H5TS_rec_rwlock_t lock; -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* Reset expected stats fields to zero -- we will construct the expected * stats from the thread udata after completion. */ @@ -596,30 +596,30 @@ tts_rec_rw_lock_smoke_check_2(void) } /* 1) Initialize an instance of the recursive R/W lock. */ - result = H5TS__rw_lock_init(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_init"); + result = H5TS__rec_rwlock_init(&lock); + CHECK_I(result, "H5TS__rec_rwlock_init"); /* 2) Setup the user data to be passed to each reader test thread. */ for (i = 0; i < MAX_NUM_THREADS; i++) { memset(&udata[i], 0, sizeof(udata[i])); - udata[i].rw_lock = &rec_rw_lock; + udata[i].lock = &lock; udata[i].target_rd_lock_cycles = lock_cycles; udata[i].max_recursive_lock_depth = 10; } -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS uint64_t start_time = H5_now_usec(); #endif /* 3) Create the reader threads, each with its own user data. */ for (i = 0; i < num_threads; i++) - if (H5TS_thread_create(&threads[i], tts_rw_lock_smoke_check_test_thread, &udata[i]) < 0) + if (H5TS_thread_create(&threads[i], tts_rec_rwlock_smoke_check_test_thread, &udata[i]) < 0) TestErrPrintf("thread # %d did not start", i); /* 4) Wait for all threads to complete. */ for (i = 0; i < num_threads; i++) if (H5TS_thread_join(threads[i], NULL) < 0) TestErrPrintf("thread %d failed to join", i); -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS uint64_t end_time = H5_now_usec(); uint64_t elap_time = (unsigned long long)(end_time - start_time); if (verbose) @@ -643,7 +643,7 @@ tts_rec_rw_lock_smoke_check_2(void) assert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_granted); assert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_released); -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS total_target_rd_lock_cycles += udata[i].target_rd_lock_cycles; total_target_wr_lock_cycles += udata[i].target_wr_lock_cycles; @@ -658,7 +658,7 @@ tts_rec_rw_lock_smoke_check_2(void) #endif } -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* Verify that the threads executed the expected number of read and write * lock cycles. If they didn't, some thread probably encountered an error * and exited early. @@ -677,8 +677,8 @@ tts_rec_rw_lock_smoke_check_2(void) * with the data gathered above. 
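When the library is built with H5TS_ENABLE_REC_RWLOCK_STATS, step 6 follows the pattern sketched here. The type and routine names are the renamed ones from this patch; the comparison against totals accumulated from udata[] is elided, and the fragment assumes it sits inside the test function so that CHECK_I, result, and the initialized lock are in scope:

    #if H5TS_ENABLE_REC_RWLOCK_STATS
        H5TS_rec_rwlock_stats_t stats;

        result = H5TS__rec_rwlock_get_stats(&lock, &stats); /* snapshot the lock's counters         */
        CHECK_I(result, "H5TS__rec_rwlock_get_stats");

        /* (the test compares each stats field against the expected totals and only
         *  prints on a mismatch; that bookkeeping is omitted here) */
        H5TS__rec_rwlock_print_stats("Actual stats", &stats);

        result = H5TS__rec_rwlock_reset_stats(&lock);       /* zero the counters for the next phase */
        CHECK_I(result, "H5TS__rec_rwlock_reset_stats");
    #endif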
*/ - result = H5TS__rw_lock_get_stats(&rec_rw_lock, &stats); - CHECK_I(result, "H5TS__rw_lock_get_stats"); + result = H5TS__rec_rwlock_get_stats(&lock, &stats); + CHECK_I(result, "H5TS__rec_rwlock_get_stats"); /* turn off clang-format for readability */ /* clang-format off */ @@ -700,27 +700,27 @@ tts_rec_rw_lock_smoke_check_2(void) stats.write_locks_delayed != expected.write_locks_delayed || stats.max_write_locks_pending != expected.max_write_locks_pending) { TestErrPrintf("Unexpected recursive R/W lock stats"); - H5TS__rw_lock_print_stats("Actual stats", &stats); - H5TS__rw_lock_print_stats("Expected stats", &expected); + H5TS__rec_rwlock_print_stats("Actual stats", &stats); + H5TS__rec_rwlock_print_stats("Expected stats", &expected); } /* clang-format on */ if (verbose) - H5TS__rw_lock_print_stats("mob of readers stats", &stats); + H5TS__rec_rwlock_print_stats("mob of readers stats", &stats); #endif /* 7) Shut down the recursive R/W lock. */ - result = H5TS__rw_lock_destroy(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_destroy"); + result = H5TS__rec_rwlock_destroy(&lock); + CHECK_I(result, "H5TS__rec_rwlock_destroy"); /* discard the udata if it exists */ if (udata) free(udata); -} /* end tts_rec_rw_lock_smoke_check_2() */ +} /* end tts_rec_rwlock_smoke_check_2() */ /* ********************************************************************** - * tts_rec_rw_lock_smoke_check_3 -- mob of writers + * tts_rec_rwlock_smoke_check_3 -- mob of writers * * Multi-thread test to check management of multiple writers ONLY by * the recursive R/W lock. Test proceeds as follows: @@ -731,7 +731,7 @@ tts_rec_rw_lock_smoke_check_2(void) * * 3) Create the writer threads, each with its own user data. * Activities of the writer threads is discussed in the header - * comment to tts_rw_lock_smoke_check_test_thread(). + * comment to tts_rec_rwlock_smoke_check_test_thread(). * * 4) Wait for all threads to complete. * @@ -752,25 +752,25 @@ tts_rec_rw_lock_smoke_check_2(void) ********************************************************************** */ void -tts_rec_rw_lock_smoke_check_3(void) +tts_rec_rwlock_smoke_check_3(void) { - herr_t result; - int i; - int express_test; - int num_threads = MAX_NUM_THREADS; - int lock_cycles = MAX_LOCK_CYCLES; - H5TS_thread_t threads[MAX_NUM_THREADS]; - rec_rw_lock_test_udata_t *udata = NULL; -#if H5TS_ENABLE_REC_RW_LOCK_STATS - hbool_t verbose = FALSE; - int32_t total_target_rd_lock_cycles = 0; - int32_t total_target_wr_lock_cycles = 0; - H5TS_rw_lock_stats_t stats; - H5TS_rw_lock_stats_t expected; + herr_t result; + int i; + int express_test; + int num_threads = MAX_NUM_THREADS; + int lock_cycles = MAX_LOCK_CYCLES; + H5TS_thread_t threads[MAX_NUM_THREADS]; + rec_rwlock_test_udata_t *udata = NULL; +#if H5TS_ENABLE_REC_RWLOCK_STATS + hbool_t verbose = FALSE; + int32_t total_target_rd_lock_cycles = 0; + int32_t total_target_wr_lock_cycles = 0; + H5TS_rec_rwlock_stats_t stats; + H5TS_rec_rwlock_stats_t expected; #endif - H5TS_rw_lock_t rec_rw_lock; + H5TS_rec_rwlock_t lock; -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* Reset expected stats fields to zero -- we will construct the expected * stats from the thread udata after completion. */ @@ -802,30 +802,30 @@ tts_rec_rw_lock_smoke_check_3(void) } /* 1) Initialize an instance of the recursive R/W lock. 
*/ - result = H5TS__rw_lock_init(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_init"); + result = H5TS__rec_rwlock_init(&lock); + CHECK_I(result, "H5TS__rec_rwlock_init"); /* 2) Setup the user data to be passed to each writer test thread. */ for (i = 0; i < MAX_NUM_THREADS; i++) { memset(&udata[i], 0, sizeof(udata[i])); - udata[i].rw_lock = &rec_rw_lock; + udata[i].lock = &lock; udata[i].target_wr_lock_cycles = lock_cycles; udata[i].max_recursive_lock_depth = 10; } -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS uint64_t start_time = H5_now_usec(); #endif /* 3) Create the writer threads, each with its own user data. */ for (i = 0; i < num_threads; i++) - if (H5TS_thread_create(&threads[i], tts_rw_lock_smoke_check_test_thread, &udata[i]) < 0) + if (H5TS_thread_create(&threads[i], tts_rec_rwlock_smoke_check_test_thread, &udata[i]) < 0) TestErrPrintf("thread # %d did not start", i); /* 4) Wait for all threads to complete. */ for (i = 0; i < num_threads; i++) if (H5TS_thread_join(threads[i], NULL) < 0) TestErrPrintf("thread %d failed to join", i); -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS uint64_t end_time = H5_now_usec(); uint64_t elap_time = (unsigned long long)(end_time - start_time); if (verbose) @@ -849,7 +849,7 @@ tts_rec_rw_lock_smoke_check_3(void) assert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_granted); assert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_released); -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS total_target_rd_lock_cycles += udata[i].target_rd_lock_cycles; total_target_wr_lock_cycles += udata[i].target_wr_lock_cycles; @@ -864,7 +864,7 @@ tts_rec_rw_lock_smoke_check_3(void) #endif } -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* Verify that the threads executed the expected number of read and write * lock cycles. If they didn't, some thread probably encountered an error * and exited early. @@ -883,8 +883,8 @@ tts_rec_rw_lock_smoke_check_3(void) /* 6) Obtain the stats from the recursive R/W lock, and compare * with the data gathered above. */ - result = H5TS__rw_lock_get_stats(&rec_rw_lock, &stats); - CHECK_I(result, "H5TS__rw_lock_get_stats"); + result = H5TS__rec_rwlock_get_stats(&lock, &stats); + CHECK_I(result, "H5TS__rec_rwlock_get_stats"); /* turn off clang-format for readability */ /* clang-format off */ @@ -905,27 +905,27 @@ tts_rec_rw_lock_smoke_check_3(void) stats.write_locks_delayed < expected.write_locks_delayed || stats.max_write_locks_pending > expected.max_write_locks_pending) { TestErrPrintf("Unexpected recursive R/W lock stats"); - H5TS__rw_lock_print_stats("Actual stats", &stats); - H5TS__rw_lock_print_stats("Expected stats", &expected); + H5TS__rec_rwlock_print_stats("Actual stats", &stats); + H5TS__rec_rwlock_print_stats("Expected stats", &expected); } /* clang-format on */ if (verbose) - H5TS__rw_lock_print_stats("Actual stats", &stats); + H5TS__rec_rwlock_print_stats("Actual stats", &stats); #endif /* 7) Shut down the recursive R/W lock. 
*/ - result = H5TS__rw_lock_destroy(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_destroy"); + result = H5TS__rec_rwlock_destroy(&lock); + CHECK_I(result, "H5TS__rec_rwlock_destroy"); /* discard the udata if it exists */ if (udata) free(udata); -} /* end tts_rec_rw_lock_smoke_check_3() */ +} /* end tts_rec_rwlock_smoke_check_3() */ /* ********************************************************************** - * tts_rec_rw_lock_smoke_check_4 -- mixed mob + * tts_rec_rwlock_smoke_check_4 -- mixed mob * * Multi-thread test to check management of multiple readers and * writers by the recursive R/W lock. Test proceeds as follows: @@ -936,7 +936,7 @@ tts_rec_rw_lock_smoke_check_3(void) * * 3) Create the reader / writer threads, each with its own user data. * Activities of the reader / writer threads is discussed in the - * header comment to tts_rw_lock_smoke_check_test_thread(). + * header comment to tts_rec_rwlock_smoke_check_test_thread(). * * 4) Wait for all threads to complete. * @@ -958,25 +958,25 @@ tts_rec_rw_lock_smoke_check_3(void) ********************************************************************** */ void -tts_rec_rw_lock_smoke_check_4(void) +tts_rec_rwlock_smoke_check_4(void) { - herr_t result; - int i; - int express_test; - int num_threads = MAX_NUM_THREADS; - int lock_cycles = MAX_LOCK_CYCLES; - H5TS_thread_t threads[MAX_NUM_THREADS]; - rec_rw_lock_test_udata_t *udata = NULL; -#if H5TS_ENABLE_REC_RW_LOCK_STATS - hbool_t verbose = FALSE; - int32_t total_target_rd_lock_cycles = 0; - int32_t total_target_wr_lock_cycles = 0; - H5TS_rw_lock_stats_t stats; - H5TS_rw_lock_stats_t expected; + herr_t result; + int i; + int express_test; + int num_threads = MAX_NUM_THREADS; + int lock_cycles = MAX_LOCK_CYCLES; + H5TS_thread_t threads[MAX_NUM_THREADS]; + rec_rwlock_test_udata_t *udata = NULL; +#if H5TS_ENABLE_REC_RWLOCK_STATS + hbool_t verbose = FALSE; + int32_t total_target_rd_lock_cycles = 0; + int32_t total_target_wr_lock_cycles = 0; + H5TS_rec_rwlock_stats_t stats; + H5TS_rec_rwlock_stats_t expected; #endif - H5TS_rw_lock_t rec_rw_lock; + H5TS_rec_rwlock_t lock; -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* Reset expected stats fields to zero -- we will construct the expected * stats from the thread udata after completion. */ @@ -1008,31 +1008,31 @@ tts_rec_rw_lock_smoke_check_4(void) } /* 1) Initialize an instance of the recursive R/W lock. */ - result = H5TS__rw_lock_init(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_init"); + result = H5TS__rec_rwlock_init(&lock); + CHECK_I(result, "H5TS__rec_rwlock_init"); /* 2) Setup the user data to be passed to each writer test thread. */ for (i = 0; i < MAX_NUM_THREADS; i++) { memset(&udata[i], 0, sizeof(udata[i])); - udata[i].rw_lock = &rec_rw_lock; + udata[i].lock = &lock; udata[i].target_rd_lock_cycles = lock_cycles; udata[i].target_wr_lock_cycles = lock_cycles; udata[i].max_recursive_lock_depth = 10; } -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS uint64_t start_time = H5_now_usec(); #endif /* 3) Create the reader threads, each with its own user data. */ for (i = 0; i < num_threads; i++) - if (H5TS_thread_create(&threads[i], tts_rw_lock_smoke_check_test_thread, &udata[i]) < 0) + if (H5TS_thread_create(&threads[i], tts_rec_rwlock_smoke_check_test_thread, &udata[i]) < 0) TestErrPrintf("thread # %d did not start", i); /* 4) Wait for all threads to complete. 
*/ for (i = 0; i < num_threads; i++) if (H5TS_thread_join(threads[i], NULL) < 0) TestErrPrintf("thread %d failed to join", i); -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS uint64_t end_time = H5_now_usec(); uint64_t elap_time = (unsigned long long)(end_time - start_time); if (verbose) @@ -1056,7 +1056,7 @@ tts_rec_rw_lock_smoke_check_4(void) assert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_granted); assert(udata[i].target_wr_lock_cycles == udata[i].real_write_locks_released); -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS total_target_rd_lock_cycles += udata[i].target_rd_lock_cycles; total_target_wr_lock_cycles += udata[i].target_wr_lock_cycles; @@ -1071,7 +1071,7 @@ tts_rec_rw_lock_smoke_check_4(void) #endif } -#if H5TS_ENABLE_REC_RW_LOCK_STATS +#if H5TS_ENABLE_REC_RWLOCK_STATS /* Verify that the threads executed the expected number of read and write * lock cycles. If they didn't, some thread probably encountered an error * and exited early. @@ -1092,8 +1092,8 @@ tts_rec_rw_lock_smoke_check_4(void) /* 6) Obtain the stats from the recursive R/W lock, and compare * with the data gathered above. */ - result = H5TS__rw_lock_get_stats(&rec_rw_lock, &stats); - CHECK_I(result, "H5TS__rw_lock_get_stats"); + result = H5TS__rec_rwlock_get_stats(&lock, &stats); + CHECK_I(result, "H5TS__rec_rwlock_get_stats"); /* turn off clang-format for readability */ /* clang-format off */ @@ -1115,23 +1115,23 @@ tts_rec_rw_lock_smoke_check_4(void) stats.write_locks_delayed < expected.write_locks_delayed || stats.max_write_locks_pending > expected.max_write_locks_pending) { TestErrPrintf("Unexpected recursive R/W lock stats"); - H5TS__rw_lock_print_stats("Actual stats", &stats); - H5TS__rw_lock_print_stats("Expected stats", &expected); + H5TS__rec_rwlock_print_stats("Actual stats", &stats); + H5TS__rec_rwlock_print_stats("Expected stats", &expected); } /* clang-format on */ if (verbose) - H5TS__rw_lock_print_stats("Actual stats", &stats); + H5TS__rec_rwlock_print_stats("Actual stats", &stats); #endif /* 7) Shut down the recursive R/W lock. */ - result = H5TS__rw_lock_destroy(&rec_rw_lock); - CHECK_I(result, "H5TS__rw_lock_destroy"); + result = H5TS__rec_rwlock_destroy(&lock); + CHECK_I(result, "H5TS__rec_rwlock_destroy"); /* discard the udata if it exists */ if (udata) free(udata); -} /* end tts_rec_rw_lock_smoke_check_4() */ +} /* end tts_rec_rwlock_smoke_check_4() */ #endif /* H5_HAVE_WIN_THREADS */ #endif /* H5_HAVE_THREADS */ diff --git a/test/ttsafe_rwlock.c b/test/ttsafe_rwlock.c new file mode 100644 index 00000000000..d6fcad38bd1 --- /dev/null +++ b/test/ttsafe_rwlock.c @@ -0,0 +1,317 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/******************************************************************** + * + * Test the correctness of the non-recursive R/W lock routines + * + ********************************************************************/ + +#include "ttsafe.h" + +#ifdef H5_HAVE_THREADS + +#define NUM_THREADS 16 +#define NUM_WRITERS 4 + +#define NUM_ITERS 12 +#define COUNT_MAX 512 + +typedef struct { + H5TS_rwlock_t lock; + int val; + H5TS_barrier_t barrier; +} atomic_counter_t; + +static H5TS_THREAD_RETURN_TYPE +incr_task(void *_counter) +{ + atomic_counter_t *counter = (atomic_counter_t *)_counter; + herr_t result; + H5TS_thread_ret_t ret_value = 0; + + result = H5TS_rwlock_wrlock(&counter->lock); + CHECK_I(result, "H5TS_rwlock_wrlock"); + + /* Increment value */ + counter->val++; + + result = H5TS_rwlock_wrunlock(&counter->lock); + CHECK_I(result, "H5TS_rwlock_wrunlock"); + + return ret_value; +} + +static H5TS_THREAD_RETURN_TYPE +many_read(void *_counter) +{ + atomic_counter_t *counter = (atomic_counter_t *)_counter; + herr_t result; + H5TS_thread_ret_t ret_value = 0; + + result = H5TS_rwlock_rdlock(&counter->lock); + CHECK_I(result, "H5TS_rwlock_rdlock"); + + /* Wait at barrier, to confirm that many readers can hold lock */ + result = H5TS_barrier_wait(&counter->barrier); + CHECK_I(result, "H5TS_barrier_wait"); + + result = H5TS_rwlock_rdunlock(&counter->lock); + CHECK_I(result, "H5TS_rdlock_rdunlock"); + + return ret_value; +} + +static H5TS_THREAD_RETURN_TYPE +count_up_and_down(void *_counter) +{ + atomic_counter_t *counter = (atomic_counter_t *)_counter; + herr_t result; + H5TS_thread_ret_t ret_value = 0; + + /* Count up & down a number of times */ + for (unsigned u = 0; u < NUM_ITERS; u++) { + /* Wait at barrier, to ensure all threads are ready to count */ + result = H5TS_barrier_wait(&counter->barrier); + CHECK_I(result, "H5TS_barrier_wait"); + + /* Count up */ + for (unsigned v = 0; v < COUNT_MAX; v++) { + result = H5TS_rwlock_wrlock(&counter->lock); + CHECK_I(result, "H5TS_rwlock_wrlock"); + + /* Increment value */ + counter->val++; + + result = H5TS_rwlock_wrunlock(&counter->lock); + CHECK_I(result, "H5TS_rwlock_wrunlock"); + } + + /* Wait at barrier, to ensure all threads have finishend counting up */ + result = H5TS_barrier_wait(&counter->barrier); + CHECK_I(result, "H5TS_barrier_wait"); + + /* Count down */ + for (unsigned v = 0; v < COUNT_MAX; v++) { + result = H5TS_rwlock_wrlock(&counter->lock); + CHECK_I(result, "H5TS_rwlock_wrlock"); + + /* Decrement value */ + counter->val--; + + result = H5TS_rwlock_wrunlock(&counter->lock); + CHECK_I(result, "H5TS_rwlock_wrunlock"); + } + } + + return ret_value; +} + +static H5TS_THREAD_RETURN_TYPE +verify_counting(void *_counter) +{ + atomic_counter_t *counter = (atomic_counter_t *)_counter; + herr_t result; + int last_val = 0; + H5TS_thread_ret_t ret_value = 0; + + /* Count up & down a number of times */ + for (unsigned u = 0; u < NUM_ITERS; u++) { + /* Wait at barrier, to ensure all threads are ready to count */ + result = H5TS_barrier_wait(&counter->barrier); + CHECK_I(result, "H5TS_barrier_wait"); + + /* Verify that counter goes only up */ + do { + result = H5TS_rwlock_rdlock(&counter->lock); + CHECK_I(result, "H5TS_rwlock_rdlock"); + + /* Check counter value */ + if (counter->val < last_val) + ERROR("incorrect counter value"); + + /* Save value */ + last_val = counter->val; + + result = H5TS_rwlock_rdunlock(&counter->lock); + CHECK_I(result, "H5TS_rdlock_wrunlock"); + 
+ /* Give the writers a chance to make progress */ + H5TS_thread_yield(); + } while (last_val < (NUM_WRITERS * COUNT_MAX)); + + /* Wait at barrier, to ensure all threads have finishend counting up */ + result = H5TS_barrier_wait(&counter->barrier); + CHECK_I(result, "H5TS_barrier_wait"); + + /* Verify that counter goes only down */ + do { + result = H5TS_rwlock_rdlock(&counter->lock); + CHECK_I(result, "H5TS_rwlock_rdlock"); + + /* Check counter value */ + if (counter->val > last_val) + ERROR("incorrect counter value"); + + /* Save value */ + last_val = counter->val; + + result = H5TS_rwlock_rdunlock(&counter->lock); + CHECK_I(result, "H5TS_rdlock_wrunlock"); + + /* Give the writers a chance to make progress */ + H5TS_thread_yield(); + } while (last_val > 0); + } + + return ret_value; +} + +/* + ********************************************************************** + * tts_rwlock + ********************************************************************** + */ +void +tts_rwlock(void) +{ + H5TS_thread_t threads[NUM_THREADS]; + H5TS_pool_t *pool = NULL; + H5TS_rwlock_t lock; + atomic_counter_t counter; + herr_t result; + + /* Sanity checks on bad input */ + result = H5TS_rwlock_init(NULL); + VERIFY(result, FAIL, "H5TS_rwlock_init"); + result = H5TS_rwlock_rdlock(NULL); + VERIFY(result, FAIL, "H5TS_rwlock_rdlock"); + result = H5TS_rwlock_rdunlock(NULL); + VERIFY(result, FAIL, "H5TS_rwlock_rdunlock"); + result = H5TS_rwlock_wrlock(NULL); + VERIFY(result, FAIL, "H5TS_rwlock_wrlock"); + result = H5TS_rwlock_wrunlock(NULL); + VERIFY(result, FAIL, "H5TS_rwlock_wrunlock"); + result = H5TS_rwlock_destroy(NULL); + VERIFY(result, FAIL, "H5TS_rwlock_destroy"); + + /* Create & destroy lock */ + result = H5TS_rwlock_init(&lock); + CHECK_I(result, "H5TS_rwlock_init"); + + result = H5TS_rwlock_destroy(&lock); + CHECK_I(result, "H5TS_rwlock_destroy"); + + /* Read lock & unlock */ + result = H5TS_rwlock_init(&lock); + CHECK_I(result, "H5TS_rwlock_init"); + + result = H5TS_rwlock_rdlock(&lock); + CHECK_I(result, "H5TS_rwlock_rdlock"); + + result = H5TS_rwlock_rdunlock(&lock); + CHECK_I(result, "H5TS_rwlock_rdunlock"); + + result = H5TS_rwlock_destroy(&lock); + CHECK_I(result, "H5TS_rwlock_destroy"); + + /* Write lock & unlock */ + result = H5TS_rwlock_init(&lock); + CHECK_I(result, "H5TS_rwlock_init"); + + result = H5TS_rwlock_wrlock(&lock); + CHECK_I(result, "H5TS_rwlock_wrlock"); + + result = H5TS_rwlock_wrunlock(&lock); + CHECK_I(result, "H5TS_rwlock_wrunlock"); + + result = H5TS_rwlock_destroy(&lock); + CHECK_I(result, "H5TS_rwlock_destroy"); + + /* Hold read lock w/many threads */ + result = H5TS_rwlock_init(&counter.lock); + CHECK_I(result, "H5TS_rwlock_init"); + + result = H5TS_barrier_init(&counter.barrier, NUM_THREADS); + CHECK_I(result, "H5TS_barrier_init"); + + for (unsigned u = 0; u < NUM_THREADS; u++) { + result = H5TS_thread_create(&threads[u], many_read, &counter); + CHECK_I(result, "H5TS_thread_create"); + } + + for (unsigned u = 0; u < NUM_THREADS; u++) { + result = H5TS_thread_join(threads[u], NULL); + CHECK_I(result, "H5TS_thread_join"); + } + + result = H5TS_barrier_destroy(&counter.barrier); + CHECK_I(result, "H5TS_barrier_destroy"); + + result = H5TS_rwlock_destroy(&counter.lock); + CHECK_I(result, "H5TS_rwlock_destroy"); + + /* Increment counter w/many threads */ + result = H5TS_rwlock_init(&counter.lock); + CHECK_I(result, "H5TS_rwlock_init"); + + result = H5TS_pool_create(&pool, NUM_THREADS); + CHECK_I(result, "H5TS_pool_create"); + + counter.val = 0; + for (unsigned u = 0; u < 
NUM_THREADS; u++) { + result = H5TS_pool_add_task(pool, incr_task, &counter); + CHECK_I(result, "H5TS_pool_add_task"); + } + + result = H5TS_pool_destroy(pool); + CHECK_I(result, "H5TS_pool_destroy"); + + VERIFY(counter.val, NUM_THREADS, "many incr"); + + result = H5TS_rwlock_destroy(&counter.lock); + CHECK_I(result, "H5TS_rwlock_destroy"); + + /* Increment & decrement counter w/many threads while reading */ + result = H5TS_rwlock_init(&counter.lock); + CHECK_I(result, "H5TS_rwlock_init"); + + result = H5TS_barrier_init(&counter.barrier, NUM_THREADS); + CHECK_I(result, "H5TS_barrier_init"); + + result = H5TS_pool_create(&pool, NUM_THREADS); + CHECK_I(result, "H5TS_pool_create"); + + counter.val = 0; + for (unsigned u = 0; u < NUM_WRITERS; u++) { + result = H5TS_pool_add_task(pool, count_up_and_down, &counter); + CHECK_I(result, "H5TS_pool_add_task"); + } + for (unsigned u = 0; u < (NUM_THREADS - NUM_WRITERS); u++) { + result = H5TS_pool_add_task(pool, verify_counting, &counter); + CHECK_I(result, "H5TS_pool_add_task"); + } + + result = H5TS_pool_destroy(pool); + CHECK_I(result, "H5TS_pool_destroy"); + + VERIFY(counter.val, 0, "count up & down"); + + result = H5TS_barrier_destroy(&counter.barrier); + CHECK_I(result, "H5TS_barrier_destroy"); + + result = H5TS_rwlock_destroy(&counter.lock); + CHECK_I(result, "H5TS_rwlock_destroy"); + +} /* end tts_rwlock() */ + +#endif /*H5_HAVE_THREADS*/ diff --git a/test/ttsafe_semaphore.c b/test/ttsafe_semaphore.c index 41076632a4f..563a4f5d3c1 100644 --- a/test/ttsafe_semaphore.c +++ b/test/ttsafe_semaphore.c @@ -5,7 +5,7 @@ * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * distribution tree, or in https://www.hdfgroup.org/licenses. * * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -18,16 +18,17 @@ #include "ttsafe.h" -#if defined(H5_HAVE_THREADS) && defined(H5_HAVE_STDATOMIC_H) +#if defined(H5_HAVE_THREADS) -#define NUM_PINGPONG (1000 * 1000) +#define NUM_PINGPONG (250 * 1000) #define NUM_CLIENTSERVER (50 * 1000) #define NUM_THREADS 16 typedef struct { H5TS_semaphore_t ping_sem, pong_sem; - unsigned counter; + unsigned ping_counter; + unsigned pong_counter; } pingpong_t; typedef struct { @@ -47,11 +48,11 @@ ping(void *_test_info) result = H5TS_semaphore_wait(&test_info->ping_sem); CHECK_I(result, "H5TS_semaphore_wait"); - test_info->counter++; + test_info->ping_counter++; result = H5TS_semaphore_signal(&test_info->pong_sem); CHECK_I(result, "H5TS_semaphore_signal"); - } while (test_info->counter < NUM_PINGPONG); + } while (test_info->ping_counter < NUM_PINGPONG); return ret_value; } @@ -67,11 +68,11 @@ pong(void *_test_info) result = H5TS_semaphore_wait(&test_info->pong_sem); CHECK_I(result, "H5TS_semaphore_wait"); - test_info->counter++; + test_info->pong_counter++; result = H5TS_semaphore_signal(&test_info->ping_sem); CHECK_I(result, "H5TS_semaphore_signal"); - } while (test_info->counter < NUM_PINGPONG); + } while (test_info->pong_counter < NUM_PINGPONG); return ret_value; } @@ -93,7 +94,8 @@ tts_semaphore_pingpong(void) CHECK_I(result, "H5TS_semaphore_init"); result = H5TS_semaphore_init(&test_info.pong_sem, 0); CHECK_I(result, "H5TS_semaphore_init"); - test_info.counter = 0; + test_info.ping_counter = 0; + test_info.pong_counter = 0; /* Start ping & pong threads */ result = H5TS_thread_create(&ping_thread, ping, &test_info); @@ -111,7 +113,8 @@ tts_semaphore_pingpong(void) result = H5TS_thread_join(pong_thread, NULL); CHECK_I(result, "H5TS_thread_join"); - VERIFY(test_info.counter, (NUM_PINGPONG + 1), "ping pong"); + VERIFY(test_info.ping_counter, NUM_PINGPONG, "ping counter"); + VERIFY(test_info.pong_counter, NUM_PINGPONG, "pong counter"); /* Destroy semaphores */ result = H5TS_semaphore_destroy(&test_info.ping_sem); @@ -268,4 +271,4 @@ tts_semaphore(void) tts_semaphore_clientserver(); } /* end tts_semaphore() */ -#endif /* defined(H5_HAVE_THREADS) && defined(H5_HAVE_STDATOMIC_H) */ +#endif /* defined(H5_HAVE_THREADS) */ diff --git a/test/ttsafe_thread_id.c b/test/ttsafe_thread_id.c index 49678645819..cd063e05cda 100644 --- a/test/ttsafe_thread_id.c +++ b/test/ttsafe_thread_id.c @@ -5,7 +5,7 @@ * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * distribution tree, or in https://www.hdfgroup.org/licenses. * * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ diff --git a/test/ttsafe_thread_pool.c b/test/ttsafe_thread_pool.c index 9dd28360e50..e5362111be1 100644 --- a/test/ttsafe_thread_pool.c +++ b/test/ttsafe_thread_pool.c @@ -5,7 +5,7 @@ * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * distribution tree, or in https://www.hdfgroup.org/licenses. 
* * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ diff --git a/test/vol.c b/test/vol.c index 041e0c9b88b..9fa4f06c1ca 100644 --- a/test/vol.c +++ b/test/vol.c @@ -914,11 +914,9 @@ test_basic_file_operation(const char *driver_name) TEST_ERROR; } - /* H5Fcreate */ if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) TEST_ERROR; - /* H5Fget_obj_count */ if ((obj_count = H5Fget_obj_count(fid, H5F_OBJ_FILE)) < 0) TEST_ERROR; if ((obj_count = H5Fget_obj_count(fid, H5F_OBJ_ALL)) < 0) @@ -926,7 +924,6 @@ test_basic_file_operation(const char *driver_name) if ((obj_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATASET)) < 0) TEST_ERROR; - /* H5Fget_obj_ids */ if ((obj_count = H5Fget_obj_ids(fid, H5F_OBJ_ALL, 2, obj_id_list)) < 0) TEST_ERROR; if ((obj_count = H5Fget_obj_ids((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATASET, 2, obj_id_list)) < 0) @@ -937,7 +934,6 @@ test_basic_file_operation(const char *driver_name) strcmp(driver_name, "family") != 0 && strcmp(driver_name, "direct") != 0 && strcmp(driver_name, "core") != 0 && strcmp(driver_name, "core_paged") != 0 && strcmp(driver_name, "mpio") != 0 && strcmp(driver_name, "splitter") != 0)) { - /* H5Fget_access_plist */ if ((fapl_id2 = H5Fget_access_plist(fid)) < 0) TEST_ERROR; if (H5Pequal(fapl_id, fapl_id2) != true) @@ -946,53 +942,42 @@ test_basic_file_operation(const char *driver_name) TEST_ERROR; } /* end if */ - /* H5Fget_create_plist */ if ((fcpl_id = H5Fget_create_plist(fid)) < 0) TEST_ERROR; if (H5Pclose(fcpl_id) < 0) TEST_ERROR; - /* H5Fget_filesize */ if (H5Fget_filesize(fid, &file_size) < 0) TEST_ERROR; /* Can't retrieve VFD handle for split / multi / family VFDs */ if ((bool)(strcmp(driver_name, "split") != 0 && strcmp(driver_name, "multi") != 0 && strcmp(driver_name, "family") != 0)) { - /* H5Fget_vfd_handle */ if (H5Fget_vfd_handle(fid, H5P_DEFAULT, &os_file_handle) < 0) TEST_ERROR; } /* end if */ - /* H5Fget_intent */ if (H5Fget_intent(fid, &intent) < 0) TEST_ERROR; - /* H5Fget_info2 */ if (H5Fget_info2(fid, &finfo) < 0) TEST_ERROR; - /* H5Fget_name */ if (H5Fget_name(fid, name, 32) < 0) TEST_ERROR; - /* H5Fclear_elink_file_cache */ if (H5Fclear_elink_file_cache(fid) < 0) TEST_ERROR; - /* H5Fflush */ if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) TEST_ERROR; - /* H5Fclose */ if (H5Fclose(fid) < 0) TEST_ERROR; - /* H5Fis_accessible */ if (H5Fis_accessible(filename, fapl_id) < 0) TEST_ERROR; - /* H5Fopen */ if ((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id)) < 0) TEST_ERROR; @@ -1001,7 +986,6 @@ test_basic_file_operation(const char *driver_name) strcmp(driver_name, "family") != 0 && strcmp(driver_name, "direct") != 0 && strcmp(driver_name, "core") != 0 && strcmp(driver_name, "core_paged") != 0 && strcmp(driver_name, "mpio") != 0 && strcmp(driver_name, "splitter") != 0)) { - /* H5Fget_access_plist */ if ((fapl_id2 = H5Fget_access_plist(fid)) < 0) TEST_ERROR; if (H5Pequal(fapl_id, fapl_id2) != true) @@ -1018,7 +1002,6 @@ test_basic_file_operation(const char *driver_name) strcmp(driver_name, "family") != 0 && strcmp(driver_name, "direct") != 0 && strcmp(driver_name, "core") != 0 && strcmp(driver_name, "core_paged") != 0 && strcmp(driver_name, "mpio") != 0 && strcmp(driver_name, "splitter") != 0)) { - /* H5Fget_access_plist */ if ((fapl_id2 = H5Fget_access_plist(fid_reopen)) < 0) TEST_ERROR; if (H5Pequal(fapl_id, fapl_id2) != true) @@ -1034,7 +1017,6 @@ test_basic_file_operation(const char *driver_name) 
h5_delete_test_file(FILENAME[0], fapl_id); - /* H5Pclose */ if (H5Pclose(fapl_id) < 0) TEST_ERROR; @@ -1086,27 +1068,22 @@ test_basic_group_operation(void) if ((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) TEST_ERROR; - /* H5Gcreate */ if ((gid = H5Gcreate2(fid, NATIVE_VOL_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; - /* H5Gget_create_plist */ if ((gcpl_id = H5Gget_create_plist(gid)) < 0) TEST_ERROR; if (H5Pclose(gcpl_id) < 0) TEST_ERROR; - /* H5Gget_info */ if (H5Gget_info(gid, &info) < 0) TEST_ERROR; if (H5Gget_info(fid, &info) < 0) TEST_ERROR; - /* H5Gget_info_by_name */ if (H5Gget_info_by_name(fid, NATIVE_VOL_TEST_GROUP_NAME, &info, H5P_DEFAULT) < 0) TEST_ERROR; - /* H5Gget_info_by_idx */ if (H5Gget_info_by_idx(fid, "/", H5_INDEX_NAME, H5_ITER_NATIVE, 0, &info, H5P_DEFAULT) < 0) TEST_ERROR; @@ -1117,19 +1094,15 @@ test_basic_group_operation(void) if (H5Gflush(gid) < 0) TEST_ERROR; - /* H5Gclose */ if (H5Gclose(gid) < 0) TEST_ERROR; - /* H5Gopen */ if ((gid = H5Gopen2(fid, NATIVE_VOL_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; - /* H5Gcreate_anon */ if ((gid_a = H5Gcreate_anon(fid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; - /* H5Grefresh */ if (H5Grefresh(gid) < 0) TEST_ERROR; @@ -1142,7 +1115,6 @@ test_basic_group_operation(void) h5_delete_test_file(FILENAME[0], fapl_id); - /* H5Pclose */ if (H5Pclose(fapl_id) < 0) TEST_ERROR; @@ -1213,7 +1185,6 @@ test_basic_dataset_operation(void) out_buf[i] = 0; } - /* H5Dcreate */ curr_dims = 0; if ((sid = H5Screate_simple(1, &curr_dims, &max_dims)) < 0) TEST_ERROR; @@ -1226,7 +1197,6 @@ test_basic_dataset_operation(void) H5P_DEFAULT)) < 0) TEST_ERROR; - /* H5Dcreate_anon */ if ((did_a = H5Dcreate_anon(fid, H5T_NATIVE_INT, sid, dcpl_id, H5P_DEFAULT)) < 0) TEST_ERROR; @@ -1235,7 +1205,6 @@ test_basic_dataset_operation(void) if (H5Pclose(dcpl_id) < 0) TEST_ERROR; - /* H5Dset_extent */ curr_dims = N_ELEMENTS; if (H5Dset_extent(did, &curr_dims) < 0) TEST_ERROR; @@ -1247,35 +1216,28 @@ test_basic_dataset_operation(void) if (H5Dflush(did) < 0) TEST_ERROR; - /* H5Dwrite */ if (H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, in_buf) < 0) TEST_ERROR; - /* H5Drefresh */ if (H5Drefresh(did) < 0) TEST_ERROR; - /* H5Dclose */ if (H5Dclose(did) < 0) TEST_ERROR; if (H5Dclose(did_a) < 0) TEST_ERROR; - /* H5Dopen */ if ((did = H5Dopen2(fid, NATIVE_VOL_TEST_DATASET_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; - /* H5Dget_space */ if ((sid = H5Dget_space(did)) < 0) TEST_ERROR; if (H5Sclose(sid) < 0) TEST_ERROR; - /* H5Dget_space_status */ if (H5Dget_space_status(did, &status) < 0) TEST_ERROR; - /* H5Dget_type */ if ((tid = H5Dget_type(did)) < 0) TEST_ERROR; if (H5Tclose(tid) < 0) @@ -1287,13 +1249,11 @@ test_basic_dataset_operation(void) if (H5Tclose(tid) < 0) TEST_ERROR; - /* H5Dget_create_plist */ if ((dcpl_id = H5Dget_create_plist(did)) < 0) TEST_ERROR; if (H5Pclose(dcpl_id) < 0) TEST_ERROR; - /* H5Dget_access_plist */ if ((dapl_id = H5Dget_access_plist(did)) < 0) TEST_ERROR; if (H5Pclose(dapl_id) < 0) @@ -1311,7 +1271,6 @@ test_basic_dataset_operation(void) if (HADDR_UNDEF != (offset = H5Dget_offset(did))) TEST_ERROR; - /* H5Dread */ if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, out_buf) < 0) TEST_ERROR; @@ -1326,7 +1285,6 @@ test_basic_dataset_operation(void) h5_delete_test_file(FILENAME[0], fapl_id); - /* H5Pclose */ if (H5Pclose(fapl_id) < 0) TEST_ERROR; @@ -1391,44 +1349,35 @@ test_basic_attribute_operation(void) if ((sid = H5Screate_simple(1, &dims, &dims)) < 0) TEST_ERROR; - 
/* H5Acreate */ if ((aid = H5Acreate2(fid, NATIVE_VOL_TEST_ATTRIBUTE_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; - /* H5Awrite */ if (H5Awrite(aid, H5T_NATIVE_INT, &data_in) < 0) TEST_ERROR; - /* H5Aread */ if (H5Aread(aid, H5T_NATIVE_INT, &data_out) < 0) TEST_ERROR; if (data_in != data_out) TEST_ERROR; - /* H5Aclose */ if (H5Aclose(aid) < 0) TEST_ERROR; - /* H5Aopen */ if ((aid = H5Aopen(fid, NATIVE_VOL_TEST_ATTRIBUTE_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; if (H5Aclose(aid) < 0) TEST_ERROR; - /* H5Adelete */ if (H5Adelete(fid, NATIVE_VOL_TEST_ATTRIBUTE_NAME) < 0) TEST_ERROR; - /* H5Acreate_by_name */ if ((aid_name = H5Acreate_by_name(fid, NATIVE_VOL_TEST_GROUP_NAME, NATIVE_VOL_TEST_ATTRIBUTE_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; - /* H5Aclose */ if (H5Aclose(aid_name) < 0) TEST_ERROR; - /* H5Adelete_by_name */ if (H5Adelete_by_name(fid, NATIVE_VOL_TEST_GROUP_NAME, NATIVE_VOL_TEST_ATTRIBUTE_NAME, H5P_DEFAULT) < 0) TEST_ERROR; @@ -1441,7 +1390,6 @@ test_basic_attribute_operation(void) h5_delete_test_file(FILENAME[0], fapl_id); - /* H5Pclose */ if (H5Pclose(fapl_id) < 0) TEST_ERROR; @@ -1495,23 +1443,19 @@ test_basic_object_operation(void) if ((gid = H5Gcreate2(fid, NATIVE_VOL_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; - /* H5Oget_info */ if (H5Oget_info3(fid, &object_info, H5O_INFO_ALL) < 0) TEST_ERROR; //! [H5Oget_info_by_name3_snip] - /* H5Oget_info_by_name */ if (H5Oget_info_by_name3(fid, NATIVE_VOL_TEST_GROUP_NAME, &object_info, H5O_INFO_ALL, H5P_DEFAULT) < 0) TEST_ERROR; //! [H5Oget_info_by_name3_snip] - /* H5Oexists_by_name */ if (H5Oexists_by_name(fid, NATIVE_VOL_TEST_GROUP_NAME, H5P_DEFAULT) != true) TEST_ERROR; - /* H5Oopen/close */ if ((oid = H5Oopen(fid, NATIVE_VOL_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; if (H5Oclose(oid) < 0) @@ -1524,7 +1468,6 @@ test_basic_object_operation(void) h5_delete_test_file(FILENAME[0], fapl_id); - /* H5Pclose */ if (H5Pclose(fapl_id) < 0) TEST_ERROR; @@ -1572,7 +1515,6 @@ test_basic_link_operation(void) if ((gid = H5Gcreate2(fid, NATIVE_VOL_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR; - /* H5Lcreate_hard */ if (H5Lcreate_hard(fid, "/", gid, NATIVE_VOL_TEST_HARD_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR; @@ -1580,18 +1522,15 @@ test_basic_link_operation(void) if (H5Lcreate_soft("/", fid, NATIVE_VOL_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR; - /* H5Lexists */ if (H5Lexists(gid, NATIVE_VOL_TEST_HARD_LINK_NAME, H5P_DEFAULT) < 0) TEST_ERROR; if (H5Lexists(fid, NATIVE_VOL_TEST_SOFT_LINK_NAME, H5P_DEFAULT) < 0) TEST_ERROR; - /* H5Lcopy */ if (H5Lcopy(gid, NATIVE_VOL_TEST_HARD_LINK_NAME, fid, NATIVE_VOL_TEST_COPY_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR; - /* H5Lmove */ if (H5Lmove(fid, NATIVE_VOL_TEST_COPY_LINK_NAME, gid, NATIVE_VOL_TEST_MOVE_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR; @@ -1603,7 +1542,6 @@ test_basic_link_operation(void) h5_delete_test_file(FILENAME[0], fapl_id); - /* H5Pclose */ if (H5Pclose(fapl_id) < 0) TEST_ERROR; @@ -1654,7 +1592,6 @@ test_basic_datatype_operation(void) if ((tid = H5Tcopy(H5T_NATIVE_INT)) < 0) TEST_ERROR; - /* H5Tcommit */ if (H5Tcommit2(fid, NATIVE_VOL_TEST_DATATYPE_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR; @@ -1665,23 +1602,18 @@ test_basic_datatype_operation(void) if (H5Tflush(tid) < 0) TEST_ERROR; - /* H5Trefresh */ if (H5Trefresh(tid) < 0) TEST_ERROR; - /* H5Tclose */ if (H5Tclose(tid) < 0) 
TEST_ERROR; - /* H5Topen */ if ((tid = H5Topen2(fid, NATIVE_VOL_TEST_DATATYPE_NAME, H5P_DEFAULT)) < 0) TEST_ERROR; - /* H5Tget_create_plist */ if ((tcpl_id = H5Tget_create_plist(tid)) < 0) TEST_ERROR; - /* H5Tcommit_anon */ if ((tid_anon = H5Tcopy(H5T_NATIVE_INT)) < 0) TEST_ERROR; if (H5Tcommit_anon(fid, tid_anon, H5P_DEFAULT, H5P_DEFAULT) < 0) @@ -1698,7 +1630,6 @@ test_basic_datatype_operation(void) h5_delete_test_file(FILENAME[0], fapl_id); - /* H5Pclose */ if (H5Pclose(fapl_id) < 0) TEST_ERROR; diff --git a/testpar/t_pmulti_dset.c b/testpar/t_pmulti_dset.c index 622690dbece..e9819840cb3 100644 --- a/testpar/t_pmulti_dset.c +++ b/testpar/t_pmulti_dset.c @@ -267,7 +267,7 @@ test_pmdset(size_t niter, unsigned flags) for (i = 0; i < niter; i++) { /* Determine number of datasets */ ndsets = (flags & MDSET_FLAG_MLAYOUT) ? 3 - : (flags & MDSET_FLAG_MDSET) ? (size_t)((size_t)rand() % max_dsets) + 1 + : (flags & MDSET_FLAG_MDSET) ? (size_t)((size_t)h5_local_rand() % max_dsets) + 1 : 1; /* Create file */ @@ -280,16 +280,16 @@ test_pmdset(size_t niter, unsigned flags) (flags & MDSET_FLAG_CHUNK) || ((flags & MDSET_FLAG_MLAYOUT) && (j == 1 || j == 2)); /* Generate file dataspace */ - dset_dims[j][0] = (hsize_t)((rand() % MAX_DSET_X) + 1); - dset_dims[j][1] = (hsize_t)((rand() % MAX_DSET_Y) + 1); + dset_dims[j][0] = (hsize_t)((h5_local_rand() % MAX_DSET_X) + 1); + dset_dims[j][1] = (hsize_t)((h5_local_rand() % MAX_DSET_Y) + 1); if ((file_space_ids[j] = H5Screate_simple(2, dset_dims[j], use_chunk ? max_dims : NULL)) < 0) T_PMD_ERROR; /* Generate chunk if called for by configuration (multi layout uses chunked for datasets * 1 and 2) */ if (use_chunk) { - chunk_dims[0] = (hsize_t)((rand() % MAX_CHUNK_X) + 1); - chunk_dims[1] = (hsize_t)((rand() % MAX_CHUNK_Y) + 1); + chunk_dims[0] = (hsize_t)((h5_local_rand() % MAX_CHUNK_X) + 1); + chunk_dims[1] = (hsize_t)((h5_local_rand() % MAX_CHUNK_Y) + 1); if (H5Pset_chunk(dcpl_id[j], 2, chunk_dims) < 0) T_PMD_ERROR; } /* end if */ @@ -297,10 +297,10 @@ test_pmdset(size_t niter, unsigned flags) /* Create dataset */ /* If MDSET_FLAG_TCONV is set, use a different datatype with 50% probability, so * some datasets require type conversion and others do not */ - if ((dset_ids[j] = - H5Dcreate2(file_id, dset_name[j], - (flags & MDSET_FLAG_TCONV && rand() % 2) ? H5T_NATIVE_LONG : H5T_NATIVE_UINT, - file_space_ids[j], H5P_DEFAULT, dcpl_id[j], H5P_DEFAULT)) < 0) + if ((dset_ids[j] = H5Dcreate2(file_id, dset_name[j], + (flags & MDSET_FLAG_TCONV && h5_local_rand() % 2) ? H5T_NATIVE_LONG + : H5T_NATIVE_UINT, + file_space_ids[j], H5P_DEFAULT, dcpl_id[j], H5P_DEFAULT)) < 0) T_PMD_ERROR; } /* end for */ @@ -325,7 +325,7 @@ test_pmdset(size_t niter, unsigned flags) /* Perform read/write operations */ for (j = 0; j < OPS_PER_FILE; j++) { /* Decide whether to read or write */ - do_read = (bool)(rand() % 2); + do_read = (bool)(h5_local_rand() % 2); /* Barrier to ensure processes have finished the previous operation */ @@ -387,9 +387,9 @@ test_pmdset(size_t niter, unsigned flags) (int)((unsigned)max_dsets * MAX_DSET_X * MAX_DSET_Y) * ((int)l - (int)mpi_rank); /* Decide whether to do a hyperslab or point selection */ - if (rand() % 2) { + if (h5_local_rand() % 2) { /* Hyperslab */ - size_t nhs = (size_t)((rand() % MAX_HS) + 1); /* Number of hyperslabs */ + size_t nhs = (size_t)((h5_local_rand() % MAX_HS) + 1); /* Number of hyperslabs */ size_t max_hs_x = (MAX_HS_X <= dset_dims[k][0]) ? 
MAX_HS_X : dset_dims[k][0]; /* Determine maximum hyperslab size in X */ @@ -401,14 +401,16 @@ test_pmdset(size_t niter, unsigned flags) overlap = true; for (n = 0; overlap && (n < MAX_SEL_RETRIES); n++) { /* Generate hyperslab */ - count[m][0] = (hsize_t)(((hsize_t)rand() % max_hs_x) + 1); - count[m][1] = (hsize_t)(((hsize_t)rand() % max_hs_y) + 1); - start[m][0] = (count[m][0] == dset_dims[k][0]) - ? 0 - : (hsize_t)rand() % (dset_dims[k][0] - count[m][0] + 1); - start[m][1] = (count[m][1] == dset_dims[k][1]) - ? 0 - : (hsize_t)rand() % (dset_dims[k][1] - count[m][1] + 1); + count[m][0] = (hsize_t)(((hsize_t)h5_local_rand() % max_hs_x) + 1); + count[m][1] = (hsize_t)(((hsize_t)h5_local_rand() % max_hs_y) + 1); + start[m][0] = + (count[m][0] == dset_dims[k][0]) + ? 0 + : (hsize_t)h5_local_rand() % (dset_dims[k][0] - count[m][0] + 1); + start[m][1] = + (count[m][1] == dset_dims[k][1]) + ? 0 + : (hsize_t)h5_local_rand() % (dset_dims[k][1] - count[m][1] + 1); /* If writing, check for overlap with other processes */ overlap = false; @@ -460,7 +462,8 @@ test_pmdset(size_t niter, unsigned flags) } /* end if */ else { /* Point selection */ - size_t npoints = (size_t)(((size_t)rand() % MAX_POINTS) + 1); /* Number of points */ + size_t npoints = + (size_t)(((size_t)h5_local_rand() % MAX_POINTS) + 1); /* Number of points */ /* Reset dataset usage array if reading, since in this case we don't care * about overlapping selections between processes */ @@ -472,8 +475,8 @@ test_pmdset(size_t niter, unsigned flags) overlap = true; for (n = 0; overlap && (n < MAX_SEL_RETRIES); n++) { /* Generate point */ - points[2 * m] = (unsigned)((hsize_t)rand() % dset_dims[k][0]); - points[(2 * m) + 1] = (unsigned)((hsize_t)rand() % dset_dims[k][1]); + points[2 * m] = (unsigned)((hsize_t)h5_local_rand() % dset_dims[k][0]); + points[(2 * m) + 1] = (unsigned)((hsize_t)h5_local_rand() % dset_dims[k][1]); /* Check for overlap with other processes (write) or this process * (always) */ @@ -664,7 +667,7 @@ main(int argc, char *argv[]) /* Seed random number generator with shared seed (so all ranks generate the * same sequence) */ - srand(seed); + h5_local_srand(seed); /* Fill dset_name array */ for (i = 0; i < MAX_DSETS; i++) { diff --git a/testpar/t_select_io_dset.c b/testpar/t_select_io_dset.c index 271d38c7fbd..a6e62c6f8e5 100644 --- a/testpar/t_select_io_dset.c +++ b/testpar/t_select_io_dset.c @@ -1588,7 +1588,7 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned s mwbuf ? "mwbuf" : "nomwbuf"); /* Flip a coin to see if we're doing type conversion */ - tconv = rand() % 2; + tconv = h5_local_rand() % 2; if (tconv) any_tconv = true; @@ -2079,7 +2079,7 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned select, uns } /* Case c */ - mm = rand() % (int)ndsets; + mm = h5_local_rand() % (int)ndsets; if (!mm) mm++; @@ -2719,9 +2719,9 @@ test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, un P_TEST_ERROR; } else { - if ((dset_dids[i] = - H5Dcreate2(fid, dset_names[i], ((rand() % 2) ? H5T_NATIVE_LLONG : H5T_NATIVE_SHORT), - file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], + ((h5_local_rand() % 2) ? 
H5T_NATIVE_LLONG : H5T_NATIVE_SHORT),
+                                       file_sids[i], H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
                P_TEST_ERROR;
        }
    }
@@ -2790,7 +2790,7 @@ test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, un
          * process 0: get 0 row; other processes: hyperslab
          */
-        mm = rand() % (int)ndsets;
+        mm = h5_local_rand() % (int)ndsets;
         if (mm == 0)
             mm++;

@@ -3169,7 +3169,7 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned select, un
         if ((mem_sids[i] = H5Screate_simple(1, block, NULL)) < 0)
             P_TEST_ERROR;

-        mm = rand() % (int)ndsets;
+        mm = h5_local_rand() % (int)ndsets;
         if (mm == 0) {
             dset_types[i] = DSET_WITH_NO_CONV;
             snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_nconv_dset%d_%s_%s_%s", i,
diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c
index 27c48250be4..aa2da851eaa 100644
--- a/testpar/t_subfiling_vfd.c
+++ b/testpar/t_subfiling_vfd.c
@@ -105,6 +105,7 @@ static void test_read_different_stripe_size(void);
 static void test_subfiling_precreate_rank_0(void);
 static void test_subfiling_write_many_read_one(void);
 static void test_subfiling_write_many_read_few(void);
+static void test_subfiling_vector_io_extension(void);
 static void test_subfiling_h5fuse(void);

 static test_func tests[] = {
@@ -118,6 +119,7 @@ static test_func tests[] = {
     test_subfiling_precreate_rank_0,
     test_subfiling_write_many_read_one,
     test_subfiling_write_many_read_few,
+    test_subfiling_vector_io_extension,
     test_subfiling_h5fuse,
 };

@@ -2568,6 +2570,120 @@ test_subfiling_write_many_read_few(void)
 #undef SUBF_HDF5_TYPE
 #undef SUBF_C_TYPE

+/*
+ * Test to check for a bug where the vector I/O sizes
+ * array wasn't being extended when an entry in the
+ * array was 0.
+ */
+#define SUBF_FILENAME "test_subfiling_vector_io_extension.h5"
+#define SUBF_C_TYPE int
+static void
+test_subfiling_vector_io_extension(void)
+{
+    H5FD_subfiling_params_t cfg;
+    h5_stat_size_t          file_size;
+    SUBF_C_TYPE            *read_buf = NULL;
+    H5FD_mem_t             *types    = NULL;
+    h5_stat_t               file_info;
+    uint32_t                count = 64;
+    haddr_t                *addrs = NULL;
+    haddr_t                 file_end_addr;
+    herr_t                  read_status;
+    size_t                 *sizes    = NULL;
+    H5FD_t                 *file_ptr = NULL;
+    hid_t                   file_id  = H5I_INVALID_HID;
+    hid_t                   fapl_id  = H5I_INVALID_HID;
+    hid_t                   dxpl_id  = H5I_INVALID_HID;
+    void                  **bufs     = NULL;
+
+    curr_nerrors = nerrors;
+
+    if (MAINPROCESS)
+        TESTING_2("I/O vector size extension functionality");
+
+    /* Must use at least 2 subfiles to cause generation of
+     * I/O vectors within the VFD.
+     */
+    cfg.ioc_selection = SELECT_IOC_ONE_PER_NODE;
+    cfg.stripe_size   = (stripe_size_g > 0) ? stripe_size_g : 1048576;
+    cfg.stripe_count  = num_iocs_g > 1 ? num_iocs_g : 2;
+
+    fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, true, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
+    VRFY((fapl_id >= 0), "FAPL creation succeeded");
+
+    file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+    VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+    VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+    /* Re-open file through H5FDopen for direct reads */
+    file_ptr = H5FDopen(SUBF_FILENAME, H5F_ACC_RDWR, fapl_id, HADDR_UNDEF);
+    VRFY(file_ptr, "H5FDopen succeeded");
+
+    /*
+     * Get the current file size to see where we can safely
+     * write to in the file without overwriting the superblock
+     */
+    memset(&file_info, 0, sizeof(h5_stat_t));
+    VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded");
+    file_size = (h5_stat_size_t)file_info.st_size;
+
+    H5_CHECK_OVERFLOW(file_size, h5_stat_size_t, haddr_t);
+    file_end_addr = (haddr_t)file_size;
+
+    dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+    VRFY((dxpl_id >= 0), "DXPL creation succeeded");
+
+    /* Set independent I/O on DXPL */
+    VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_INDEPENDENT) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+    /* Set EOA for following read call */
+    VRFY((H5FDset_eoa(file_ptr, H5FD_MEM_DEFAULT, file_end_addr + (count * sizeof(int))) >= 0),
+         "H5FDset_eoa succeeded");
+
+    read_buf = malloc(count * sizeof(*read_buf));
+    types    = malloc(count * sizeof(*types));
+    addrs    = malloc(count * sizeof(*addrs));
+    sizes    = malloc(2 * sizeof(size_t));
+    bufs     = malloc(count * sizeof(*bufs));
+
+    sizes[0] = sizeof(SUBF_C_TYPE);
+    sizes[1] = 0;
+
+    for (size_t i = 0; i < count; i++) {
+        types[i] = H5FD_MEM_DRAW;
+        addrs[i] = file_end_addr + (i * sizeof(SUBF_C_TYPE));
+        bufs[i]  = (void *)&(read_buf[i]);
+    }
+
+    read_status = H5FDread_vector(file_ptr, dxpl_id, count, types, addrs, sizes, bufs);
+    VRFY((read_status >= 0), "H5FDread_vector succeeded");
+
+    VRFY((H5FDclose(file_ptr) >= 0), "H5FDclose succeeded");
+
+    mpi_code_g = MPI_Barrier(comm_g);
+    VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded");
+
+    H5E_BEGIN_TRY
+    {
+        H5Fdelete(SUBF_FILENAME, fapl_id);
+    }
+    H5E_END_TRY
+
+    VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded");
+    VRFY((H5Pclose(dxpl_id) >= 0), "DXPL close succeeded");
+
+    free(bufs);
+    free(sizes);
+    free(addrs);
+    free(types);
+    free(read_buf);
+
+    CHECK_PASSED();
+}
+#undef SUBF_FILENAME
+#undef SUBF_C_TYPE
+
 /*
  * Test that the subfiling file can be read with the
  * sec2 driver after being fused back together with
diff --git a/tools/src/h5dump/h5dump.c b/tools/src/h5dump/h5dump.c
index dc86e526294..6f37c1bc2be 100644
--- a/tools/src/h5dump/h5dump.c
+++ b/tools/src/h5dump/h5dump.c
@@ -333,10 +333,9 @@ usage(const char *prog)
     PRINTVALSTREAM(
         rawoutstream,
         " D - is the file driver to use in opening the file.
Acceptable values are available from\n"); - PRINTVALSTREAM( - rawoutstream, - " " - "https://portal.hdfgroup.org/documentation/hdf5-docs/registered_virtual_file_drivers_vfds.html.\n"); + PRINTVALSTREAM(rawoutstream, " " + "https://support.hdfgroup.org/releases/hdf5/documentation/" + "registered_virtual_file_drivers_vfds.md.\n"); PRINTVALSTREAM(rawoutstream, " Without the file driver flag, the file will be opened with each driver in\n"); PRINTVALSTREAM(rawoutstream, " turn and in the order specified above until one driver succeeds\n"); diff --git a/tools/src/h5dump/h5dump.h b/tools/src/h5dump/h5dump.h index 879206dfa62..f50950b5548 100644 --- a/tools/src/h5dump/h5dump.h +++ b/tools/src/h5dump/h5dump.h @@ -135,9 +135,9 @@ * * \subsubsection subsubsec_cltools_h5dump_options_args Option Argument Conventions * \li D - is the file driver to use in opening the file. Acceptable values are available - * from https://support.hdfgroup.org/documentation/HDF5/registered_virtual_file_drivers_vfds.html. Without - * the file driver flag, the file will be opened with each driver in turn and in the order specified above - * until one driver succeeds in opening the file. See examples below for family, split, and multi driver + * from https://support.hdfgroup.org/releases/hdf5/documentation/registered_virtual_file_drivers_vfds.md. + * Without the file driver flag, the file will be opened with each driver in turn and in the order specified + * above until one driver succeeds in opening the file. See examples below for family, split, and multi driver * special file name usage. * * \li F - is a filename. diff --git a/tools/src/h5perf/pio_engine.c b/tools/src/h5perf/pio_engine.c index 8b11f51fc68..7930d9645a0 100644 --- a/tools/src/h5perf/pio_engine.c +++ b/tools/src/h5perf/pio_engine.c @@ -72,12 +72,6 @@ } while (0) /* POSIX I/O macros */ -#ifdef H5_HAVE_WIN32_API -/* Can't link against the library, so this test will use the older, non-Unicode - * _open() call on Windows. - */ -#define HDopen(S, F, ...) _open(S, F | _O_BINARY, __VA_ARGS__) -#endif /* H5_HAVE_WIN32_API */ #define POSIXCREATE(fn) HDopen(fn, O_CREAT | O_TRUNC | O_RDWR, 0600) #define POSIXOPEN(fn, F) HDopen(fn, F, 0600) #define POSIXCLOSE(F) HDclose(F) diff --git a/tools/src/h5perf/sio_engine.c b/tools/src/h5perf/sio_engine.c index 12305f54a76..0d2240fa69f 100644 --- a/tools/src/h5perf/sio_engine.c +++ b/tools/src/h5perf/sio_engine.c @@ -53,12 +53,6 @@ } while (0) /* POSIX I/O macros */ -#ifdef H5_HAVE_WIN32_API -/* Can't link against the library, so this test will use the older, non-Unicode - * _open() call on Windows. - */ -#define HDopen(S, F, ...) _open(S, F | _O_BINARY, __VA_ARGS__) -#endif /* H5_HAVE_WIN32_API */ #define POSIXCREATE(fn) HDopen(fn, O_CREAT | O_TRUNC | O_RDWR, 0600) #define POSIXOPEN(fn, F) HDopen(fn, F, 0600) #define POSIXCLOSE(F) HDclose(F) diff --git a/tools/test/h5dump/expected/h5dump-help.txt b/tools/test/h5dump/expected/h5dump-help.txt index a78d8d820ec..2f17b51da95 100644 --- a/tools/test/h5dump/expected/h5dump-help.txt +++ b/tools/test/h5dump/expected/h5dump-help.txt @@ -105,7 +105,7 @@ usage: h5dump [OPTIONS] files --------------- Option Argument Conventions --------------- D - is the file driver to use in opening the file. Acceptable values are available from - https://portal.hdfgroup.org/documentation/hdf5-docs/registered_virtual_file_drivers_vfds.html. + https://support.hdfgroup.org/releases/hdf5/documentation/registered_virtual_file_drivers_vfds.md. 
Without the file driver flag, the file will be opened with each driver in turn and in the order specified above until one driver succeeds in opening the file. diff --git a/tools/test/h5dump/expected/pbits/tnofilename-with-packed-bits.ddl b/tools/test/h5dump/expected/pbits/tnofilename-with-packed-bits.ddl index a78d8d820ec..2f17b51da95 100644 --- a/tools/test/h5dump/expected/pbits/tnofilename-with-packed-bits.ddl +++ b/tools/test/h5dump/expected/pbits/tnofilename-with-packed-bits.ddl @@ -105,7 +105,7 @@ usage: h5dump [OPTIONS] files --------------- Option Argument Conventions --------------- D - is the file driver to use in opening the file. Acceptable values are available from - https://portal.hdfgroup.org/documentation/hdf5-docs/registered_virtual_file_drivers_vfds.html. + https://support.hdfgroup.org/releases/hdf5/documentation/registered_virtual_file_drivers_vfds.md. Without the file driver flag, the file will be opened with each driver in turn and in the order specified above until one driver succeeds in opening the file. diff --git a/tools/test/h5dump/expected/pbits/tpbitsIncomplete.ddl b/tools/test/h5dump/expected/pbits/tpbitsIncomplete.ddl index a78d8d820ec..2f17b51da95 100644 --- a/tools/test/h5dump/expected/pbits/tpbitsIncomplete.ddl +++ b/tools/test/h5dump/expected/pbits/tpbitsIncomplete.ddl @@ -105,7 +105,7 @@ usage: h5dump [OPTIONS] files --------------- Option Argument Conventions --------------- D - is the file driver to use in opening the file. Acceptable values are available from - https://portal.hdfgroup.org/documentation/hdf5-docs/registered_virtual_file_drivers_vfds.html. + https://support.hdfgroup.org/releases/hdf5/documentation/registered_virtual_file_drivers_vfds.md. Without the file driver flag, the file will be opened with each driver in turn and in the order specified above until one driver succeeds in opening the file. diff --git a/tools/test/h5dump/expected/pbits/tpbitsLengthExceeded.ddl b/tools/test/h5dump/expected/pbits/tpbitsLengthExceeded.ddl index a78d8d820ec..2f17b51da95 100644 --- a/tools/test/h5dump/expected/pbits/tpbitsLengthExceeded.ddl +++ b/tools/test/h5dump/expected/pbits/tpbitsLengthExceeded.ddl @@ -105,7 +105,7 @@ usage: h5dump [OPTIONS] files --------------- Option Argument Conventions --------------- D - is the file driver to use in opening the file. Acceptable values are available from - https://portal.hdfgroup.org/documentation/hdf5-docs/registered_virtual_file_drivers_vfds.html. + https://support.hdfgroup.org/releases/hdf5/documentation/registered_virtual_file_drivers_vfds.md. Without the file driver flag, the file will be opened with each driver in turn and in the order specified above until one driver succeeds in opening the file. diff --git a/tools/test/h5dump/expected/pbits/tpbitsLengthPositive.ddl b/tools/test/h5dump/expected/pbits/tpbitsLengthPositive.ddl index a78d8d820ec..2f17b51da95 100644 --- a/tools/test/h5dump/expected/pbits/tpbitsLengthPositive.ddl +++ b/tools/test/h5dump/expected/pbits/tpbitsLengthPositive.ddl @@ -105,7 +105,7 @@ usage: h5dump [OPTIONS] files --------------- Option Argument Conventions --------------- D - is the file driver to use in opening the file. Acceptable values are available from - https://portal.hdfgroup.org/documentation/hdf5-docs/registered_virtual_file_drivers_vfds.html. + https://support.hdfgroup.org/releases/hdf5/documentation/registered_virtual_file_drivers_vfds.md. 
Without the file driver flag, the file will be opened with each driver in turn and in the order specified above until one driver succeeds in opening the file. diff --git a/tools/test/h5dump/expected/pbits/tpbitsMaxExceeded.ddl b/tools/test/h5dump/expected/pbits/tpbitsMaxExceeded.ddl index a78d8d820ec..2f17b51da95 100644 --- a/tools/test/h5dump/expected/pbits/tpbitsMaxExceeded.ddl +++ b/tools/test/h5dump/expected/pbits/tpbitsMaxExceeded.ddl @@ -105,7 +105,7 @@ usage: h5dump [OPTIONS] files --------------- Option Argument Conventions --------------- D - is the file driver to use in opening the file. Acceptable values are available from - https://portal.hdfgroup.org/documentation/hdf5-docs/registered_virtual_file_drivers_vfds.html. + https://support.hdfgroup.org/releases/hdf5/documentation/registered_virtual_file_drivers_vfds.md. Without the file driver flag, the file will be opened with each driver in turn and in the order specified above until one driver succeeds in opening the file. diff --git a/tools/test/h5dump/expected/pbits/tpbitsOffsetExceeded.ddl b/tools/test/h5dump/expected/pbits/tpbitsOffsetExceeded.ddl index a78d8d820ec..2f17b51da95 100644 --- a/tools/test/h5dump/expected/pbits/tpbitsOffsetExceeded.ddl +++ b/tools/test/h5dump/expected/pbits/tpbitsOffsetExceeded.ddl @@ -105,7 +105,7 @@ usage: h5dump [OPTIONS] files --------------- Option Argument Conventions --------------- D - is the file driver to use in opening the file. Acceptable values are available from - https://portal.hdfgroup.org/documentation/hdf5-docs/registered_virtual_file_drivers_vfds.html. + https://support.hdfgroup.org/releases/hdf5/documentation/registered_virtual_file_drivers_vfds.md. Without the file driver flag, the file will be opened with each driver in turn and in the order specified above until one driver succeeds in opening the file. diff --git a/tools/test/h5dump/expected/pbits/tpbitsOffsetNegative.ddl b/tools/test/h5dump/expected/pbits/tpbitsOffsetNegative.ddl index a78d8d820ec..2f17b51da95 100644 --- a/tools/test/h5dump/expected/pbits/tpbitsOffsetNegative.ddl +++ b/tools/test/h5dump/expected/pbits/tpbitsOffsetNegative.ddl @@ -105,7 +105,7 @@ usage: h5dump [OPTIONS] files --------------- Option Argument Conventions --------------- D - is the file driver to use in opening the file. Acceptable values are available from - https://portal.hdfgroup.org/documentation/hdf5-docs/registered_virtual_file_drivers_vfds.html. + https://support.hdfgroup.org/releases/hdf5/documentation/registered_virtual_file_drivers_vfds.md. Without the file driver flag, the file will be opened with each driver in turn and in the order specified above until one driver succeeds in opening the file.
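
Note on the t_subfiling_vfd.c addition above: the new test exercises the H5FD vector I/O
convention that the sizes[] array passed to H5FDread_vector()/H5FDwrite_vector() may be
shorter than count; once an entry of 0 is seen (at an index greater than 0), the previous
size is reused for all remaining elements, which is why the test allocates only two sizes
entries for 64 reads. The sketch below is not part of the patch; it is a minimal,
standalone illustration of that convention against the plain sec2 driver, using the
developer-level VFD API that I assume is exposed through H5FDdevelop.h. The file name and
element count are arbitrary.

/* vector_io_demo.c -- hedged sketch, not HDF5 source.
 * Writes and reads COUNT ints through the VFD vector I/O calls while passing a
 * 2-element sizes[] array terminated by 0, mirroring what the new subfiling test does.
 */
#include <stdio.h>
#include <stdlib.h>
#include "hdf5.h"
#include "H5FDdevelop.h" /* H5FDopen/H5FDwrite_vector/H5FDread_vector (assumed header) */

#define COUNT 8

int
main(void)
{
    int         wbuf[COUNT], rbuf[COUNT];
    H5FD_mem_t  types[COUNT];                /* one type per element; not terminated early here */
    size_t      sizes[2] = {sizeof(int), 0}; /* 0 at index 1 => "repeat sizes[0] for the rest" */
    haddr_t     addrs[COUNT];
    const void *wbufs[COUNT];
    void       *rbufs[COUNT];
    H5FD_t     *file    = NULL;
    hid_t       fapl_id = H5I_INVALID_HID;

    for (int i = 0; i < COUNT; i++) {
        wbuf[i]  = 100 + i;
        types[i] = H5FD_MEM_DRAW;
        addrs[i] = (haddr_t)((size_t)i * sizeof(int));
        wbufs[i] = &wbuf[i];
        rbufs[i] = &rbuf[i];
    }

    /* The plain sec2 driver is enough to show the array-extension behavior */
    if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 || H5Pset_fapl_sec2(fapl_id) < 0)
        return EXIT_FAILURE;

    if (NULL == (file = H5FDopen("vector_io_demo.h5", H5F_ACC_RDWR | H5F_ACC_CREAT | H5F_ACC_TRUNC,
                                 fapl_id, HADDR_UNDEF)))
        return EXIT_FAILURE;

    /* The driver refuses I/O past the end-of-allocation, so set it first */
    if (H5FDset_eoa(file, H5FD_MEM_DEFAULT, (haddr_t)sizeof(wbuf)) < 0)
        return EXIT_FAILURE;

    /* count == COUNT, but sizes[] only has 2 entries: the VFD layer must extend it */
    if (H5FDwrite_vector(file, H5P_DEFAULT, COUNT, types, addrs, sizes, wbufs) < 0)
        return EXIT_FAILURE;
    if (H5FDread_vector(file, H5P_DEFAULT, COUNT, types, addrs, sizes, rbufs) < 0)
        return EXIT_FAILURE;

    for (int i = 0; i < COUNT; i++)
        printf("%d ", rbuf[i]); /* expect 100..107 */
    printf("\n");

    if (H5FDclose(file) < 0 || H5Pclose(fapl_id) < 0)
        return EXIT_FAILURE;
    return EXIT_SUCCESS;
}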