diff --git a/.azuredevops/components/MIOpen.yml b/.azuredevops/components/MIOpen.yml index 5e39ebf103..16d3cbaaa2 100644 --- a/.azuredevops/components/MIOpen.yml +++ b/.azuredevops/components/MIOpen.yml @@ -197,3 +197,4 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml parameters: componentName: MIOpen + testParameters: '-VV --output-on-failure --force-new-ctest-process --output-junit test_output.xml --exclude-regex test_rnn_seq_api' diff --git a/.azuredevops/components/llvm-project.yml b/.azuredevops/components/llvm-project.yml index a19ddd6bb0..9331bc3c21 100644 --- a/.azuredevops/components/llvm-project.yml +++ b/.azuredevops/components/llvm-project.yml @@ -126,6 +126,7 @@ jobs: componentName: comgr extraBuildFlags: >- -DCMAKE_PREFIX_PATH="$(Build.SourcesDirectory)/llvm/build;$(Build.SourcesDirectory)/amd/device-libs/build" + -DCOMGR_DISABLE_SPIRV=1 -DCMAKE_BUILD_TYPE=Release cmakeBuildDir: 'amd/comgr/build' - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml diff --git a/.azuredevops/components/omniperf.yml b/.azuredevops/components/omniperf.yml new file mode 100644 index 0000000000..818d7ca0de --- /dev/null +++ b/.azuredevops/components/omniperf.yml @@ -0,0 +1,166 @@ +parameters: +- name: checkoutRepo + type: string + default: 'self' +- name: checkoutRef + type: string + default: '' +- name: aptPackages + type: object + default: + - cmake + - python3-pip +- name: pipModules + type: object + default: + - astunparse==1.6.2 + - colorlover + - dash>=1.12.0 + - matplotlib + - numpy>=1.17.5 + - pandas>=1.4.3 + - pymongo + - pyyaml + - tabulate + - tqdm + - dash-svg + - dash-bootstrap-components + - kaleido + - setuptools + - plotille + - mock + - pytest + - pytest-cov + - pytest-xdist +- name: rocmDependencies + type: object + default: + - clr + - llvm-project + - rocm-cmake + - rocm-core + - rocminfo + - ROCR-Runtime + - rocprofiler + - rocprofiler-register + - roctracer + +jobs: +- job: omniperf + variables: + - group: common + - template: /.azuredevops/variables-global.yml + pool: + vmImage: ${{ variables.BASE_BUILD_POOL }} + workspace: + clean: all + strategy: + matrix: + gfx942: + JOB_GPU_TARGET: gfx942 + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + parameters: + ${{ if eq(parameters.checkoutRef, '') }}: + dependencySource: staging + ${{ elseif ne(parameters.checkoutRef, '') }}: + dependencySource: tag-builds + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + dependencyList: ${{ parameters.rocmDependencies }} + gpuTarget: $(JOB_GPU_TARGET) + # CI case: download latest default branch build + ${{ if eq(parameters.checkoutRef, '') }}: + dependencySource: staging + # manual build case: triggered by ROCm/ROCm repo + ${{ elseif ne(parameters.checkoutRef, '') }}: + dependencySource: tag-builds + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-upload.yml + parameters: + gpuTarget: $(JOB_GPU_TARGET) + +- job: omniperf_testing + dependsOn: omniperf + condition: and(succeeded(), eq(variables.ENABLE_GFX942_TESTS, 'true'), 
not(containsValue(split(variables.DISABLED_GFX942_TESTS, ','), variables['Build.DefinitionName']))) + variables: + - group: common + - template: /.azuredevops/variables-global.yml + - name: PYTHON_VERSION + value: 3.10 + pool: $(JOB_TEST_POOL) + workspace: + clean: all + strategy: + matrix: + gfx942: + JOB_GPU_TARGET: gfx942 + JOB_TEST_POOL: ${{ variables.GFX942_TEST_POOL }} + steps: + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-other.yml + parameters: + aptPackages: ${{ parameters.aptPackages }} + pipModules: ${{ parameters.pipModules }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/preamble.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml + parameters: + checkoutRepo: ${{ parameters.checkoutRepo }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/local-artifact-download.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + parameters: + ${{ if eq(parameters.checkoutRef, '') }}: + dependencySource: staging + ${{ elseif ne(parameters.checkoutRef, '') }}: + dependencySource: tag-builds + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml + parameters: + dependencyList: ${{ parameters.rocmDependencies }} + gpuTarget: $(JOB_GPU_TARGET) + ${{ if eq(parameters.checkoutRef, '') }}: + dependencySource: staging + ${{ elseif ne(parameters.checkoutRef, '') }}: + dependencySource: tag-builds + - task: Bash@3 + displayName: Add ROCm binaries to PATH + inputs: + targetType: inline + script: echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/bin" + - task: Bash@3 + displayName: Add ROCm compilers to PATH + inputs: + targetType: inline + script: echo "##vso[task.prependpath]$(Agent.BuildDirectory)/rocm/llvm/bin" + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/build-cmake.yml + parameters: + extraBuildFlags: >- + -DCMAKE_HIP_ARCHITECTURES=$(JOB_GPU_TARGET) + -DCMAKE_C_COMPILER=$(Agent.BuildDirectory)/rocm/llvm/bin/amdclang + -DCMAKE_MODULE_PATH=$(Agent.BuildDirectory)/rocm/lib/cmake/hip + -DCMAKE_PREFIX_PATH=$(Agent.BuildDirectory)/rocm + -DCMAKE_BUILD_TYPE=Release + -DENABLE_TESTS=ON + -DINSTALL_TESTS=ON + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/gpu-diagnostics.yml + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/test.yml + parameters: + componentName: omniperf + testDir: $(Build.BinariesDirectory)/libexec/omniperf + testExecutable: export OMNIPERF_ARCH_OVERRIDE="MI300X"; ctest + - task: Bash@3 + displayName: Remove ROCm binaries from PATH + inputs: + targetType: inline + script: echo "##vso[task.setvariable variable=PATH]$(echo $PATH | sed -e 's;:$(Agent.BuildDirectory)/rocm/bin;;' -e 's;^/;;' -e 's;/$;;')" + - task: Bash@3 + displayName: Remove ROCm compilers from PATH + inputs: + targetType: inline + script: echo "##vso[task.setvariable variable=PATH]$(echo $PATH | sed -e 's;:$(Agent.BuildDirectory)/rocm/llvm/bin;;' -e 's;^/;;' -e 's;/$;;')" diff --git a/.azuredevops/components/rocPyDecode.yml b/.azuredevops/components/rocPyDecode.yml index 1b99367882..9f466c3b18 100644 --- a/.azuredevops/components/rocPyDecode.yml +++ b/.azuredevops/components/rocPyDecode.yml @@ -181,6 +181,7 @@ jobs: parameters: dependencyList: ${{ parameters.rocmDependencies }} gpuTarget: $(JOB_GPU_TARGET) + setupHIPLibrarySymlinks: true ${{ if eq(parameters.checkoutRef, '') }}: dependencySource: staging ${{ elseif ne(parameters.checkoutRef, '') }}: diff --git a/.azuredevops/components/rocprofiler-sdk.yml b/.azuredevops/components/rocprofiler-sdk.yml index 4c13d3406c..5d386a90e7 100644 --- 
a/.azuredevops/components/rocprofiler-sdk.yml +++ b/.azuredevops/components/rocprofiler-sdk.yml @@ -41,6 +41,7 @@ parameters: - ROCR-Runtime - rocprofiler-register - roctracer + - aomp jobs: - job: rocprofilersdk diff --git a/.azuredevops/components/rocprofiler-systems.yml b/.azuredevops/components/rocprofiler-systems.yml index 4a1ff25f73..02a388b15e 100644 --- a/.azuredevops/components/rocprofiler-systems.yml +++ b/.azuredevops/components/rocprofiler-systems.yml @@ -51,6 +51,7 @@ parameters: - rocprofiler - rocprofiler-register - roctracer + - rocprofiler-sdk jobs: - job: rocprofiler_systems @@ -73,6 +74,12 @@ jobs: - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/checkout.yml parameters: checkoutRepo: ${{ parameters.checkoutRepo }} + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-aqlprofile.yml + parameters: + ${{ if eq(parameters.checkoutRef, '') }}: + dependencySource: staging + ${{ elseif ne(parameters.checkoutRef, '') }}: + dependencySource: tag-builds - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/dependencies-rocm.yml parameters: dependencyList: ${{ parameters.rocmDependencies }} @@ -109,6 +116,7 @@ jobs: -DROCPROFSYS_BUILD_TESTING=ON -DROCPROFSYS_BUILD_DYNINST=ON -DROCPROFSYS_BUILD_LIBUNWIND=ON + -DROCPROFSYS_DISABLE_EXAMPLES="openmp-target" -DDYNINST_BUILD_TBB=ON -DDYNINST_BUILD_ELFUTILS=ON -DDYNINST_BUILD_LIBIBERTY=ON diff --git a/.azuredevops/nightly/pytorch.yml b/.azuredevops/nightly/pytorch.yml index 4338630adc..0282ae2192 100644 --- a/.azuredevops/nightly/pytorch.yml +++ b/.azuredevops/nightly/pytorch.yml @@ -142,6 +142,10 @@ parameters: - binary_ufuncs - autograd # - inductor/torchinductor takes too long +# set to false to disable torchvision build and test +- name: includeVision + type: boolean + default: false trigger: none pr: none @@ -237,6 +241,12 @@ jobs: git clone https://github.com/pytorch/builder.git --depth=1 --recurse-submodules sudo ln -s $(Build.SourcesDirectory)/builder /builder workingDirectory: $(Build.SourcesDirectory) + - task: Bash@3 + displayName: Temporarily Patch CK Submodule + inputs: + targetType: inline + script: git pull origin develop + workingDirectory: $(Build.SourcesDirectory)/pytorch/third_party/composable_kernel - task: Bash@3 displayName: Install patchelf inputs: @@ -296,59 +306,60 @@ jobs: sourceDir: /remote/wheelhouserocm$(ROCM_VERSION) contentsString: '*.whl' # common helper source for pytorch vision and audio - - task: Bash@3 - displayName: git clone pytorch test-infra - inputs: - targetType: inline - script: git clone https://github.com/pytorch/test-infra.git --depth=1 --recurse-submodules - workingDirectory: $(Build.SourcesDirectory) - - task: Bash@3 - displayName: install package helper - inputs: - targetType: inline - script: python3 -m pip install test-infra/tools/pkg-helpers - workingDirectory: $(Build.SourcesDirectory) - - task: Bash@3 - displayName: pytorch pkg helpers - inputs: - targetType: inline - script: CU_VERSION=${CU_VERSION} CHANNEL=${CHANNEL} python -m pytorch_pkg_helpers -# get torch vision source and build - - task: Bash@3 - displayName: git clone pytorch vision - inputs: - targetType: inline - script: git clone https://github.com/pytorch/vision.git --depth=1 --recurse-submodules - workingDirectory: $(Build.SourcesDirectory) - - task: Bash@3 - displayName: Build vision - inputs: - targetType: inline - script: >- - TORCH_PACKAGE_NAME=torch.$(ROCM_BRANCH).$(JOB_GPU_TARGET) - TORCHVISION_PACKAGE_NAME=torchvision.$(ROCM_BRANCH).$(JOB_GPU_TARGET) - PYTORCH_VERSION=$(cat 
$(Build.SourcesDirectory)/pytorch/version.txt | cut -da -f1)post$(date -u +%Y%m%d) - BUILD_VERSION=$(cat $(Build.SourcesDirectory)/vision/version.txt | cut -da -f1)post$(date -u +%Y%m%d) - python3 setup.py bdist_wheel - workingDirectory: $(Build.SourcesDirectory)/vision - - task: Bash@3 - displayName: Relocate vision - inputs: - targetType: inline - script: python3 packaging/wheel/relocate.py - workingDirectory: $(Build.SourcesDirectory)/vision - - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-prepare-package.yml - parameters: - sourceDir: $(Build.SourcesDirectory)/vision/dist - contentsString: '*.whl' - clean: false + - ${{ if eq(parameters.includeVision, true) }}: + - task: Bash@3 + displayName: git clone pytorch test-infra + inputs: + targetType: inline + script: git clone https://github.com/pytorch/test-infra.git --depth=1 --recurse-submodules + workingDirectory: $(Build.SourcesDirectory) + - task: Bash@3 + displayName: install package helper + inputs: + targetType: inline + script: python3 -m pip install test-infra/tools/pkg-helpers + workingDirectory: $(Build.SourcesDirectory) + - task: Bash@3 + displayName: pytorch pkg helpers + inputs: + targetType: inline + script: CU_VERSION=${CU_VERSION} CHANNEL=${CHANNEL} python -m pytorch_pkg_helpers + # get torch vision source and build + - task: Bash@3 + displayName: git clone pytorch vision + inputs: + targetType: inline + script: git clone https://github.com/pytorch/vision.git --depth=1 --recurse-submodules + workingDirectory: $(Build.SourcesDirectory) + - task: Bash@3 + displayName: Build vision + inputs: + targetType: inline + script: >- + TORCH_PACKAGE_NAME=torch.$(ROCM_BRANCH).$(JOB_GPU_TARGET) + TORCHVISION_PACKAGE_NAME=torchvision.$(ROCM_BRANCH).$(JOB_GPU_TARGET) + PYTORCH_VERSION=$(cat $(Build.SourcesDirectory)/pytorch/version.txt | cut -da -f1)post$(date -u +%Y%m%d) + BUILD_VERSION=$(cat $(Build.SourcesDirectory)/vision/version.txt | cut -da -f1)post$(date -u +%Y%m%d) + python3 setup.py bdist_wheel + workingDirectory: $(Build.SourcesDirectory)/vision + - task: Bash@3 + displayName: Relocate vision + inputs: + targetType: inline + script: python3 packaging/wheel/relocate.py + workingDirectory: $(Build.SourcesDirectory)/vision + - template: ${{ variables.CI_TEMPLATE_PATH }}/steps/artifact-prepare-package.yml + parameters: + sourceDir: $(Build.SourcesDirectory)/vision/dist + contentsString: '*.whl' + clean: false - task: PublishPipelineArtifact@1 displayName: 'wheel file Publish' retryCountOnTaskFailure: 3 inputs: targetPath: $(Build.BinariesDirectory) -- job: torchvision_testing +- job: pytorch_testing dependsOn: pytorch condition: and(succeeded(), eq(variables.ENABLE_GFX942_TESTS, 'true'), not(containsValue(split(variables.DISABLED_GFX942_TESTS, ','), variables['Build.DefinitionName']))) variables: @@ -401,12 +412,13 @@ jobs: targetType: inline script: git clone https://github.com/pytorch/pytorch.git --depth=1 --recurse-submodules workingDirectory: $(Build.SourcesDirectory) - - task: Bash@3 - displayName: git clone pytorch vision - inputs: - targetType: inline - script: git clone https://github.com/pytorch/vision.git --depth=1 --recurse-submodules - workingDirectory: $(Build.SourcesDirectory) + - ${{ if eq(parameters.includeVision, true) }}: + - task: Bash@3 + displayName: git clone pytorch vision + inputs: + targetType: inline + script: git clone https://github.com/pytorch/vision.git --depth=1 --recurse-submodules + workingDirectory: $(Build.SourcesDirectory) - task: Bash@3 
displayName: Install Wheel Files inputs: @@ -510,13 +522,14 @@ jobs: script: pytest test/test_${{ torchTest }}.py # Reference on what tests to run for torchvision found in private repo: # https://github.com/ROCm/rocAutomation/blob/jenkins-pipelines/pytorch/pytorch_ci/test_torchvision.sh#L51 - - task: Bash@3 - displayName: Test vision/transforms - continueOnError: true - inputs: - targetType: inline - script: pytest test/test_transforms.py - workingDirectory: $(Build.SourcesDirectory)/vision + - ${{ if eq(parameters.includeVision, true) }}: + - task: Bash@3 + displayName: Test vision/transforms + continueOnError: true + inputs: + targetType: inline + script: pytest test/test_transforms.py + workingDirectory: $(Build.SourcesDirectory)/vision - task: Bash@3 displayName: Uninstall Wheel Files inputs: diff --git a/.azuredevops/nightly/rocm-nightly.yml b/.azuredevops/nightly/rocm-nightly.yml index 0c46ee7d77..192102da95 100644 --- a/.azuredevops/nightly/rocm-nightly.yml +++ b/.azuredevops/nightly/rocm-nightly.yml @@ -26,6 +26,7 @@ parameters: - llvm-project - MIOpen - MIVisionX + - omniperf - rccl - rdc - rocAL diff --git a/.azuredevops/tag-builds/omniperf.yml b/.azuredevops/tag-builds/omniperf.yml new file mode 100644 index 0000000000..a1a3961030 --- /dev/null +++ b/.azuredevops/tag-builds/omniperf.yml @@ -0,0 +1,29 @@ +variables: +- group: common +- template: /.azuredevops/variables-global.yml + +parameters: +- name: checkoutRef + type: string + default: refs/tags/$(LATEST_RELEASE_TAG) + +resources: + repositories: + - repository: pipelines_repo + type: github + endpoint: ROCm + name: ROCm/ROCm + - repository: release_repo + type: github + endpoint: ROCm + name: ROCm/omniperf + ref: ${{ parameters.checkoutRef }} + +trigger: none +pr: none + +jobs: + - template: ${{ variables.CI_COMPONENT_PATH }}/omniperf.yml + parameters: + checkoutRepo: release_repo + checkoutRef: ${{ parameters.checkoutRef }} diff --git a/.azuredevops/templates/steps/artifact-download.yml b/.azuredevops/templates/steps/artifact-download.yml index bc21b21e8d..cf602bbf06 100644 --- a/.azuredevops/templates/steps/artifact-download.yml +++ b/.azuredevops/templates/steps/artifact-download.yml @@ -62,7 +62,7 @@ parameters: ROCgdb: amd-staging rocJPEG: develop rocm-cmake: develop - rocm-core: master + rocm-core: amd-staging rocm-examples: develop rocminfo: amd-staging rocMLIR: develop diff --git a/.azuredevops/templates/steps/dependencies-rocm.yml b/.azuredevops/templates/steps/dependencies-rocm.yml index ba078264b3..87f77658d2 100644 --- a/.azuredevops/templates/steps/dependencies-rocm.yml +++ b/.azuredevops/templates/steps/dependencies-rocm.yml @@ -165,6 +165,11 @@ parameters: - name: skipLlvmSymlink type: boolean default: false +# set to true if dlopen calls for HIP libraries are causing failures +# because they do not follow shared library symlink convention +- name: setupHIPLibrarySymlinks + type: boolean + default: false # some ROCm components can specify GPU target and this will affect downloads - name: gpuTarget type: string @@ -280,6 +285,37 @@ steps: for file in amdclang amdclang++ amdclang-cl amdclang-cpp amdflang amdlld aompcc mygpu mycpu offload-arch; do sudo ln -s $(Agent.BuildDirectory)/rocm/llvm/bin/$file $(Agent.BuildDirectory)/rocm/bin/$file done +# dlopen calls within a ctest or pytest sequence runs into issues when shared library symlink convention is not followed +# the convention is as follows: +# unversioned .so is a symlink to major version .so +# major version .so is a symlink to detailed version 
.so +# HIP libraries do not follow this convention, and each .so is a copy of each other +# changing the library structure to follow the symlink convention resolves some test failures +- ${{ if eq(parameters.setupHIPLibrarySymlinks, true) }}: + - task: Bash@3 + displayName: Setup symlinks for hip libraries + inputs: + targetType: inline + workingDirectory: $(Agent.BuildDirectory)/rocm/lib + script: | + LIBRARIES=("libamdhip64" "libhiprtc-builtins" "libhiprtc") + for LIB_NAME in "${LIBRARIES[@]}"; do + VERSIONED_SO=$(ls ${LIB_NAME}.so.* 2>/dev/null | grep -E "${LIB_NAME}\.so\.[0-9]+\.[0-9]+\.[0-9]+(-.*)?" | sort -V | tail -n 1) + if [[ -z "$VERSIONED_SO" ]]; then + continue + fi + MAJOR_VERSION=$(echo "$VERSIONED_SO" | grep -oP "${LIB_NAME}\.so\.\K[0-9]+") + if [[ -e "${LIB_NAME}.so.${MAJOR_VERSION}" && ! -L "${LIB_NAME}.so.${MAJOR_VERSION}" ]]; then + rm -f "${LIB_NAME}.so.${MAJOR_VERSION}" + fi + if [[ -e "${LIB_NAME}.so" && ! -L "${LIB_NAME}.so" ]]; then + rm -f "${LIB_NAME}.so" + fi + ln -sf "$VERSIONED_SO" "${LIB_NAME}.so.${MAJOR_VERSION}" + ln -sf "${LIB_NAME}.so.${MAJOR_VERSION}" "${LIB_NAME}.so" + echo "Symlinks created for $LIB_NAME:" + ls -l ${LIB_NAME}.so* + done - task: Bash@3 displayName: 'List downloaded ROCm files' inputs: diff --git a/.wordlist.txt b/.wordlist.txt index 1f96aa75c2..f37b79f188 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -90,6 +90,7 @@ Dask DataFrame DataLoader DataParallel +Debian DeepSpeed Dependabot Deprecations @@ -212,6 +213,7 @@ MiB MIGraphX MIOpen MIOpenGEMM +MIOpen's MIVisionX MLM MMA @@ -286,6 +288,7 @@ PCC PCI PCIe PEFT +PEQT PIL PILImage POR @@ -315,6 +318,7 @@ RDMA RDNA README RHEL +RMW RNN RNNs ROC @@ -331,6 +335,7 @@ ROCmSoftwarePlatform ROCmValidationSuite ROCprofiler ROCr +RPP RST RW Radeon @@ -338,6 +343,7 @@ RelWithDebInfo Req Rickle RoCE +Runfile Ryzen SALU SBIOS @@ -350,6 +356,7 @@ SENDMSG SGPR SGPRs SHA +SHARK's SIGQUIT SIMD SIMDs @@ -519,6 +526,7 @@ dbgapi de deallocation debuggability +debian denoise denoised denoises @@ -571,6 +579,7 @@ hipBLASLt's hipblaslt hipCUB hipFFT +hipFORT hipLIB hipRAND hipSOLVER @@ -592,6 +601,7 @@ hpp hsa hsakmt hyperparameter +hyperparameters iDRAC ib_core inband @@ -705,6 +715,7 @@ rocALUTION rocBLAS rocDecode rocFFT +rocHPCG rocJPEG rocLIB rocMLIR diff --git a/README.md b/README.md index 1dea391bbd..7839504ccf 100644 --- a/README.md +++ b/README.md @@ -76,7 +76,7 @@ The Build time will reduce significantly if we limit the GPU Architecture/s agai mkdir -p ~/WORKSPACE/ # Or any folder name other than WORKSPACE cd ~/WORKSPACE/ -export ROCM_VERSION=6.3.0 +export ROCM_VERSION=6.3.1 ~/bin/repo init -u http://github.com/ROCm/ROCm.git -b roc-6.3.x -m tools/rocm-build/rocm-${ROCM_VERSION}.xml ~/bin/repo sync diff --git a/RELEASE.md b/RELEASE.md index 22e9785152..ade28cb266 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -10,7 +10,7 @@ -# ROCm 6.3.0 release notes +# ROCm 6.3.1 release notes The release notes provide a summary of notable changes since the previous ROCm release. @@ -30,181 +30,62 @@ The release notes provide a summary of notable changes since the previous ROCm r ```{note} If you’re using Radeon™ PRO or Radeon GPUs in a workstation setting with a -display connected, continue to use ROCm 6.2.3. See the [Use ROCm on Radeon -GPUs](https://rocm.docs.amd.com/projects/radeon/en/latest/index.html) +display connected, continue to use ROCm 6.2.3. 
See the [Use ROCm on Radeon GPUs](https://rocm.docs.amd.com/projects/radeon/en/latest/index.html) documentation to verify compatibility and system requirements. ``` - ## Release highlights -The following are notable new features and improvements in ROCm 6.3.0. For changes to individual components, see +The following are notable new features and improvements in ROCm 6.3.1. For changes to individual components, see [Detailed component changes](#detailed-component-changes). -### rocJPEG added - -ROCm 6.3.0 introduces the rocJPEG library to the ROCm software stack. rocJPEG is a high performance -JPEG decode SDK for AMD GPUs. For more information, see the [rocJPEG -documentation](https://rocm.docs.amd.com/projects/rocJPEG/en/docs-6.3.0/index.html). - -### ROCm Compute Profiler and ROCm Systems Profiler - -These ROCm components have been renamed to reflect their new direction as part of the ROCm software -stack. - -- **ROCm Compute Profiler**, formerly Omniperf. For more information, see the [ROCm Compute Profiler - documentation](https://rocm.docs.amd.com/projects/rocprofiler-compute/en/docs-6.3.0/index.html) and - [https://github.com/ROCm/rocprofiler-compute](https://github.com/ROCm/rocprofiler-compute) on GitHub. - -- **ROCm Systems Profiler**, formerly Omnitrace. For more information, see the [ROCm Systems Profiler - documentation](https://rocm.docs.amd.com/projects/rocprofiler-systems/en/docs-6.3.0/index.html) and - [https://github.com/ROCm/rocprofiler-systems](https://github.com/ROCm/rocprofiler-systems) on GitHub. - For future compatibility, the Omnitrace project is available at [https://github.com/ROCm/omnitrace](https://github.com/ROCm/omnitrace). - See the [Omnitrace documentation](https://rocm.docs.amd.com/projects/omnitrace/en/latest/index.html). - - ```{note} - Update any references to the old binary names `omniperf` and `omnitrace` to - ensure compatibility with the new `rocprof-compute` and `rocprof-sys-*` binaries. - This might include updating environment variables, commands, and paths as - needed to avoid disruptions to your profiling or tracing workflows. - - See [ROCm Compute Profiler 3.0.0](#rocm-compute-profiler-3-0-0) and [ROCm Systems - Profiler 0.1.0](#rocm-systems-profiler-0-1-0). - ``` - -### SHARK AI toolkit for high-speed inferencing and serving introduced - -SHARK is an open-source toolkit for high-performance serving of popular generative AI and large -language models. In its initial release, SHARK contains the [Shortfin high-performance serving -engine](https://github.com/nod-ai/shark-ai/tree/main/shortfin), which is the SHARK inferencing -library that includes example server applications for popular models. - -This initial release includes support for serving the Stable Diffusion XL model on AMD Instinct™ -MI300 devices using ROCm. See the SHARK [release -page](https://github.com/nod-ai/shark-ai/releases/tag/v3.0.0) on GitHub to get started. - -### PyTorch 2.4 support added - -ROCm 6.3.0 adds support for PyTorch 2.4. See the [Compatibility -matrix](https://rocm.docs.amd.com/en/docs-6.3.0/compatibility/compatibility-matrix.html#framework-support-compatibility-matrix) -for the complete list of PyTorch versions tested for compatibility with ROCm. 
- -### Flash Attention kernels in Triton and Composable Kernel (CK) added to Transformer Engine - -Composable Kernel-based and Triton-based Flash Attention kernels have been integrated into -Transformer Engine via the ROCm Composable Kernel and AOTriton libraries. The -Transformer Engine can now optionally select a flexible and optimized Attention -solution for AMD GPUs. For more information, see [Fused Attention Backends on -ROCm](https://github.com/ROCm/TransformerEngine/tree/dev?tab=readme-ov-file#fused-attention-backends-on-rocm) -on GitHub. +### Per queue resiliency for Instinct MI300 accelerators -### HIP compatibility +The AMDGPU driver now includes enhanced resiliency for misbehaving applications on AMD Instinct MI300 accelerators. This helps isolate the impact of misbehaving applications, ensuring other workloads running on the same accelerator are unaffected. -HIP now includes the `hipStreamLegacy` API. It's equivalent to NVIDIA `cudaStreamLegacy`. For more -information, see [Global enum and -defines](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/reference/hip_runtime_api/global_defines_enums_structs_files/global_enum_and_defines.html#c.hipStreamLegacy) -in the HIP runtime API documentation. +### ROCm Runfile Installer -### Unload active amdgpu-dkms module without a system reboot - -On Instinct MI200 and MI300 systems, you can now unload the active `amdgpu-dkms` modules, and reinstall -and reload newer modules without a system reboot. If the new `dkms` package includes newer firmware -components, the driver will first reset the device and then load newer firmware components. - -### ROCm Offline Installer Creator updates - -The ROCm Offline Installer Creator 6.3 introduces a new feature to uninstall the previous version of -ROCm on the non-connected target system before installing a new version. This feature is only supported -on the Ubuntu distribution. See the [ROCm Offline Installer -Creator](https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.3.0/install/rocm-offline-installer.html) -documentation for more information. - -### OpenCL ICD loader separated from ROCm - -The OpenCL ICD loader is no longer delivered as part of ROCm, and must be installed separately -as part of the [ROCm installation -process](https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.3.0). For Ubuntu and RHEL -installations, the required package is installed as part of the setup described in -[Prerequisites](https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.3.0/install/prerequisites.html). -In other supported Linux distributions like SUSE, the required package must be installed in separate steps, which are included in the installation instructions. - -Because the OpenCL path is now separate from the ROCm installation for versioned and multi-version -installations, you must manually define the `LD_LIBRARY_PATH` to point to the ROCm -installation library as described in the [Post-installation -instructions](https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.3.0/install/post-install.html). -If the `LD_LIBRARY_PATH` is not set as needed for versioned or multi-version installations, OpenCL -applications like `clinfo` will fail to run and return an error. - -### ROCT Thunk Interface integrated into ROCr runtime - -The ROCT Thunk Interface package is now integrated into the ROCr runtime. As a result, the ROCT package -is no longer included as a separate package in the ROCm software stack. 
+ROCm 6.3.1 introduces the ROCm Runfile Installer, with initial support for Ubuntu 22.04. The ROCm Runfile Installer facilitates ROCm installation without using a native Linux package management system, with or without network or internet access. For more information, see the [ROCm Runfile Installer documentation](https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.3.1/install/rocm-runfile-installer.html). ### ROCm documentation updates -ROCm documentation continues to be updated to provide clearer and more comprehensive guidance for a -wider variety of user needs and use cases. +ROCm documentation continues to be updated to provide clearer and more comprehensive guidance for a wider variety of user needs and use cases. -- Documentation for Tensile is now available. Tensile is a library that creates - benchmark-driven backend implementations for GEMMs, serving primarily as a - backend component of rocBLAS. See the [Tensile - documentation](https://rocm.docs.amd.com/projects/Tensile/en/docs-6.3.0/src/index.html). +* Added documentation on training a model with ROCm Megatron-LM. AMD offers a Docker image for MI300X accelerators + containing essential components to get started, including ROCm libraries, PyTorch, and Megatron-LM utilities. See + [Training a model using ROCm Megatron-LM](https://rocm.docs.amd.com/en/docs-6.3.1/how-to/rocm-for-ai/train-a-model.html) + to get started. -- New documentation has been added to explain the advantages of enabling the IOMMU in passthrough - mode for Instinct accelerators and Radeon GPUs. See [Input-Output Memory Management - Unit](https://rocm.docs.amd.com/en/docs-6.3.0/conceptual/iommu.html). + The new ROCm Megatron-LM training Docker accompanies the [ROCm vLLM inference + Docker](https://rocm.docs.amd.com/en/docs-6.3.1/how-to/performance-validation/mi300x/vllm-benchmark.html) + as a set of ready-to-use containerized solutions to get started with using ROCm + for AI. -- The HIP documentation has been updated and includes the following new topics: +* Updated the [Instinct MI300X workload tuning + guide](https://rocm.docs.amd.com/en/docs-6.3.1/how-to/tuning-guides/mi300x/workload.html) with more current optimization + strategies. The updated sections include guidance on vLLM optimization, PyTorch TunableOp, and hipBLASLt tuning. - - [What is HIP?](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/what_is_hip.html) - - [HIP environment variables](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/reference/env_variables.html) - - [Initialization](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/how-to/hip_runtime_api/initialization.html) - and [error handling](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/how-to/hip_runtime_api/error_handling.html) - - [Hardware features](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/reference/hardware_features.html) - - [Call stack](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/how-to/hip_runtime_api/call_stack.html) - - [External resource interoperability](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/how-to/hip_runtime_api/external_interop.html) +* HIP graph-safe libraries operate safely in HIP execution graphs. [HIP graphs](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.1/how-to/hip_runtime_api/hipgraph.html#how-to-hip-graph) are an alternative way of executing tasks on a GPU that can provide performance benefits over launching kernels using the standard method via streams. 
A new topic showing whether each [ROCm library is graph-safe](https://rocm.docs.amd.com/en/docs-6.3.1/reference/graph-safe-support.html) has been added.

* The [Device memory](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.1/how-to/hip_runtime_api/memory_management/device_memory.html) topic in the HIP memory management section has been updated.

-- The following HIP documentation topics have been updated:
-
- - [HIP FAQ](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/faq.html)
- - [Deprecated APIs](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/reference/deprecated_api_list.html)
- - [Performance guidelines](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/how-to/performance_guidelines.html)
-
-- The following HIP documentation topics have been reorganized to improve usability:
-
- - [HIP documentation landing page](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/index.html)
- - [HIP runtime API reference topics](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/reference/hip_runtime_api_reference.html)
- - [Programming guide](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.0/how-to/hip_runtime_api.html)

+* The HIP documentation has expanded with new resources for developers:
+  * [Multi device management](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.1/how-to/hip_runtime_api/multi_device.html)
+  * [OpenGL interoperability](https://rocm.docs.amd.com/projects/HIP/en/docs-6.3.1/how-to/hip_runtime_api/opengl_interop.html)

## Operating system and hardware support changes

-ROCm 6.3.0 adds support for the following operating system and kernel versions:
-
-- Ubuntu 24.04.2 (kernel: 6.8 [GA], 6.11 [HWE])
-- Ubuntu 22.04.5 (kernel: 5.15 [GA], 6.8 [HWE])
-- RHEL 9.5 (kernel: 5.14.0)
-- Oracle Linux 8.10 (kernel: 5.15.0)
-
-See installation instructions at [ROCm installation for
-Linux](https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.3.0/).

+ROCm 6.3.1 adds support for Debian 12 (kernel: 6.1). Debian is supported only on AMD Instinct accelerators. See the installation instructions at [Debian native installation](https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.3.1/install/native-install/debian.html).

-ROCm 6.3.0 marks the end of support (EoS) for:
-
-- Ubuntu 24.04.1
-- Ubuntu 22.04.4
-- RHEL 9.3
-- RHEL 8.9
-- Oracle Linux 8.9
-
-Hardware support remains unchanged in this release.

+ROCm 6.3.1 enables support for the AMD Instinct MI325X accelerator. For more information, see [AMD Instinct™ MI325X Accelerators](https://www.amd.com/en/products/accelerators/instinct/mi300/mi325x.html).

See the [Compatibility
matrix](https://rocm.docs.amd.com/en/docs-6.3.1/compatibility/compatibility-matrix.html)
for more information about operating system and hardware compatibility.

## ROCm components

The following table lists the versions of ROCm components for ROCm 6.3.1, including any version
changes from 6.3.0 to 6.3.1. Click the component's updated version to go to a list of its changes.
Click {fab}`github` to go to the component's source code on GitHub.

<div>
@@ -226,48 +107,48 @@ Click {fab}`github` to go to the component's source code on GitHub. Libraries Machine learning and computer vision - Composable Kernel + Composable Kernel 1.1.0 - MIGraphX + MIGraphX 2.11.0 - MIOpen - 3.2.0 ⇒ 3.3.0 + MIOpen + 3.3.0 - MIVisionX - 3.0.0 ⇒ 3.1.0 + MIVisionX + 3.1.0 ⇒ 3.1.0 - rocAL - 2.0.0 ⇒ 2.1.0 + rocAL + 2.1.0 - rocDecode - 0.6.0 ⇒ 0.8.0 + rocDecode + 0.8.0 - rocJPEG + rocJPEG 0.6.0 - rocPyDecode - 0.1.0 ⇒ 0.2.0 + rocPyDecode + 0.2.0 - RPP - 1.8.0 ⇒ 1.9.1 + RPP + 1.9.1 @@ -275,8 +156,8 @@ Click {fab}`github` to go to the component's source code on GitHub. Communication - RCCL - 2.20.5 ⇒ 2.21.5 + RCCL + 2.21.5 ⇒ 2.21.5 @@ -284,84 +165,83 @@ Click {fab}`github` to go to the component's source code on GitHub. Math - hipBLAS - 2.2.0 ⇒ 2.3.0 + hipBLAS + 2.3.0 - hipBLASLt - 0.8.0 ⇒ 0.10.0 + hipBLASLt + 0.10.0 - hipFFT - 1.0.16 ⇒ 1.0.17 + hipFFT + 1.0.17 - hipfort - 0.4.0 ⇒ 0.5.0 + hipfort + 0.5.0 - hipRAND - 2.11.1 ⇒ 2.11.0 * + hipRAND + 2.11.1 - hipSOLVER - 2.2.0 ⇒ 2.3.0 + hipSOLVER + 2.3.0 - hipSPARSE - 3.1.1 ⇒ 3.1.2 + hipSPARSE + 3.1.2 - hipSPARSELt - 0.2.1 ⇒ 0.2.2 + hipSPARSELt + 0.2.2 - rocALUTION - 3.2.0 ⇒ 3.2.1 + rocALUTION + 3.2.1 - rocBLAS - 4.2.4 ⇒ 4.3.0 + rocBLAS + 4.3.0 - rocFFT - 1.0.30 ⇒ 1.0.31 + rocFFT + 1.0.31 - rocRAND - 3.1.1 ⇒ 3.2.0 + rocRAND + 3.2.0 - rocSOLVER - 3.26.2 ⇒ 3.27.0 + rocSOLVER + 3.27.0 - rocSPARSE - 3.2.1 ⇒ 3.3.0 + rocSPARSE + 3.3.0 - rocWMMA - 1.5.0 ⇒ 1.6.0 + rocWMMA + 1.6.0 - Tensile - 4.41.0 ⇒ 4.42.0 + Tensile + 4.42.0 @@ -369,23 +249,23 @@ Click {fab}`github` to go to the component's source code on GitHub. Primitives - hipCUB - 3.2.1 ⇒ 3.3.0 + hipCUB + 3.3.0 - hipTensor - 1.3.0 ⇒ 1.4.0 + hipTensor + 1.4.0 - rocPRIM - 3.2.2 ⇒ 3.3.0 + rocPRIM + 3.3.0 - rocThrust - 3.1.1 ⇒ 3.3.0 + rocThrust + 3.3.0 @@ -393,28 +273,28 @@ Click {fab}`github` to go to the component's source code on GitHub. Tools System management - AMD SMI - 24.6.3 ⇒ 24.7.1 + AMD SMI + 24.7.1 ⇒ 24.7.1 - ROCm Data Center Tool - 0.3.0 ⇒ 0.3.0 + ROCm Data Center Tool + 0.3.0 - rocminfo + rocminfo 1.0.0 - ROCm SMI - 7.3.0 ⇒ 7.4.0 + ROCm SMI + 7.4.0 - ROCmValidationSuite - 1.0.0 ⇒ 1.1.0 + ROCmValidationSuite + 1.1.0 @@ -422,38 +302,38 @@ Click {fab}`github` to go to the component's source code on GitHub. Performance - ROCm Bandwidth + ROCm Bandwidth Test 1.4.0 - ROCm Compute Profiler - 2.0.1 ⇒ 3.0.0 + ROCm Compute Profiler + 3.0.0 ⇒ 3.0.0 - ROCm Systems Profiler - 1.11.2 ⇒ 0.1.0 + ROCm Systems Profiler + 0.1.0 ⇒ 0.1.0 - ROCProfiler - 2.0.0 ⇒ 2.0.0 + ROCProfiler + 2.0.0 - ROCprofiler-SDK - 0.4.0 ⇒ 0.5.0 + ROCprofiler-SDK + 0.5.0 - ROCTracer + ROCTracer 4.1.0 @@ -463,32 +343,32 @@ Click {fab}`github` to go to the component's source code on GitHub. Development - HIPIFY + HIPIFY 18.0.0 ⇒ 18.0.0 - ROCdbgapi - 0.76.0 ⇒ 0.77.0 + ROCdbgapi + 0.77.0 - ROCm CMake + ROCm CMake 0.14.0 - ROCm Debugger (ROCgdb) + ROCm Debugger (ROCgdb) - 14.2 ⇒ 15.2 + 15.2 - ROCr Debug Agent + ROCr Debug Agent 2.0.3 Compilers - HIPCC + HIPCC 1.1.1 - llvm-project - 18.0.0 ⇒ 18.0.0 + llvm-project + 18.0.0 @@ -513,12 +393,12 @@ Click {fab}`github` to go to the component's source code on GitHub. Runtimes - HIP - 6.2.4 ⇒ 6.3.0 + HIP + 6.3.0 ⇒ 6.3.1 - ROCr Runtime + ROCr Runtime 1.14.0 @@ -532,1192 +412,131 @@ The following sections describe key changes to ROCm components. ### **AMD SMI** (24.7.1) -#### Added - -- Support for `amd-smi metric --ecc` & `amd-smi metric --ecc-blocks` on Guest VMs. 
- -- Support for GPU metrics 1.6 to `amdsmi_get_gpu_metrics_info()` - -- New violation status outputs and APIs: `amdsmi_status_t amdsmi_get_violation_status()`, `amd-smi metric --throttle`, and `amd-smi monitor --violation`. This feature is only available on MI300+ ASICs - -- Ability to view XCP (Graphics Compute Partition) activity within `amd-smi metric --usage`. Partition-specific features are only available on MI300+ ASICs - -- Added `LC_PERF_OTHER_END_RECOVERY` CLI output to `amd-smi metric --pcie` and updated `amdsmi_get_pcie_info()` to include this value. This feature is only available on MI300+ ASICs - -- Ability to retrieve a set of GPUs that are nearest to a given device at a specific link type level - - Added `amdsmi_get_link_topology_nearest()` function to amd-smi C and Python Libraries. - -- More supported utilization count types to `amdsmi_get_utilization_count()` - -- `amd-smi set -L/--clk-limit ...` command. This is equivalent to rocm-smi's `--extremum` command which sets sclk's or mclk's soft minimum or soft maximum clock frequency. - -- Unittest functionality to test `amdsmi` API calls in Python - -- GPU memory overdrive percentage to `amd-smi metric -o` - - Added `amdsmi_get_gpu_mem_overdrive_level()` function to AMD SMI C and Python Libraries. - -- Ability to retrieve connection type and P2P capabilities between two GPUs - - Added `amdsmi_topo_get_p2p_status()` function to amd-smi C and Python Libraries. - - Added retrieving P2P link capabilities to CLI `amd-smi topology`. - -- New `amdsmi_kfd_info_t` type and added information under `amd-smi list` - -- Subsystem device ID to `amd-smi static --asic`. There are no underlying changes to `amdsmi_get_gpu_asic_info`. - -- `Target_Graphics_Version` to `amd-smi static --asic` and `amdsmi_get_gpu_asic_info()`. - #### Changed -- Updated BDF commands to use KFD SYSFS for BDF: `amdsmi_get_gpu_device_bdf()`. This change aligns BDF output with ROCm SMI. - -- Moved Python tests directory path install location. - - `/opt//share/amd_smi/pytest/..` to `/opt//share/amd_smi/tests/python_unittest/..` - - Removed PyTest dependency. Python testing now depends on the unittest framework only. - -- Changed the `power` parameter in `amdsmi_get_energy_count()` to `energy_accumulator`. - - Changes propagate forwards into the Python interface as well. Backwards compatibility is maintained. - -- Updated Partition APIs and struct information and added `partition_id` to `amd-smi static --partition`. - - As part of an overhaul to partition information, some partition information will be made available in the `amdsmi_accelerator_partition_profile_t`. - - This struct will be filled out by a new API, `amdsmi_get_gpu_accelerator_partition_profile()`. - - Future data from these APIs will eventually be added to `amd-smi partition`. - -#### Removed - -- `amd-smi reset --compute-partition` and `... --memory-partition` and associated APIs - - This change is part of the partition redesign. Reset functionality will be reintroduced in a later update. - - Associated APIs include `amdsmi_reset_gpu_compute_partition()` and `amdsmi_reset_gpu_memory_partition()` - -- Usage of `_validate_positive` is removed in parser and replaced with `_positive_int` and `_not_negative_int` as appropriate. - - This will allow `0` to be a valid input for several options in setting CPUs where appropriate (for example, as a mode or NBIOID). - -#### Optimized - -- Adjusted ordering of `gpu_metrics` calls to ensure that `pcie_bw` values remain stable in `amd-smi metric` & `amd-smi monitor`. 
  - With this change, additional padding was added to `PCIE_BW` in `amd-smi monitor --pcie`.

#### Known issues

- See [AMD SMI manual build issue](#amd-smi-manual-build-issue).

#### Resolved issues

- Improved the offline install process and lowered the dependency for PyYAML.

- Fixed CPX not showing the total number of logical GPUs.

- Fixed an incorrect implementation of the Python API `amdsmi_get_gpu_metrics_header_info()`.

- `amdsmitst` `TestGpuMetricsRead` now prints metrics in the correct units.

#### Upcoming changes

- The Python API for `amdsmi_get_energy_count()` will deprecate the `power` field in a future ROCm release and use the `energy_accumulator` field instead.

- New memory and compute partition APIs will be added in a future ROCm release.
  - These APIs will be updated to fully populate the CLI and allow compute (accelerator) partitions to be set by profile ID.
  - One API will be provided to reset both memory and compute (accelerator) partitions.
  - The following APIs will remain:

    ```C
    amdsmi_status_t
    amdsmi_set_gpu_compute_partition(amdsmi_processor_handle processor_handle,
                                     amdsmi_compute_partition_type_t compute_partition);
    amdsmi_status_t
    amdsmi_get_gpu_compute_partition(amdsmi_processor_handle processor_handle,
                                     char *compute_partition, uint32_t len);
    amdsmi_status_t
    amdsmi_get_gpu_memory_partition(amdsmi_processor_handle processor_handle,
                                    char *memory_partition, uint32_t len);
    amdsmi_status_t
    amdsmi_set_gpu_memory_partition(amdsmi_processor_handle processor_handle,
                                    amdsmi_memory_partition_type_t memory_partition);
    ```

- `amd-smi set --compute-partition "SPX/DPX/CPX..."` will no longer be supported in a future ROCm release.
  - This is due to aligning with host setups and providing more robust partition information through the APIs outlined above. Furthermore, the new APIs, which will be available on both bare metal and host setups, can set partitions by profile ID.

- Added a preliminary `amd-smi partition` command.
  - The new partition command can display GPU information, including memory and accelerator partition information.
  - The command will reach full functionality once additional partition information from `amdsmi_get_gpu_accelerator_partition_profile()` has been implemented.

#### Changed

* `amd-smi monitor` displays `VCLOCK` and `DCLOCK` instead of `ENC_CLOCK` and `DEC_CLOCK`.

#### Resolved issues

* Fixed `amd-smi monitor`'s reporting of encode and decode information. `VCLOCK` and `DCLOCK` are
  now associated with both `ENC_UTIL` and `DEC_UTIL`.

```{note}
See the full [AMD SMI changelog](https://github.com/ROCm/amdsmi/blob/6.3.x/CHANGELOG.md) for more details and examples.
```

### **HIP** (6.3.1)

#### Added

* An activeQueues set that tracks only the queues that have a command submitted to them, which allows fast iteration in `waitActiveStreams`.

#### Resolved issues

* Fixed a deadlock in a specific customer application by preventing `hipLaunchKernel` latency from degrading as the number of idle streams grows.

### **HIPIFY** (18.0.0)

#### Added

* Support for:
  * NVIDIA CUDA 12.6.2
  * cuDNN 9.5.1
  * LLVM 19.1.3
  * Full `hipBLAS` 64-bit APIs
  * Full `rocBLAS` 64-bit APIs

#### Resolved issues

* Added missing support for device intrinsics and built-ins: `__all_sync`, `__any_sync`, `__ballot_sync`, `__activemask`, `__match_any_sync`, `__match_all_sync`, `__shfl_sync`, `__shfl_up_sync`, `__shfl_down_sync`, and `__shfl_xor_sync`.

### **MIVisionX** (3.1.0)

#### Changed

* AMD Clang is now the default CXX and C compiler.
* The dependency on rocDecode has been removed and automatic rocDecode installation is now disabled in the setup script.

#### Resolved issues

* The Canny failure on Instinct MI300 has been fixed.
* Ubuntu 24.04 CTest failures have been fixed.

#### Known issues

* CentOS, Red Hat, and SLES require the manual installation of `OpenCV` and `FFMPEG`.
* Hardware decode requires that ROCm is installed with `--usecase=graphics`.

#### Upcoming changes

* Optimized audio augmentations support for VX_RPP.

### **HIP** (6.3.0)

#### Added

* New HIP APIs:
  - `hipGraphExecGetFlags` returns the flags on an executable graph.
  - `hipGraphNodeSetParams` updates the parameters of a created node.
  - `hipGraphExecNodeSetParams` updates the parameters of a created node on an executable graph.
  - `hipDrvGraphMemcpyNodeGetParams` gets a memcpy node's parameters.
  - `hipDrvGraphMemcpyNodeSetParams` sets a memcpy node's parameters.
  - `hipDrvGraphAddMemFreeNode` creates a memory free node and adds it to a graph.
  - `hipDrvGraphExecMemcpyNodeSetParams` sets the parameters for a memcpy node in the given graphExec.
  - `hipDrvGraphExecMemsetNodeSetParams` sets the parameters for a memset node in the given graphExec.
  - `hipExtHostAlloc` preserves the functionality of `hipHostMalloc` (a hedged sketch follows this list).
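A minimal sketch of the `hipExtHostAlloc` drop-in noted in the last bullet above. The three-argument `(void**, size_t, unsigned int)` shape mirroring `hipHostMalloc` is an assumption based on the "preserves the functionality" wording, not something these notes confirm:

```cpp
#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
    void* pinned = nullptr;
    // Assumption: hipExtHostAlloc keeps hipHostMalloc's argument list, since
    // the bullet above describes it as preserving that functionality.
    if (hipExtHostAlloc(&pinned, 1 << 20, hipHostMallocDefault) != hipSuccess) {
        std::fprintf(stderr, "pinned host allocation failed\n");
        return 1;
    }
    // ... stage async host<->device copies through the pinned buffer ...
    hipFreeHost(pinned);  // un-deprecated counterpart (see Changed below)
    return 0;
}
```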
#### Changed

* Un-deprecated HIP APIs:
  - `hipHostAlloc`
  - `hipFreeHost`

#### Optimized

* Disabled the CPU wait in device synchronize to avoid idle time in applications such as Hugging Face models and PyTorch.
* Optimized multi-threaded dispatches to improve performance.
* Limited the software batch size to control the number of command submissions so that the runtime can handle them efficiently.
* Optimized HSA callback performance when a large number of events are recorded by multiple threads and submitted to multiple GPUs.

#### Resolved issues

* Fixed a soft hang in the runtime wait event when running TensorFlow.
* Fixed a memory leak in the `hipGraphInstantiate` API when a kernel is launched using `hipExtLaunchKernelGGL` with an event.
* Fixed a memory leak when the `hipGraphAddMemAllocNode` API is called.
* The `_sync()` versions of crosslane builtins, such as `shfl_sync()`, `__all_sync()`, and `__any_sync()`, continue to be hidden behind the preprocessor macro `HIP_ENABLE_WARP_SYNC_BUILTINS`, and will be enabled unconditionally in the next ROCm release.

#### Upcoming changes

* Deprecated HIP APIs:
  - `hipHostMalloc`, to be replaced by `hipExtHostAlloc`.
  - `hipHostFree`, to be replaced by `hipFreeHost`.

### **hipBLAS** (2.3.0)

#### Added

* Level 3 functions have an additional `ILP64` API for both C and Fortran (`_64` name suffix) with `int64_t` function arguments.

#### Changed

* `amdclang` is used as the default compiler instead of `g++`.
* Added a dependency on the `hipblas-common` package.

### **hipBLASLt** (0.10.0)

#### Added

* Support for the V2 CPP extension API for backward compatibility
* Support for `INT8` inputs with `INT8` outputs
* Support for the `FP32`/`FP64` data types for gfx110x
* Extension API `hipblaslt_ext::matmulIsTuned`
* Output of `atol` and `rtol` for `hipblaslt-bench` validation
* Output of the bench command for the hipblaslt CPP ext API path if `HIPBLASLT_LOG_MASK=32` is set
* Support for odd sizes for `FP8`/`BF8` GEMM

#### Changed

* Reorganized and added more sample code.
* Added a dependency on the `hipblas-common` package and removed the dependency on the `hipblas` package.

#### Optimized

* Support for a fused kernel for `HIPBLASLT_MATMUL_DESC_AMAX_D_POINTER` for the `FP8`/`BF8` data type
* Improved the library loading time.
* Improved the overall performance of the first returned solution.

#### Upcoming changes

* The V1 CPP extension API will be deprecated in a future release of hipBLASLt.

### **hipCUB** (3.3.0)

#### Added

* Support for large indices in `hipcub::DeviceSegmentedReduce::*` has been added, with the exception
  of `DeviceSegmentedReduce::Arg*`. Although rocPRIM's backend provides support for all reduce
  variants, CUB does not support large indices in `DeviceSegmentedReduce::Arg*`. For this reason,
  large index support is not available for `hipcub::DeviceSegmentedReduce::Arg*`. (A usage sketch
  follows this entry.)

#### Changed

* Changed the default value of `rmake.py -a` to `default_gpus`. This is equivalent to `gfx906:xnack-,gfx1030,gfx1100,gfx1101,gfx1102`.
* The NVIDIA backend now requires CUB, Thrust, and libcu++ 2.3.2.

#### Resolved issues

* Fixed an issue in `rmake.py` where the list storing CMake options would contain individual characters instead of a full string of options.
* Fixed an issue where `config.hpp` was not included in all hipCUB headers, resulting in build errors.
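To make the hipCUB `DeviceSegmentedReduce` note above concrete, here is a minimal sketch of the usual two-phase call (error handling elided; the standard CUB-style signature is assumed):

```cpp
#include <hipcub/hipcub.hpp>
#include <vector>

int main() {
    const int num_segments = 2;
    std::vector<int> h_in = {1, 2, 3, 4, 5, 6};
    std::vector<int> h_offsets = {0, 3, 6};  // segment i spans [offsets[i], offsets[i+1])

    int *d_in, *d_out, *d_offsets;
    hipMalloc(&d_in, h_in.size() * sizeof(int));
    hipMalloc(&d_out, num_segments * sizeof(int));
    hipMalloc(&d_offsets, h_offsets.size() * sizeof(int));
    hipMemcpy(d_in, h_in.data(), h_in.size() * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_offsets, h_offsets.data(), h_offsets.size() * sizeof(int),
              hipMemcpyHostToDevice);

    // Phase 1 sizes the temporary storage; phase 2 runs the reduction.
    void* d_temp = nullptr;
    size_t temp_bytes = 0;
    hipcub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out,
                                       num_segments, d_offsets, d_offsets + 1);
    hipMalloc(&d_temp, temp_bytes);
    hipcub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out,
                                       num_segments, d_offsets, d_offsets + 1);
    // d_out now holds {6, 15}.

    hipFree(d_temp); hipFree(d_offsets); hipFree(d_out); hipFree(d_in);
    return 0;
}
```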
- -### **hipFFT** (1.0.17) - -#### Changed - -* The AMD backend is now compiled using amdclang++ instead of hipcc. The NVIDIA CUDA backend still uses hipcc-nvcc. -* CLI11 replaces Boost Program Options as the command line parser for clients. -* Building with the address sanitizer option sets xnack+ for the relevant GPU architectures. - -### **hipfort** (0.5.0) - -#### Added - -* Added ROC-TX to the hipfort interfaces. - -#### Changed - -* Updated the hipSOLVER bindings. +* A deadlock in a specific customer application by preventing hipLaunchKernel latency degradation with number of idle streams. ### **HIPIFY** (18.0.0) #### Added -* CUDA 12.6.1 support -* cuDNN 9.5.0 support -* LLVM 19.1.1 support -* rocBLAS 64-bit APIs support -* Initial support for direct hipification of cuDNN into MIOpen under the `--roc` option -* Initial support for direct hipification of cuRAND into rocRAND under the `--roc` option -* Added a filtering ability for the supplementary hipification scripts - -#### Resolved issues - -* Correct `roc` header files support - -#### Known issues - -* Support for `fp8` data types - -### **hipRAND** (2.11.0[*](#id22)) - -#### Changed - -* Updated the default value for the `-a` argument from `rmake.py` to `gfx906:xnack-,gfx1030,gfx1100,gfx1101,gfx1102`. - -#### Known issues - -* In ROCm 6.3.0, the hipRAND package version is incorrectly set to `2.11.0`. In ROCm - 6.2.4, the hipRAND package version was `2.11.1`. The hipRAND version number will be corrected in a - future ROCm release. - -#### Resolved issues - -* Fixed an issue in `rmake.py` where the list storing the CMake options would contain individual characters instead of a full string of options. - -### **hipSOLVER** (2.3.0) - -#### Added - -* Auxiliary functions: - * `hipsolverSetDeterministicMode`, `hipsolverGetDeterministicMode` -* Compatibility-only functions: - * `potrf` - * `hipsolverDnXpotrf_bufferSize` - * `hipsolverDnXpotrf` - * `potrs` - * `hipsolverDnXpotrs` - * `geqrf` - * `hipsolverDnXgeqrf_bufferSize` - * `hipsolverDnXgeqrf` - -#### Changed - -* Binaries in debug builds no longer have a `-d` suffix. -* Changed rocSPARSE and SuiteSparse to be runtime dependencies by default. The `BUILD_WITH_SPARSE` CMake option can still be used - to convert them into build-time dependencies (now off by default). -* The `--no-sparse` option for the install script now only affects the hipSOLVER clients and their dependency on hipSPARSE. Use the - `BUILD_HIPSPARSE_TESTS` CMake option to enable tests for the `hipsolverSp` API (on by default). - -#### Upcoming changes - -* The Fortran bindings provided in `hipsolver_module.f90` have been deprecated. - The Fortran bindings provided by the hipfort project are recommended instead. - -### **hipSPARSE** (3.1.2) - -#### Added - -* Added an alpha version of the `hipsparse-bench` executable to facilitate comparing NVIDIA CUDA cuSPARSE and rocSPARSE backends. - -#### Changed - -* Changed the default compiler from hipcc to amdclang in the install script and CMake files. -* Improved the user documentation. - -#### Resolved issues - -* Fixed the gfortran dependency for the Azure Linux operating system. - -#### Known issues - -* In `hipsparseSpSM_solve()`, the external buffer is passed as a parameter. This does not match the NVIDIA CUDA cuSPARSE API. This extra external buffer parameter will be removed in a future release. For now, this extra parameter can be ignored and `nullptr` passed as it is unused internally by `hipsparseSpSM_solve()`. 
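The hipSPARSE known issue above can be illustrated with a hedged sketch: the trailing external-buffer argument of `hipsparseSpSM_solve()` is simply passed as `nullptr`, as the note suggests. Descriptor setup follows the cuSPARSE-style generic API that hipSPARSE mirrors; exact signatures should be verified against the installed headers:

```cpp
#include <hip/hip_runtime.h>
#include <hipsparse/hipsparse.h>

int main() {
    // Solve A * X = B for a 2x2 lower-triangular CSR matrix A.
    int   hRow[] = {0, 1, 3};
    int   hCol[] = {0, 0, 1};
    float hVal[] = {1.0f, 2.0f, 1.0f};
    float hB[]   = {1.0f, 4.0f}, hX[2];

    int *dRow, *dCol; float *dVal, *dB, *dX;
    hipMalloc(&dRow, sizeof(hRow)); hipMalloc(&dCol, sizeof(hCol));
    hipMalloc(&dVal, sizeof(hVal)); hipMalloc(&dB, sizeof(hB)); hipMalloc(&dX, sizeof(hX));
    hipMemcpy(dRow, hRow, sizeof(hRow), hipMemcpyHostToDevice);
    hipMemcpy(dCol, hCol, sizeof(hCol), hipMemcpyHostToDevice);
    hipMemcpy(dVal, hVal, sizeof(hVal), hipMemcpyHostToDevice);
    hipMemcpy(dB, hB, sizeof(hB), hipMemcpyHostToDevice);

    hipsparseHandle_t handle; hipsparseCreate(&handle);
    hipsparseSpMatDescr_t matA;
    hipsparseCreateCsr(&matA, 2, 2, 3, dRow, dCol, dVal,
                       HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
                       HIPSPARSE_INDEX_BASE_ZERO, HIP_R_32F);
    hipsparseFillMode_t fill = HIPSPARSE_FILL_MODE_LOWER;
    hipsparseDiagType_t diag = HIPSPARSE_DIAG_TYPE_NON_UNIT;
    hipsparseSpMatSetAttribute(matA, HIPSPARSE_SPMAT_FILL_MODE, &fill, sizeof(fill));
    hipsparseSpMatSetAttribute(matA, HIPSPARSE_SPMAT_DIAG_TYPE, &diag, sizeof(diag));

    hipsparseDnMatDescr_t matB, matX;
    hipsparseCreateDnMat(&matB, 2, 1, 2, dB, HIP_R_32F, HIPSPARSE_ORDER_COL);
    hipsparseCreateDnMat(&matX, 2, 1, 2, dX, HIP_R_32F, HIPSPARSE_ORDER_COL);

    float alpha = 1.0f;
    hipsparseSpSMDescr_t spsm; hipsparseSpSM_createDescr(&spsm);
    size_t bytes = 0; void* buffer = nullptr;
    hipsparseSpSM_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                             HIPSPARSE_OPERATION_NON_TRANSPOSE, &alpha, matA, matB,
                             matX, HIP_R_32F, HIPSPARSE_SPSM_ALG_DEFAULT, spsm, &bytes);
    hipMalloc(&buffer, bytes);
    hipsparseSpSM_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                           HIPSPARSE_OPERATION_NON_TRANSPOSE, &alpha, matA, matB,
                           matX, HIP_R_32F, HIPSPARSE_SPSM_ALG_DEFAULT, spsm, buffer);
    // Per the known issue above: the extra trailing buffer parameter is unused
    // internally by hipsparseSpSM_solve(), so nullptr can be passed here.
    hipsparseSpSM_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
                        HIPSPARSE_OPERATION_NON_TRANSPOSE, &alpha, matA, matB,
                        matX, HIP_R_32F, HIPSPARSE_SPSM_ALG_DEFAULT, spsm, nullptr);

    hipMemcpy(hX, dX, sizeof(hX), hipMemcpyDeviceToHost);  // expect {1, 2}
    // ... destroy the descriptors and handle, and free device memory ...
    return 0;
}
```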
### **hipSPARSELt** (0.2.2)

#### Added

* Support for a new data type combination: `INT8` inputs, `BF16` output, and `INT32` Matrix Core accumulation
* Support for row-major memory order (`HIPSPARSE_ORDER_ROW`)

#### Changed

* Changed the default compiler to amdclang++.

#### Upcoming changes

* `hipsparseLtDatatype_t` is deprecated and will be removed in the next major release of ROCm. `hipDataType` should be used instead.

### **hipTensor** (1.4.0)

#### Added

* Added support for tensor reduction, including APIs, CPU reference, unit tests, and documentation

#### Changed

* ASAN builds only support xnack+ targets.
* ASAN builds use `-mcmodel=large` to accommodate library sizes greater than 2 GB.
* Updated the permute backend to accommodate changes to element-wise operations.
* Updated the actor-critic implementation.
* Various documentation formatting updates.

#### Optimized

* Split kernel instances to improve build times.

#### Resolved issues

* Fixed a bug in randomized tensor input data generation.
* Fixed the default strides calculation to follow the column-major convention.
* Fixed a small memory leak by properly destroying HIP event objects in tests.

### **llvm-project** (18.0.0)

#### Resolved issues

* Fixed an issue where the compiler would incorrectly compile a program that used the `__shfl(var,
  srcLane, width)` function when one of the parameters to the function is undefined along some path
  to the function. See [issue #3499](https://github.com/ROCm/ROCm/issues/3499) on GitHub.

### **MIOpen** (3.3.0)

#### Added

- [RNN] LSTM forward pass
- [Mha] Mask is added for the forward pass
- [GLU] Gated Linear Unit (this is an experimental feature)
- [PReLU] Implemented the PReLU backward pass (this is an experimental feature)

#### Optimized

- MI300 TunaNet update: CK forward pass and WRW solvers updated

#### Resolved issues

- Fixed an unset stream when calling `hipMemsetAsync`.
- Fixed a memory leak caused by an incorrect transpose in find 2.0. See PR [#3285](https://github.com/ROCm/MIOpen/pull/3285) on GitHub.
- Fixed a `memcopy` data race by replacing `hipMemcpy` with `hipMemcpyWithStream`.

### **MIVisionX** (3.1.0)

#### Changed

* rocDecode is no longer installed by the setup script.
* The rocDecode dependency has been removed from the package installation.

#### Known issues

* See [MIVisionX memory access fault in Canny edge detection](#mivisionx-memory-access-fault-in-canny-edge-detection).
* Package installation requires the manual installation of OpenCV.
* Installation on CentOS/RedHat/SLES requires the manual installation of the `FFMPEG Dev` package.
* Hardware decode requires installation with `--usecase=graphics` in addition to `--usecase=rocm`.
#### Upcoming changes

* Optimized audio augmentations support for VX_RPP

### **RCCL** (2.21.5)

#### Added

* MSCCL++ integration for specific contexts
* Performance collection to `rccl_replayer`
* Tuner Plugin example for Instinct MI300
* Tuning table for a large number of nodes
* Support for amdclang++
* New Rome model

#### Changed

* Compatibility with NCCL 2.21.5
* Increased channel count for MI300X multi-node
* Enabled MSCCL for single-process multi-threaded contexts
* Enabled CPX mode for MI300X
* Enabled tracing with `rocprof`
* Improved version reporting
* Enabled GDRDMA for Linux kernel 6.4.0+

#### Resolved issues

* Fixed an issue where, on systems running Linux kernel 6.8.0 such as Ubuntu 24.04, Direct Memory
  Access (DMA) transfers between the GPU and NIC were disabled, impacting multi-node RCCL
  performance. See [issue #3772](https://github.com/ROCm/ROCm/issues/3772) on GitHub.
* Fixed model matching with PXN enabled.

#### Known issues

* MSCCL is temporarily disabled for AllGather collectives.
  - This can impact in-place messages (< 2 MB) with ~2x latency.
  - Older RCCL versions are not impacted.
  - This issue will be addressed in a future ROCm release.
* Unit tests do not exit gracefully when running on a single GPU.
  - This issue will be addressed in a future ROCm release.

### **rocAL** (2.1.0)

#### Added

* rocAL Pybind support for package installation has been added. To use the rocAL Python module, set the `PYTHONPATH`: `export PYTHONPATH=/opt/rocm/lib:$PYTHONPATH`
* Last batch policy, pad last batch, stick to shard, and shard size support have been added for the coco, caffe, caffe2, mxnet, tf, and cifar10 image readers.

#### Changed

* rocDecode is no longer installed by the setup script.
* The rocDecode dependency has been removed from the package installation.

#### Optimized

* CTest has been updated.

#### Resolved issues

* Test failures have been fixed.

#### Known issues

* The package installation requires the manual installation of `TurboJPEG` and `RapidJSON`.
* CentOS/RedHat/SLES requires the manual installation of the `FFMPEG Dev` package.
* Hardware decode requires installation with `--usecase=graphics` in addition to `--usecase=rocm`.

#### Upcoming changes

* Optimized audio augmentations support.

### **rocALUTION** (3.2.1)

#### Changed

* The default compiler has been changed from `hipcc` to `amdclang` in the installation script and CMake files.
* Changed the address sanitizer build targets. Now only `gfx908:xnack+`, `gfx90a:xnack+`, `gfx940:xnack+`, `gfx941:xnack+`, and `gfx942:xnack+` are built with `BUILD_ADDRESS_SANITIZER=ON`.

#### Resolved issues

* Fixed a hang in `RS-AMG` for Navi on some specific matrix sparsity patterns.
* Fixed wrong results in `Apply` on multi-GPU setups.
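Since the rocALUTION entry above calls out `Apply` on multi-GPU setups, here is a hedged single-GPU sketch of the `Apply` path. The calls follow rocALUTION's documented `LocalMatrix`/`LocalVector` interface; note that `SetDataPtrCSR` takes ownership of the raw CSR arrays:

```cpp
#include <rocalution/rocalution.hpp>

using namespace rocalution;

int main() {
    init_rocalution();

    // 2x2 identity in CSR form; SetDataPtrCSR takes ownership of these arrays.
    int*    row_offset = new int[3]{0, 1, 2};
    int*    col        = new int[2]{0, 1};
    double* val        = new double[2]{1.0, 1.0};

    LocalMatrix<double> A;
    LocalVector<double> x, y;
    A.SetDataPtrCSR(&row_offset, &col, &val, "A", 2, 2, 2);
    x.Allocate("x", 2); x.Ones();
    y.Allocate("y", 2); y.Zeros();

    // Run y = A * x on the accelerator backend.
    A.MoveToAccelerator(); x.MoveToAccelerator(); y.MoveToAccelerator();
    A.Apply(x, &y);

    A.Clear(); x.Clear(); y.Clear();
    stop_rocalution();
    return 0;
}
```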
-
-### **rocBLAS** (4.3.0)
-
-#### Added
-
-* Level 3 and EX functions have an additional `ILP64` API for both C and Fortran (`_64` name suffix) with `int64_t` function arguments
-
-#### Changed
-
-* amdclang is used as the default compiler instead of hipcc
-* Internal performance scripts use AMD SMI instead of the deprecated ROCm SMI
-
-#### Optimized
-
-* Improved performance of Level 2 gbmv
-* Improved performance of Level 2 gemv for float and double precisions for problem sizes (`TransA == N && m==n && m % 128 == 0`) measured on a gfx942 GPU
-
-#### Resolved issues
-
-* Fixed the `stbsv_strided_batched_64` Fortran binding
+* CentOS, Red Hat, and SLES require the manual installation of `OpenCV` and `FFMPEG`.
+* Hardware decode requires that ROCm is installed with `--usecase=graphics`.

 #### Upcoming changes

-* `rocblas_Xgemm_kernel_name` APIs are deprecated
-
-### **ROCdbgapi** (0.77.0)
+* Optimized audio augmentations support for VX_RPP.

-#### Added
-
-* Support for setting precise ALU exception reporting
-
-### **rocDecode** (0.8.0)
-
-#### Changed
-
-* Clang is now the default CXX compiler.
-* The new minimum supported version of `va-api` is 1.16.
-* New build and runtime options have been added to the `rocDecode-setup.py` setup script.
-
-#### Removed
-
-* Make tests have been removed. CTest is now used for both Make tests and package tests.
-* `mesa-amdgpu-dri-drivers` has been removed as a dependency on RHEL and SLES.
-
-#### Resolved issues
-
-* Fixed a bug in the size of output streams in the `videoDecodeBatch` sample.
-
-### **rocFFT** (1.0.31)
-
-#### Added
-
-* rocfft-test now includes a `--smoketest` option.
-* Implemented experimental APIs to allow computing FFTs on data
-  distributed across multiple MPI ranks. These APIs can be enabled with the
-  `ROCFFT_MPI_ENABLE` CMake option. This option defaults to `OFF`.
-
-  When `ROCFFT_MPI_ENABLE` is `ON`:
-
-  * `rocfft_plan_description_set_comm` can be called to provide an
-    MPI communicator to a plan description, which can then be passed
-    to `rocfft_plan_create`. Each rank calls
-    `rocfft_field_add_brick` to specify the layout of data bricks on
-    that rank.
-
-  * An MPI library with ROCm acceleration enabled is required at
-    build time and at runtime.
-
-#### Changed
-
-* Compilation uses amdclang++ instead of hipcc.
-* CLI11 replaces Boost Program Options as the command line parser for clients and samples.
-* Building with the address sanitizer option sets xnack+ on relevant GPU
-  architectures, and address-sanitizer support is added to runtime-compiled
-  kernels.
-
-### **ROCgdb** (15.2)
-
-#### Added
-
-- Support for precise ALU exception reporting for supported architectures. Precise ALU exception reporting is controlled with the following commands (see the sketch below):
-  - `set amdgpu precise-alu-exceptions`
-  - `show amdgpu precise-alu-exceptions`
+### **RCCL** (2.21.5)

 #### Changed

-- The `sysroot` or `solib-search-path` settings can now be used to locate files containing GPU code objects when opening a core dump. This allows opening GPU code objects on systems different from the one where the core dump was generated.
+* Enhanced the user documentation.

 #### Resolved issues

-- Fixed possible hangs when opening some AMDGPU core dumps in ROCgdb.
-- Addressed cases where the `roccoremerge` utility improperly handled the LOAD segment copy from the host core dump to the combined core dump.
+* Corrected some user help strings in `install.sh`.
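A minimal sketch of the ROCgdb commands listed in the ROCgdb section above, assuming a debuggable HIP binary named `./my_app` (the binary name and the `on` argument are assumptions; the commands themselves are quoted from the notes):

```bash
# Hedged example: enable and inspect precise ALU exception reporting
# non-interactively via gdb-style -ex commands.
rocgdb -ex "set amdgpu precise-alu-exceptions on" \
       -ex "show amdgpu precise-alu-exceptions" \
       ./my_app
```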
### **ROCm Compute Profiler** (3.0.0)

-#### Changed
-
-* Renamed to ROCm Compute Profiler from Omniperf.
-  * New package name: `rocprofiler-compute`
-  * New repository: [https://github.com/ROCm/rocprofiler-compute](https://github.com/ROCm/rocprofiler-compute)
-  * New binary name: `rocprof-compute`
-
-#### Known issues
-
-- See [ROCm Compute Profiler post-upgrade](#rocm-compute-profiler-post-upgrade).
-
-- See [ROCm Compute Profiler CTest failure in CI](#rocm-compute-profiler-ctest-failure-in-ci).
-
-### **ROCm Data Center Tool** (0.3.0)
-
-#### Added
-
-* RVS integration
-* Real-time logging for the diagnostic command
-* `--version` command
-* `XGMI_TOTAL_READ_KB` and `XGMI_TOTAL_WRITE_KB` monitoring metrics
-
-#### Known issues
-
-- See [ROCm Data Center Tool incorrect RHEL9 package version](#rocm-data-center-tool-incorrect-rhel9-package-version).
-
-### **ROCm SMI** (7.4.0)
-
-#### Added
-
-- **Added `rsmi_dev_memory_partition_capabilities_get`, which returns driver memory partition capabilities.**
-  The driver can now report which memory partition modes the user can set. The user can now see the available
-  memory partition modes when the memory partition mode set (`rsmi_dev_memory_partition_set`) returns an invalid argument error.
-
-- Support for GPU metrics 1.6 to `rsmi_dev_gpu_metrics_info_get()`. Updated
-  `rsmi_dev_gpu_metrics_info_get()` and the structure `rsmi_gpu_metrics_t` to include new fields for
-  PVIOL / TVIOL, XCP (Graphics Compute Partitions) stats, and `pcie_lc_perf_other_end_recovery`.
-
-- Ability to view raw GPU metrics using `rocm-smi --showmetrics`.
-
-#### Changed
-
-- Added back the C++ tests for `memorypartition_read_write`.
-
-- Updated `rsmi_dev_memory_partition_set` to not return until a successful restart of the AMD GPU driver.
-
-- All APIs now have the ability to catch the driver reporting invalid arguments.
-
-#### Removed
-
-- Removed the `--resetcomputepartition` and `--resetmemorypartition` options and associated APIs.
-  - This change is part of the partition feature redesign.
-  - This removes the related APIs `rsmi_dev_compute_partition_reset()` and `rsmi_dev_memory_partition_reset()`.

 #### Resolved issues

-- Fixed `rsmi_dev_target_graphics_version_get`, `rocm-smi --showhw`, and `rocm-smi --showprod` not displaying properly for MI2x or Navi 3x ASICs.
-
-#### Upcoming changes
-
-- C++ tests for `memorypartition_read_write` are to be re-enabled in a future ROCm release.
-
-```{note}
-See the full [ROCm SMI changelog](https://github.com/ROCm/rocm_smi_lib/blob/6.3.x/CHANGELOG.md) for more details and examples.
-```
+* Fixed a minor issue for users upgrading to ROCm 6.3 from 6.2 post-rename from `omniperf`.
+  See [ROCm Compute Profiler and ROCm Systems Profiler post-upgrade issues](#rocm-compute-profiler-and-rocm-systems-profiler-post-upgrade-issues).

 ### **ROCm Systems Profiler** (0.1.0)

-#### Changed
-
-* Renamed to ROCm Systems Profiler from Omnitrace.
-  * New package name: `rocprofiler-systems`
-  * New repository: [https://github.com/ROCm/rocprofiler-systems](https://github.com/ROCm/rocprofiler-systems)
-  * Reset the version to `0.1.0`
-  * New binary prefix: `rocprof-sys-*`
-
-#### Known issues
-
-- See [ROCm Systems Profiler post-upgrade](#rocm-systems-profiler-post-upgrade).
-
-### **ROCm Validation Suite** (1.1.0)
-
-#### Added
-
-- Support for the hipBLASLt BLAS library and an option to select the BLAS library in the `conf` file.
-
-#### Changed
-
-- Babel parameters made runtime configurable.
-
-#### Known issues
-
-- See [ROCm Validation Suite needs specified configuration file](#rocm-validation-suite-needs-specified-configuration-file).
-
-### **rocPRIM** (3.3.0)
-
-#### Added
-
-* The `--test smoke` option has been added to `rtest.py`. When `rtest.py` is called with this option, it runs a subset of tests such that the total test time is 5 minutes. Use `python3 ./rtest.py --test smoke` or `python3 ./rtest.py -t smoke` to run the smoke test.
-* The `--seed` option has been added to `run_benchmarks.py`. The `--seed` option specifies a seed for the generation of random inputs. When the option is omitted, the default behavior is to use a random seed for each benchmark measurement.
-* Added configuration autotuning to device partition (`rocprim::partition`, `rocprim::partition_two_way`, and `rocprim::partition_three_way`), to device select (`rocprim::select`, `rocprim::unique`, and `rocprim::unique_by_key`), and to device reduce by key (`rocprim::reduce_by_key`) to improve performance on selected architectures.
-* Added `rocprim::uninitialized_array` to provide uninitialized storage in local memory for user-defined types.
-* Added large segment support for `rocprim::segmented_reduce`.
-* Added a parallel `nth_element` device function similar to `std::nth_element`. `nth_element` places elements that are smaller than the nth element before the nth element, and elements that are bigger than the nth element after the nth element.
-* Added deterministic (bitwise reproducible) algorithm variants `rocprim::deterministic_inclusive_scan`, `rocprim::deterministic_exclusive_scan`, `rocprim::deterministic_inclusive_scan_by_key`, `rocprim::deterministic_exclusive_scan_by_key`, and `rocprim::deterministic_reduce_by_key`. These provide run-to-run stable results with non-associative operators such as float operations, at the cost of reduced performance.
-* Added parallel `partial_sort` and `partial_sort_copy` device functions similar to `std::partial_sort` and `std::partial_sort_copy`. `partial_sort` and `partial_sort_copy` arrange elements such that the elements are in the same order as a sorted list up to and including the middle index.
-
-#### Changed
-
-* Changed the default value of `rmake.py -a` to `default_gpus`. This is equivalent to `gfx906:xnack-,gfx1030,gfx1100,gfx1101,gfx1102`.
-* Modified the input size in device adjacent difference benchmarks. Observed performance with these benchmarks might be different.
-* Changed the default seed for `device_benchmark_segmented_reduce`.
-
-#### Removed
-
-* `rocprim::thread_load()` and `rocprim::thread_store()` have been deprecated. Use `dereference()` instead.
-
-#### Resolved issues
-
-* Fixed an issue in `rmake.py` where the list storing CMake options would contain individual characters instead of a full string of options.
-* Resolved an issue in `rtest.py` where it crashed if the `build` folder was created without `release` or `debug` subdirectories.
-* Resolved an issue with `rtest.py` on Windows where passing an absolute path to `--install_dir` caused a `FileNotFound` error.
-* rocPRIM functions are no longer forcefully inlined on Windows. This significantly reduces the build time of debug builds.
-* `block_load`, `block_store`, `block_shuffle`, `block_exchange`, and `warp_exchange` now use placement `new` instead of copy assignment (`operator=`) when writing to local memory. This fixes the behavior of custom types with non-trivial copy assignments.
-* Fixed a bug in the generation of input data for benchmarks, which caused incorrect performance to be reported in specific cases. It may affect the reported performance for one-byte types (`uint8_t` and `int8_t`) and instantiations of `custom_type`. Specifically, device binary search, device histogram, device merge, and warp sort are affected.
-* Fixed a bug for `rocprim::merge_path_search` where using `unsigned` offsets would produce incorrect results.
-* Fixed a bug for `rocprim::thread_load` and `rocprim::thread_store` where `float` and `double` were not cast to the correct type, resulting in incorrect results.
-* Resolved an issue where tests were failing when they were compiled with `-D_GLIBCXX_ASSERTIONS=ON`.
-* Resolved an issue where algorithms that used an internal serial merge routine caused a memory access fault that resulted in potential performance drops when using block sort, device merge sort (block merge), device merge, device partial sort, and device sort (merge sort).
-* Fixed memory leaks in unit tests due to missing calls to `hipFree()` and the incorrect use of hipGraphs.
-* Fixed an issue where certain inputs to `block_sort_merge()`, `device_merge_sort_merge_path()`, `device_merge()`, and `warp_sort_stable()` caused an assertion error during the call to `serial_merge()`.
-
-### **ROCProfiler** (2.0.0)
-
-#### Added
-
-- JSON output plugin for `rocprofv2`. The JSON file matches the Google Trace Format, making it easy to load in Perfetto, Chrome tracing, or Speedscope. For Speedscope, use the `--disable-json-data-flows` option, as Speedscope doesn't work with data flows.
-- `--no-serialization` flag to disable kernel serialization when `rocprofv2` is in counter collection mode. This allows `rocprofv2` to avoid deadlock when profiling certain programs in counter collection mode.
-- `FP64_ACTIVE` and `ENGINE_ACTIVE` metrics for the AMD Instinct MI300 accelerator
-- New HIP APIs with structs defined inside unions.
-- Early checks to confirm the eligibility of the ELF file in the ATT plugin
-- Support for kernel name filtering in `rocprofv2`
-- Barrier bit to read and stop packets
-
-#### Changed
-
-- Extended lifetime for proxy queues
-- Setting the `trace-start` option for `rocprof` to `off` now disables kernel tracing
-- `libpciaccess-dev` functions now load with `dlopen`
-- The `PcieAccessApi*` API and `void* libpciaccess_handle` are now initialized to `nullptr`
-
-#### Removed
-
-- Obsolete BSD and GPL licenses
-- `libsystemd-dev` from `CMakeLists.txt`
-
-#### Optimized
-
-- Improved ROCProfiler performance to reduce profiling time for large counter collection workloads
-
-#### Resolved issues
-
-- Bandwidth measurement on the AMD Instinct MI300 accelerator
-- Perfetto plugin issue of the `roctx` trace not getting displayed
-- `--help` for counter collection
-- Signal management issues in `queue.cpp`
-- Perfetto tracks for multi-GPU
-- Perfetto plugin usage with `rocsys`
-- Incorrect number of columns in the output CSV files for counter collection and kernel tracing
-- The ROCProfiler hang issue when running kernel trace, thread trace, or counter collection on the Iree benchmark for the AMD Instinct MI300 accelerator
-- Build errors thrown during parsing of unions
-- The system hang caused while running `--kernel-trace` with Perfetto for certain applications
-- The missing profiler records issue caused while running `--trace-period`
-- The hang issue of `ProfilerAPITest` of `runFeatureTests` on the AMD Instinct MI300 accelerator
-- Segmentation fault on Navi32
-
-
-### **ROCprofiler-SDK** (0.5.0)
-
-#### Added
-
-- Start and end timestamp columns to the counter collection `csv` output
-- Check to force tools to initialize the context id with zero
-- Support to specify hardware counters for collection using `rocprofv3` as `rocprofv3 --pmc [COUNTER [COUNTER ...]]`
-
-#### Changed
-
-- The `--marker-trace` option for `rocprofv3` now supports the legacy ROC-TX library `libroctx64.so` when the application is linked against the new library `librocprofiler-sdk-roctx.so`
-- Replaced the deprecated `hipHostMalloc` and `hipHostFree` functions with `hipExtHostAlloc` and `hipExtHostFree` for ROCm versions starting 6.3
-- Updated the `rocprofv3` `--help` options
-- Changed the naming of "agent profiling" to a more descriptive "device counting service". To convert existing tool or user code to the new name, use the following sed:
-  ```
-  find . -type f -exec sed -i 's/rocprofiler_agent_profile_callback_t/rocprofiler_device_counting_service_callback_t/g; s/rocprofiler_configure_agent_profile_counting_service/rocprofiler_configure_device_counting_service/g; s/agent_profile.h/device_counting_service.h/g; s/rocprofiler_sample_agent_profile_counting_service/rocprofiler_sample_device_counting_service/g' {} +
-  ```
-- Changed the naming of "dispatch profiling service" to a more descriptive "dispatch counting service".
-  To convert existing tool or user code to the new names, the following sed can be used:
-  ```
-  find . -type f -exec sed -i -e 's/dispatch_profile_counting_service/dispatch_counting_service/g' -e 's/dispatch_profile.h/dispatch_counting_service.h/g' -e 's/rocprofiler_profile_counting_dispatch_callback_t/rocprofiler_dispatch_counting_service_callback_t/g' -e 's/rocprofiler_profile_counting_dispatch_data_t/rocprofiler_dispatch_counting_service_data_t/g' -e 's/rocprofiler_profile_counting_dispatch_record_t/rocprofiler_dispatch_counting_service_record_t/g' {} +
-  ```
-- `FETCH_SIZE` metric on gfx94x now uses `TCC_BUBBLE` for 128B reads
-- PMC dispatch-based counter collection serialization is now per-device instead of being global across all devices
-
-#### Removed
-
-- `gfx8` metric definitions
-- `rocprofv3` installation from the `sbin` directory
-
-#### Resolved issues
-
-- Introduced subdirectory creation when `rocprofv3 --output-file` is used to specify a folder path
-- Fixed misaligned stores (undefined behavior) for buffer records
-- Fixed a crash when only scratch reporting is enabled
-- Fixed `MeanOccupancy` metrics
-- Fixed the aborted-application validation test to properly check for the `hipExtHostAlloc` command
-- Fixed implicit reduction of SQ and GRBM metrics
-- Fixed support for derived counters in the reduce operation
-- Fixed a bug in the max-in-reduce operation
-- Introduced a fix to handle a range of values for the `select()` dimension in the expressions parser
-- Fixed Navi3x kernel tracing issues by setting the conditional `aql::set_profiler_active_on_queue` only when counter collection is registered
-
-### **rocPyDecode** (0.2.0)
-
-#### Added
-
-* RGB and YUV PyTorch tensors
-* Python distribution wheel (`.whl`)
-* Multiple use-case samples
-
-#### Changed
-
-* Clang replaces `hipcc` as the default CXX compiler.
-
-#### Removed
-
-* Make tests have been removed. CTest is now used for both Make tests and package tests.
-
-#### Optimized
-
-* Setup script - build and runtime install options
-* Prerequisite installation helper Python scripts
-* Same GPU memory viewed as a PyTorch tensor
-
-#### Resolved issues
-
-* Fixed setup issues.
-
-### **rocRAND** (3.2.0)
-
-#### Added
-
-* Added a host generator for MT19937
-* Support for `rocrand_generate_poisson` in hipGraphs
-* Added `engine`, `distribution`, `mode`, `throughput_gigabytes_per_second`, and `lambda` columns for the CSV format in
-  `benchmark_rocrand_host_api` and `benchmark_rocrand_device_api`. To see these new columns, set `--benchmark_format=csv`
-  or `--benchmark_out_format=csv --benchmark_out="outName.csv"` (see the sketch after the rocThrust section below).
-
-#### Changed
-
-* Updated the default value for the `-a` argument from `rmake.py` to `gfx906:xnack-,gfx1030,gfx1100,gfx1101,gfx1102`.
-* `rocrand_discrete` for the MTGP32, LFSR113, and ThreeFry generators now uses the alias method, which is faster than binary search in the CDF.
-
-#### Resolved issues
-
-* Fixed an issue in `rmake.py` where the list storing the CMake options would contain individual characters instead of a full string of options.
-
-### **rocSOLVER** (3.27.0)
-
-#### Added
-
-* 64-bit APIs for existing functions:
-  - `LACGV_64`
-  - `LARF_64`
-  - `LARFG_64`
-  - `GEQR2_64` (with batched and strided\_batched versions)
-  - `GEQRF_64` (with batched and strided\_batched versions)
-  - `POTF2_64` (with batched and strided\_batched versions)
-  - `POTRF_64` (with batched and strided\_batched versions)
-  - `POTRS_64` (with batched and strided\_batched versions)
-
-#### Changed
-
-* The rocSPARSE library is now an optional dependency at runtime.
-  If rocSPARSE is not available, rocSOLVER's sparse refactorization and solver
-  functions will return `rocblas_status_not_implemented`.
-
-#### Optimized
-
-* Improved the performance of LARFG, LARF, and downstream functions such as GEQR2 and GEQRF on wave64 architectures
-* Improved the performance of BDSQR and GESVD
-* Improved the performance of STEDC and divide-and-conquer eigensolvers
-
-#### Resolved issues
-
-* Fixed a memory allocation issue in SYEVJ that could cause failures on clients that manage their own memory.
-* Fixed a synchronization issue with SYEVJ that could lead to a convergence failure for large matrices.
-* Fixed a convergence issue in STEIN stemming from the numerical orthogonality of the initial choice of eigenvectors.
-* Fixed a synchronization issue in STEIN.
-
-#### Known issues
-
-* A known issue in STEBZ can lead to errors in routines based on bisection to compute eigenvalues for symmetric/Hermitian matrices (for example, SYEVX/HEEVX and SYGVX/HEGVX), as well as singular values (for example, BDSVDX and GESVDX).
-
-### **rocSPARSE** (3.3.0)
-
-#### Added
-
-* `rocsparse_create_extract_descr`, `rocsparse_destroy_extract_descr`, `rocsparse_extract_buffer_size`, `rocsparse_extract_nnz`, and `rocsparse_extract` APIs to allow extraction of the upper or lower part of sparse CSR or CSC matrices.
-
-#### Changed
-
-* Changed the default compiler from hipcc to amdclang in the install script and CMake files.
-* Changed the address sanitizer build targets so that only gfx908:xnack+, gfx90a:xnack+, gfx940:xnack+, gfx941:xnack+, and gfx942:xnack+ are built when `BUILD_ADDRESS_SANITIZER=ON` is configured.
-
-#### Optimized
-
-* Improved the user documentation
-
-#### Resolved issues
-
-* Fixed the `csrmm` merge path algorithm so that the diagonal is clamped to the correct range.
-* Fixed a race condition in `bsrgemm` that could, on rare occasions, cause incorrect results.
-* Fixed an issue in `hyb2csr` where the CSR row pointer array was not being properly filled when `n=0`, `coo_nnz=0`, or `ell_nnz=0`.
-* Fixed scaling in `rocsparse_Xhybmv` when only performing `y=beta*y`, for example, where `alpha==0` in `y=alpha*Ax+beta*y`.
-* Fixed `rocsparse_Xgemmi` failures when the y grid dimension is too large. This occurred when `n >= 65536`.
-* Fixed the gfortran dependency for the Azure Linux operating system.
-
-### **rocThrust** (3.2.0)
-
-#### Added
-
-* Merged changes from upstream CCCL/thrust 2.3.2
-  * Only the NVIDIA backend uses `tuple` and `pair` types from libcu++; other backends continue to use the original Thrust implementations and hence do not require libcu++ (CCCL) as a dependency.
-* Added the `thrust::hip::par_det` execution policy to enable bitwise reproducibility on algorithms that are not bitwise reproducible by default.
-
-#### Changed
-
-* Changed the default value of `rmake.py -a` to `default_gpus`. This is equivalent to `gfx906:xnack-,gfx1030,gfx1100,gfx1101,gfx1102`.
-* Enabled the upstream (thrust) test suite for execution by default. It can be disabled by using the `-DENABLE_UPSTREAM_TESTS=OFF` CMake option.
-
-#### Resolved issues
-
-* Fixed an issue in `rmake.py` where the list storing CMake options would contain individual characters instead of a full string of options.
-* Fixed the HIP backend not passing `TestCopyIfNonTrivial` from the upstream (thrust) test suite.
-* Fixed tests failing when compiled with `-D_GLIBCXX_ASSERTIONS=ON`.
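Tying back to the rocRAND benchmark note above, a minimal sketch of producing the new CSV columns, assuming the benchmark binaries have been built locally (binary locations are placeholders; the flags are quoted verbatim from the notes):

```bash
# Hedged example: emit the new engine/distribution/mode/
# throughput_gigabytes_per_second/lambda columns in CSV form.
./benchmark_rocrand_host_api --benchmark_format=csv
# Alternatively, write the CSV to a file:
./benchmark_rocrand_device_api --benchmark_out_format=csv --benchmark_out="outName.csv"
```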
-
-### **rocWMMA** (1.6.0)
-
-#### Added
-
-* Added OCP `F8`/`BF8` datatype support
-
-#### Changed
-
-* Optimized some aos<->soa transforms with half-rotation offsets
-* Refactored the rocBLAS reference entry point for validation and benchmarking
-* `ROCWMMA_*` preprocessor configurations are now all assigned values
-* Updated the default architecture targets for ASAN builds
-* Updated the actor-critic implementation
-
-#### Resolved issues
-
-* Fixed a bug in `F64` validation due to faulty typecasting
-* Fixed a bug causing runtime compilation errors with hipRTC
-* Various documentation updates and fixes
-
-### **RPP** (1.9.1)
-
 #### Added

-* RPP Glitch and RPP Pixelate have been added to the HOST and HIP backends.
-* The following audio support was added to the HIP backend:
-  * Resample
-  * Pre-emphasis filter
-  * Down-mixing
-  * To Decibels
-  * Non-silent region
-
-#### Changed
-
-* Test prerequisites have been updated.
-* AMD advanced build flag.
-
-#### Removed
-
-* Older versions of TurboJPEG have been removed.
-
-#### Optimized
-
-* Updated the test suite.
+* Improvements to support OMPT target offload.

 #### Resolved issues

-* macOS build
-* RPP Test Suite: augmentations fix
-* Copy: bugfix for the `NCDHW` layout
-* MIVisionX compatibility fix: Resample and pre-emphasis filter
-
-#### Known issues
-
-* Package installation only supports the HIP backend.
-
-#### Upcoming changes
-
-* Optimized audio augmentations
-
-### **Tensile** (4.42.0)
-
-#### Added
-
-- Testing and documentation for `MasterSolutionLibrary.ArchitectureIndexMap` and `remapSolutionIndicesStartingFrom`
-- Functions for writing the master file
-- `tPrint` and reconciled printing options
-- Python unit test coverage report
-- Factored embed library logic into a function and test
-- `clang++` as the `cxx` compiler option for Windows
-- Logic to cope with different compilers
-- `toFile` function to include `generateManifest`, moved to utilities
-- Profiling CI job
-- Support for `amdclang` and use of defaults
-- Architecture management functions in `TensileCreateLibrary`
-- `TensileCreateLibrary` CLI reference docs
-- New documentation for the Sphinx prototype and built out the skeleton
-- Contributor and developer guide
-- Prediction model for the optimal number of Stream-K tiles to run
-  - Two-tile algorithm with Stream-K after DP
-  - Atomic two-tile Stream-K and clean-up tuning parameters
-- Using glob to find logic files in `TensileCreateLibrary`
-- Function to confirm a supported compiler rather than raw logic
+* Fixed an issue with generated Perfetto files. See [issue #3767](https://github.com/ROCm/ROCm/issues/3767) for more information.

-#### Changed

+* Fixed an issue with merging multiple `.proto` files.
-- Improved rocBLAS build output by allowing warning suppression, ignoring developer warnings, displaying a progress bar, and quiet printing
-- Reordered extensions for Windows in the `which` function
-- Updated the `amdclang++` and `asm` directories
-- Updated duplicate marking tests with mocks
-- Restored print ordering
-- Print option
-- Bumped rocm-docs-core from 1.2.0 to 1.5.0 in `/docs/sphinx`
-- Refactored kernel duplicate matching
-- Refactored `generateLogicDataAndSolutions`
-- Restricted XCC mapping to gfx942
-- Refactored argument parsing in `TensileCreateLibrary`
-- Disabled failing RHEL9 tests
-- Changed line length to 100 characters for formatting
-- Changed YAML operations to use the C `libyaml` backend
-- Improved warning text
-- Updated clang support for Windows
-- Updated the `supportedCompiler` function
-- Clang support on Windows to require use of conditional choices and defaults
-- Refactored the sanity check in `TensileCreateLibrary`
-- Moved client config logic from the `TensileCreateLibrary` main into `createClientConfig`
-- Updated `verifyManifest` in `TensileCreateLibrary`
-- Updated RTD configs
-- Cleaned up CMake to avoid redundant work during client builds
-- Updated Stream-K debug settings
-
-#### Removed
-
-- Deprecated flag from the CI profiling job
-- Diagnostic print
-- Globals from `prepAsm`
-- Deprecated `package-library` option
-- Duplicate `which` function and minor cleanup
-
-#### Optimized
-
-- To optimize the performance of Stream-K kernels:
-  - Introduced an analytical grid size prediction model
-  - Remapped XCC-based workgroups
+* Fixed an issue causing GPU resource data to be missing from traces of Instinct MI300A systems.

-#### Resolved issues
-
-- Fixed Stream-K XCC configs for gfx942
-- Updated the WMMA capability command for ISA 10+
-- Fixed a progress bar character encoding error on Windows
-- Fixed solution redundancy removal
-- Fixed tuning imports for `pyyaml`
-- Fixed printing of ASM capabilities for ROCm versions prior to 6.3
-- Fixed code objects by filtering kernels with build errors and unprocessed kernels
-- Fixed fully qualified `std::get` in contraction solutions
-- Fixed the `add -v flag` and changed the system invocation
-- Used conditional imports for new dependencies to fix the yaml `CSafe` load and dump import and the rich terminal print import
-- Fixed comments on `scalarStaticDivideAndRemainder`
+* Fixed a minor issue for users upgrading to ROCm 6.3 from 6.2 post-rename from `omnitrace`.
+  See [ROCm Compute Profiler and ROCm Systems Profiler post-upgrade issues](#rocm-compute-profiler-and-rocm-systems-profiler-post-upgrade-issues).

 ## ROCm known issues

 ROCm known issues are noted on {fab}`github` [GitHub](https://github.com/ROCm/ROCm/labels/Verified%20Issue). For known issues related to
 individual components, review the [Detailed component changes](#detailed-component-changes).

-### Instinct MI300X reports incorrect raw GPU timestamps
-
-On MI300X accelerators, the command processor firmware reports incorrect raw GPU timestamps. This
-issue is under investigation and will be addressed in a future release. See [GitHub issue #4079](https://github.com/ROCm/ROCm/issues/4079).
-
-### Instinct MI300 series: backward weights convolution performance issue
-
-A performance issue affects certain tensor shapes during backward weights convolution when using
-FP16 or FP32 data types on Instinct MI300 series accelerators. This issue will be addressed in a future ROCm release.
-See [GitHub issue #4080](https://github.com/ROCm/ROCm/issues/4080).
-
-To mitigate the issue during model training, set the following environment variables:
-
-```bash
-export MIOPEN_FIND_MODE=3
-export MIOPEN_FIND_ENFORCE=3
-```
-
-These settings enable auto-tuning on the first occurrence of a new tensor shape. The tuning results
-are stored in the user database, eliminating the need for repeated tuning when the same shape is
-encountered in subsequent runs. See the
-[MIOpen](https://rocm.docs.amd.com/en/latest/how-to/tuning-guides/mi300x/workload.html#miopen)
-section in the workload optimization guide to learn more about the MIOpen auto-tuning capabilities.
-
-### TransferBench package not functional
-
-TransferBench packages included in the ROCm 6.3.0 release are not compiled properly and are not
-functional for most GPU targets, except for gfx906. Full functionality will be available
-in a future ROCm release. See [GitHub issue #4081](https://github.com/ROCm/ROCm/issues/4081).
+### PCI Express Qualification Tool failure on Debian 12

-TransferBench is a utility for benchmarking simultaneous transfers between user-specified devices
-(CPUs or GPUs). See the documentation at [TransferBench
-documentation](https://rocm.docs.amd.com/projects/TransferBench/en/docs-6.3.0/index.html). If you
-want to use TransferBench, access the properly compiled packages at
-[https://github.com/ROCm/TransferBench/releases](https://github.com/ROCm/TransferBench/releases).
+The PCI Express Qualification Tool (PEQT) module in the ROCm Validation Suite (RVS) might fail due to a segmentation issue in Debian 12 (bookworm). When it fails, PEQT cannot determine the characteristics of the PCIe interconnect between the host platform and the GPU, such as support for Gen 3 atomic completers, DMA transfer statistics, link speed, and link width. As an alternative, the standard PCIe command `lspci` can be used to view the characteristics of the PCIe bus interconnect with the GPU. This issue is under investigation and will be addressed in a future release. See [GitHub issue #4175](https://github.com/ROCm/ROCm/issues/4175).

-### ROCm Compute Profiler post-upgrade
+## ROCm resolved issues

-In ROCm 6.3.0, the `omniperf` package is now named `rocprofiler-compute`. As a result, running `apt install omniperf` will fail to locate the package.
-Instead, use `apt install rocprofiler-compute`. See [ROCm Compute Profiler 3.0.0](#rocm-compute-profiler-3-0-0).
+The following are previously known issues resolved in this release. For resolved issues related to
+individual components, review the [Detailed component changes](#detailed-component-changes).

-When upgrading from ROCm 6.2 to 6.3, any existing `/opt/rocm-6.2/../omniperf` folders are not
-automatically removed. To clean up these folders, manually uninstall Omniperf using `apt remove omniperf`.
-See [GitHub issue #4082](https://github.com/ROCm/ROCm/issues/4082).
+### Instinct MI300 series: backward weights convolution performance issue

-### ROCm Systems Profiler post-upgrade
+Fixed a performance issue affecting certain tensor shapes during backward weights convolution when using FP16 or FP32 data types on Instinct MI300 series accelerators. See [GitHub issue #4080](https://github.com/ROCm/ROCm/issues/4080).

-In ROCm 6.3.0, the `omnitrace` package is now named `rocprofiler-systems`. As a result, running `apt install omnitrace` will fail to locate the package.
-Instead, use `apt install rocprofiler-systems` (see the sketch below). See [ROCm Systems Profiler 0.1.0](#rocm-systems-profiler-0-1-0).
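A minimal sketch of the package rename cleanup described in the two post-upgrade notes above, assuming a Debian-based system where the old packages are still installed (the individual commands are quoted from the notes):

```bash
# Hedged example: drop the stale pre-rename packages, then install the
# renamed ROCm Compute Profiler and ROCm Systems Profiler packages.
sudo apt remove omniperf omnitrace
sudo apt install rocprofiler-compute rocprofiler-systems
```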
+### ROCm Compute Profiler and ROCm Systems Profiler post-upgrade issues

-When upgrading from ROCm 6.2 to 6.3, any existing `/opt/rocm-6.2/../omnitrace` folders are not
-automatically removed. To clean up these folders, manually uninstall Omnitrace using `apt remove omnitrace`.
-See [GitHub issue #4083](https://github.com/ROCm/ROCm/issues/4083).
+Packaging metadata for ROCm Compute Profiler (`rocprofiler-compute`) and ROCm Systems Profiler
+(`rocprofiler-systems`) has been updated to handle the renaming from Omniperf and Omnitrace,
+respectively. This fixes minor issues when upgrading from ROCm 6.2 to 6.3. For more information, see the GitHub issues
+[#4082](https://github.com/ROCm/ROCm/issues/4082) and
+[#4083](https://github.com/ROCm/ROCm/issues/4083).

 ### Stale file due to OpenCL ICD loader deprecation

-When upgrading from ROCm 6.2.x to ROCm 6.3.0, the [removal of the `rocm-icd-loader`
-package](#opencl-icd-loader-separated-from-rocm) leaves a stale file in the old `rocm-6.2.x`
-directory. This has no functional impact. As a workaround, manually uninstall the
-`rocm-icd-loader` package to remove the stale file. This issue will be addressed in a future ROCm
-release. See [GitHub issue #4084](https://github.com/ROCm/ROCm/issues/4084).
-
-### ROCm Compute Profiler CTest failure in CI
-
-When running ROCm Compute Profiler's (`rocprof-compute`) CTest in the Azure CI environment, the
-`rocprof-compute` execution test fails. This issue is due to an outdated test file that was not renamed
-(`omniperf` to `rocprof-compute`), and to the `ROCM_PATH` environment variable not being set in
-the Azure CI environment, causing the tool to be unable to extract chip information as expected.
-This issue will be addressed in a future ROCm release. See [GitHub issue #4085](https://github.com/ROCm/ROCm/issues/4085).
-
-### MIVisionX memory access fault in Canny edge detection
-
-Canny edge detection kernels might access out-of-bounds memory locations while
-computing gradient intensities on edge pixels. This issue is isolated to
-Canny-specific use cases on Instinct MI300 series accelerators. This issue is
-resolved in the [MIVisionX `develop` branch](https://github.com/ROCm/mivisionx)
-and will be part of a future ROCm release. See [GitHub issue #4086](https://github.com/ROCm/ROCm/issues/4086).
-
-### Transformer Engine test_distributed_fused_attn aborts with fatal Python error
-
-The `test_distributed_fused_attn` Pytest case for JAX in [Transformer Engine
-for ROCm](https://github.com/ROCm/TransformerEngine) fails with a fatal Python
-error under certain conditions. The root cause is unrelated to Transformer Engine;
-it is due to an issue within XLA. This XLA issue is under investigation and
-will be addressed in a future release. See [GitHub issue #4087](https://github.com/ROCm/ROCm/issues/4087).
-
-### AMD SMI manual build issue
-
-Manual builds of AMD SMI fail due to a broken link in its build configuration.
-This affects past AMD SMI releases as well. The fix is underway and will be
-applied to all branches at [https://github.com/ROCm/amdsmi](https://github.com/ROCm/amdsmi).
-See [GitHub issue #4088](https://github.com/ROCm/ROCm/issues/4088).
-
-### ROCm Data Center Tool incorrect RHEL9 package version
-
-In previous versions of the ROCm Data Center Tool (RDC) included with ROCm 6.2 for RHEL9, RDC's version
-number was incorrectly set to `1.0.0`. ROCm 6.3 includes RDC with the correct version number.
-See [GitHub issue #4089](https://github.com/ROCm/ROCm/issues/4089).
-
-```{important}
-If you're using RHEL9, you must first uninstall the existing ROCm 6.2 RDC 1.0.0 package with `sudo yum
-remove rdc` before upgrading to the ROCm 6.3 RDC package with `sudo yum install rdc`.
-```
-
-### ROCm Validation Suite needs specified configuration file
-
-ROCm Validation Suite might fail for certain platforms if executed without using the `-c` option to
-specify the configuration file. See [RVS command line
-options](https://rocm.docs.amd.com/projects/ROCmValidationSuite/en/docs-6.3.0/ug1main.html#command-line-options)
-for more information. This issue will be addressed in a future release.
-See [GitHub issue #4090](https://github.com/ROCm/ROCm/issues/4090).
-
-## ROCm resolved issues
-
-The following are previously known issues resolved in this release. For resolved issues related to
-individual components, review the [Detailed component changes](#detailed-component-changes).
-
-### Bandwidth limitation in gang and non-gang modes on Instinct MI300A
-
-Fixed an issue where the expected target peak non-gang performance (~60 GB/s) and target peak gang
-performance (~90 GB/s) were not achieved. Previously, both gang and non-gang performance were
-observed to be limited at 45 GB/s. See [issue #3496](https://github.com/ROCm/ROCm/issues/3496) on
-GitHub.
+When upgrading from ROCm 6.2.x to ROCm 6.3.0, the removal of the `rocm-icd-loader` package
+left a stale file in the old `rocm-6.2.x` directory. This issue has been resolved: the stale files left during
+the upgrade from ROCm 6.2.x to ROCm 6.3.0 are removed when upgrading to ROCm 6.3.1. For more
+information, see [GitHub issue #4084](https://github.com/ROCm/ROCm/issues/4084).

 ## ROCm upcoming changes

@@ -1725,10 +544,11 @@ The following changes to the ROCm software stack are anticipated for future rele

 ### AMDGPU wavefront size compiler macro deprecation

-The `__AMDGCN_WAVEFRONT_SIZE__` macro is deprecated and support will be removed in an upcoming
-release. It is recommended that any use of this macro be removed. For more information, see [AMDGPU
-support](https://rocm.docs.amd.com/projects/llvm-project/en/docs-6.3.0/LLVM/clang/html/AMDGPUSupport.html).
+The `__AMDGCN_WAVEFRONT_SIZE__` macro will be deprecated in an upcoming
+release. It is recommended to remove any use of this macro (see the sketch below). For more information, see [AMDGPU
+support](https://rocm.docs.amd.com/projects/llvm-project/en/docs-6.3.1/LLVM/clang/html/AMDGPUSupport.html).

 ### HIPCC Perl scripts deprecation

 The HIPCC Perl scripts (`hipcc.pl` and `hipconfig.pl`) will be removed in an upcoming release.
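To act on the wavefront-size deprecation noted above, a minimal sketch for locating remaining uses of the macro in a project tree (the path and file extensions are placeholders):

```bash
# Hedged example: list source files that still reference the deprecated macro
# so they can be migrated before support is removed.
grep -rn --include='*.c' --include='*.cpp' --include='*.h' --include='*.hip' \
  '__AMDGCN_WAVEFRONT_SIZE__' ./src
```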
+ diff --git a/default.xml b/default.xml index 04325940ca..9d86ca6c6f 100644 --- a/default.xml +++ b/default.xml @@ -1,17 +1,14 @@ - - - - @@ -21,6 +18,8 @@ + + @@ -42,6 +41,7 @@ + @@ -57,6 +57,7 @@ + @@ -67,6 +68,7 @@ + diff --git a/docs/compatibility/compatibility-matrix-historical-6.0.csv b/docs/compatibility/compatibility-matrix-historical-6.0.csv index 0d770d9b03..ff13f3c290 100644 --- a/docs/compatibility/compatibility-matrix-historical-6.0.csv +++ b/docs/compatibility/compatibility-matrix-historical-6.0.csv @@ -1,118 +1,128 @@ -ROCm Version,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0, 6.1.2, 6.1.1, 6.1.0, 6.0.2, 6.0.0 - :ref:`Operating systems & kernels `,Ubuntu 24.04.2,"Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04",Ubuntu 24.04,,,,, - ,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3, 22.04.2","Ubuntu 22.04.4, 22.04.3, 22.04.2" - ,,,,,,"Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5" - ,"RHEL 9.5, 9.4","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4 [#red-hat94-past-60]_, 9.3, 9.2","RHEL 9.4 [#red-hat94-past-60]_, 9.3, 9.2","RHEL 9.4 [#red-hat94-past-60]_, 9.3, 9.2","RHEL 9.3, 9.2","RHEL 9.3, 9.2" - ,"RHEL 8.10","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8" - ,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4" - ,,,,,,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9 - ,Oracle Linux 8.10 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,Oracle Linux 8.9 [#oracle89-past-60]_,,, - ,.. _architecture-support-compatibility-matrix-past-60:,,,,,,,,, - :doc:`Architecture `,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3 - ,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2 - ,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA - ,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3 - ,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2 - ,.. _gpu-support-compatibility-matrix-past-60:,,,,,,,,, - :doc:`GPU / LLVM target `,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100 - ,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030 - ,gfx942,gfx942 [#mi300_624-past-60]_,gfx942 [#mi300_622-past-60]_,gfx942 [#mi300_621-past-60]_,gfx942 [#mi300_620-past-60]_, gfx942 [#mi300_612-past-60]_, gfx942 [#mi300_611-past-60]_, gfx942 [#mi300_610-past-60]_, gfx942 [#mi300_602-past-60]_, gfx942 [#mi300_600-past-60]_ - ,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a - ,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908 - ,,,,,,,,,, - FRAMEWORK SUPPORT,.. 
_framework-support-compatibility-matrix-past-60:,,,,,,,,, - :doc:`PyTorch `,"2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13" - :doc:`TensorFlow `,"2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.14.0, 2.13.1, 2.12.1","2.14.0, 2.13.1, 2.12.1" - :doc:`JAX `,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26 - `ONNX Runtime `_,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1 - ,,,,,,,,,, - THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix-past-60:,,,,,,,,, - `UCC `_,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.2.0,>=1.2.0 - `UCX `_,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1 - ,,,,,,,,,, - THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix-past-60:,,,,,,,,, - Thrust,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1 - CUB,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1 - ,,,,,,,,,, +ROCm Version,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0, 6.1.2, 6.1.1, 6.1.0, 6.0.2, 6.0.0 + :ref:`Operating systems & kernels `,Ubuntu 24.04.2,Ubuntu 24.04.2,"Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04","Ubuntu 24.04.1, 24.04",Ubuntu 24.04,,,,, + ,Ubuntu 22.04.5,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3","Ubuntu 22.04.4, 22.04.3, 22.04.2","Ubuntu 22.04.4, 22.04.3, 22.04.2" + ,,,,,,,"Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5","Ubuntu 20.04.6, 20.04.5" + ,"RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.4, 9.3, 9.2","RHEL 9.3, 9.2","RHEL 9.3, 9.2" + ,RHEL 8.10,RHEL 8.10,"RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.10, 8.9","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8","RHEL 8.9, 8.8" + ,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4","SLES 15 SP5, SP4" + ,,,,,,,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9,CentOS 7.9 + ,Oracle Linux 8.10 [#mic300x-past-60]_,Oracle Linux 8.10 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,Oracle Linux 8.9 [#mic300x-past-60]_,,, +,Debian 12 [#mic300x-past-60]_,,,,,,,,,, + ,.. _architecture-support-compatibility-matrix-past-60:,,,,,,,,,, + :doc:`Architecture `,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3,CDNA3 + ,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2,CDNA2 + ,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA,CDNA + ,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3,RDNA3 + ,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2,RDNA2 + ,.. 
_gpu-support-compatibility-matrix-past-60:,,,,,,,,,, + :doc:`GPU / LLVM target `,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100,gfx1100 + ,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030,gfx1030 + ,gfx942,gfx942,gfx942 [#mi300_624-past-60]_,gfx942 [#mi300_622-past-60]_,gfx942 [#mi300_621-past-60]_,gfx942 [#mi300_620-past-60]_, gfx942 [#mi300_612-past-60]_, gfx942 [#mi300_611-past-60]_, gfx942 [#mi300_610-past-60]_, gfx942 [#mi300_602-past-60]_, gfx942 [#mi300_600-past-60]_ + ,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a,gfx90a + ,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908,gfx908 + ,,,,,,,,,,, + FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix-past-60:,,,,,,,,,, + :doc:`PyTorch `,"2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13","2.1, 2.0, 1.13" + :doc:`TensorFlow `,"2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.15.0, 2.14.0, 2.13.1","2.14.0, 2.13.1, 2.12.1","2.14.0, 2.13.1, 2.12.1" + :doc:`JAX `,0.4.35,0.4.35,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26,0.4.26 + `ONNX Runtime `_,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.17.3,1.14.1,1.14.1 + ,,,,,,,,,,, + THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix-past-60:,,,,,,,,,, + `UCC `_,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.3.0,>=1.2.0,>=1.2.0 + `UCX `_,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.15.0,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1,>=1.14.1 + ,,,,,,,,,,, + THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix-past-60:,,,,,,,,,, + Thrust,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1 + CUB,2.3.2,2.3.2,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.1,2.0.1 +,,,,,,,,,,, + ,,,,,,,,,,, KMD & USER SPACE [#kfd_support-past-60]_,.. _kfd-userspace-support-compatibility-matrix-past-60:,,,,,,,,, - Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x" - ,,,,,,,,,, - ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix-past-60:,,,,,,,,, - :doc:`Composable Kernel `,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0 - :doc:`MIGraphX `,2.11.0,2.10.0,2.10.0,2.10.0,2.10.0,2.9.0,2.9.0,2.9.0,2.8.0,2.8.0 - :doc:`MIOpen `,3.3.0,3.2.0,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0 - :doc:`MIVisionX `,3.1.0,3.0.0,3.0.0,3.0.0,3.0.0,2.5.0,2.5.0,2.5.0,2.5.0,2.5.0 - :doc:`rocAL `,2.1.0,2.0.0,2.0.0,2.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0 - :doc:`rocDecode `,0.8.0,0.6.0,0.6.0,0.6.0,0.6.0,0.6.0,0.5.0,0.5.0,N/A,N/A - :doc:`rocJPEG `,0.6.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A - :doc:`rocPyDecode `,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0,N/A,N/A,N/A,N/A,N/A - :doc:`RPP `,1.9.1,1.8.0,1.8.0,1.8.0,1.8.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0 - ,,,,,,,,,, - COMMUNICATION,.. 
_commlibs-support-compatibility-matrix-past-60:,,,,,,,,, - :doc:`RCCL `,2.21.5,2.20.5,2.20.5,2.20.5,2.20.5,2.18.6,2.18.6,2.18.6,2.18.3,2.18.3 - ,,,,,,,,,, - MATH LIBS,.. _mathlibs-support-compatibility-matrix-past-60:,,,,,,,,, - `half `_ ,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0 - :doc:`hipBLAS `,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0 - :doc:`hipBLASLt `,0.10.0,0.8.0,0.8.0,0.8.0,0.8.0,0.7.0,0.7.0,0.7.0,0.6.0,0.6.0 - :doc:`hipFFT `,1.0.17,1.0.16,1.0.15,1.0.15,1.0.14,1.0.14,1.0.14,1.0.14,1.0.13,1.0.13 - :doc:`hipfort `,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0 - :doc:`hipRAND `,2.11.0,2.11.1,2.11.0,2.11.0,2.11.0,2.10.16,2.10.16,2.10.16,2.10.16,2.10.16 - :doc:`hipSOLVER `,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.1,2.1.1,2.1.0,2.0.0,2.0.0 - :doc:`hipSPARSE `,3.1.2,3.1.1,3.1.1,3.1.1,3.1.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0 - :doc:`hipSPARSELt `,0.2.2,0.2.1,0.2.1,0.2.1,0.2.1,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0 - :doc:`rocALUTION `,3.2.1,3.2.1,3.2.0,3.2.0,3.2.0,3.1.1,3.1.1,3.1.1,3.0.3,3.0.3 - :doc:`rocBLAS `,4.3.0,4.2.4,4.2.1,4.2.1,4.2.0,4.1.2,4.1.0,4.1.0,4.0.0,4.0.0 - :doc:`rocFFT `,1.0.31,1.0.30,1.0.29,1.0.29,1.0.28,1.0.27,1.0.27,1.0.26,1.0.25,1.0.23 - :doc:`rocRAND `,3.2.0,3.1.1,3.1.0,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.0,2.10.17 - :doc:`rocSOLVER `,3.27.0,3.26.2,3.26.0,3.26.0,3.26.0,3.25.0,3.25.0,3.25.0,3.24.0,3.24.0 - :doc:`rocSPARSE `,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.2,3.1.2,3.1.2,3.0.2,3.0.2 - :doc:`rocWMMA `,1.6.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0 - :doc:`Tensile `,4.42.0,4.41.0,4.41.0,4.41.0,4.41.0,4.40.0,4.40.0,4.40.0,4.39.0,4.39.0 - ,,,,,,,,,, - PRIMITIVES,.. _primitivelibs-support-compatibility-matrix-past-60:,,,,,,,,, - :doc:`hipCUB `,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0 - :doc:`hipTensor `,1.4.0,1.3.0,1.3.0,1.3.0,1.3.0,1.2.0,1.2.0,1.2.0,1.1.0,1.1.0 - :doc:`rocPRIM `,3.3.0,3.2.2,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0 - :doc:`rocThrust `,3.3.0,3.1.1,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0 - ,,,,,,,,,, - SUPPORT LIBS,,,,,,,,,, - `hipother `_,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830 - `rocm-core `_,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0,6.1.2,6.1.1,6.1.0,6.0.2,6.0.0 - `ROCT-Thunk-Interface `_,N/A [#ROCT-rocr-past-60]_,20240607.5.7,20240607.5.7,20240607.4.05,20240607.1.4246,20240125.5.08,20240125.5.08,20240125.3.30,20231016.2.245,20231016.2.245 - ,,,,,,,,,, - SYSTEM MGMT TOOLS,.. 
_tools-support-compatibility-matrix-past-60:,,,,,,,,, - :doc:`AMD SMI `,24.7.1,24.6.3,24.6.3,24.6.3,24.6.2,24.5.1,24.5.1,24.4.1,23.4.2,23.4.2 - :doc:`ROCm Data Center Tool `,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0 - :doc:`rocminfo `,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0 - :doc:`ROCm SMI `,7.4.0,7.3.0,7.3.0,7.3.0,7.3.0,7.2.0,7.0.0,7.0.0,6.0.2,6.0.0 - :doc:`ROCm Validation Suite `,1.1.0,1.0.60204,1.0.60202,1.0.60201,1.0.60200,1.0.60102,1.0.60101,1.0.60100,1.0.60002,1.0.60000 - ,,,,,,,,,, - PERFORMANCE TOOLS,,,,,,,,,, - :doc:`ROCm Bandwidth Test `,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0 - :doc:`ROCm Compute Profiler `,3.0.0,2.0.1,2.0.1,2.0.1,2.0.1,N/A,N/A,N/A,N/A,N/A - :doc:`ROCm Systems Profiler `,0.1.0,1.11.2,1.11.2,1.11.2,1.11.2,N/A,N/A,N/A,N/A,N/A - :doc:`ROCProfiler `,2.0.60300,2.0.60204,2.0.60202,2.0.60201,2.0.60200,2.0.60102,2.0.60101,2.0.60100,2.0.60002,2.0.60000 - :doc:`ROCprofiler-SDK `,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,N/A,N/A,N/A,N/A,N/A - :doc:`ROCTracer `,4.1.60300,4.1.60204,4.1.60202,4.1.60201,4.1.60200,4.1.60102,4.1.60101,4.1.60100,4.1.60002,4.1.60000 - ,,,,,,,,,, - DEVELOPMENT TOOLS,,,,,,,,,, - :doc:`HIPIFY `,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483 - :doc:`ROCm CMake `,0.14.0,0.13.0,0.13.0,0.13.0,0.13.0,0.12.0,0.12.0,0.12.0,0.11.0,0.11.0 - :doc:`ROCdbgapi `,0.77.0,0.76.0,0.76.0,0.76.0,0.76.0,0.71.0,0.71.0,0.71.0,0.71.0,0.71.0 - :doc:`ROCm Debugger (ROCgdb) `,15.2.0,14.2.0,14.2.0,14.2.0,14.2.0,14.1.0,14.1.0,14.1.0,13.2.0,13.2.0 - `rocprofiler-register `_,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.3.0,0.3.0,0.3.0,N/A,N/A - :doc:`ROCr Debug Agent `,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3 - ,,,,,,,,,, - COMPILERS,.. _compilers-support-compatibility-matrix-past-60:,,,,,,,,, - `clang-ocl `_,N/A,N/A,N/A,N/A,N/A,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0 - :doc:`hipCC `,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0 - `Flang `_,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483 - :doc:`llvm-project `,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483 - `OpenMP `_,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483 - ,,,,,,,,,, - RUNTIMES,.. _runtime-support-compatibility-matrix-past-60:,,,,,,,,, - :doc:`AMD CLR `,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830 - :doc:`HIP `,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830 - `OpenCL Runtime `_,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0 - :doc:`ROCr Runtime `,1.14.0,1.14.0,1.14.0,1.14.0,1.13.0,1.13.0,1.13.0,1.13.0,1.12.0,1.12.0 + Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x","6.2.x, 6.1.x, 6.0.x, 5.7.x, 5.6.x" + ,,,,,,,,,,, + ML & COMPUTER VISION,.. 
_mllibs-support-compatibility-matrix-past-60:,,,,,,,,,, + :doc:`Composable Kernel `,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0,1.1.0 + :doc:`MIGraphX `,2.11.0,2.11.0,2.10.0,2.10.0,2.10.0,2.10.0,2.9.0,2.9.0,2.9.0,2.8.0,2.8.0 + :doc:`MIOpen `,3.3.0,3.3.0,3.2.0,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0 + :doc:`MIVisionX `,3.1.0,3.1.0,3.0.0,3.0.0,3.0.0,3.0.0,2.5.0,2.5.0,2.5.0,2.5.0,2.5.0 + :doc:`rocAL `,2.1.0,2.1.0,2.0.0,2.0.0,2.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0 + :doc:`rocDecode `,0.8.0,0.8.0,0.6.0,0.6.0,0.6.0,0.6.0,0.6.0,0.5.0,0.5.0,N/A,N/A + :doc:`rocJPEG `,0.6.0,0.6.0,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A + :doc:`rocPyDecode `,0.2.0,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0,N/A,N/A,N/A,N/A,N/A + :doc:`RPP `,1.9.1,1.9.1,1.8.0,1.8.0,1.8.0,1.8.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0 + ,,,,,,,,,,, + COMMUNICATION,.. _commlibs-support-compatibility-matrix-past-60:,,,,,,,,,, + :doc:`RCCL `,2.21.5,2.21.5,2.20.5,2.20.5,2.20.5,2.20.5,2.18.6,2.18.6,2.18.6,2.18.3,2.18.3 + ,,,,,,,,,,, + MATH LIBS,.. _mathlibs-support-compatibility-matrix-past-60:,,,,,,,,,, + `half `_ ,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0,1.12.0 + :doc:`hipBLAS `,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.0,2.1.0,2.1.0,2.0.0,2.0.0 + :doc:`hipBLASLt `,0.10.0,0.10.0,0.8.0,0.8.0,0.8.0,0.8.0,0.7.0,0.7.0,0.7.0,0.6.0,0.6.0 + :doc:`hipFFT `,1.0.17,1.0.17,1.0.16,1.0.15,1.0.15,1.0.14,1.0.14,1.0.14,1.0.14,1.0.13,1.0.13 + :doc:`hipfort `,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0 + :doc:`hipRAND `,2.11.1,2.11.0,2.11.1,2.11.0,2.11.0,2.11.0,2.10.16,2.10.16,2.10.16,2.10.16,2.10.16 +,,,,,,,,,,, + :doc:`hipSOLVER `,2.3.0,2.3.0,2.2.0,2.2.0,2.2.0,2.2.0,2.1.1,2.1.1,2.1.0,2.0.0,2.0.0 + :doc:`hipSPARSE `,3.1.2,3.1.2,3.1.1,3.1.1,3.1.1,3.1.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0 + :doc:`hipSPARSELt `,0.2.2,0.2.2,0.2.1,0.2.1,0.2.1,0.2.1,0.2.0,0.1.0,0.1.0,0.1.0,0.1.0 + :doc:`rocALUTION `,3.2.1,3.2.1,3.2.1,3.2.0,3.2.0,3.2.0,3.1.1,3.1.1,3.1.1,3.0.3,3.0.3 + :doc:`rocBLAS `,4.3.0,4.3.0,4.2.4,4.2.1,4.2.1,4.2.0,4.1.2,4.1.0,4.1.0,4.0.0,4.0.0 + :doc:`rocFFT `,1.0.31,1.0.31,1.0.30,1.0.29,1.0.29,1.0.28,1.0.27,1.0.27,1.0.26,1.0.25,1.0.23 + :doc:`rocRAND `,3.2.0,3.2.0,3.1.1,3.1.0,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.0,2.10.17 + :doc:`rocSOLVER `,3.27.0,3.27.0,3.26.2,3.26.0,3.26.0,3.26.0,3.25.0,3.25.0,3.25.0,3.24.0,3.24.0 + :doc:`rocSPARSE `,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.2,3.1.2,3.1.2,3.0.2,3.0.2 + :doc:`rocWMMA `,1.6.0,1.6.0,1.5.0,1.5.0,1.5.0,1.5.0,1.4.0,1.4.0,1.4.0,1.3.0,1.3.0 + :doc:`Tensile `,4.42.0,4.42.0,4.41.0,4.41.0,4.41.0,4.41.0,4.40.0,4.40.0,4.40.0,4.39.0,4.39.0 + ,,,,,,,,,,, + PRIMITIVES,.. _primitivelibs-support-compatibility-matrix-past-60:,,,,,,,,,, + :doc:`hipCUB `,3.3.0,3.3.0,3.2.1,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0 + :doc:`hipTensor `,1.4.0,1.4.0,1.3.0,1.3.0,1.3.0,1.3.0,1.2.0,1.2.0,1.2.0,1.1.0,1.1.0 + :doc:`rocPRIM `,3.3.0,3.3.0,3.2.2,3.2.0,3.2.0,3.2.0,3.1.0,3.1.0,3.1.0,3.0.0,3.0.0 + :doc:`rocThrust `,3.3.0,3.3.0,3.1.1,3.1.0,3.1.0,3.0.1,3.0.1,3.0.1,3.0.1,3.0.0,3.0.0 + ,,,,,,,,,,, + SUPPORT LIBS,,,,,,,,,,, + `hipother `_,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830 +,,,,,,,,,,, + `rocm-core `_,6.3.1,6.3.0,6.2.4,6.2.2,6.2.1,6.2.0,6.1.2,6.1.1,6.1.0,6.0.2,6.0.0 + `ROCT-Thunk-Interface `_,N/A [#ROCT-rocr-past-60]_,N/A [#ROCT-rocr-past-60]_,20240607.5.7,20240607.5.7,20240607.4.05,20240607.1.4246,20240125.5.08,20240125.5.08,20240125.3.30,20231016.2.245,20231016.2.245 + ,,,,,,,,,,, + SYSTEM MGMT TOOLS,.. 
_tools-support-compatibility-matrix-past-60:,,,,,,,,,, + :doc:`AMD SMI `,24.7.1,24.7.1,24.6.3,24.6.3,24.6.3,24.6.2,24.5.1,24.5.1,24.4.1,23.4.2,23.4.2 + :doc:`ROCm Data Center Tool `,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0,0.3.0 + :doc:`rocminfo `,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0 + :doc:`ROCm SMI `,7.4.0,7.4.0,7.3.0,7.3.0,7.3.0,7.3.0,7.2.0,7.0.0,7.0.0,6.0.2,6.0.0 + :doc:`ROCm Validation Suite `,1.1.0,1.1.0,1.0.60204,1.0.60202,1.0.60201,1.0.60200,1.0.60102,1.0.60101,1.0.60100,1.0.60002,1.0.60000 + ,,,,,,,,,,, + PERFORMANCE TOOLS,,,,,,,,,,, + :doc:`ROCm Bandwidth Test `,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0,1.4.0 + :doc:`ROCm Compute Profiler `,3.0.0,3.0.0,2.0.1,2.0.1,2.0.1,2.0.1,N/A,N/A,N/A,N/A,N/A + :doc:`ROCm Systems Profiler `,0.1.0,0.1.0,1.11.2,1.11.2,1.11.2,1.11.2,N/A,N/A,N/A,N/A,N/A + :doc:`ROCProfiler `,2.0.60301,2.0.60300,2.0.60204,2.0.60202,2.0.60201,2.0.60200,2.0.60102,2.0.60101,2.0.60100,2.0.60002,2.0.60000 +,,,,,,,,,,, + :doc:`ROCprofiler-SDK `,0.5.0,0.5.0,0.4.0,0.4.0,0.4.0,0.4.0,N/A,N/A,N/A,N/A,N/A + :doc:`ROCTracer `,4.1.60301,4.1.60300,4.1.60204,4.1.60202,4.1.60201,4.1.60200,4.1.60102,4.1.60101,4.1.60100,4.1.60002,4.1.60000 +,,,,,,,,,,, + ,,,,,,,,,,, + DEVELOPMENT TOOLS,,,,,,,,,,, + :doc:`HIPIFY `,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483 +,,,,,,,,,,, + :doc:`ROCm CMake `,0.14.0,0.14.0,0.13.0,0.13.0,0.13.0,0.13.0,0.12.0,0.12.0,0.12.0,0.11.0,0.11.0 + :doc:`ROCdbgapi `,0.77.0,0.77.0,0.76.0,0.76.0,0.76.0,0.76.0,0.71.0,0.71.0,0.71.0,0.71.0,0.71.0 + :doc:`ROCm Debugger (ROCgdb) `,15.2.0,15.2.0,14.2.0,14.2.0,14.2.0,14.2.0,14.1.0,14.1.0,14.1.0,13.2.0,13.2.0 + `rocprofiler-register `_,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.4.0,0.3.0,0.3.0,0.3.0,N/A,N/A + :doc:`ROCr Debug Agent `,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3,2.0.3 + ,,,,,,,,,,, + COMPILERS,.. _compilers-support-compatibility-matrix-past-60:,,,,,,,,,, + `clang-ocl `_,N/A,N/A,N/A,N/A,N/A,N/A,0.5.0,0.5.0,0.5.0,0.5.0,0.5.0 + :doc:`hipCC `,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.1.1,1.0.0,1.0.0,1.0.0,1.0.0,1.0.0 + `Flang `_,18.0.0.24491,18.0.0.24455,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483 + :doc:`llvm-project `,18.0.0.24455,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483 + `OpenMP `_,18.0.0.24455,18.0.0.24491,18.0.0.24392,18.0.0.24355,18.0.0.24355,18.0.0.24232,17.0.0.24193,17.0.0.24154,17.0.0.24103,17.0.0.24012,17.0.0.23483 +,,,,,,,,,,, + ,,,,,,,,,,, + RUNTIMES,.. _runtime-support-compatibility-matrix-past-60:,,,,,,,,,, + :doc:`AMD CLR `,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830 +,,,,,,,,,,, + :doc:`HIP `,6.3.42133,6.3.42131,6.2.41134,6.2.41134,6.2.41134,6.2.41133,6.1.40093,6.1.40092,6.1.40091,6.1.32831,6.1.32830 +,,,,,,,,,,, + `OpenCL Runtime `_,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0,2.0.0 + :doc:`ROCr Runtime `,1.14.0,1.14.0,1.14.0,1.14.0,1.14.0,1.13.0,1.13.0,1.13.0,1.13.0,1.12.0,1.12.0 diff --git a/docs/compatibility/compatibility-matrix.rst b/docs/compatibility/compatibility-matrix.rst index b940a0744f..ae157874f9 100644 --- a/docs/compatibility/compatibility-matrix.rst +++ b/docs/compatibility/compatibility-matrix.rst @@ -23,17 +23,16 @@ compatibility and system requirements. .. 
container:: format-big-table .. csv-table:: - :header: "ROCm Version", "6.3.0", "6.2.4", "6.1.0" + :header: "ROCm Version", "6.3.1", "6.3.0", "6.2.0" :stub-columns: 1 - :ref:`Operating systems & kernels `,Ubuntu 24.04.2,"Ubuntu 24.04.1, 24.04", - ,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4","Ubuntu 22.04.4, 22.04.3" - ,,,"Ubuntu 20.04.6, 20.04.5" - ,"RHEL 9.5, 9.4","RHEL 9.4, 9.3","RHEL 9.4 [#red-hat94]_, 9.3, 9.2" - ,"RHEL 8.10","RHEL 8.10, 8.9","RHEL 8.9, 8.8" - ,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP5, SP4" - ,,,CentOS 7.9 - ,Oracle Linux 8.10 [#oracle89]_,Oracle Linux 8.9 [#oracle89]_, + :ref:`Operating systems & kernels `,Ubuntu 24.04.2,Ubuntu 24.04.2,Ubuntu 24.04 + ,Ubuntu 22.04.5,Ubuntu 22.04.5,"Ubuntu 22.04.5, 22.04.4" + ,"RHEL 9.5, 9.4","RHEL 9.5, 9.4","RHEL 9.4, 9.3" + ,RHEL 8.10,RHEL 8.10,"RHEL 8.10, 8.9" + ,"SLES 15 SP6, SP5","SLES 15 SP6, SP5","SLES 15 SP6, SP5" + ,Oracle Linux 8.10 [#mi300x]_,Oracle Linux 8.10 [#mi300x]_,Oracle Linux 8.9 [#mi300x]_ + ,Debian 12 [#mi300x]_,, ,.. _architecture-support-compatibility-matrix:,, :doc:`Architecture `,CDNA3,CDNA3,CDNA3 ,CDNA2,CDNA2,CDNA2 @@ -43,115 +42,114 @@ compatibility and system requirements. ,.. _gpu-support-compatibility-matrix:,, :doc:`GPU / LLVM target `,gfx1100,gfx1100,gfx1100 ,gfx1030,gfx1030,gfx1030 - ,gfx942,gfx942 [#mi300_624]_, gfx942 [#mi300_610]_ + ,gfx942,gfx942,gfx942 [#mi300_620]_ ,gfx90a,gfx90a,gfx90a ,gfx908,gfx908,gfx908 ,,, FRAMEWORK SUPPORT,.. _framework-support-compatibility-matrix:,, - :doc:`PyTorch `,"2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13","2.1, 2.0, 1.13" - :doc:`TensorFlow `,"2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1","2.15.0, 2.14.0, 2.13.1" - :doc:`JAX `,0.4.26,0.4.26,0.4.26 + :doc:`PyTorch `,"2.4, 2.3, 2.2, 1.13","2.4, 2.3, 2.2, 2.1, 2.0, 1.13","2.3, 2.2, 2.1, 2.0, 1.13" + :doc:`TensorFlow `,"2.17.0, 2.16.2, 2.15.1","2.17.0, 2.16.2, 2.15.1","2.16.1, 2.15.1, 2.14.1" + :doc:`JAX `,0.4.35,0.4.35,0.4.26 `ONNX Runtime `_,1.17.3,1.17.3,1.17.3 ,,, THIRD PARTY COMMS,.. _thirdpartycomms-support-compatibility-matrix:,, `UCC `_,>=1.3.0,>=1.3.0,>=1.3.0 - `UCX `_,>=1.15.0,>=1.15.0,>=1.14.1 + `UCX `_,>=1.15.0,>=1.15.0,>=1.15.0 ,,, THIRD PARTY ALGORITHM,.. _thirdpartyalgorithm-support-compatibility-matrix:,, - Thrust,2.3.2,2.2.0,2.1.0 - CUB,2.3.2,2.2.0,2.1.0 + Thrust,2.3.2,2.3.2,2.2.0 + CUB,2.3.2,2.3.2,2.2.0 ,,, KMD & USER SPACE [#kfd_support]_,.. _kfd-userspace-support-compatibility-matrix:,, - Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x","6.3.x, 6.2.x, 6.1.x, 6.0.x, 5.7.x" + Tested user space versions,"6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x","6.3.x, 6.2.x, 6.1.x, 6.0.x" ,,, ML & COMPUTER VISION,.. _mllibs-support-compatibility-matrix:,, :doc:`Composable Kernel `,1.1.0,1.1.0,1.1.0 - :doc:`MIGraphX `,2.11.0,2.10.0,2.9.0 - :doc:`MIOpen `,3.3.0,3.2.0,3.1.0 - :doc:`MIVisionX `,3.1.0,3.0.0,2.5.0 - :doc:`rocAL `,2.1.0,2.0.0,1.0.0 - :doc:`rocDecode `,0.8.0,0.6.0,0.5.0 - :doc:`rocJPEG `,0.6.0,N/A,N/A - :doc:`rocPyDecode `,0.2.0,0.1.0,N/A - :doc:`RPP `,1.9.1,1.8.0,1.5.0 + :doc:`MIGraphX `,2.11.0,2.11.0,2.10.0 + :doc:`MIOpen `,3.3.0,3.3.0,3.2.0 + :doc:`MIVisionX `,3.1.0,3.1.0,3.0.0 + :doc:`rocAL `,2.1.0,2.1.0,1.0.0 + :doc:`rocDecode `,0.8.0,0.8.0,0.6.0 + :doc:`rocJPEG `,0.6.0,0.6.0,N/A + :doc:`rocPyDecode `,0.2.0,0.2.0,0.1.0 + :doc:`RPP `,1.9.1,1.9.1,1.8.0 ,,, COMMUNICATION,.. _commlibs-support-compatibility-matrix:,, - :doc:`RCCL `,2.21.5,2.20.5,2.18.6 + :doc:`RCCL `,2.21.5,2.21.5,2.20.5 ,,, MATH LIBS,.. 
_mathlibs-support-compatibility-matrix:,, `half `_ ,1.12.0,1.12.0,1.12.0 - :doc:`hipBLAS `,2.3.0,2.2.0,2.1.0 - :doc:`hipBLASLt `,0.10.0,0.8.0,0.7.0 - :doc:`hipFFT `,1.0.17,1.0.16,1.0.14 - :doc:`hipfort `,0.5.0,0.4.0,0.4.0 - :doc:`hipRAND `,2.11.0,2.11.1,2.10.16 - :doc:`hipSOLVER `,2.3.0,2.2.0,2.1.0 - :doc:`hipSPARSE `,3.1.2,3.1.1,3.0.1 - :doc:`hipSPARSELt `,0.2.2,0.2.1,0.1.0 - :doc:`rocALUTION `,3.2.1,3.2.1,3.1.1 - :doc:`rocBLAS `,4.3.0,4.2.4,4.1.0 - :doc:`rocFFT `,1.0.31,1.0.30,1.0.26 - :doc:`rocRAND `,3.2.0,3.1.1,3.0.1 - :doc:`rocSOLVER `,3.27.0,3.26.2,3.25.0 - :doc:`rocSPARSE `,3.3.0,3.2.1,3.1.2 - :doc:`rocWMMA `,1.6.0,1.5.0,1.4.0 - :doc:`Tensile `,4.42.0,4.41.0,4.40.0 + :doc:`hipBLAS `,2.3.0,2.3.0,2.2.0 + :doc:`hipBLASLt `,0.10.0,0.10.0,0.8.0 + :doc:`hipFFT `,1.0.17,1.0.17,1.0.14 + :doc:`hipfort `,0.5.0,0.5.0,0.4.0 + :doc:`hipRAND `,2.11.1,2.11.0,2.11.0 + :doc:`hipSOLVER `,2.3.0,2.3.0,2.2.0 + :doc:`hipSPARSE `,3.1.2,3.1.2,3.1.1 + :doc:`hipSPARSELt `,0.2.2,0.2.2,0.2.1 + :doc:`rocALUTION `,3.2.1,3.2.1,3.2.0 + :doc:`rocBLAS `,4.3.0,4.3.0,4.2.0 + :doc:`rocFFT `,1.0.31,1.0.31,1.0.28 + :doc:`rocRAND `,3.2.0,3.2.0,3.1.0 + :doc:`rocSOLVER `,3.27.0,3.27.0,3.26.0 + :doc:`rocSPARSE `,3.3.0,3.3.0,3.2.0 + :doc:`rocWMMA `,1.6.0,1.6.0,1.5.0 + :doc:`Tensile `,4.42.0,4.42.0,4.41.0 ,,, PRIMITIVES,.. _primitivelibs-support-compatibility-matrix:,, - :doc:`hipCUB `,3.3.0,3.2.1,3.1.0 - :doc:`hipTensor `,1.4.0,1.3.0,1.2.0 - :doc:`rocPRIM `,3.3.0,3.2.2,3.1.0 - :doc:`rocThrust `,3.3.0,3.1.1,3.0.1 + :doc:`hipCUB `,3.3.0,3.3.0,3.2.0 + :doc:`hipTensor `,1.4.0,1.4.0,1.3.0 + :doc:`rocPRIM `,3.3.0,3.3.0,3.2.0 + :doc:`rocThrust `,3.3.0,3.3.0,3.0.1 ,,, SUPPORT LIBS,,, - `hipother `_,6.3.42131,6.2.41134,6.1.40091 - `rocm-core `_,6.3.0,6.2.4,6.1.0 - `ROCT-Thunk-Interface `_,N/A [#ROCT-rocr]_,20240607.5.7,20240125.3.30 + `hipother `_,6.3.42133,6.3.42131,6.2.41133 + `rocm-core `_,6.3.1,6.3.0,6.2.0 + `ROCT-Thunk-Interface `_,N/A [#ROCT-rocr]_,N/A [#ROCT-rocr]_,20240607.1.4246 ,,, SYSTEM MGMT TOOLS,.. _tools-support-compatibility-matrix:,, - :doc:`AMD SMI `,24.7.1,24.6.3,24.4.1 + :doc:`AMD SMI `,24.7.1,24.7.1,24.6.2 :doc:`ROCm Data Center Tool `,0.3.0,0.3.0,0.3.0 :doc:`rocminfo `,1.0.0,1.0.0,1.0.0 - :doc:`ROCm SMI `,7.4.0,7.3.0,7.0.0 - :doc:`ROCm Validation Suite `,1.1.0,1.0.60204,1.0.60100 + :doc:`ROCm SMI `,7.4.0,7.4.0,7.3.0 + :doc:`ROCm Validation Suite `,1.1.0,1.1.0,1.0.60200 ,,, PERFORMANCE TOOLS,,, :doc:`ROCm Bandwidth Test `,1.4.0,1.4.0,1.4.0 - :doc:`ROCm Compute Profiler `,3.0.0,2.0.1,N/A - :doc:`ROCm Systems Profiler `,0.1.0,1.11.2,N/A - :doc:`ROCProfiler `,2.0.60300,2.0.60204,2.0.60100 - :doc:`ROCprofiler-SDK `,0.5.0,0.4.0,N/A - :doc:`ROCTracer `,4.1.60300,4.1.60204,4.1.60100 + :doc:`ROCm Compute Profiler `,3.0.0,3.0.0,2.0.1 + :doc:`ROCm Systems Profiler `,0.1.0,0.1.0,1.11.2 + :doc:`ROCProfiler `,2.0.60301,2.0.60300,2.0.60200 + :doc:`ROCprofiler-SDK `,0.5.0,0.5.0,0.4.0 + :doc:`ROCTracer `,4.1.60301,4.1.60300,4.1.60200 ,,, DEVELOPMENT TOOLS,,, - :doc:`HIPIFY `,18.0.0.24455,18.0.0.24392,17.0.0.24103 - :doc:`ROCm CMake `,0.14.0,0.13.0,0.12.0 - :doc:`ROCdbgapi `,0.77.0,0.76.0,0.71.0 - :doc:`ROCm Debugger (ROCgdb) `,15.2.0,14.2.0,14.1.0 - `rocprofiler-register `_,0.4.0,0.4.0,0.3.0 + :doc:`HIPIFY `,18.0.0.24491,18.0.0.24455,18.0.0.24232 + :doc:`ROCm CMake `,0.14.0,0.14.0,0.13.0 + :doc:`ROCdbgapi `,0.77.0,0.77.0,0.76.0 + :doc:`ROCm Debugger (ROCgdb) `,15.2.0,15.2.0,14.2.0 + `rocprofiler-register `_,0.4.0,0.4.0,0.4.0 :doc:`ROCr Debug Agent `,2.0.3,2.0.3,2.0.3 ,,, COMPILERS,.. 
_compilers-support-compatibility-matrix:,, - `clang-ocl `_,N/A,N/A,0.5.0 - :doc:`hipCC `,1.1.1,1.1.1,1.0.0 - `Flang `_,18.0.0.24455,18.0.0.24392,17.0.0.24103 - :doc:`llvm-project `,18.0.0.24455,18.0.0.24392,17.0.0.24103 - `OpenMP `_,18.0.0.24455,18.0.0.24392,17.0.0.24103 + `clang-ocl `_,N/A,N/A,N/A + :doc:`hipCC `,1.1.1,1.1.1,1.1.1 + `Flang `_,18.0.0.24491,18.0.0.24455,18.0.0.24232 + :doc:`llvm-project `,18.0.0.24491,18.0.0.24455,18.0.0.24232 + `OpenMP `_,18.0.0.24491,18.0.0.24455,18.0.0.24232 ,,, RUNTIMES,.. _runtime-support-compatibility-matrix:,, - :doc:`AMD CLR `,6.3.42131,6.2.41134,6.1.40091 - :doc:`HIP `,6.3.42131,6.2.41134,6.1.40091 + :doc:`AMD CLR `,6.3.42133,6.3.42131,6.2.41133 + :doc:`HIP `,6.3.42133,6.3.42131,6.2.41133 `OpenCL Runtime `_,2.0.0,2.0.0,2.0.0 :doc:`ROCr Runtime `,1.14.0,1.14.0,1.13.0 + .. rubric:: Footnotes -.. [#red-hat94] RHEL 9.4 is supported only on AMD Instinct MI300A. -.. [#oracle89] Oracle Linux is supported only on AMD Instinct MI300X. -.. [#mi300_624] **For ROCm 6.2.4** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE]. -.. [#mi300_610] **For ROCm 6.1.0** - MI300A (gfx942) is supported on Ubuntu 22.04.4, RHEL 9.4, RHEL 9.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.4. +.. [#mi300x] Oracle Linux and Debian are supported only on AMD Instinct MI300X. +.. [#mi300_620] **For ROCm 6.2.0** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE]. .. [#kfd_support] ROCm provides forward and backward compatibility between the AMD Kernel-mode GPU Driver (KMD) and its user space software for +/- 2 releases. These are the compatibility combinations that are currently supported. -.. [#ROCT-rocr] As of ROCm 6.3.0, the ROCT Thunk Interface is now included as part of the ROCr runtime package. +.. [#ROCT-rocr] Starting from ROCm 6.3.0, the ROCT Thunk Interface is included as part of the ROCr runtime package. .. _OS-kernel-versions: @@ -166,35 +164,25 @@ Use this lookup table to confirm which operating system and kernel versions are :stub-columns: 1 `Ubuntu `_, 24.04.2, "6.8 GA, 6.11 HWE" - , 24.04.1, "6.8 GA" , 24.04, "6.8 GA" ,, `Ubuntu `_, 22.04.5, "5.15 GA, 6.8 HWE" , 22.04.4, "5.15 GA, 6.5 HWE" - , 22.04.3, "5.15 GA, 6.2 HWE" - , 22.04.2, "5.15 GA, 5.19 HWE" - ,, - `Ubuntu `_, 20.04.06, "5.15 HWE" - , 20.04.5, "5.15 HWE" ,, `Red Hat Enterprise Linux (RHEL) `_, 9.5, 5.14.0 ,9.4, 5.14.0 ,9.3, 5.14.0 - ,9.2, 5.14.0 ,, `Red Hat Enterprise Linux (RHEL) `_, 8.10, 4.18.0 ,8.9, 4.18.0 - ,8.8, 4.18.0 - ,, - `CentOS `_, 7.9, 3.10 ,, `SUSE Linux Enterprise Server (SLES) `_, 15 SP6, 6.4.0 ,15 SP5, 5.14.21 - ,15 SP4, 5.14.21 ,, `Oracle Linux `_, 8.10, 5.15.0 ,8.9, 5.15.0 - `Azure Linux `_, 3.0, 6.6.60 + ,, + `Debian `_,12, 6.1 .. Footnotes and ref anchors in below historical tables should be appended with "-past-60", to differentiate from the @@ -222,7 +210,7 @@ Expand for full historical view of: .. rubric:: Footnotes - .. [#oracle89-past-60] Oracle Linux is supported only on AMD Instinct MI300X. + .. [#mic300x-past-60] Oracle Linux and Debian are supported only on AMD Instinct MI300X. .. [#mi300_624-past-60] **For ROCm 6.2.4** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE]. .. [#mi300_622-past-60] **For ROCm 6.2.2** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE]. 
.. [#mi300_621-past-60] **For ROCm 6.2.1** - MI300X (gfx942) is supported on listed operating systems *except* Ubuntu 22.04.5 [6.8 HWE] and Ubuntu 22.04.4 [6.5 HWE]. @@ -233,4 +221,4 @@ Expand for full historical view of: .. [#mi300_602-past-60] **For ROCm 6.0.2** - MI300A (gfx942) is supported on Ubuntu 22.04.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.3. .. [#mi300_600-past-60] **For ROCm 6.0.0** - MI300A (gfx942) is supported on Ubuntu 22.04.3, RHEL 8.9, and SLES 15 SP5. MI300X (gfx942) is only supported on Ubuntu 22.04.3. .. [#kfd_support-past-60] ROCm provides forward and backward compatibility between the AMD Kernel-mode GPU Driver (KMD) and its user space software for +/- 2 releases. These are the compatibility combinations that are currently supported. - .. [#ROCT-rocr-past-60] As of ROCm 6.3.0, the ROCT Thunk Interface is now included as part of the ROCr runtime package. + .. [#ROCT-rocr-past-60] Starting from ROCm 6.3.0, the ROCT Thunk Interface is included as part of the ROCr runtime package. diff --git a/docs/conceptual/More-about-how-ROCm-uses-PCIe-Atomics.rst b/docs/conceptual/More-about-how-ROCm-uses-PCIe-Atomics.rst deleted file mode 100644 index 6ad57956a6..0000000000 --- a/docs/conceptual/More-about-how-ROCm-uses-PCIe-Atomics.rst +++ /dev/null @@ -1,156 +0,0 @@ -.. meta:: - :description: How ROCm uses PCIe atomics - :keywords: PCIe, PCIe atomics, atomics, BAR memory, AMD, ROCm - -***************************************************************************** -How ROCm uses PCIe atomics -***************************************************************************** - -ROCm PCIe feature and overview of BAR memory -================================================================ - -ROCm is an extension of HSA platform architecture, so it shares the queuing model, memory model, -signaling and synchronization protocols. Platform atomics are integral to perform queuing and -signaling memory operations where there may be multiple-writers across CPU and GPU agents. - -The full list of HSA system architecture platform requirements are here: -`HSA Sys Arch Features `_. - -AMD ROCm Software uses the new PCI Express 3.0 (Peripheral Component Interconnect Express [PCIe] -3.0) features for atomic read-modify-write transactions which extends inter-processor synchronization -mechanisms to IO to support the defined set of HSA capabilities needed for queuing and signaling -memory operations. - -The new PCIe atomic operations operate as completers for ``CAS`` (Compare and Swap), ``FetchADD``, -``SWAP`` atomics. The atomic operations are initiated by the I/O device which support 32-bit, 64-bit and -128-bit operand which target address have to be naturally aligned to operation sizes. - -For ROCm the Platform atomics are used in ROCm in the following ways: - - * Update HSA queue's read_dispatch_id: 64 bit atomic add used by the command processor on the - GPU agent to update the packet ID it processed. - * Update HSA queue's write_dispatch_id: 64 bit atomic add used by the CPU and GPU agent to - support multi-writer queue insertions. - * Update HSA Signals -- 64bit atomic ops are used for CPU & GPU synchronization. - -The PCIe 3.0 atomic operations feature allows atomic transactions to be requested by, routed through -and completed by PCIe components. Routing and completion does not require software support. -Component support for each is detectable via the Device Capabilities 2 (DevCap2) register. 
Upstream -bridges need to have atomic operations routing enabled or the atomic operations will fail even though -PCIe endpoint and PCIe I/O devices has the capability to atomic operations. - -To do atomic operations routing capability between two or more Root Ports, each associated Root Port -must indicate that capability via the atomic operations routing supported bit in the DevCap2 register. - -If your system has a PCIe Express Switch it needs to support atomic operations routing. Atomic -operations requests are permitted only if a component's ``DEVCTL2.ATOMICOP_REQUESTER_ENABLE`` -field is set. These requests can only be serviced if the upstream components support atomic operation -completion and/or routing to a component which does. Atomic operations routing support=1, routing -is supported; atomic operations routing support=0, routing is not supported. - -An atomic operation is a non-posted transaction supporting 32-bit and 64-bit address formats, there -must be a response for Completion containing the result of the operation. Errors associated with the -operation (uncorrectable error accessing the target location or carrying out the atomic operation) are -signaled to the requester by setting the Completion Status field in the completion descriptor, they are -set to to Completer Abort (CA) or Unsupported Request (UR). - -To understand more about how PCIe atomic operations work, see -`PCIe atomics `_ - -`Linux Kernel Patch to pci_enable_atomic_request `_ - -There are also a number of papers which talk about these new capabilities: - - * `Atomic Read Modify Write Primitives by Intel `_ - * `PCI express 3 Accelerator White paper by Intel `_ - * `PCIe Generation 4 Base Specification includes atomic operations `_ - * `Xilinx PCIe Ultrascale White paper `_ - -Other I/O devices with PCIe atomics support: - - * Mellanox ConnectX-5 InfiniBand Card - * Cray Aries Interconnect - * Xilinx 7 Series Devices - -Future bus technology with richer I/O atomics operation Support - - * GenZ - -New PCIe Endpoints with support beyond AMD Ryzen and EPYC CPU; Intel Haswell or newer CPUs -with PCIe Generation 3.0 support. - - * Mellanox Bluefield SOC - * Cavium Thunder X2 - -In ROCm, we also take advantage of PCIe ID based ordering technology for P2P when the GPU -originates two writes to two different targets: - -* Write to another GPU memory -* Write to system memory to indicate transfer complete - -They are routed off to different ends of the computer but we want to make sure the write to system -memory to indicate transfer complete occurs AFTER P2P write to GPU has complete. - -BAR memory overview ----------------------------------------------------------------------------------------------------- -On a Xeon E5 based system in the BIOS we can turn on above 4GB PCIe addressing, if so he need to set -memory-mapped input/output (MMIO) base address (MMIOH base) and range (MMIO high size) in the BIOS. - -In the Supermicro system in the system bios you need to see the following - - * Advanced->PCIe/PCI/PnP configuration-\> Above 4G Decoding = Enabled - * Advanced->PCIe/PCI/PnP Configuration-\>MMIOH Base = 512G - * Advanced->PCIe/PCI/PnP Configuration-\>MMIO High Size = 256G - -When we support Large Bar Capability there is a Large Bar VBIOS which also disable the IO bar. - -For GFX9 and Vega10 which have Physical Address up 44 bit and 48 bit Virtual address. - - * BAR0-1 registers: 64bit, prefetchable, GPU memory. 8GB or 16GB depending on Vega10 SKU. 
Must - be placed < 2^44 to support P2P access from other Vega10. - * BAR2-3 registers: 64bit, prefetchable, Doorbell. Must be placed \< 2^44 to support P2P access from - other Vega10. - * BAR4 register: Optional, not a boot device. - * BAR5 register: 32bit, non-prefetchable, MMIO. Must be placed \< 4GB. - -Here is how our base address register (BAR) works on GFX 8 GPUs with 40 bit Physical Address Limit :: - - 11:00.0 Display controller: Advanced Micro Devices, Inc. [AMD/ATI] Fiji [Radeon R9 FURY / NANO - Series] (rev c1) - - Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Device 0b35 - - Flags: bus master, fast devsel, latency 0, IRQ 119 - - Memory at bf40000000 (64-bit, prefetchable) [size=256M] - - Memory at bf50000000 (64-bit, prefetchable) [size=2M] - - I/O ports at 3000 [size=256] - - Memory at c7400000 (32-bit, non-prefetchable) [size=256K] - - Expansion ROM at c7440000 [disabled] [size=128K] - -Legend: - -1 : GPU Frame Buffer BAR -- In this example it happens to be 256M, but typically this will be size of the -GPU memory (typically 4GB+). This BAR has to be placed \< 2^40 to allow peer-to-peer access from -other GFX8 AMD GPUs. For GFX9 (Vega GPU) the BAR has to be placed \< 2^44 to allow peer-to-peer -access from other GFX9 AMD GPUs. - -2 : Doorbell BAR -- The size of the BAR is typically will be \< 10MB (currently fixed at 2MB) for this -generation GPUs. This BAR has to be placed \< 2^40 to allow peer-to-peer access from other current -generation AMD GPUs. - -3 : IO BAR -- This is for legacy VGA and boot device support, but since this the GPUs in this project are -not VGA devices (headless), this is not a concern even if the SBIOS does not setup. - -4 : MMIO BAR -- This is required for the AMD Driver SW to access the configuration registers. Since the -reminder of the BAR available is only 1 DWORD (32bit), this is placed \< 4GB. This is fixed at 256KB. - -5 : Expansion ROM -- This is required for the AMD Driver SW to access the GPU video-bios. This is -currently fixed at 128KB. - -For more information, you can review -`Overview of Changes to PCI Express 3.0 `_. diff --git a/docs/conceptual/gpu-memory.md b/docs/conceptual/gpu-memory.md deleted file mode 100644 index a167513dfa..0000000000 --- a/docs/conceptual/gpu-memory.md +++ /dev/null @@ -1,241 +0,0 @@ - - - - - - -# GPU memory - -For the HIP reference documentation, see: - -* {doc}`hip:doxygen/html/group___memory` -* {doc}`hip:doxygen/html/group___memory_m` - -Host memory exists on the host (e.g. CPU) of the machine in random access memory (RAM). - -Device memory exists on the device (e.g. GPU) of the machine in video random access memory (VRAM). -Recent architectures use graphics double data rate (GDDR) synchronous dynamic random-access memory (SDRAM)such as GDDR6, or high-bandwidth memory (HBM) such as HBM2e. - -## Memory allocation - -Memory can be allocated in two ways: pageable memory, and pinned memory. -The following API calls with result in these allocations: - -| API | Data location | Allocation | -|--------------------|---------------|------------| -| System allocated | Host | Pageable | -| `hipMallocManaged` | Host | Managed | -| `hipHostMalloc` | Host | Pinned | -| `hipMalloc` | Device | Pinned | - -:::{tip} -`hipMalloc` and `hipFree` are blocking calls, however, HIP recently added non-blocking versions `hipMallocAsync` and `hipFreeAsync` which take in a stream as an additional argument. -::: - -### Pageable memory - -Pageable memory is usually gotten when calling `malloc` or `new` in a C++ application. 
-It is unique in that it exists on "pages" (blocks of memory), which can be migrated to other memory storage. -For example, migrating memory between CPU sockets on a motherboard, or a system that runs out of space in RAM and starts dumping pages of RAM into the swap partition of your hard drive. - -### Pinned memory - -Pinned memory (or page-locked memory, or non-pageable memory) is host memory that is mapped into the address space of all GPUs, meaning that the pointer can be used on both host and device. -Accessing host-resident pinned memory in device kernels is generally not recommended for performance, as it can force the data to traverse the host-device interconnect (e.g. PCIe), which is much slower than the on-device bandwidth (>40x on MI200). - -Pinned host memory can be allocated with one of two types of coherence support: - -:::{note} -In HIP, pinned memory allocations are coherent by default (`hipHostMallocDefault`). -There are additional pinned memory flags (e.g. `hipHostMallocMapped` and `hipHostMallocPortable`). -On MI200 these options do not impact performance. - -For more information, see the section *memory allocation flags* in the HIP Programming Guide: {doc}`hip:how-to/programming_manual`. -::: - -Much like how a process can be locked to a CPU core by setting affinity, a pinned memory allocator does this with the memory storage system. -On multi-socket systems it is important to ensure that pinned memory is located on the same socket as the owning process, or else each cache line will be moved through the CPU-CPU interconnect, thereby increasing latency and potentially decreasing bandwidth. - -In practice, pinned memory is used to improve transfer times between host and device. -For transfer operations, such as `hipMemcpy` or `hipMemcpyAsync`, using pinned memory instead of pageable memory on host can lead to a ~3x improvement in bandwidth. - -:::{tip} -If the application needs to move data back and forth between device and host (separate allocations), use pinned memory on the host side. -::: - -### Managed memory - -Managed memory refers to universally addressable, or unified memory available on the MI200 series of GPUs. -Much like pinned memory, managed memory shares a pointer between host and device and (by default) supports fine-grained coherence, however, managed memory can also automatically migrate pages between host and device. -The allocation will be managed by AMD GPU driver using the Linux HMM (Heterogeneous Memory Management) mechanism. - -If heterogenous memory management (HMM) is not available, then `hipMallocManaged` will default back to using system memory and will act like pinned host memory. -Other managed memory API calls will have undefined behavior. -It is therefore recommended to check for managed memory capability with: `hipDeviceGetAttribute` and `hipDeviceAttributeManagedMemory`. - -HIP supports additional calls that work with page migration: - -* `hipMemAdvise` -* `hipMemPrefetchAsync` - -:::{tip} -If the application needs to use data on both host and device regularly, does not want to deal with separate allocations, and is not worried about maxing out the VRAM on MI200 GPUs (64 GB per GCD), use managed memory. -::: - -:::{tip} -If managed memory performance is poor, check to see if managed memory is supported on your system and if page migration (XNACK) is enabled. 
-::: - -## Access behavior - -Memory allocations for GPUs behave as follow: - -| API | Data location | Host access | Device access | -|--------------------|---------------|--------------|----------------------| -| System allocated | Host | Local access | Unhandled page fault | -| `hipMallocManaged` | Host | Local access | Zero-copy | -| `hipHostMalloc` | Host | Local access | Zero-copy* | -| `hipMalloc` | Device | Zero-copy | Local access | - -Zero-copy accesses happen over the Infinity Fabric interconnect or PCI-E lanes on discrete GPUs. - -:::{note} -While `hipHostMalloc` allocated memory is accessible by a device, the host pointer must be converted to a device pointer with `hipHostGetDevicePointer`. - -Memory allocated through standard system allocators such as `malloc`, can be accessed a device by registering the memory via `hipHostRegister`. -The device pointer to be used in kernels can be retrieved with `hipHostGetDevicePointer`. -Registered memory is treated like `hipHostMalloc` and will have similar performance. - -On devices that support and have [](#xnack) enabled, such as the MI250X, `hipHostRegister` is not required as memory accesses are handled via automatic page migration. -::: - -### XNACK - -Normally, host and device memory are separate and data has to be transferred manually via `hipMemcpy`. - -On a subset of GPUs, such as the MI200, there is an option to automatically migrate pages of memory between host and device. -This is important for managed memory, where the locality of the data is important for performance. -Depending on the system, page migration may be disabled by default in which case managed memory will act like pinned host memory and suffer degraded performance. - -*XNACK* describes the GPUs ability to retry memory accesses that failed due a page fault (which normally would lead to a memory access error), and instead retrieve the missing page. - -This also affects memory allocated by the system as indicated by the following table: - -| API | Data location | Host after device access | Device after host access | -|--------------------|---------------|--------------------------|--------------------------| -| System allocated | Host | Migrate page to host | Migrate page to device | -| `hipMallocManaged` | Host | Migrate page to host | Migrate page to device | -| `hipHostMalloc` | Host | Local access | Zero-copy | -| `hipMalloc` | Device | Zero-copy | Local access | - -To check if page migration is available on a platform, use `rocminfo`: - -```sh -$ rocminfo | grep xnack - Name: amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack- -``` - -Here, `xnack-` means that XNACK is available but is disabled by default. -Turning on XNACK by setting the environment variable `HSA_XNACK=1` and gives the expected result, `xnack+`: - -```sh -$ HSA_XNACK=1 rocminfo | grep xnack -Name: amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack+ -``` - -`hipcc`by default will generate code that runs correctly with both XNACK enabled or disabled. -Setting the `--offload-arch=`-option with `xnack+` or `xnack-` forces code to be only run with XNACK enabled or disabled respectively. - -```sh -# Compiled kernels will run regardless if XNACK is enabled or is disabled. -hipcc --offload-arch=gfx90a - -# Compiled kernels will only be run if XNACK is enabled with XNACK=1. -hipcc --offload-arch=gfx90a:xnack+ - -# Compiled kernels will only be run if XNACK is disabled with XNACK=0. -hipcc --offload-arch=gfx90a:xnack- -``` - -:::{tip} -If you want to make use of page migration, use managed memory. 
While pageable memory will migrate correctly, it is not a portable solution and can have performance issues if the accessed data isn't page aligned. -::: - -### Coherence - -* *Coarse-grained coherence* means that memory is only considered up to date at kernel boundaries, which can be enforced through `hipDeviceSynchronize`, `hipStreamSynchronize`, or any blocking operation that acts on the null stream (e.g. `hipMemcpy`). -For example, cacheable memory is a type of coarse-grained memory where an up-to-date copy of the data can be stored elsewhere (e.g. in an L2 cache). -* *Fine-grained coherence* means the coherence is supported while a CPU/GPU kernel is running. -This can be useful if both host and device are operating on the same dataspace using system-scope atomic operations (e.g. updating an error code or flag to a buffer). -Fine-grained memory implies that up-to-date data may be made visible to others regardless of kernel boundaries as discussed above. - -| API | Flag | Coherence | -|-------------------------|------------------------------|----------------| -| `hipHostMalloc` | `hipHostMallocDefault` | Fine-grained | -| `hipHostMalloc` | `hipHostMallocNonCoherent` | Coarse-grained | - -| API | Flag | Coherence | -|-------------------------|------------------------------|----------------| -| `hipExtMallocWithFlags` | `hipDeviceMallocDefault` | Coarse-grained | -| `hipExtMallocWithFlags` | `hipDeviceMallocFinegrained` | Fine-grained | - -| API | `hipMemAdvise` argument | Coherence | -|-------------------------|------------------------------|----------------| -| `hipMallocManaged` | | Fine-grained | -| `hipMallocManaged` | `hipMemAdviseSetCoarseGrain` | Coarse-grained | -| `malloc` | | Fine-grained | -| `malloc` | `hipMemAdviseSetCoarseGrain` | Coarse-grained | - -:::{tip} -Try to design your algorithms to avoid host-device memory coherence (e.g. system scope atomics). While it can be a useful feature in very specific cases, it is not supported on all systems, and can negatively impact performance by introducing the host-device interconnect bottleneck. -::: - -The availability of fine- and coarse-grained memory pools can be checked with `rocminfo`: - -```sh -$ rocminfo -... -******* -Agent 1 -******* -Name: AMD EPYC 7742 64-Core Processor -... -Pool Info: -Pool 1 -Segment: GLOBAL; FLAGS: FINE GRAINED -... -Pool 3 -Segment: GLOBAL; FLAGS: COARSE GRAINED -... -******* -Agent 9 -******* -Name: gfx90a -... -Pool Info: -Pool 1 -Segment: GLOBAL; FLAGS: COARSE GRAINED -... -``` - -## System direct memory access - -In most cases, the default behavior for HIP in transferring data from a pinned host allocation to device will run at the limit of the interconnect. -However, there are certain cases where the interconnect is not the bottleneck. - -The primary way to transfer data onto and off of a GPU, such as the MI200, is to use the onboard System Direct Memory Access engine, which is used to feed blocks of memory to the off-device interconnect (either GPU-CPU or GPU-GPU). -Each GCD has a separate SDMA engine for host-to-device and device-to-host memory transfers. -Importantly, SDMA engines are separate from the computing infrastructure, meaning that memory transfers to and from a device will not impact kernel compute performance, though they do impact memory bandwidth to a limited extent. -The SDMA engines are mainly tuned for PCIe-4.0 x16, which means they are designed to operate at bandwidths up to 32 GB/s. 
- -:::{note} -An important feature of the MI250X platform is the Infinity Fabric™ interconnect between host and device. -The Infinity Fabric interconnect supports improved performance over standard PCIe-4.0 (usually ~50% more bandwidth); however, since the SDMA engine does not run at this speed, it will not max out the bandwidth of the faster interconnect. -::: - -The bandwidth limitation can be countered by bypassing the SDMA engine and replacing it with a type of copy kernel known as a "blit" kernel. -Blit kernels will use the compute units on the GPU, thereby consuming compute resources, which may not always be beneficial. -The easiest way to enable blit kernels is to set an environment variable `HSA_ENABLE_SDMA=0`, which will disable the SDMA engine. -On systems where the GPU uses a PCIe interconnect instead of an Infinity Fabric interconnect, blit kernels will not impact bandwidth, but will still consume compute resources. -The use of SDMA vs blit kernels also applies to MPI data transfers and GPU-GPU transfers. diff --git a/docs/conceptual/pcie-atomics.rst b/docs/conceptual/pcie-atomics.rst new file mode 100644 index 0000000000..f82d72e7ea --- /dev/null +++ b/docs/conceptual/pcie-atomics.rst @@ -0,0 +1,57 @@ +.. meta:: + :description: How ROCm uses PCIe atomics + :keywords: PCIe, PCIe atomics, atomics, Atomic operations, AMD, ROCm + +***************************************************************************** +How ROCm uses PCIe atomics +***************************************************************************** +AMD ROCm is an extension of the Heterogeneous System Architecture (HSA). To meet the requirements of an HSA-compliant system, ROCm supports queuing models, memory models, and signaling and synchronization protocols. ROCm can perform atomic Read-Modify-Write (RMW) transactions that extend inter-processor synchronization mechanisms to Input/Output (I/O) devices starting from Peripheral Component Interconnect Express 3.0 (PCIe™ 3.0). It supports the defined HSA capabilities for queuing and signaling memory operations. To learn more about the requirements of an HSA-compliant system, see the +`HSA Platform System Architecture Specification `_. + +ROCm uses platform atomics to perform memory operations like queuing, signaling, and synchronization across multiple CPU, GPU agents, and I/O devices. Platform atomics ensure that atomic operations run synchronously, without interruptions or conflicts, across multiple shared resources. + +Platform atomics in ROCm +============================== +Platform atomics enable the set of atomic operations that perform RMW actions across multiple processors, devices, and memory locations so that they run synchronously without interruption. An atomic operation is a sequence of computing instructions run as a single, indivisible unit. These instructions are completed in their entirety without any interruptions. If the instructions can't be completed as a unit without interruption, none of the instructions are run. These operations support 32-bit and 64-bit address formats. + +Some of the operations for which ROCm uses platform atomics are: + +* Update the HSA queue's ``read_dispatch_id``. The command processor on the GPU agent uses a 64-bit atomic add operation. It updates the packet ID it processed. +* Update the HSA queue's ``write_dispatch_id``. The CPU and GPU agents use a 64-bit atomic add operation. It supports multi-writer queue insertions. +* Update HSA Signals. A 64-bit atomic operation is used for CPU & GPU synchronization. 
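+
+Whether the devices in your system actually advertise these PCIe atomic capabilities can be read back from the PCIe configuration space. The following is a quick, unofficial check; it assumes the ``lspci`` utility from pciutils is installed and that ``03:00.0`` is replaced with the bus address of your GPU or bridge (the exact field names vary with the pciutils version):
+
+.. code:: shell
+
+   # Decode the extended capabilities of one device. AtomicOpsCap (in DevCap2) reports
+   # completer and routing support; AtomicOpsCtl (in DevCtl2) shows whether the device
+   # is enabled as an AtomicOp requester.
+   sudo lspci -vvv -s 03:00.0 | grep -i atomicops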
+
+PCIe for atomic operations
+----------------------------
+ROCm requires CPUs that support PCIe atomics. Similarly, all connected I/O devices should also support PCIe atomics for optimum compatibility. PCIe supports the ``CAS`` (Compare and Swap), ``FetchADD``, and ``SWAP`` atomic operations across multiple resources. These atomic operations are initiated by I/O devices and support 32-bit, 64-bit, and 128-bit operands. The target memory address where an atomic operation is performed must be naturally aligned to the size of the operand. This alignment ensures that the operations are performed efficiently and correctly without failure.
+
+When an atomic operation is successful, the requester receives a completion response along with the operation result. However, any errors associated with the operation are signaled to the requester by updating the Completion Status field. Issues accessing the target location or running the atomic operation are common errors. Depending upon the error, the Completion Status field is updated to Completer Abort (CA) or Unsupported Request (UR). The field is present in the Completion Descriptor.
+
+To learn more about the industry standards and specifications of PCIe, see `PCI-SIG Specification `_.
+
+To learn more about PCIe and its capabilities, consult the following white papers:
+
+* `Atomic Read Modify Write Primitives by Intel `_
+* `PCI Express 3 Accelerator White paper by Intel `_
+* `PCIe Generation 4 Base Specification includes atomic operations `_
+* `Xilinx PCIe Ultrascale White paper `_
+
+Working with PCIe 3.0 in ROCm
+-------------------------------
+Starting with PCIe 3.0, atomic operations can be requested, routed through, and completed by PCIe components. Routing and completion do not require software support. Component support for each can be identified by the Device Capabilities 2 (DevCap2) register. Upstream
+bridges need to have atomic operations routing enabled. If not enabled, the atomic operations will fail even if the
+PCIe endpoint and PCIe I/O devices can perform atomic operations.
+
+If your system uses PCIe switches to connect and enable communication between multiple PCIe components, the switches must also support atomic operations routing.
+
+To enable atomic operations routing between multiple root ports, each root port must support atomic operation routing. This capability can be identified from the atomic operations routing support bit in the DevCap2 register. If the bit is set to 1, routing is supported. Atomic operation requests are permitted only if a component's ``DEVCTL2.ATOMICOP_REQUESTER_ENABLE``
+field is set. These requests can only be serviced if the upstream components also support atomic operation completion or if the requests can be routed to a component that supports atomic operation completion.
+
+ROCm uses the PCIe-ID-based ordering technology for peer-to-peer (P2P) data transmission. PCIe-ID-based ordering is used when the GPU initiates multiple write operations to different memory locations, for example, a write of data to a peer GPU followed by a write of a transfer-complete flag to system memory. ID-based ordering ensures that the flag write is not observed before the P2P data write completes.
+
+For more information on changes implemented in PCIe 3.0, see `Overview of Changes to PCI Express 3.0 `_.
+
diff --git a/docs/conf.py b/docs/conf.py
index 468b0a8d59..2e13cadc92 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -30,19 +30,20 @@ project = "ROCm Documentation"
 author = "Advanced Micro Devices, Inc."
 copyright = "Copyright (c) 2024 Advanced Micro Devices, Inc. All rights reserved."
-version = "6.3.0" -release = "6.3.0" +version = "6.3.1" +release = "6.3.1" setting_all_article_info = True all_article_info_os = ["linux", "windows"] all_article_info_author = "" # pages with specific settings article_pages = [ - {"file": "about/release-notes", "os": ["linux", "windows"], "date": "2024-12-03"}, + {"file": "about/release-notes", "os": ["linux", "windows"], "date": "2024-12-20"}, {"file": "how-to/deep-learning-rocm", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/index", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/install", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/train-a-model", "os": ["linux"]}, + {"file": "how-to/rocm-for-ai/accelerate-training", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/deploy-your-model", "os": ["linux"]}, {"file": "how-to/rocm-for-ai/hugging-face-models", "os": ["linux"]}, {"file": "how-to/rocm-for-hpc/index", "os": ["linux"]}, diff --git a/docs/contribute/building.md b/docs/contribute/building.md new file mode 100644 index 0000000000..97801832bd --- /dev/null +++ b/docs/contribute/building.md @@ -0,0 +1,150 @@ + + + + + + +# Building documentation + +## GitHub + +If you open a pull request and scroll down to the summary panel, +there is a commit status section. Next to the line +`docs/readthedocs.com:advanced-micro-devices-demo`, there is a `Details` link. +If you click this, it takes you to the Read the Docs build for your pull request. + +![GitHub PR commit status](../data/contribute/commit-status.png) + +If you don't see this line, click `Show all checks` to get an itemized view. + +## Command line + +You can build our documentation via the command line using Python. + +See the `build.tools.python` setting in the [Read the Docs configuration file](https://github.com/ROCm/ROCm/blob/develop/.readthedocs.yaml) for the Python version used by Read the Docs to build documentation. + +See the [Python requirements file](https://github.com/ROCm/ROCm/blob/develop/docs/sphinx/requirements.txt) for Python packages needed to build the documentation. + +Use the Python Virtual Environment (`venv`) and run the following commands from the project root: + +```sh +python3 -mvenv .venv + +.venv/bin/python -m pip install -r docs/sphinx/requirements.txt +.venv/bin/python -m sphinx -T -E -b html -d _build/doctrees -D language=en docs _build/html +``` + +Navigate to `_build/html/index.html` and open this file in a web browser. + +## Visual Studio Code + +With the help of a few extensions, you can create a productive environment to author and test +documentation locally using Visual Studio (VS) Code. Follow these steps to configure VS Code: + +1. Install the required extensions: + + * Python: `(ms-python.python)` + * Live Server: `(ritwickdey.LiveServer)` + +2. Add the following entries to `.vscode/settings.json`. + + ```json + { + "liveServer.settings.root": "/.vscode/build/html", + "liveServer.settings.wait": 1000, + "python.terminal.activateEnvInCurrentTerminal": true + } + ``` + + * `liveServer.settings.root`: Sets the root of the output website for live previews. Must be changed + alongside the `tasks.json` command. + * `liveServer.settings.wait`: Tells the live server to wait with the update in order to give Sphinx time to + regenerate the site contents and not refresh before the build is complete. + * `python.terminal.activateEnvInCurrentTerminal`: Activates the automatic virtual environment, so you + can build the site from the integrated terminal. + +3. Add the following tasks to `.vscode/tasks.json`. 
+ + ```json + { + "version": "2.0.0", + "tasks": [ + { + "label": "Build Docs", + "type": "process", + "windows": { + "command": "${workspaceFolder}/.venv/Scripts/python.exe" + }, + "command": "${workspaceFolder}/.venv/bin/python3", + "args": [ + "-m", + "sphinx", + "-j", + "auto", + "-T", + "-b", + "html", + "-d", + "${workspaceFolder}/.vscode/build/doctrees", + "-D", + "language=en", + "${workspaceFolder}/docs", + "${workspaceFolder}/.vscode/build/html" + ], + "problemMatcher": [ + { + "owner": "sphinx", + "fileLocation": "absolute", + "pattern": { + "regexp": "^(?:.*\\.{3}\\s+)?(\\/[^:]*|[a-zA-Z]:\\\\[^:]*):(\\d+):\\s+(WARNING|ERROR):\\s+(.*)$", + "file": 1, + "line": 2, + "severity": 3, + "message": 4 + } + }, + { + "owner": "sphinx", + "fileLocation": "absolute", + "pattern": { + "regexp": "^(?:.*\\.{3}\\s+)?(\\/[^:]*|[a-zA-Z]:\\\\[^:]*):{1,2}\\s+(WARNING|ERROR):\\s+(.*)$", + "file": 1, + "severity": 2, + "message": 3 + } + } + ], + "group": { + "kind": "build", + "isDefault": true + } + } + ] + } + ``` + + > Implementation detail: two problem matchers were needed to be defined, + > because VS Code doesn't tolerate some problem information being potentially + > absent. While a single regex could match all types of errors, if a capture + > group remains empty (the line number doesn't show up in all warning/error + > messages) but the `pattern` references said empty capture group, VS Code + > discards the message completely. + +4. Configure the Python virtual environment (`venv`). + + From the Command Palette, run `Python: Create Environment`. Select `venv` environment and + `docs/sphinx/requirements.txt`. + +5. Build the docs. + + Launch the default build task using one of the following options: + + * A hotkey (the default is `Ctrl+Shift+B`) + * Issuing the `Tasks: Run Build Task` from the Command Palette + +6. Open the live preview. + + Navigate to the site output within VS Code: right-click on `.vscode/build/html/index.html` and + select `Open with Live Server`. The contents should update on every rebuild without having to + refresh the browser. 
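+
+If you build from the command line and prefer not to use the Live Server extension, a lightweight alternative for previewing the output is Python's built-in HTTP server (available since Python 3.7). Unlike Live Server, it does not refresh automatically on rebuilds:
+
+```sh
+# Serve the generated site at http://localhost:8000
+python3 -m http.server 8000 --directory _build/html
+```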
diff --git a/docs/data/how-to/rocm-for-ai/2-node-training-master.png b/docs/data/how-to/rocm-for-ai/2-node-training-master.png new file mode 100644 index 0000000000..c59d149897 Binary files /dev/null and b/docs/data/how-to/rocm-for-ai/2-node-training-master.png differ diff --git a/docs/data/how-to/rocm-for-ai/2-node-training-worker.png b/docs/data/how-to/rocm-for-ai/2-node-training-worker.png new file mode 100644 index 0000000000..1b8ffc50ab Binary files /dev/null and b/docs/data/how-to/rocm-for-ai/2-node-training-worker.png differ diff --git a/docs/data/how-to/rocm-for-ai/llama2-7b-training-log-sample.png b/docs/data/how-to/rocm-for-ai/llama2-7b-training-log-sample.png new file mode 100644 index 0000000000..cdb38f457c Binary files /dev/null and b/docs/data/how-to/rocm-for-ai/llama2-7b-training-log-sample.png differ diff --git a/docs/data/how-to/rocm-for-ai/prep-training-datasets-my-gpt2-text-document.png b/docs/data/how-to/rocm-for-ai/prep-training-datasets-my-gpt2-text-document.png new file mode 100644 index 0000000000..1a4fedbc56 Binary files /dev/null and b/docs/data/how-to/rocm-for-ai/prep-training-datasets-my-gpt2-text-document.png differ diff --git a/docs/data/how-to/rocm-for-ai/rccl-tests-1-mpi-process-per-gpu.png b/docs/data/how-to/rocm-for-ai/rccl-tests-1-mpi-process-per-gpu.png new file mode 100644 index 0000000000..af0af3f0a2 Binary files /dev/null and b/docs/data/how-to/rocm-for-ai/rccl-tests-1-mpi-process-per-gpu.png differ diff --git a/docs/data/how-to/rocm-for-ai/rccl-tests-4-mi300x-gpu-nodes.png b/docs/data/how-to/rocm-for-ai/rccl-tests-4-mi300x-gpu-nodes.png new file mode 100644 index 0000000000..e43c8508e0 Binary files /dev/null and b/docs/data/how-to/rocm-for-ai/rccl-tests-4-mi300x-gpu-nodes.png differ diff --git a/docs/data/how-to/rocm-for-ai/rccl-tests-8-gpu.png b/docs/data/how-to/rocm-for-ai/rccl-tests-8-gpu.png new file mode 100644 index 0000000000..10523802af Binary files /dev/null and b/docs/data/how-to/rocm-for-ai/rccl-tests-8-gpu.png differ diff --git a/docs/data/rocm-software-stack-6_3_0.jpg b/docs/data/rocm-software-stack-6_3_0.jpg deleted file mode 100644 index 94053f492d..0000000000 Binary files a/docs/data/rocm-software-stack-6_3_0.jpg and /dev/null differ diff --git a/docs/data/rocm-software-stack-6_3_1.jpg b/docs/data/rocm-software-stack-6_3_1.jpg new file mode 100644 index 0000000000..fb858bf003 Binary files /dev/null and b/docs/data/rocm-software-stack-6_3_1.jpg differ diff --git a/docs/how-to/Bar-Memory.rst b/docs/how-to/Bar-Memory.rst new file mode 100644 index 0000000000..954789eca0 --- /dev/null +++ b/docs/how-to/Bar-Memory.rst @@ -0,0 +1,99 @@ +.. meta:: + :description: Learn about BAR configuration in AMD GPUs and ways to troubleshoot physical addressing limit + :keywords: BAR memory, MMIO, GPU memory, Physical Addressing Limit, AMD, ROCm + +************************************** +Troubleshoot BAR access limitation +************************************** +Direct Memory Access (DMA) to PCIe devices using Base Address Registers (BARs) can be restricted due to physical addressing limits. These restrictions can result in data access failures between the system components. Peer-to-peer (P2P) DMA is used to access resources such as registers and memory between devices. PCIe devices need memory-mapped input/output (MMIO) space for DMA, and these MMIO spaces are defined in the PCIe BARs. + +These BARs are a set of 32-bit or 64-bit registers that are used to define the resources that PCIe devices provide. 
+The CPU and other system devices also use these to access the resources of the PCIe devices. P2P DMA only works when one device can directly access the local BAR memory of another. If the memory address of a BAR exceeds the physical addressing limit of a device, the device will not be able to access that BAR, whether it is the device's own BAR or the BAR of another device in the system.
+
+To handle any BAR access issues that might occur, you need to be aware of the physical address limitations of the devices and understand the :ref:`BAR configuration of AMD GPUs `. This information is important when setting up additional MMIO apertures for PCIe devices in the system's physical address space.
+
+Handling physical address limitations
+=============================================
+When a system boots, the system BIOS allocates the physical address space for the components in the system, including system memory and MMIO apertures. On modern 64-bit platforms, there are generally two or more MMIO apertures: one located below 4 GB of physical address space for 32-bit compatibility, and one or more above 4 GB for devices needing more space.
+
+You can control the memory address of the high MMIO aperture from the system BIOS configuration options. This lets you configure the additional MMIO space to align with the physical addressing limit and allows P2P DMA between the devices. For example, if a PCIe device is limited to 44 bits of physical addressing, you should ensure that the MMIO aperture is set below the 44-bit boundary in the system physical address space.
+
+There are two ways to handle this:
+
+* Ensure that the high MMIO aperture is within the physical addressing limits of the devices in the system. For example, if the devices have a 44-bit physical addressing limit, set the ``MMIO High Base`` and ``MMIO High size`` options in the BIOS such that the aperture is within the 44-bit address range, and ensure that the ``Above 4G Decoding`` option is Enabled.
+
+* Enable the Input-Output Memory Management Unit (IOMMU). When the IOMMU is enabled in non-passthrough mode, it creates a virtual I/O address space for each device on the system and ensures that all virtual addresses created in that space are within the physical addressing limits of the device. For more information on IOMMU, see :doc:`../conceptual/iommu`.
+
+.. _bar-configuration:
+
+BAR configuration for AMD GPUs
+================================================
+
+The following table shows how the BARs are configured for AMD GPUs.
+
+
+.. list-table::
+   :widths: 25 25 50
+   :header-rows: 1
+
+   * - BAR Type
+     - Value
+     - Description
+   * - BAR0-1 registers
+     - 64-bit, Prefetchable, GPU memory
+     - 8 GB or 16 GB depending on GPU. Set to less than 2^44 to support P2P access from other GPUs with a 44-bit physical address limit. Prefetchable memory enables faster reads for high-performance computing (HPC) by fetching contiguous data in anticipation of future requests.
+   * - BAR2-3 registers
+     - 64-bit, Prefetchable, Doorbell
+     - Set to less than 2^44 to support P2P access from other GPUs with a 44-bit physical address limit. As a Doorbell BAR, it indicates to the GPU that a new operation is in its queue to be processed.
+   * - BAR4 register
+     - Optional
+     - Not a boot device
+   * - BAR5 register
+     - 32-bit, Non-prefetchable, MMIO
+     - Set to less than 4 GB.
+
+Example of BAR usage on AMD GPUs
+-------------------------------------
+The following is an example configuration of BARs set by the system BIOS on a GFX8 GPU with a 40-bit physical addressing limit:
+
+.. code:: shell
+
+   11:00.0 Display controller: Advanced Micro Devices, Inc. [AMD/ATI] Fiji [Radeon R9 FURY / NANO
+   Series] (rev c1)
+
+   Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Device 0b35
+
+   Flags: bus master, fast devsel, latency 0, IRQ 119
+
+   Memory at bf40000000 (64-bit, prefetchable) [size=256M]
+
+   Memory at bf50000000 (64-bit, prefetchable) [size=2M]
+
+   I/O ports at 3000 [size=256]
+
+   Memory at c7400000 (32-bit, non-prefetchable) [size=256K]
+
+   Expansion ROM at c7440000 [disabled] [size=128K]
+
+Details of the BARs configured in the example are:
+
+**GPU Frame Buffer BAR:** ``Memory at bf40000000 (64-bit, prefetchable) [size=256M]``
+
+The size of the BAR in the example is 256 MB. Generally, it will be the size of the GPU memory (typically 4 GB+). Depending upon the physical address limit and generation of AMD GPUs, the BAR can be set below 2^40, 2^44, or 2^48.
+
+**Doorbell BAR:** ``Memory at bf50000000 (64-bit, prefetchable) [size=2M]``
+
+The size of the BAR should typically be less than 10 MB for this generation of GPUs and has been set to 2 MB in the example. This BAR is placed below 2^40 to allow peer-to-peer access from other AMD GPUs of the same generation.
+
+**I/O BAR:** ``I/O ports at 3000 [size=256]``
+
+This is for legacy VGA and boot device support. Because the GPUs used are not connected to a display (VGA devices), this is not a concern, even if it isn't set up in the system BIOS.
+
+**MMIO BAR:** ``Memory at c7400000 (32-bit, non-prefetchable) [size=256K]``
+
+The AMD driver requires this to access the configuration registers. Since the remainder of the BAR space available is only 1 DWORD (32-bit), this is set below 4 GB. In the example, it is fixed at 256 KB.
+
+**Expansion ROM:** ``Expansion ROM at c7440000 [disabled] [size=128K]``
+
+This is required by the AMD driver to access the GPU video-BIOS. In the example, it is fixed at 128 KB.
\ No newline at end of file
diff --git a/docs/how-to/rocm-for-ai/index.rst b/docs/how-to/rocm-for-ai/index.rst
index 7c63de05ae..e0852ffc42 100644
--- a/docs/how-to/rocm-for-ai/index.rst
+++ b/docs/how-to/rocm-for-ai/index.rst
@@ -16,6 +16,8 @@ In this guide, you'll learn about:
 - :doc:`Installing ROCm and machine learning frameworks `
+- :doc:`Scaling model training `
+
 - :doc:`Training a model `
 - :doc:`Running models from Hugging Face `
diff --git a/docs/how-to/rocm-for-ai/scale-model-training.rst b/docs/how-to/rocm-for-ai/scale-model-training.rst
new file mode 100644
index 0000000000..f01c75d7b3
--- /dev/null
+++ b/docs/how-to/rocm-for-ai/scale-model-training.rst
@@ -0,0 +1,135 @@
+.. meta::
+   :description: How to scale and accelerate model training
+   :keywords: ROCm, AI, LLM, train, fine-tune, deploy, FSDP, DeepSpeed, LLaMA, tutorial
+
+**********************
+Scaling model training
+**********************
+
+Large-scale models like OpenAI GPT-2 or Meta Llama 2 70B present a fundamental challenge: no single GPU or
+accelerator can store and process all of the model's parameters during training.
+PyTorch provides an answer to this computational constraint through its distributed training
+frameworks.
+
+.. _rocm-for-ai-pytorch-distributed:
+
+PyTorch distributed
+===================
+
+Features in ``torch.distributed`` are categorized into three main components:
+
+- `Distributed data-parallel training
+  `_ (DDP)
+
+- `RPC-Based distributed training `_ (RPC)
+
+- `Collective communication `_
+
+In this topic, the focus is on the distributed data-parallelism strategy as it's the most popular.
+To get started with DDP, you first need to understand how to coordinate the model and its training
+data across multiple accelerators or GPUs.
+
+The DDP workflow on multiple accelerators or GPUs is as follows:
+
+#. Split the current global training batch into small local batches on each GPU. For instance, if
+   you have 8 GPUs and the global batch is set at 32 samples, each of the 8 GPUs will have a local
+   batch size of 4 samples.
+
+#. Copy the model to every device so each can process its local batches independently.
+
+#. Run a forward pass, then a backward pass, and output the gradient of the weights with respect to
+   the loss of the model for that local batch. This happens in parallel on multiple devices.
+
+#. Synchronize the local gradients computed by each device and combine them to update the model
+   weights. The updated weights are then redistributed to each device.
+
+In DDP training, each process or worker owns a replica of the model and processes a batch of data,
+and then the reducer uses ``allreduce`` to sum up gradients over different workers.
+
+See the following developer blogs for more in-depth explanations and examples.
+
+* `Multi GPU training with DDP — PyTorch Tutorials `_
+
+* `Building a decoder transformer model on AMD GPUs — ROCm Blogs
+  `_
+
+.. _rocm-for-ai-pytorch-fsdp:
+
+PyTorch FSDP
+------------
+
+As noted in :ref:`PyTorch distributed <rocm-for-ai-pytorch-distributed>`, in DDP, model weights and
+optimizer states are evenly replicated across all workers. Fully Sharded Data Parallel (FSDP) is a
+type of data parallelism that shards model parameters, optimizer states, and gradients across DDP
+ranks.
+
+When training with FSDP, the GPU memory footprint is smaller than when training with DDP across all
+workers. This makes training some very large models feasible by allowing larger models or batch
+sizes to fit on-device. However, this comes with the cost of increased communication volume. The
+communication overhead is reduced by internal optimizations like overlapping communication and
+computation.
+
+For a high-level overview of how FSDP works, review `Getting started with Fully Sharded Data
+Parallel `_.
+
+For detailed training steps, see `PyTorch FSDP examples
+`_.
+
+.. _rocm-for-ai-deepspeed:
+
+DeepSpeed
+---------
+
+`DeepSpeed `_ offers system innovations that make large-scale deep learning training
+effective, efficient, and easy to use. Innovations such as ZeRO, 3D-Parallelism, DeepSpeed-MoE, and
+ZeRO-Infinity fall under the training pillar.
+
+See `Pre-training a large language model with Megatron-DeepSpeed on multiple AMD GPUs
+`_ for a detailed example of
+training with DeepSpeed on an AMD accelerator or GPU.
+
+.. _rocm-for-ai-automatic-mixed-precision:
+
+Automatic mixed precision (AMP)
+-------------------------------
+
+As models increase in size, so do the time, memory, and cost needed to train them. Any measure we
+can take to reduce training time and memory usage through `automatic mixed precision
+`_ (AMP) is highly beneficial for most use cases.
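+
+The following is a minimal sketch of what an AMP training step looks like in PyTorch, using
+``torch.autocast`` together with a gradient scaler. The ``model``, ``optimizer``, and ``loader``
+objects are placeholders rather than part of a specific ROCm example; on ROCm, the ``cuda`` device
+type maps to HIP devices:
+
+.. code-block:: python
+
+   import torch
+
+   scaler = torch.cuda.amp.GradScaler()  # scales the loss to avoid underflow in reduced precision
+
+   for inputs, targets in loader:
+       optimizer.zero_grad()
+       # Run the forward pass in mixed precision.
+       with torch.autocast(device_type="cuda", dtype=torch.float16):
+           outputs = model(inputs)
+           loss = torch.nn.functional.cross_entropy(outputs, targets)
+       scaler.scale(loss).backward()  # backward pass on the scaled loss
+       scaler.step(optimizer)         # unscale gradients, then update weights
+       scaler.update()                # adjust the scale factor for the next step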
+
+See `Automatic mixed precision in PyTorch using AMD GPUs — ROCm Blogs
+`_
+for more information about running AMP on an AMD accelerator.
+
+.. _rocm-for-ai-fine-tune:
+
+Fine-tuning your model
+======================
+
+ROCm supports multiple techniques for :ref:`optimizing fine-tuning `, for
+example, LoRA, QLoRA, PEFT, and FSDP.
+
+Learn more about challenges and solutions for model fine-tuning in :doc:`../llm-fine-tuning-optimization/index`.
+
+The following developer blogs showcase examples of fine-tuning a model on an AMD accelerator or GPU.
+
+* Fine-tuning Llama2 with LoRA
+
+  * `Fine-tune Llama 2 with LoRA: Customizing a large language model for question-answering
+    `_
+
+* Fine-tuning Llama2 with QLoRA
+
+  * `Enhancing LLM accessibility: A deep dive into QLoRA through fine-tuning Llama 2 on a single AMD GPU
+    `_
+
+* Fine-tuning a BERT-based LLM for a text classification task using JAX
+
+  * `LLM distributed supervised fine-tuning with JAX
+    `_
+
+* Fine-tuning StarCoder using PEFT
+
+  * `Instruction fine-tuning of StarCoder with PEFT on multiple AMD GPUs
+    `_
+
+* Recipes for fine-tuning Llama2 and 3 with ``llama-recipes``
+
+  * `meta-llama/llama-recipes: Scripts for fine-tuning Meta Llama3 with composable FSDP & PEFT methods to cover
+    single/multi-node GPUs `_
diff --git a/docs/how-to/rocm-for-ai/train-a-model.rst b/docs/how-to/rocm-for-ai/train-a-model.rst
index d5ef1431a2..b47f028bbf 100644
--- a/docs/how-to/rocm-for-ai/train-a-model.rst
+++ b/docs/how-to/rocm-for-ai/train-a-model.rst
@@ -1,140 +1,503 @@
 .. meta::
-   :description: How to use ROCm for AI
-   :keywords: ROCm, AI, LLM, train, fine-tune, FSDP, DeepSpeed, LLaMA, tutorial
+   :description: How to train a model using ROCm Megatron-LM
+   :keywords: ROCm, AI, LLM, train, Megatron-LM, megatron, Llama, tutorial, docker, torch
 
-****************
-Training a model
-****************
+**************************************
+Training a model with ROCm Megatron-LM
+**************************************
 
-The following is a brief overview of popular component paths per AI development use-case, such as training, LLMs,
-and inferencing.
+.. _amd-megatron-lm:
 
-Accelerating model training
-===========================
+The ROCm Megatron-LM framework is a specialized fork of Megatron-LM, designed to
+enable efficient training of large-scale language models on AMD GPUs. By leveraging AMD Instinct™ MI300X
+accelerators, AMD Megatron-LM delivers enhanced scalability, performance, and resource utilization for AI
+workloads. It is purpose-built to :ref:`support models <amd-megatron-lm-model-support>`
+like Meta's Llama 2, Llama 3, and Llama 3.1, enabling developers to train next-generation AI models with greater
+efficiency. See the GitHub repository at `<https://github.com/ROCm/Megatron-LM>`__.
 
-To train a large model like GPT2 or Llama 2 70B, a single accelerator or GPU cannot store all the model parameters
-required for training. What if you could convert the single-GPU training code to run on multiple accelerators or GPUs?
-PyTorch offers distributed training solutions to facilitate this.
+For ease of use, AMD provides a ready-to-use Docker image for MI300X accelerators containing essential
+components, including PyTorch, PyTorch Lightning, ROCm libraries, and Megatron-LM utilities. It contains the
+following software to accelerate training workloads:
 
-..
_rocm-for-ai-pytorch-distributed: ++--------------------------+--------------------------------+ +| Software component | Version | ++==========================+================================+ +| ROCm | 6.1 | ++--------------------------+--------------------------------+ +| PyTorch | 2.4.0 | ++--------------------------+--------------------------------+ +| PyTorch Lightning | 2.4.0 | ++--------------------------+--------------------------------+ +| Megatron Core | 0.9.0 | ++--------------------------+--------------------------------+ +| Transformer Engine | 1.5.0 | ++--------------------------+--------------------------------+ +| Flash Attention | v2.6 | ++--------------------------+--------------------------------+ +| Transformers | 4.44.0 | ++--------------------------+--------------------------------+ -PyTorch distributed -------------------- +Supported features and models +============================= + +Megatron-LM provides the following key features to train large language models efficiently: -As of PyTorch 1.6.0, features in ``torch.distributed`` are categorized into three main components: +- Transformer Engine (TE) -- `Distributed data-parallel training - `_ (DDP) +- APEX -- `RPC-Based distributed training `_ (RPC) +- GEMM tuning -- `Collective communication `_ +- Torch.compile -In this guide, the focus is on the distributed data-parallelism strategy as it’s the most popular. To get started with DDP, -let’s first understand how to coordinate the model and its training data across multiple accelerators or GPUs. +- 3D parallelism: TP + SP + CP -The DDP workflow on multiple accelerators or GPUs is as follows: +- Distributed optimizer -#. Split the current global training batch into small local batches on each GPU. For instance, if you have 8 GPUs and - the global batch is set at 32 samples, each of the 8 GPUs will have a local batch size of 4 samples. +- Flash Attention (FA) 2 -#. Copy the model to every device so each device can process its local batches independently. +- Fused kernels -#. Run a forward pass, then a backward pass, and output the gradient of the weights with respect to the loss of the - model for that local batch. This happens in parallel on multiple devices. +- Pre-training -#. Synchronize the local gradients computed by each device and combine them to update the model weights. The updated - weights are then redistributed to each device. +.. _amd-megatron-lm-model-support: -In DDP training, each process or worker owns a replica of the model and processes a batch of data, then the reducer uses -``allreduce`` to sum up gradients over different workers. +The following models are pre-optimized for performance on the AMD Instinct MI300X accelerator. -See the following developer blogs for more in-depth explanations and examples. +* Llama 2 7B -* `Multi GPU training with DDP — PyTorch Tutorials `_ +* Llama 2 70B -* `Building a decoder transformer model on AMD GPUs — ROCm Blogs - `_ +* Llama 3 8B -.. _rocm-for-ai-pytorch-fsdp: +* Llama 3 70B -PyTorch FSDP ------------- +* Llama 3.1 8B -As noted in :ref:`PyTorch distributed `, in DDP model weights and optimizer states -are evenly replicated across all workers. Fully Sharded Data Parallel (FSDP) is a type of data parallelism that shards -model parameters, optimizer states, and gradients across DDP ranks. +* Llama 3.1 70B -When training with FSDP, the GPU memory footprint is smaller than when training with DDP across all workers. 
This makes
-the training of some very large models feasible by allowing larger models or batch sizes to fit on-device. However, this
-comes with the cost of increased communication volume. The communication overhead is reduced by internal optimizations
-like overlapping communication and computation.
+Prerequisite system validation steps
+====================================
 
-For a high-level overview of how FSDP works, review `Getting started with Fully Sharded Data Parallel
-`_.
+Complete the following system validation and optimization steps to set up your system before starting training.
 
-For detailed training steps, refer to the `PyTorch FSDP examples
-`_.
+Disable NUMA auto-balancing
+---------------------------
 
-.. _rocm-for-ai-deepspeed:
+Generally, application performance can benefit from disabling NUMA auto-balancing. However,
+it might be detrimental to performance with certain types of workloads.
 
-DeepSpeed
----------
+Run the command ``cat /proc/sys/kernel/numa_balancing`` to check your current NUMA (Non-Uniform
+Memory Access) settings. Output ``0`` indicates this setting is disabled. If there is no output or
+the output is ``1``, run the following command to disable NUMA auto-balancing.
 
-`DeepSpeed `_ offers system innovations that make large-scale deep learning training effective,
-efficient, and easy to use. Innovations such as ZeRO, 3D-Parallelism, DeepSpeed-MoE, ZeRO-Infinity, and so on fall under
-the training pillar.
+.. code-block:: shell
 
-See `Pre-training a large language model with Megatron-DeepSpeed on multiple AMD GPUs — ROCm Blogs
-`_ for a detailed example of
-training with DeepSpeed on an AMD accelerator or GPU.
+   sudo sh -c 'echo 0 > /proc/sys/kernel/numa_balancing'
 
-.. _rocm-for-ai-automatic-mixed-precision:
+See :ref:`mi300x-disable-numa` for more information.
 
-Automatic mixed precision (AMP)
+Hardware verification with ROCm
 -------------------------------
 
-As models increase in size, the time and memory needed to train them; that is, their cost also increases. Any measure we
-can take to reduce training time and memory usage through `automatic mixed precision
-`_ (AMP) is highly beneficial for most use cases.
+Use the command ``rocm-smi --setperfdeterminism 1900`` to set the max clock speed up to 1900 MHz
+instead of the default 2100 MHz. This can reduce the chance of a PCC event lowering the attainable
+GPU clocks. This setting will not be required for new IFWI releases with the production PRC feature.
+You can restore this setting to its default value with the ``rocm-smi -r`` command.
+
+Run the command:
+
+.. code-block:: shell
+
+   rocm-smi --setperfdeterminism 1900
+
+See :ref:`mi300x-hardware-verification-with-rocm` for more information.
+
+RCCL Bandwidth Test
+-------------------
+
+ROCm Collective Communications Library (RCCL) is a standalone library of standard collective communication
+routines for GPUs. See the :doc:`RCCL documentation ` for more information. Before starting
+pre-training, running an RCCL bandwidth test helps ensure that the multi-GPU or multi-node setup is optimized
+for efficient distributed training.
+
+Running the RCCL bandwidth test helps verify that:
+
+- The GPUs can communicate across nodes or within a single node.
+
+- The interconnect (such as InfiniBand, Ethernet, or Infinity Fabric) is functioning as expected and
+  provides adequate bandwidth for communication.
+
+- There are no hardware setup or cabling issues that could affect communication between GPUs.
+
+Tuning and optimizing hyperparameters
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In distributed training, specific hyperparameters related to distributed communication can be tuned based on
+the results of the RCCL bandwidth test. These variables are already set in the Docker image:
+
+.. code-block:: shell
+
+   # force all RCCL streams to be high priority
+   export TORCH_NCCL_HIGH_PRIORITY=1
+
+   # specify which RDMA interfaces to use for communication
+   export NCCL_IB_HCA=rdma0,rdma1,rdma2,rdma3,rdma4,rdma5,rdma6,rdma7
+
+   # define the Global ID index used in RoCE mode
+   export NCCL_IB_GID_INDEX=3
+
+   # avoid data corruption/mismatch issue that existed in past releases
+   export RCCL_MSCCL_ENABLE=0
+
+Running the RCCL Bandwidth Test
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It's recommended to run the RCCL bandwidth test before launching training to verify that system
+performance is sufficient. RCCL is not included in the AMD Megatron-LM Docker
+image; follow the instructions in `<https://github.com/ROCm/rccl-tests>`__ to get started.
+See :ref:`mi300x-rccl` for more information.
+
+Run on 8 GPUs (``-g 8``), scanning from 8 bytes to 10 GB:
+
+.. code-block:: shell
+
+   ./build/all_reduce_perf -b 8 -e 10G -f 2 -g 8
+
+.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-8-gpu.png
+   :width: 800
+
+Using one MPI process per GPU and ``-g 1`` for performance-oriented runs on both single-node and multi-node is
+recommended. So, a run on 8 GPUs looks something like:
+
+.. code-block:: shell
+
+   mpirun -np 8 --bind-to numa ./build/all_reduce_perf -b 8 -e 10G -f 2 -g 1
+
+.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-1-mpi-process-per-gpu.png
+   :width: 800
+
+Running with one MPI process per GPU ensures a one-to-one mapping for CPUs and GPUs, which can be beneficial
+for smaller message sizes. This better represents the real-world use of RCCL in deep learning frameworks like
+PyTorch and TensorFlow.
+
+Use the following script to run the RCCL test for four MI300X GPU nodes. Modify paths and node addresses as needed.
+
+.. code-block::
+
+   /home/$USER/ompi_for_gpu/ompi/bin/mpirun -np 32 -H tw022:8,tw024:8,tw010:8,tw015:8 \
+   --mca pml ucx \
+   --mca btl ^openib \
+   -x NCCL_SOCKET_IFNAME=ens50f0np0 \
+   -x NCCL_IB_HCA=rdma0:1,rdma1:1,rdma2:1,rdma3:1,rdma4:1,rdma5:1,rdma6:1,rdma7:1 \
+   -x NCCL_IB_GID_INDEX=3 \
+   -x NCCL_MIN_NCHANNELS=40 \
+   -x NCCL_DEBUG=version \
+   $HOME/rccl-tests/build/all_reduce_perf -b 8 -e 8g -f 2 -g 1
+
+.. image:: ../../data/how-to/rocm-for-ai/rccl-tests-4-mi300x-gpu-nodes.png
+   :width: 800
+
+.. _mi300x-amd-megatron-lm-training:
+
+Start training on MI300X accelerators
+=====================================
+
+The pre-built ROCm Megatron-LM environment allows users to quickly validate system performance, conduct
+training benchmarks, and achieve superior performance for models like Llama 2 and Llama 3.1.
+
+Use the following instructions to set up the environment, configure the script to train models, and
+reproduce the benchmark results on the MI300X accelerators with the AMD Megatron-LM Docker
+image.
+
+.. _amd-megatron-lm-requirements:
+
+Download the Docker image and required packages
+-----------------------------------------------
+
+1. Use the following command to pull the Docker image from Docker Hub.
+
+   .. code-block:: shell
+
+      docker pull rocm/megatron-lm:24.12-dev
+
+2. Launch the Docker container.
+
+   .. code-block:: shell
+
+      docker run -it --device /dev/dri --device /dev/kfd --network host --ipc host --group-add video --cap-add SYS_PTRACE --security-opt seccomp=unconfined --privileged -v $CACHE_DIR:/root/.cache --name megatron-dev-env rocm/megatron-lm:24.12-dev /bin/bash
+
+3. Clone the ROCm Megatron-LM repository to a local directory and install the required packages on the host machine.
+
+   .. code-block:: shell
+
+      git clone https://github.com/ROCm/Megatron-LM
+      cd Megatron-LM
+
+   .. note::
+
+      This release is validated with ``ROCm/Megatron-LM`` commit
+      `bb93ccb <https://github.com/ROCm/Megatron-LM/commit/bb93ccbfeae6363c67b361a97a27c74ab86e7e92>`_.
+      Checking out this specific commit is recommended for a stable and reproducible environment.
+
+   .. code-block:: shell
+
+      git checkout bb93ccbfeae6363c67b361a97a27c74ab86e7e92
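+
+Once the container from step 2 is running, you can optionally confirm that the PyTorch build
+shipped in the image sees the accelerators before going further. This is an illustrative sanity
+check, not a required setup step:
+
+.. code-block:: python
+
+   import torch
+
+   print(torch.__version__)              # PyTorch build shipped in the image
+   print(torch.cuda.is_available())      # True when ROCm devices are visible
+   print(torch.cuda.device_count())      # number of visible accelerators
+   print(torch.cuda.get_device_name(0))  # device name of the first accelerator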
+
+Prepare training datasets
+-------------------------
+
+If you already have the preprocessed data, you can skip this section.
+
+Use the following command to preprocess datasets; GPT data is used as an example. You can change the merge table,
+append an end-of-document token, remove sentence splitting, and choose the tokenizer type.
+
+.. code-block:: shell
+
+   python tools/preprocess_data.py \
+       --input my-corpus.json \
+       --output-prefix my-gpt2 \
+       --vocab-file gpt2-vocab.json \
+       --tokenizer-type GPT2BPETokenizer \
+       --merge-file gpt2-merges.txt \
+       --append-eod
+
+In this case, the automatically generated output files are named ``my-gpt2_text_document.bin`` and
+``my-gpt2_text_document.idx``.
+
+.. image:: ../../data/how-to/rocm-for-ai/prep-training-datasets-my-gpt2-text-document.png
+   :width: 800
+
+.. _amd-megatron-lm-environment-setup:
+
+Environment setup
+-----------------
+
+In the ``examples/llama`` directory of Megatron-LM, if you're working with Llama 2 7B or Llama 2 70B, use the
+``train_llama2.sh`` configuration script. Likewise, if you're working with Llama 3 or Llama 3.1, use
+``train_llama3.sh`` and update the configuration script accordingly.
+
+Network interface
+^^^^^^^^^^^^^^^^^
+
+To avoid connectivity issues, ensure the correct network interface is set in your training scripts.
+
+1. Run the following command to find the active network interface on your system.
+
+   .. code-block:: shell
+
+      ip a
+
+2. Update the ``NCCL_SOCKET_IFNAME`` and ``GLOO_SOCKET_IFNAME`` variables with your system's network interface. For
+   example:
+
+   .. code-block:: shell
+
+      export NCCL_SOCKET_IFNAME=ens50f0np0
+
+      export GLOO_SOCKET_IFNAME=ens50f0np0
+
+Dataset options
+^^^^^^^^^^^^^^^
+
+You can use either mock data or real data for training.
+
+* If you're using a real dataset, update the ``DATA_PATH`` variable to point to the location of your dataset.
+
+  .. code-block:: shell
+
+     DATA_DIR="/root/.cache/data" # Change to where your dataset is stored
+
+     DATA_PATH=${DATA_DIR}/bookcorpus_text_sentence
+
+  .. code-block:: shell
+
+     --data-path $DATA_PATH
+
+  Ensure that the files are accessible inside the Docker container.
+
+* Mock data can be useful for testing and validation. If you're using mock data, replace ``--data-path $DATA_PATH``
+  with the ``--mock-data`` option.
+
+  .. code-block:: shell
+
+     --mock-data
+
+Tokenizer
+^^^^^^^^^
+
+Tokenization is the process of converting raw text into tokens that can be processed by the model. For Llama
+models, this typically involves sub-word tokenization, where words are broken down into smaller units based on
+a fixed vocabulary. The tokenizer is trained along with the model on a large corpus of text, and it learns a
+fixed vocabulary that can represent a wide range of text from different domains. This allows Llama models to
+handle a variety of input sequences, including unseen words or domain-specific terms.
+
+To train any of the Llama 2 models that this Docker image supports, use the ``Llama2Tokenizer``.
+
+To train any of the Llama 3 and Llama 3.1 models that this Docker image supports, use the ``HuggingFaceTokenizer``.
+Set the Hugging Face model link in the ``TOKENIZER_MODEL`` variable.
+
+For example, if you're using the Llama 3.1 8B model:
+
+.. code-block:: shell
+
+   TOKENIZER_MODEL=meta-llama/Llama-3.1-8B
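+
+As an illustrative sanity check (not part of the training scripts), you can load the same
+tokenizer through the Hugging Face ``transformers`` library that ships in the Docker image and
+inspect how a sample string is tokenized. Access to the gated ``meta-llama`` checkpoints is
+assumed:
+
+.. code-block:: python
+
+   from transformers import AutoTokenizer
+
+   # Same model reference as the TOKENIZER_MODEL variable above.
+   tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B")
+
+   ids = tokenizer("Training on MI300X accelerators")["input_ids"]
+   print(ids)                                   # token IDs fed to the model
+   print(tokenizer.convert_ids_to_tokens(ids))  # sub-word pieces behind those IDs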
+
+Run benchmark tests
+-------------------
+
+.. note::
+
+   If you're running **multi-node training**, update the following environment variables. They can
+   also be passed as command line arguments.
+
+   * Change ``localhost`` to the master node's hostname:
+
+     .. code-block:: shell
+
+        MASTER_ADDR="${MASTER_ADDR:-localhost}"
+
+   * Set the number of nodes you want to train on (for instance, ``2``, ``4``, ``8``):
+
+     .. code-block:: shell
+
+        NNODES="${NNODES:-1}"
+
+   * Set the rank of each node (0 for master, 1 for the first worker node, and so on):
+
+     .. code-block:: shell
+
+        NODE_RANK="${NODE_RANK:-0}"
+
+* Use this command to run a performance benchmark test of any of the Llama 2 models that this Docker image supports (see :ref:`variables <amd-megatron-lm-benchmark-test-vars>`).
+
+  .. code-block:: shell
+
+     {variables} bash examples/llama/train_llama2.sh
+
+* Use this command to run a performance benchmark test of any of the Llama 3 and Llama 3.1 models that this Docker image supports (see :ref:`variables <amd-megatron-lm-benchmark-test-vars>`).
+
+  .. code-block:: shell
-See `Automatic mixed precision in PyTorch using AMD GPUs — ROCm Blogs
-`_
-for more information about running AMP on an AMD accelerator.
+
+     {variables} bash examples/llama/train_llama3.sh
 
-.. _rocm-for-ai-fine-tune:
+.. _amd-megatron-lm-benchmark-test-vars:
 
-Fine-tuning your model
-======================
+The benchmark tests support the same set of variables:
 
-ROCm supports multiple techniques for :ref:`optimizing fine-tuning `, for
-example, LoRA, QLoRA, PEFT, and FSDP.
++--------------------------+-----------------------+-----------------------+
+| Name                     | Options               | Description           |
++==========================+=======================+=======================+
+| ``TEE_OUTPUT``           | 0 or 1                | 0: disable training   |
+|                          |                       | log                   |
+|                          |                       |                       |
+|                          |                       | 1: enable training    |
+|                          |                       | log                   |
++--------------------------+-----------------------+-----------------------+
+| ``MBS``                  |                       | Micro batch size      |
++--------------------------+-----------------------+-----------------------+
+| ``BS``                   |                       | Batch size            |
++--------------------------+-----------------------+-----------------------+
+| ``TP``                   | 1, 2, 4, 8            | Tensor parallel       |
++--------------------------+-----------------------+-----------------------+
+| ``TE_FP8``               | 0 or 1                | Datatype.             |
+|                          |                       | If it is set to 1,    |
+|                          |                       | FP8.                  |
+|                          |                       |                       |
+|                          |                       | If it is set to 0,    |
+|                          |                       | BF16.                 |
++--------------------------+-----------------------+-----------------------+
+| ``NO_TORCH_COMPILE``     | 0 or 1                | If it is set to 1,    |
+|                          |                       | enable torch.compile. |
+|                          |                       |                       |
+|                          |                       | If it is set to 0,    |
+|                          |                       | disable torch.compile |
+|                          |                       | (default).            |
++--------------------------+-----------------------+-----------------------+
+| ``SEQ_LENGTH``           |                       | Input sequence length |
++--------------------------+-----------------------+-----------------------+
+| ``GEMM_TUNING``          | 0 or 1                | If it is set to 1,    |
+|                          |                       | enable gemm tuning.   |
+|                          |                       |                       |
+|                          |                       | If it is set to 0,    |
+|                          |                       | disable gemm tuning   |
++--------------------------+-----------------------+-----------------------+
+| ``USE_FLASH_ATTN``       | 0 or 1                | 0: disable flash      |
+|                          |                       | attention             |
+|                          |                       |                       |
+|                          |                       | 1: enable flash       |
+|                          |                       | attention             |
++--------------------------+-----------------------+-----------------------+
+| ``ENABLE_PROFILING``     | 0 or 1                | 0: disable torch      |
+|                          |                       | profiling             |
+|                          |                       |                       |
+|                          |                       | 1: enable torch       |
+|                          |                       | profiling             |
++--------------------------+-----------------------+-----------------------+
+| ``MODEL_SIZE``           |                       | The size of the       |
+|                          |                       | model: 7B/70B, etc.   |
++--------------------------+-----------------------+-----------------------+
+| ``TOTAL_ITERS``          |                       | Total number of       |
+|                          |                       | iterations            |
++--------------------------+-----------------------+-----------------------+
+| ``transformer-impl``     | transformer_engine or | Enable transformer    |
+|                          | local                 | engine by default     |
++--------------------------+-----------------------+-----------------------+
-Learn more about challenges and solutions for model fine-tuning in :doc:`../llm-fine-tuning-optimization/index`.
+
+Benchmarking examples
+^^^^^^^^^^^^^^^^^^^^^
-The following developer blogs showcase examples of how to fine-tune a model on an AMD accelerator or GPU.
+
+.. tab-set::
-* Fine-tuning Llama2 with LoRA
+
+   .. tab-item:: Single node training
+      :sync: single
 
-  * `Fine-tune Llama 2 with LoRA: Customizing a large language model for question-answering — ROCm Blogs
-    `_
+      Use this command to run training with the Llama 2 7B model on a single node. You can specify MBS, BS, TP,
+      datatype, and so on.
 
-* Fine-tuning Llama2 with QLoRA
+      .. code-block:: bash
 
-  * `Enhancing LLM accessibility: A deep dive into QLoRA through fine-tuning Llama 2 on a single AMD GPU — ROCm Blogs
-    `_
+         TEE_OUTPUT=1 MBS=5 BS=120 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
+         SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
 
-* Fine-tuning a BERT-based LLM for a text classification task using JAX
+      You can find the training logs at the location defined in ``$TRAIN_LOG`` in the :ref:`configuration script <amd-megatron-lm-environment-setup>`.
 
-  * `LLM distributed supervised fine-tuning with JAX
-    `_
+      See the sample output:
 
-* Fine-tuning StarCoder using PEFT
+      .. image:: ../../data/how-to/rocm-for-ai/llama2-7b-training-log-sample.png
+         :width: 800
 
-  * `Instruction fine-tuning of StarCoder with PEFT on multiple AMD GPUs — ROCm Blogs
-    `_
+   .. tab-item:: Multi node training
+      :sync: multi
 
-* Recipes for fine-tuning Llama2 and 3 with ``llama-recipes``
+      Launch the Docker container on each node.
+
+      In this example, run training with the Llama 2 7B model on 2 nodes with specific MBS, BS,
+      datatype, and so on.
+
+      On the master node:
+
+      .. code-block:: bash
+
+         TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
+         SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
+
+      On the worker node:
+
+      .. code-block:: bash
+
+         TEE_OUTPUT=1 MBS=4 BS=64 TP=8 TE_FP8=0 NO_TORCH_COMPILE=1 \
+         SEQ_LENGTH=4096 bash examples/llama/train_llama2.sh
+
+      You can find the training logs at the location defined in ``$TRAIN_LOG`` in the :ref:`configuration script <amd-megatron-lm-environment-setup>`.
+
+      Sample output for 2-node training:
+
+      Master node:
+
+      .. image:: ../../data/how-to/rocm-for-ai/2-node-training-master.png
+         :width: 800
+
+      Worker node:
+
+      .. image:: ../../data/how-to/rocm-for-ai/2-node-training-worker.png
+         :width: 800
 
-  * `meta-llama/llama-recipes: Scripts for fine-tuning Meta Llama3 with composable FSDP & PEFT methods to cover
-    single/multi-node GPUs `_
diff --git a/docs/how-to/system-optimization/mi300x.rst b/docs/how-to/system-optimization/mi300x.rst
index 5f185f379c..4a0145c5b2 100644
--- a/docs/how-to/system-optimization/mi300x.rst
+++ b/docs/how-to/system-optimization/mi300x.rst
@@ -537,6 +537,8 @@ installation was successful, refer to the
 :doc:`rocm-install-on-linux:install/post-install`. Should verification fail, consult
 :doc:`/how-to/system-debugging`.
 
+.. _mi300x-hardware-verification-with-rocm:
+
 Hardware verification with ROCm
 -------------------------------
 
diff --git a/docs/how-to/tuning-guides/mi300x/workload.rst b/docs/how-to/tuning-guides/mi300x/workload.rst
index 0a4fbfd6ee..b74418234e 100644
--- a/docs/how-to/tuning-guides/mi300x/workload.rst
+++ b/docs/how-to/tuning-guides/mi300x/workload.rst
@@ -2062,11 +2062,10 @@ collectives.
 Multi-node FSDP and RCCL settings
 ---------------------------------
 
-It's recommended to use high-priority HIP streams with RCCL.
-
-The simplest way to enable this is by using the nightly PyTorch wheels, as the required changes from
-`PR #122830 `_ were not included in the PyTorch 2.3
-release but are available in the nightly builds.
+When using PyTorch's FSDP (Fully Sharded Data Parallel) feature, the HIP
+streams used by RCCL and the HIP streams used for compute kernels do not
+always overlap well. As a workaround, it's recommended to use
+high-priority HIP streams with RCCL.
 
 To configure high-priority streams:
 
diff --git a/docs/index.md b/docs/index.md
index 000c930b36..94a7611e56 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -37,7 +37,6 @@ ROCm documentation is organized into the following categories:
 :::{grid-item-card} How to
 :class-body: rocm-card-banner rocm-hue-12
 
-* [Programming guide](./how-to/hip_programming_guide.rst)
 * [Use ROCm for AI](./how-to/rocm-for-ai/index.rst)
 * [Use ROCm for HPC](./how-to/rocm-for-hpc/index.rst)
 * [Fine-tune LLMs and inference optimization](./how-to/llm-fine-tuning-optimization/index.rst)
@@ -47,7 +46,8 @@ ROCm documentation is organized into the following categories:
 * [System debugging](./how-to/system-debugging.md)
 * [Use MPI](./how-to/gpu-enabled-mpi.rst)
 * [Use advanced compiler features](./conceptual/compiler-topics.md)
-* [Set the number of CUs](./how-to/setting-cus)
+* [Set the number of CUs](./how-to/setting-cus)
+* [Troubleshoot BAR access limitation](./how-to/Bar-Memory.rst)
 * [ROCm examples](https://github.com/amd/rocm-examples)
 :::
 
@@ -55,12 +55,11 @@ ROCm documentation is organized
into the following categories: * [ROCm tools, compilers, and runtimes](./reference/rocm-tools.md) * [Accelerator and GPU hardware specifications](./reference/gpu-arch-specs.rst) * [Precision support](./reference/precision-support.rst) +* [Graph safe support](./reference/graph-safe-support.rst) ::: diff --git a/docs/reference/gpu-arch-specs.rst b/docs/reference/gpu-arch-specs.rst index dde0a2eb87..efd1fc2f82 100644 --- a/docs/reference/gpu-arch-specs.rst +++ b/docs/reference/gpu-arch-specs.rst @@ -32,6 +32,21 @@ For more information about ROCm hardware compatibility, see the ROCm `Compatibil - L1 Instruction Cache (KiB) - VGPR File (KiB) - SGPR File (KiB) + * + - MI325X + - CDNA3 + - gfx942 + - 256 + - 304 (38 per XCD) + - 64 + - 64 + - 256 + - 32 (4 per XCD) + - 32 + - 16 per 2 CUs + - 64 per 2 CUs + - 512 + - 12.5 * - MI300X - CDNA3 diff --git a/docs/reference/graph-safe-support.rst b/docs/reference/graph-safe-support.rst new file mode 100644 index 0000000000..44283e7326 --- /dev/null +++ b/docs/reference/graph-safe-support.rst @@ -0,0 +1,111 @@ +.. meta:: + :description: This page lists supported graph safe ROCm libraries. + :keywords: AMD, ROCm, HIP, hipGRAPH + +******************************************************************************** +Graph-safe support for ROCm libraries +******************************************************************************** + +HIP graph-safe libraries operate safely in HIP execution graphs. +:ref:`hip:how_to_HIP_graph` are an alternative way of executing tasks on a GPU +that can provide performance benefits over launching kernels using the standard +method via streams. + +Functions and routines from graph-safe libraries shouldn’t result in issues like +race conditions, deadlocks, or unintended dependencies. + +The following table shows whether a ROCm library is graph-safe. + +.. 
list-table:: + :header-rows: 1 + + * + - ROCm library + - Graph safe support + * + - `Composable Kernel `_ + - ❌ + * + - `hipBLAS `_ + - ✅ + * + - `hipBLASLt `_ + - ⚠️ + * + - `hipCUB `_ + - ✅ + * + - `hipFFT `_ + - ✅ (see :ref:`details `) + * + - `hipRAND `_ + - ✅ + * + - `hipSOLVER `_ + - ⚠️ (experimental) + * + - `hipSPARSE `_ + - ✅ + * + - `hipSPARSELt `_ + - ⚠️ (experimental) + * + - `hipTensor `_ + - ❌ + * + - `MIOpen `_ + - ❌ + * + - `RCCL `_ + - ✅ + * + - `rocAL `_ + - ❌ + * + - `rocALUTION `_ + - ❌ + * + - `rocBLAS `_ + - ✅ (see :doc:`details `) + * + - `rocDecode `_ + - ❌ + * + - `rocFFT `_ + - ✅ (see :ref:`details `) + * + - `rocHPCG `_ + - ❌ + * + - `rocJPEG `_ + - ❌ + * + - `rocPRIM `_ + - ✅ + * + - `rocRAND `_ + - ✅ + * + - `rocSOLVER `_ + - ⚠️ (experimental) + * + - `rocSPARSE `_ + - ⚠️ (experimental) + * + - `rocThrust `_ + - ❌ (see :doc:`details `) + * + - `rocWMMA `_ + - ❌ + * + - `RPP `_ + - ⚠️ + * + - `Tensile `_ + - ✅ + +✅: full support + +⚠️: partial support + +❌: not supported diff --git a/docs/reference/rocm-tools.md b/docs/reference/rocm-tools.md index 31e5962276..9ff9452d5b 100644 --- a/docs/reference/rocm-tools.md +++ b/docs/reference/rocm-tools.md @@ -1,7 +1,7 @@ - diff --git a/docs/release/versions.md b/docs/release/versions.md index 0d7db8e1f0..6083f466e5 100644 --- a/docs/release/versions.md +++ b/docs/release/versions.md @@ -8,6 +8,7 @@ | Version | Release date | | ------- | ------------ | +| [6.3.1](https://rocm.docs.amd.com/en/docs-6.3.1/) | December 20, 2024 | | [6.3.0](https://rocm.docs.amd.com/en/docs-6.3.0/) | December 3, 2024 | | [6.2.4](https://rocm.docs.amd.com/en/docs-6.2.4/) | November 6, 2024 | | [6.2.2](https://rocm.docs.amd.com/en/docs-6.2.2/) | September 27, 2024 | diff --git a/docs/sphinx/_toc.yml.in b/docs/sphinx/_toc.yml.in index 7b573237f8..b1e1f1dcb1 100644 --- a/docs/sphinx/_toc.yml.in +++ b/docs/sphinx/_toc.yml.in @@ -40,6 +40,8 @@ subtrees: title: Installation - file: how-to/rocm-for-ai/train-a-model.rst title: Train a model + - file: how-to/rocm-for-ai/scale-model-training.rst + title: Scale model training - file: how-to/rocm-for-ai/hugging-face-models.rst title: Run models from Hugging Face - file: how-to/rocm-for-ai/deploy-your-model.rst @@ -108,7 +110,9 @@ subtrees: - url: https://rocm.docs.amd.com/projects/llvm-project/en/latest/conceptual/openmp.html title: OpenMP support - file: how-to/setting-cus - title: Set the number of CUs + title: Set the number of CUs + - file: how-to/Bar-Memory.rst + title: Troubleshoot BAR access limitation - url: https://github.com/amd/rocm-examples title: ROCm examples @@ -144,8 +148,6 @@ subtrees: title: AMD Instinct MI100/CDNA1 ISA - url: https://www.amd.com/system/files/documents/amd-cdna-whitepaper.pdf title: White paper - - file: conceptual/gpu-memory.md - title: GPU memory - file: conceptual/iommu.rst title: Input-Output Memory Management Unit (IOMMU) - file: conceptual/file-reorg.md @@ -154,8 +156,8 @@ subtrees: title: GPU isolation techniques - file: conceptual/cmake-packages.rst title: Using CMake - - file: conceptual/More-about-how-ROCm-uses-PCIe-Atomics.rst - title: ROCm & PCIe atomics + - file: conceptual/pcie-atomics.rst + title: PCIe atomics in ROCm - file: conceptual/ai-pytorch-inception.md title: Inception v3 with PyTorch - file: conceptual/oversubscription.rst @@ -171,6 +173,8 @@ subtrees: title: Hardware specifications - file: reference/precision-support.rst title: Precision support + - file: reference/graph-safe-support.rst + title: Graph safe support - caption: Contribute 
entries: diff --git a/docs/what-is-rocm.rst b/docs/what-is-rocm.rst index 0e75287e7f..e8bf732963 100644 --- a/docs/what-is-rocm.rst +++ b/docs/what-is-rocm.rst @@ -10,9 +10,9 @@ ROCm is a software stack, composed primarily of open-source software, that provides the tools for programming AMD Graphics Processing Units (GPUs), from low-level kernels to high-level end-user applications. -.. image:: data/rocm-software-stack-6_3_0.jpg +.. image:: data/rocm-software-stack-6_3_1.jpg :width: 800 - :alt: AMD's ROCm software stack and neighboring technologies. + :alt: AMD's ROCm software stack and enabling technologies. :align: center Specifically, ROCm provides the tools for diff --git a/tools/autotag/components.xml b/tools/autotag/components.xml index 511b0feb04..5662bb3f67 100644 --- a/tools/autotag/components.xml +++ b/tools/autotag/components.xml @@ -1,7 +1,7 @@ - diff --git a/tools/autotag/templates/highlights/6.3.1.md b/tools/autotag/templates/highlights/6.3.1.md new file mode 100644 index 0000000000..2f8ecc3306 --- /dev/null +++ b/tools/autotag/templates/highlights/6.3.1.md @@ -0,0 +1,61 @@ +# ROCm 6.3.1 release notes + +The release notes provide a summary of notable changes since the previous ROCm release. + +- [Release highlights](#release-highlights) + +- [Operating system and hardware support changes](#operating-system-and-hardware-support-changes) + +- [ROCm components versioning](#rocm-components) + +- [Detailed component changes](#detailed-component-changes) + +- [ROCm known issues](#rocm-known-issues) + +- [ROCm resolved issues](#rocm-resolved-issues) + +- [ROCm upcoming changes](#rocm-upcoming-changes) + +```{note} +If you’re using Radeon™ PRO or Radeon GPUs in a workstation setting with a +display connected, continue to use ROCm 6.2.3. See the [Use ROCm on Radeon GPUs](https://rocm.docs.amd.com/projects/radeon/en/latest/index.html) +documentation to verify compatibility and system requirements. +``` +## Release highlights + +The following are notable new features and improvements in ROCm 6.3.1. For changes to individual components, see +[Detailed component changes](#detailed-component-changes). + +### Per queue resiliency for Instinct MI300 accelerators + +The AMDGPU driver now includes enhanced resiliency for misbehaving applications on AMD Instinct MI300 accelerators. This helps isolate the impact of misbehaving applications, ensuring other workloads running on the same accelerator are unaffected. + +### ROCm Runfile Installer + +ROCm 6.3.1 introduces the ROCm Runfile Installer, with initial support for Ubuntu 22.04. The ROCm Runfile Installer facilitates ROCm installation without using a native Linux package management system, with or without network or internet access. For more information, see the [ROCm Runfile Installer documentation](https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.3.1/install/rocm-runfile-installer.html). + +### ROCm documentation updates + +ROCm documentation continues to be updated to provide clearer and more comprehensive guidance for a wider variety of user needs and use cases. + +* Added documentation on training a model with ROCm Megatron-LM. AMD offers a Docker image for MI300X accelerators + containing essential components to get started, including ROCm libraries, PyTorch, and Megatron-LM utilities. See + [Training a model using ROCm Megatron-LM](https://rocm.docs.amd.com/en/latest/how-to/rocm-for-ai/train-a-model.html) + to get started. 
+
+  The new ROCm Megatron-LM training Docker accompanies the [ROCm vLLM inference
+  Docker](https://rocm.docs.amd.com/en/latest/how-to/performance-validation/mi300x/vllm-benchmark.html)
+  as a set of ready-to-use containerized solutions to get started with using ROCm
+  for AI.
+
+* Updated the [Instinct MI300X workload tuning
+  guide](https://rocm.docs.amd.com/en/latest/how-to/tuning-guides/mi300x/workload.html) with more current optimization
+  strategies. The updated sections include guidance on vLLM optimization, PyTorch TunableOp, and hipBLASLt tuning.
+
+* HIP graph-safe libraries operate safely in HIP execution graphs. [HIP graphs](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/hipgraph.html#how-to-hip-graph) are an alternative way of executing tasks on a GPU that can provide performance benefits over launching kernels using the standard method via streams. A topic that shows whether a [ROCm library is graph-safe](https://rocm.docs.amd.com/en/latest/reference/graph-safe-support.html) has been added.
+
+* The [Device memory](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/memory_management/device_memory.html) topic in the HIP memory management section has been updated.
+
+* The HIP documentation has expanded with new resources for developers:
+  * [Multi device management](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/multi_device.html)
+  * [OpenGL interoperability](https://rocm.docs.amd.com/projects/HIP/en/latest/how-to/hip_runtime_api/opengl_interop.html)
diff --git a/tools/autotag/templates/known_issues/6.3.1.md b/tools/autotag/templates/known_issues/6.3.1.md
new file mode 100644
index 0000000000..10b93c2b91
--- /dev/null
+++ b/tools/autotag/templates/known_issues/6.3.1.md
@@ -0,0 +1,8 @@
+## ROCm known issues
+
+ROCm known issues are noted on {fab}`github` [GitHub](https://github.com/ROCm/ROCm/labels/Verified%20Issue). For known
+issues related to individual components, review the [Detailed component changes](#detailed-component-changes).
+
+### PCI Express Qualification Tool failure on Debian 12
+
+The PCI Express Qualification Tool (PEQT) module present in the ROCm Validation Suite (RVS) might fail due to a segmentation fault in Debian 12 (bookworm). This will result in failure to determine the characteristics of the PCIe interconnect between the host platform and the GPU, such as support for Gen 3 atomic completers, DMA transfer statistics, link speed, and link width. The standard PCIe command `lspci` can be used as an alternative to view the characteristics of the PCIe bus interconnect with the GPU. This issue is under investigation and will be addressed in a future release. See [GitHub issue #4175](https://github.com/ROCm/ROCm/issues/4175).
\ No newline at end of file
diff --git a/tools/autotag/templates/resolved_issues/6.3.1.md b/tools/autotag/templates/resolved_issues/6.3.1.md
new file mode 100644
index 0000000000..ac41d81dad
--- /dev/null
+++ b/tools/autotag/templates/resolved_issues/6.3.1.md
@@ -0,0 +1,23 @@
+## ROCm resolved issues
+
+The following are previously known issues resolved in this release. For resolved issues related to
+individual components, review the [Detailed component changes](#detailed-component-changes).
+
+### Instinct MI300 series: backward weights convolution performance issue
+
+Fixed a performance issue affecting certain tensor shapes during backward weights convolution when using FP16 or FP32 data types on Instinct MI300 series accelerators. See [GitHub issue #4080](https://github.com/ROCm/ROCm/issues/4080).
+
+### ROCm Compute Profiler and ROCm Systems Profiler post-upgrade issues
+
+Packaging metadata for ROCm Compute Profiler (`rocprofiler-compute`) and ROCm Systems Profiler
+(`rocprofiler-systems`) has been updated to handle the renaming from Omniperf and Omnitrace,
+respectively. This fixes minor issues when upgrading from ROCm 6.2 to 6.3. For more information, see the GitHub issues
+[#4082](https://github.com/ROCm/ROCm/issues/4082) and
+[#4083](https://github.com/ROCm/ROCm/issues/4083).
+
+### Stale file due to OpenCL ICD loader deprecation
+
+Resolved an issue where upgrading from ROCm 6.2.x to ROCm 6.3.0 removed the `rocm-icd-loader`
+package but left a stale file in the old `rocm-6.2.x` directory. The stale files left during
+the upgrade from ROCm 6.2.x to ROCm 6.3.0 will be removed when upgrading to ROCm 6.3.1. For more
+information, see [GitHub issue #4084](https://github.com/ROCm/ROCm/issues/4084).
diff --git a/tools/autotag/templates/support/6.3.1.md b/tools/autotag/templates/support/6.3.1.md
new file mode 100644
index 0000000000..ea458ca454
--- /dev/null
+++ b/tools/autotag/templates/support/6.3.1.md
@@ -0,0 +1,9 @@
+## Operating system and hardware support changes
+
+ROCm 6.3.1 adds support for Debian 12 (kernel: 6.1). Debian is supported only on AMD Instinct accelerators. See the installation instructions at [Debian native installation](https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.3.1/install/native-install/debian.html).
+
+ROCm 6.3.1 enables support for the AMD Instinct MI325X accelerator. For more information, see [AMD Instinct™ MI325X Accelerators](https://www.amd.com/en/products/accelerators/instinct/mi300/mi325x.html).
+
+See the [Compatibility
+matrix](https://rocm.docs.amd.com/en/docs-6.3.1/compatibility/compatibility-matrix.html)
+for more information about operating system and hardware compatibility.
\ No newline at end of file
diff --git a/tools/autotag/templates/upcoming_changes/6.3.1.md b/tools/autotag/templates/upcoming_changes/6.3.1.md
new file mode 100644
index 0000000000..2a85d9fd10
--- /dev/null
+++ b/tools/autotag/templates/upcoming_changes/6.3.1.md
@@ -0,0 +1,13 @@
+## ROCm upcoming changes
+
+The following changes to the ROCm software stack are anticipated for future releases.
+
+### AMDGPU wavefront size compiler macro deprecation
+
+The `__AMDGCN_WAVEFRONT_SIZE__` macro will be deprecated in an upcoming
+release. It is recommended to remove any use of this macro. For more information, see [AMDGPU
+support](https://rocm.docs.amd.com/projects/llvm-project/en/docs-6.3.1/LLVM/clang/html/AMDGPUSupport.html).
+
+### HIPCC Perl scripts deprecation
+
+The HIPCC Perl scripts (`hipcc.pl` and `hipconfig.pl`) will be removed in an upcoming release.
\ No newline at end of file diff --git a/tools/rocm-build/build_lightning.sh b/tools/rocm-build/build_lightning.sh index 49a393ac09..09f890180f 100755 --- a/tools/rocm-build/build_lightning.sh +++ b/tools/rocm-build/build_lightning.sh @@ -1117,6 +1117,18 @@ build() { create_compiler_config_files } +create_wheel_package() { + echo "Creating rocm-llvm wheel package" + mkdir -p "$ROCM_WHEEL_DIR" + cp -f $SCRIPT_ROOT/generate_setup_py.py $ROCM_WHEEL_DIR + cp -f $SCRIPT_ROOT/repackage_wheel.sh $ROCM_WHEEL_DIR + cd $ROCM_WHEEL_DIR + # Currently only supports python3.6 + ./repackage_wheel.sh $RPM_PATH/rocm-llvm*.rpm python3.6 + # Copy the wheel created to RPM folder which will be uploaded to artifactory + mv "$ROCM_WHEEL_DIR"/dist/*.whl "$RPM_PATH" +} + case $TARGET in (clean) clean_lightning ;; (all) diff --git a/tools/rocm-build/build_rpp.sh b/tools/rocm-build/build_rpp.sh index 89d8c8dc61..4e95ea4e16 100755 --- a/tools/rocm-build/build_rpp.sh +++ b/tools/rocm-build/build_rpp.sh @@ -40,7 +40,7 @@ build_rpp() { mkdir -p $BUILD_DIR && cd $BUILD_DIR - init_rocm_common_cmake_params + init_rocm_common_cmake_params cmake \ "${rocm_math_common_cmake_params[@]}" \ diff --git a/tools/rocm-build/rocm-6.2.1.xml b/tools/rocm-build/rocm-6.2.1.xml index 4c49de01b1..35bcebbb05 100644 --- a/tools/rocm-build/rocm-6.2.1.xml +++ b/tools/rocm-build/rocm-6.2.1.xml @@ -26,6 +26,7 @@ + @@ -43,6 +44,7 @@ + @@ -58,6 +60,8 @@ + + diff --git a/tools/rocm-build/rocm-6.2.2.xml b/tools/rocm-build/rocm-6.2.2.xml index 552502b2b5..edd46f0bbc 100644 --- a/tools/rocm-build/rocm-6.2.2.xml +++ b/tools/rocm-build/rocm-6.2.2.xml @@ -2,6 +2,7 @@ @@ -72,7 +73,6 @@ -======== diff --git a/tools/rocm-build/rocm-6.3.0.xml b/tools/rocm-build/rocm-6.3.0.xml index 200389468a..abd29cf125 100644 --- a/tools/rocm-build/rocm-6.3.0.xml +++ b/tools/rocm-build/rocm-6.3.0.xml @@ -74,4 +74,4 @@ - \ No newline at end of file +