diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 020fadc2c7..9377bd7150 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -38,7 +38,7 @@ jobs: submodules: recursive fetch-depth: 0 - - uses: actions/cache@v2 + - uses: actions/cache@v3 if: matrix.libclang == true && matrix.benchmark == false with: key: v3-libclang-${{ runner.os }}-${{ hashFiles( 'cpp/ycm/CMakeLists.txt' ) }} @@ -51,7 +51,7 @@ jobs: clang_archives name: Cache libclang - - uses: actions/cache@v2 + - uses: actions/cache@v3 if: matrix.benchmark == false with: key: v2-deps-${{ runner.os }}-${{ hashFiles( 'build.py' ) }} @@ -65,7 +65,7 @@ jobs: third_party/omnisharp-roslyn/v[0-9]* name: Cache dependencies - - uses: actions/cache@v2 + - uses: actions/cache@v3 if: matrix.benchmark == false with: key: v2-testdeps-${{ runner.os }}-${{ hashFiles( 'run_tests.py' ) }} @@ -79,21 +79,22 @@ jobs: - name: Install Java if: matrix.benchmark == false - uses: actions/setup-java@v2 + uses: actions/setup-java@v3 with: java-version: 17 distribution: 'adopt' - name: Install Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: '3.8' - name: Install Go if: matrix.benchmark == false - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: - stable: true + go-version: stable + cache: false - name: Install GCC if: runner.os == 'Linux' && matrix.compiler != 'clang' run: | @@ -127,6 +128,7 @@ jobs: if: matrix.benchmark == false with: name: "${{ matrix.runs-on }}-${{ matrix.name_suffix }}-tests" + token: ${{ secrets.CODECOV_TOKEN }} gcov: true linux_lint: @@ -196,7 +198,7 @@ jobs: submodules: recursive fetch-depth: 0 - - uses: actions/cache@v2 + - uses: actions/cache@v3 if: matrix.libclang == true && matrix.benchmark == false with: key: v3-libclang-${{ runner.os }}-${{ hashFiles( 'cpp/ycm/CMakeLists.txt' ) }} @@ -209,7 +211,7 @@ jobs: clang_archives name: Cache libclang - - uses: actions/cache@v2 + - uses: actions/cache@v3 if: matrix.benchmark 
== false with: key: v2-deps-${{ runner.os }}-${{ hashFiles( 'build.py' ) }} @@ -223,7 +225,7 @@ jobs: third_party/omnisharp-roslyn/v[0-9]* name: Cache dependencies - - uses: actions/cache@v2 + - uses: actions/cache@v3 if: matrix.benchmark == false with: key: v2-testdeps-${{ runner.os }}-${{ hashFiles( 'run_tests.py' ) }} @@ -237,20 +239,21 @@ jobs: - name: Install Java if: matrix.benchmark == false - uses: actions/setup-java@v2 + uses: actions/setup-java@v3 with: java-version: 17 distribution: 'temurin' - name: Install Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: '3.8' architecture: ${{ matrix.python-arch }} - name: Install Go if: matrix.benchmark == false - uses: actions/setup-go@v2 + uses: actions/setup-go@v4 with: - stable: true + go-version: stable + cache: false - name: Run pip and prepare coverage if: matrix.benchmark == false run: | @@ -271,4 +274,4 @@ jobs: if: matrix.benchmark == false with: name: "${{ matrix.runs-on }}-${{ matrix.name_suffix }}-tests" - gcov: true + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.ycm_extra_conf.py b/.ycm_extra_conf.py index aa5e153b5e..d97014841c 100644 --- a/.ycm_extra_conf.py +++ b/.ycm_extra_conf.py @@ -28,7 +28,7 @@ # # For more information, please refer to -from distutils.sysconfig import get_python_inc +from sysconfig import get_path import platform import os.path as p import subprocess @@ -71,7 +71,7 @@ '-isystem', 'cpp/BoostParts', '-isystem', -get_python_inc(), +get_path( 'include' ), '-isystem', 'cpp/llvm/include', '-isystem', diff --git a/build.py b/build.py index bc9427a96b..f4a95deee8 100755 --- a/build.py +++ b/build.py @@ -95,7 +95,7 @@ def Exit( self ): 'ba5fe5ee3b2a8395287e24aef20ce6e17834cf8e877117e6caacac6a688a6c53' ) -DEFAULT_RUST_TOOLCHAIN = 'nightly-2023-05-11' +DEFAULT_RUST_TOOLCHAIN = 'nightly-2023-08-18' RUST_ANALYZER_DIR = p.join( DIR_OF_THIRD_PARTY, 'rust-analyzer' ) BUILD_ERROR_MESSAGE = ( @@ -107,7 +107,7 @@ def Exit( self ): 'issue tracker, 
including the entire output of this script (with --verbose) ' 'and the invocation line used to run it.' ) -CLANGD_VERSION = '16.0.1' +CLANGD_VERSION = '17.0.1' CLANGD_BINARIES_ERROR_MESSAGE = ( 'No prebuilt Clang {version} binaries for {platform}. ' 'You\'ll have to compile Clangd {version} from source ' @@ -132,23 +132,23 @@ def FindLatestMSVC( quiet ): try: latest_v = int( latest_full_v.split( '.' )[ 0 ] ) except ValueError: - raise ValueError( f"{latest_full_v} is not a version number." ) + raise ValueError( f"{ latest_full_v } is not a version number." ) if not quiet: - print( f'vswhere -latest returned version {latest_full_v}' ) + print( f'vswhere -latest returned version { latest_full_v }' ) if latest_v not in ACCEPTABLE_VERSIONS: if latest_v > 17: if not quiet: - print( f'MSVC Version {latest_full_v} is newer than expected.' ) + print( f'MSVC Version { latest_full_v } is newer than expected.' ) else: raise ValueError( - f'vswhere returned {latest_full_v} which is unexpected.' + f'vswhere returned { latest_full_v } which is unexpected.' 'Pass --msvc argument.' 
) return latest_v else: if not quiet: - print( f'vswhere returned nothing usable, {latest_full_v}' ) + print( f'vswhere returned nothing usable, { latest_full_v }' ) # Fall back to registry parsing, which works at least until MSVC 2019 (16) # but is likely failing on MSVC 2022 (17) @@ -161,11 +161,11 @@ def FindLatestMSVC( quiet ): for i in ACCEPTABLE_VERSIONS: if not quiet: print( 'Trying to find ' - rf'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\{i}.0' ) + rf'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\{ i }.0' ) try: - winreg.OpenKey( handle, rf'SOFTWARE\Microsoft\VisualStudio\{i}.0' ) + winreg.OpenKey( handle, rf'SOFTWARE\Microsoft\VisualStudio\{ i }.0' ) if not quiet: - print( f"Found MSVC version {i}" ) + print( f"Found MSVC version { i }" ) msvc = i break except FileNotFoundError: @@ -939,7 +939,7 @@ def EnableGoCompleter( args ): new_env.pop( 'GOROOT', None ) new_env[ 'GOBIN' ] = p.join( new_env[ 'GOPATH' ], 'bin' ) - gopls = 'golang.org/x/tools/gopls@v0.9.4' + gopls = 'golang.org/x/tools/gopls@v0.13.2' CheckCall( [ go, 'install', gopls ], env = new_env, quiet = args.quiet, @@ -978,7 +978,7 @@ def EnableRustCompleter( switches ): req_toolchain_version = switches.rust_toolchain_version if switches.quiet: - sys.stdout.write( f'Installing rust-analyzer "{req_toolchain_version}" ' + sys.stdout.write( f'Installing rust-analyzer "{ req_toolchain_version }" ' 'for Rust support...' 
) sys.stdout.flush() @@ -1145,30 +1145,30 @@ def GetClangdTarget(): if OnWindows(): return [ ( 'clangd-{version}-win64', - 'a0a7b16f6f92d545c84baff5e4bdb56897e955689ffc7407c915cc9d3c69a945' ), + '66a1e4d527b451d1e9f21183416fd53ef7f395266bbf7fd74b470ec326d19c98' ), ( 'clangd-{version}-win32', - '870de4d2a45380eba7c6b6640e2cb870219dd2025ed3bcb58101fd1d17f51d75' ) ] + 'c4c351da9f528a2cfacbc669cfb656ef34791ed637aeed051274adf611f3ba5a' ) ] if OnMac(): if OnArm(): return [ ( 'clangd-{version}-arm64-apple-darwin', - 'c5b0a314c00e4ce839ce1f4ee1ed46116f839949b7874affa759e10589340948' ) ] + '38b0335306193cfe7978af9b2bb9dffc48406739b23f19158e7f000f910df5b0' ) ] return [ ( 'clangd-{version}-x86_64-apple-darwin', - '826c85889a1c288418e2c05b91e40158cde06f2e79f1e951d4983de2652a6d2c' ) ] + 'e3dcbefda4a10d7e1e2f8ce8db820219d78ac48ade247048fc0c6a821105ca26' ) ] if OnAArch64(): return [ ( 'clangd-{version}-aarch64-linux-gnu', - '79f4a0a20342479c0e29573cf58810e0daabbf00178cf042edf6e1acb20a8602' ) ] + 'a3074a5d3c955b3326881617d36438e2cf36140d8de4b5f7d98e73eda92797a8' ) ] if OnArm(): return [ None, # First list index is for 64bit archives. ARMv7 is 32bit only. ( 'clangd-{version}-armv7a-linux-gnueabihf', - 'e521f21021885aaeb94e631949db6c0a65cc9c5c9c708afe4a42a058eb91ebca' ) ] + 'f167c13d3741ad7869a6ee57621af2cb9c2477bb300ab2fac91ea64c19f8df43' ) ] if OnX86_64(): return [ ( 'clangd-{version}-x86_64-unknown-linux-gnu', - '51e69f6f5394ed6990cd7d938c53135ef2b5f8d2da1026eb291ffb3c81968847' ) ] + '70a9cf4c9e288941f0193dbfe0ab164e1805b622c2df522ea7319dabdeae3b4c' ) ] raise InstallationFailed( CLANGD_BINARIES_ERROR_MESSAGE.format( version = CLANGD_VERSION, platform = 'this system' ) ) diff --git a/cpp/llvm/include/clang-c/Index.h b/cpp/llvm/include/clang-c/Index.h index a3e54285f8..601b91f67d 100644 --- a/cpp/llvm/include/clang-c/Index.h +++ b/cpp/llvm/include/clang-c/Index.h @@ -34,7 +34,7 @@ * compatible, thus CINDEX_VERSION_MAJOR is expected to remain stable. 
*/ #define CINDEX_VERSION_MAJOR 0 -#define CINDEX_VERSION_MINOR 63 +#define CINDEX_VERSION_MINOR 64 #define CINDEX_VERSION_ENCODE(major, minor) (((major)*10000) + ((minor)*1)) @@ -48,6 +48,10 @@ #define CINDEX_VERSION_STRING \ CINDEX_VERSION_STRINGIZE(CINDEX_VERSION_MAJOR, CINDEX_VERSION_MINOR) +#ifndef __has_feature +#define __has_feature(feature) 0 +#endif + LLVM_CLANG_C_EXTERN_C_BEGIN /** \defgroup CINDEX libclang: C Interface to Clang @@ -275,6 +279,22 @@ CINDEX_LINKAGE CXIndex clang_createIndex(int excludeDeclarationsFromPCH, */ CINDEX_LINKAGE void clang_disposeIndex(CXIndex index); +typedef enum { + /** + * Use the default value of an option that may depend on the process + * environment. + */ + CXChoice_Default = 0, + /** + * Enable the option. + */ + CXChoice_Enabled = 1, + /** + * Disable the option. + */ + CXChoice_Disabled = 2 +} CXChoice; + typedef enum { /** * Used to indicate that no special CXIndex options are needed. @@ -309,9 +329,131 @@ typedef enum { } CXGlobalOptFlags; +/** + * Index initialization options. + * + * 0 is the default value of each member of this struct except for Size. + * Initialize the struct in one of the following three ways to avoid adapting + * code each time a new member is added to it: + * \code + * CXIndexOptions Opts; + * memset(&Opts, 0, sizeof(Opts)); + * Opts.Size = sizeof(CXIndexOptions); + * \endcode + * or explicitly initialize the first data member and zero-initialize the rest: + * \code + * CXIndexOptions Opts = { sizeof(CXIndexOptions) }; + * \endcode + * or to prevent the -Wmissing-field-initializers warning for the above version: + * \code + * CXIndexOptions Opts{}; + * Opts.Size = sizeof(CXIndexOptions); + * \endcode + */ +typedef struct CXIndexOptions { + /** + * The size of struct CXIndexOptions used for option versioning. + * + * Always initialize this member to sizeof(CXIndexOptions), or assign + * sizeof(CXIndexOptions) to it right after creating a CXIndexOptions object. 
+ */ + unsigned Size; + /** + * A CXChoice enumerator that specifies the indexing priority policy. + * \sa CXGlobalOpt_ThreadBackgroundPriorityForIndexing + */ + unsigned char ThreadBackgroundPriorityForIndexing; + /** + * A CXChoice enumerator that specifies the editing priority policy. + * \sa CXGlobalOpt_ThreadBackgroundPriorityForEditing + */ + unsigned char ThreadBackgroundPriorityForEditing; + /** + * \see clang_createIndex() + */ + unsigned ExcludeDeclarationsFromPCH : 1; + /** + * \see clang_createIndex() + */ + unsigned DisplayDiagnostics : 1; + /** + * Store PCH in memory. If zero, PCH are stored in temporary files. + */ + unsigned StorePreamblesInMemory : 1; + unsigned /*Reserved*/ : 13; + + /** + * The path to a directory, in which to store temporary PCH files. If null or + * empty, the default system temporary directory is used. These PCH files are + * deleted on clean exit but stay on disk if the program crashes or is killed. + * + * This option is ignored if \a StorePreamblesInMemory is non-zero. + * + * Libclang does not create the directory at the specified path in the file + * system. Therefore it must exist, or storing PCH files will fail. + */ + const char *PreambleStoragePath; + /** + * Specifies a path which will contain log files for certain libclang + * invocations. A null value implies that libclang invocations are not logged. + */ + const char *InvocationEmissionPath; +} CXIndexOptions; + +/** + * Provides a shared context for creating translation units. + * + * Call this function instead of clang_createIndex() if you need to configure + * the additional options in CXIndexOptions. + * + * \returns The created index or null in case of error, such as an unsupported + * value of options->Size. 
+ * + * For example: + * \code + * CXIndex createIndex(const char *ApplicationTemporaryPath) { + * const int ExcludeDeclarationsFromPCH = 1; + * const int DisplayDiagnostics = 1; + * CXIndex Idx; + * #if CINDEX_VERSION_MINOR >= 64 + * CXIndexOptions Opts; + * memset(&Opts, 0, sizeof(Opts)); + * Opts.Size = sizeof(CXIndexOptions); + * Opts.ThreadBackgroundPriorityForIndexing = 1; + * Opts.ExcludeDeclarationsFromPCH = ExcludeDeclarationsFromPCH; + * Opts.DisplayDiagnostics = DisplayDiagnostics; + * Opts.PreambleStoragePath = ApplicationTemporaryPath; + * Idx = clang_createIndexWithOptions(&Opts); + * if (Idx) + * return Idx; + * fprintf(stderr, + * "clang_createIndexWithOptions() failed. " + * "CINDEX_VERSION_MINOR = %d, sizeof(CXIndexOptions) = %u\n", + * CINDEX_VERSION_MINOR, Opts.Size); + * #else + * (void)ApplicationTemporaryPath; + * #endif + * Idx = clang_createIndex(ExcludeDeclarationsFromPCH, DisplayDiagnostics); + * clang_CXIndex_setGlobalOptions( + * Idx, clang_CXIndex_getGlobalOptions(Idx) | + * CXGlobalOpt_ThreadBackgroundPriorityForIndexing); + * return Idx; + * } + * \endcode + * + * \sa clang_createIndex() + */ +CINDEX_LINKAGE CXIndex +clang_createIndexWithOptions(const CXIndexOptions *options); + /** * Sets general options associated with a CXIndex. * + * This function is DEPRECATED. Set + * CXIndexOptions::ThreadBackgroundPriorityForIndexing and/or + * CXIndexOptions::ThreadBackgroundPriorityForEditing and call + * clang_createIndexWithOptions() instead. + * * For example: * \code * CXIndex idx = ...; @@ -327,6 +469,9 @@ CINDEX_LINKAGE void clang_CXIndex_setGlobalOptions(CXIndex, unsigned options); /** * Gets the general options associated with a CXIndex. * + * This function allows to obtain the final option values used by libclang after + * specifying the option policies via CXChoice enumerators. + * * \returns A bitmask of options, a bitwise OR of CXGlobalOpt_XXX flags that * are associated with the given CXIndex object. 
*/ @@ -335,6 +480,9 @@ CINDEX_LINKAGE unsigned clang_CXIndex_getGlobalOptions(CXIndex); /** * Sets the invocation emission path option in a CXIndex. * + * This function is DEPRECATED. Set CXIndexOptions::InvocationEmissionPath and + * call clang_createIndexWithOptions() instead. + * * The invocation emission path specifies a path which will contain log * files for certain libclang invocations. A null value (default) implies that * libclang invocations are not logged.. @@ -2787,10 +2935,15 @@ enum CXTypeKind { CXType_OCLIntelSubgroupAVCImeResult = 169, CXType_OCLIntelSubgroupAVCRefResult = 170, CXType_OCLIntelSubgroupAVCSicResult = 171, + CXType_OCLIntelSubgroupAVCImeResultSingleReferenceStreamout = 172, + CXType_OCLIntelSubgroupAVCImeResultDualReferenceStreamout = 173, + CXType_OCLIntelSubgroupAVCImeSingleReferenceStreamin = 174, + CXType_OCLIntelSubgroupAVCImeDualReferenceStreamin = 175, + + /* Old aliases for AVC OpenCL extension types. */ CXType_OCLIntelSubgroupAVCImeResultSingleRefStreamout = 172, CXType_OCLIntelSubgroupAVCImeResultDualRefStreamout = 173, CXType_OCLIntelSubgroupAVCImeSingleRefStreamin = 174, - CXType_OCLIntelSubgroupAVCImeDualRefStreamin = 175, CXType_ExtVector = 176, @@ -2888,9 +3041,25 @@ CINDEX_LINKAGE unsigned long long clang_getEnumConstantDeclUnsignedValue(CXCursor C); /** - * Retrieve the bit width of a bit field declaration as an integer. + * Returns non-zero if the cursor specifies a Record member that is a bit-field. + */ +CINDEX_LINKAGE unsigned clang_Cursor_isBitField(CXCursor C); + +/** + * Retrieve the bit width of a bit-field declaration as an integer. + * + * If the cursor does not reference a bit-field, or if the bit-field's width + * expression cannot be evaluated, -1 is returned. * - * If a cursor that is not a bit field declaration is passed in, -1 is returned. 
+ * For example: + * \code + * if (clang_Cursor_isBitField(Cursor)) { + * int Width = clang_getFieldDeclBitWidth(Cursor); + * if (Width != -1) { + * // The bit-field width is not value-dependent. + * } + * } + * \endcode */ CINDEX_LINKAGE int clang_getFieldDeclBitWidth(CXCursor C); @@ -3519,12 +3688,6 @@ CINDEX_LINKAGE CXType clang_Type_getTemplateArgumentAsType(CXType T, */ CINDEX_LINKAGE enum CXRefQualifierKind clang_Type_getCXXRefQualifier(CXType T); -/** - * Returns non-zero if the cursor specifies a Record member that is a - * bitfield. - */ -CINDEX_LINKAGE unsigned clang_Cursor_isBitField(CXCursor C); - /** * Returns 1 if the base class specified by the cursor with kind * CX_CXXBaseSpecifier is virtual. @@ -3697,8 +3860,6 @@ typedef enum CXChildVisitResult (*CXCursorVisitor)(CXCursor cursor, CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent, CXCursorVisitor visitor, CXClientData client_data); -#ifdef __has_feature -#if __has_feature(blocks) /** * Visitor invoked for each cursor found by a traversal. * @@ -3709,8 +3870,12 @@ CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent, * The visitor should return one of the \c CXChildVisitResult values * to direct clang_visitChildrenWithBlock(). */ +#if __has_feature(blocks) typedef enum CXChildVisitResult (^CXCursorVisitorBlock)(CXCursor cursor, CXCursor parent); +#else +typedef struct _CXChildVisitResult *CXCursorVisitorBlock; +#endif /** * Visits the children of a cursor using the specified block. 
Behaves @@ -3718,8 +3883,6 @@ typedef enum CXChildVisitResult (^CXCursorVisitorBlock)(CXCursor cursor, */ CINDEX_LINKAGE unsigned clang_visitChildrenWithBlock(CXCursor parent, CXCursorVisitorBlock block); -#endif -#endif /** * @} @@ -4343,6 +4506,51 @@ CINDEX_LINKAGE unsigned clang_CXXMethod_isCopyAssignmentOperator(CXCursor C); */ CINDEX_LINKAGE unsigned clang_CXXMethod_isMoveAssignmentOperator(CXCursor C); +/** + * Determines if a C++ constructor or conversion function was declared + * explicit, returning 1 if such is the case and 0 otherwise. + * + * Constructors or conversion functions are declared explicit through + * the use of the explicit specifier. + * + * For example, the following constructor and conversion function are + * not explicit as they lack the explicit specifier: + * + * class Foo { + * Foo(); + * operator int(); + * }; + * + * While the following constructor and conversion function are + * explicit as they are declared with the explicit specifier. + * + * class Foo { + * explicit Foo(); + * explicit operator int(); + * }; + * + * This function will return 0 when given a cursor pointing to one of + * the former declarations and it will return 1 for a cursor pointing + * to the latter declarations. + * + * The explicit specifier allows the user to specify a + * conditional compile-time expression whose value decides + * whether the marked element is explicit or not. + * + * For example: + * + * constexpr bool foo(int i) { return i % 2 == 0; } + * + * class Foo { + * explicit(foo(1)) Foo(); + * explicit(foo(2)) operator int(); + * } + * + * This function will return 0 for the constructor and 1 for + * the conversion function. + */ +CINDEX_LINKAGE unsigned clang_CXXMethod_isExplicit(CXCursor C); + /** * Determine if a C++ record is abstract, i.e. whether a class or struct * has a pure virtual member function. 
@@ -5675,11 +5883,12 @@ CINDEX_LINKAGE CXResult clang_findReferencesInFile( CINDEX_LINKAGE CXResult clang_findIncludesInFile( CXTranslationUnit TU, CXFile file, CXCursorAndRangeVisitor visitor); -#ifdef __has_feature #if __has_feature(blocks) - typedef enum CXVisitorResult (^CXCursorAndRangeVisitorBlock)(CXCursor, CXSourceRange); +#else +typedef struct _CXCursorAndRangeVisitorBlock *CXCursorAndRangeVisitorBlock; +#endif CINDEX_LINKAGE CXResult clang_findReferencesInFileWithBlock(CXCursor, CXFile, @@ -5689,9 +5898,6 @@ CINDEX_LINKAGE CXResult clang_findIncludesInFileWithBlock(CXTranslationUnit, CXFile, CXCursorAndRangeVisitorBlock); -#endif -#endif - /** * The client's data object that is associated with a CXFile. */ @@ -6304,6 +6510,144 @@ typedef enum CXVisitorResult (*CXFieldVisitor)(CXCursor C, CINDEX_LINKAGE unsigned clang_Type_visitFields(CXType T, CXFieldVisitor visitor, CXClientData client_data); +/** + * Describes the kind of binary operators. + */ +enum CXBinaryOperatorKind { + /** This value describes cursors which are not binary operators. */ + CXBinaryOperator_Invalid, + /** C++ Pointer - to - member operator. */ + CXBinaryOperator_PtrMemD, + /** C++ Pointer - to - member operator. */ + CXBinaryOperator_PtrMemI, + /** Multiplication operator. */ + CXBinaryOperator_Mul, + /** Division operator. */ + CXBinaryOperator_Div, + /** Remainder operator. */ + CXBinaryOperator_Rem, + /** Addition operator. */ + CXBinaryOperator_Add, + /** Subtraction operator. */ + CXBinaryOperator_Sub, + /** Bitwise shift left operator. */ + CXBinaryOperator_Shl, + /** Bitwise shift right operator. */ + CXBinaryOperator_Shr, + /** C++ three-way comparison (spaceship) operator. */ + CXBinaryOperator_Cmp, + /** Less than operator. */ + CXBinaryOperator_LT, + /** Greater than operator. */ + CXBinaryOperator_GT, + /** Less or equal operator. */ + CXBinaryOperator_LE, + /** Greater or equal operator. */ + CXBinaryOperator_GE, + /** Equal operator. 
*/ + CXBinaryOperator_EQ, + /** Not equal operator. */ + CXBinaryOperator_NE, + /** Bitwise AND operator. */ + CXBinaryOperator_And, + /** Bitwise XOR operator. */ + CXBinaryOperator_Xor, + /** Bitwise OR operator. */ + CXBinaryOperator_Or, + /** Logical AND operator. */ + CXBinaryOperator_LAnd, + /** Logical OR operator. */ + CXBinaryOperator_LOr, + /** Assignment operator. */ + CXBinaryOperator_Assign, + /** Multiplication assignment operator. */ + CXBinaryOperator_MulAssign, + /** Division assignment operator. */ + CXBinaryOperator_DivAssign, + /** Remainder assignment operator. */ + CXBinaryOperator_RemAssign, + /** Addition assignment operator. */ + CXBinaryOperator_AddAssign, + /** Subtraction assignment operator. */ + CXBinaryOperator_SubAssign, + /** Bitwise shift left assignment operator. */ + CXBinaryOperator_ShlAssign, + /** Bitwise shift right assignment operator. */ + CXBinaryOperator_ShrAssign, + /** Bitwise AND assignment operator. */ + CXBinaryOperator_AndAssign, + /** Bitwise XOR assignment operator. */ + CXBinaryOperator_XorAssign, + /** Bitwise OR assignment operator. */ + CXBinaryOperator_OrAssign, + /** Comma operator. */ + CXBinaryOperator_Comma +}; + +/** + * Retrieve the spelling of a given CXBinaryOperatorKind. + */ +CINDEX_LINKAGE CXString +clang_getBinaryOperatorKindSpelling(enum CXBinaryOperatorKind kind); + +/** + * Retrieve the binary operator kind of this cursor. + * + * If this cursor is not a binary operator then returns Invalid. + */ +CINDEX_LINKAGE enum CXBinaryOperatorKind +clang_getCursorBinaryOperatorKind(CXCursor cursor); + +/** + * Describes the kind of unary operators. + */ +enum CXUnaryOperatorKind { + /** This value describes cursors which are not unary operators. */ + CXUnaryOperator_Invalid, + /** Postfix increment operator. */ + CXUnaryOperator_PostInc, + /** Postfix decrement operator. */ + CXUnaryOperator_PostDec, + /** Prefix increment operator. */ + CXUnaryOperator_PreInc, + /** Prefix decrement operator. 
*/ + CXUnaryOperator_PreDec, + /** Address of operator. */ + CXUnaryOperator_AddrOf, + /** Dereference operator. */ + CXUnaryOperator_Deref, + /** Plus operator. */ + CXUnaryOperator_Plus, + /** Minus operator. */ + CXUnaryOperator_Minus, + /** Not operator. */ + CXUnaryOperator_Not, + /** LNot operator. */ + CXUnaryOperator_LNot, + /** "__real expr" operator. */ + CXUnaryOperator_Real, + /** "__imag expr" operator. */ + CXUnaryOperator_Imag, + /** __extension__ marker operator. */ + CXUnaryOperator_Extension, + /** C++ co_await operator. */ + CXUnaryOperator_Coawait +}; + +/** + * Retrieve the spelling of a given CXUnaryOperatorKind. + */ +CINDEX_LINKAGE CXString +clang_getUnaryOperatorKindSpelling(enum CXUnaryOperatorKind kind); + +/** + * Retrieve the unary operator kind of this cursor. + * + * If this cursor is not a unary operator then returns Invalid. + */ +CINDEX_LINKAGE enum CXUnaryOperatorKind +clang_getCursorUnaryOperatorKind(CXCursor cursor); + /** * @} */ diff --git a/cpp/llvm/include/clang-c/module.modulemap b/cpp/llvm/include/clang-c/module.modulemap deleted file mode 100644 index 95a59d6234..0000000000 --- a/cpp/llvm/include/clang-c/module.modulemap +++ /dev/null @@ -1,4 +0,0 @@ -module Clang_C { - umbrella "." 
- module * { export * } -} diff --git a/cpp/ycm/CMakeLists.txt b/cpp/ycm/CMakeLists.txt index 0b6a3cd7b6..1bad2a6bef 100644 --- a/cpp/ycm/CMakeLists.txt +++ b/cpp/ycm/CMakeLists.txt @@ -30,41 +30,41 @@ if ( USE_CLANG_COMPLETER AND NOT PATH_TO_LLVM_ROOT AND NOT EXTERNAL_LIBCLANG_PATH ) - set( CLANG_VERSION 16.0.1 ) + set( CLANG_VERSION 17.0.1 ) if ( APPLE ) if ( "${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "arm64" ) set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-arm64-apple-darwin" ) set( LIBCLANG_SHA256 - "3fd9230f591fc2cb081b3088d0b640b3692812adc59d03fb015441a65c68c328" ) + "e90a409dc408214fc553e3b3df2a71f6d67fdd34d9441b6c2be1a043e9542f06" ) else() set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-x86_64-apple-darwin" ) set( LIBCLANG_SHA256 - "43f7e4e72bc1d661eb01ee61666ee3a62a97d2993586c0b98efa6f46a96e768f" ) + "b70786d68e71b5988fda8c7c377e301a0817ea280f425639e976a573ef266473" ) endif() elseif ( WIN32 ) if( 64_BIT_PLATFORM ) set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-win64" ) set( LIBCLANG_SHA256 - "06280c023ff339af29d68ea66366507607b03d01061d3d7066875b2ff4f78c29" ) + "7bbb980c2bc5a69ca1b93b8a6a671abb1ad8cab5a5b9f7fff6f7fa300fc1bf07" ) else() set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-win32" ) set( LIBCLANG_SHA256 - "8f424ac12623638d8f8fa5f410499fe02a4a4ea9d1e02facdc484db9f1b0f4d8" ) + "ef50790e2b01bfb701cd14ec315431a60da7921fc78ac893c0af0b956d6e2223" ) endif() elseif ( CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)" ) set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-aarch64-linux-gnu" ) set( LIBCLANG_SHA256 - "518725f324e425cc3d0eafd1897cbf9cc35d9e442983b5efaa19112b73ae0ebf" ) + "829e4b81d9fddd70ed8bcbeffd1feea909369434b225612148e833fb9b16265b" ) elseif ( CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)" ) set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-armv7a-linux-gnueabihf" ) set( LIBCLANG_SHA256 - "5b45929923ec241bec18b714765554eb31365c4c90bf58529555665edd79a2f1" ) + "fdc3df9ef3fe15868340bc0dcd4d0c74814edd06be1d79796b8a402db8aee723" ) elseif ( 
CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64)" ) set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-x86_64-unknown-linux-gnu" ) set( LIBCLANG_SHA256 - "e0c69d229f6dd91d0530508fa28250f658cb27d7b8825394bf539f8cc1db8c9c" ) + "bd1ab9ab8e8ccdb46064178bc54a45e7e980b5451cff4fa468596a414e1f7b46" ) else() message( FATAL_ERROR "No prebuilt Clang ${CLANG_VERSION} binaries for this system. " @@ -418,6 +418,12 @@ endif() ############################################################################# +if ( CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 12 ) + target_compile_options( ${PROJECT_NAME} PRIVATE "-Wno-bidi-chars" ) +endif() + +############################################################################# + if( SYSTEM_IS_SUNOS ) # SunOS needs this setting for thread support target_compile_options( ${PROJECT_NAME} PUBLIC "-pthreads" ) diff --git a/third_party/clang/lib/clang/16.0.0/include/adxintrin.h b/third_party/clang/lib/clang/16.0.0/include/adxintrin.h deleted file mode 100644 index 72b9ed08f4..0000000000 --- a/third_party/clang/lib/clang/16.0.0/include/adxintrin.h +++ /dev/null @@ -1,72 +0,0 @@ -/*===---- adxintrin.h - ADX intrinsics -------------------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -#ifndef __IMMINTRIN_H -#error "Never use directly; include instead." -#endif - -#ifndef __ADXINTRIN_H -#define __ADXINTRIN_H - -/* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) - -/* Intrinsics that are available only if __ADX__ defined */ -static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx"))) -_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y, - unsigned int *__p) -{ - return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p); -} - -#ifdef __x86_64__ -static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx"))) -_addcarryx_u64(unsigned char __cf, unsigned long long __x, - unsigned long long __y, unsigned long long *__p) -{ - return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p); -} -#endif - -/* Intrinsics that are also available if __ADX__ undefined */ -static __inline unsigned char __DEFAULT_FN_ATTRS -_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y, - unsigned int *__p) -{ - return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p); -} - -#ifdef __x86_64__ -static __inline unsigned char __DEFAULT_FN_ATTRS -_addcarry_u64(unsigned char __cf, unsigned long long __x, - unsigned long long __y, unsigned long long *__p) -{ - return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p); -} -#endif - -static __inline unsigned char __DEFAULT_FN_ATTRS -_subborrow_u32(unsigned char __cf, unsigned int __x, unsigned int __y, - unsigned int *__p) -{ - return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p); -} - -#ifdef __x86_64__ -static __inline unsigned char __DEFAULT_FN_ATTRS -_subborrow_u64(unsigned char __cf, unsigned long long __x, - unsigned long long __y, unsigned long long *__p) -{ - return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p); -} -#endif - -#undef __DEFAULT_FN_ATTRS - -#endif /* __ADXINTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/avx2intrin.h b/third_party/clang/lib/clang/16.0.0/include/avx2intrin.h deleted file mode 100644 index f8521e7d72..0000000000 --- a/third_party/clang/lib/clang/16.0.0/include/avx2intrin.h 
+++ /dev/null @@ -1,1148 +0,0 @@ -/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -#ifndef __IMMINTRIN_H -#error "Never use directly; include instead." -#endif - -#ifndef __AVX2INTRIN_H -#define __AVX2INTRIN_H - -/* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(256))) -#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128))) - -/* SSE4 Multiple Packed Sums of Absolute Difference. */ -#define _mm256_mpsadbw_epu8(X, Y, M) \ - ((__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \ - (__v32qi)(__m256i)(Y), (int)(M))) - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_abs_epi8(__m256i __a) -{ - return (__m256i)__builtin_elementwise_abs((__v32qs)__a); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_abs_epi16(__m256i __a) -{ - return (__m256i)__builtin_elementwise_abs((__v16hi)__a); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_abs_epi32(__m256i __a) -{ - return (__m256i)__builtin_elementwise_abs((__v8si)__a); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_packs_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_packs_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_packus_epi16(__m256i __a, __m256i __b) -{ - return 
(__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_packus_epi32(__m256i __V1, __m256i __V2) -{ - return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_add_epi8(__m256i __a, __m256i __b) -{ - return (__m256i)((__v32qu)__a + (__v32qu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_add_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)((__v16hu)__a + (__v16hu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_add_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)((__v8su)__a + (__v8su)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_add_epi64(__m256i __a, __m256i __b) -{ - return (__m256i)((__v4du)__a + (__v4du)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_adds_epi8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_adds_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_adds_epu8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_adds_epu16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b); -} - -#define _mm256_alignr_epi8(a, b, n) \ - ((__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \ - (__v32qi)(__m256i)(b), (n))) - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_and_si256(__m256i __a, __m256i __b) -{ - return (__m256i)((__v4du)__a & (__v4du)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_andnot_si256(__m256i __a, __m256i __b) -{ - return (__m256i)(~(__v4du)__a & (__v4du)__b); -} - 
-static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_avg_epu8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_avg_epu16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M) -{ - return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2, - (__v32qi)__M); -} - -#define _mm256_blend_epi16(V1, V2, M) \ - ((__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \ - (__v16hi)(__m256i)(V2), (int)(M))) - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cmpeq_epi8(__m256i __a, __m256i __b) -{ - return (__m256i)((__v32qi)__a == (__v32qi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cmpeq_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)((__v16hi)__a == (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cmpeq_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)((__v8si)__a == (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cmpeq_epi64(__m256i __a, __m256i __b) -{ - return (__m256i)((__v4di)__a == (__v4di)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cmpgt_epi8(__m256i __a, __m256i __b) -{ - /* This function always performs a signed comparison, but __v32qi is a char - which may be signed or unsigned, so use __v32qs. 
*/ - return (__m256i)((__v32qs)__a > (__v32qs)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cmpgt_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)((__v16hi)__a > (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cmpgt_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)((__v8si)__a > (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cmpgt_epi64(__m256i __a, __m256i __b) -{ - return (__m256i)((__v4di)__a > (__v4di)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_hadd_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_hadd_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_hadds_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_hsub_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_hsub_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_hsubs_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_maddubs_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_madd_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_max_epi8(__m256i __a, __m256i __b) 
-{ - return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_max_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_max_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_max_epu8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_max_epu16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_max_epu32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_min_epi8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_min_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_min_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_min_epu8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_min_epu16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_min_epu32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b); -} - -static 
__inline__ int __DEFAULT_FN_ATTRS256 -_mm256_movemask_epi8(__m256i __a) -{ - return __builtin_ia32_pmovmskb256((__v32qi)__a); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi8_epi16(__m128i __V) -{ - /* This function always performs a signed extension, but __v16qi is a char - which may be signed or unsigned, so use __v16qs. */ - return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi8_epi32(__m128i __V) -{ - /* This function always performs a signed extension, but __v16qi is a char - which may be signed or unsigned, so use __v16qs. */ - return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi8_epi64(__m128i __V) -{ - /* This function always performs a signed extension, but __v16qi is a char - which may be signed or unsigned, so use __v16qs. */ - return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi16_epi32(__m128i __V) -{ - return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi16_epi64(__m128i __V) -{ - return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepi32_epi64(__m128i __V) -{ - return (__m256i)__builtin_convertvector((__v4si)__V, __v4di); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepu8_epi16(__m128i __V) -{ - return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepu8_epi32(__m128i __V) -{ - return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), 
__v8si); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepu8_epi64(__m128i __V) -{ - return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepu16_epi32(__m128i __V) -{ - return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepu16_epi64(__m128i __V) -{ - return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_cvtepu32_epi64(__m128i __V) -{ - return (__m256i)__builtin_convertvector((__v4su)__V, __v4di); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mul_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mulhrs_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mulhi_epu16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mulhi_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mullo_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)((__v16hu)__a * (__v16hu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mullo_epi32 (__m256i __a, __m256i __b) -{ - return (__m256i)((__v8su)__a * (__v8su)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_mul_epu32(__m256i __a, __m256i __b) -{ - return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_or_si256(__m256i __a, __m256i 
__b) -{ - return (__m256i)((__v4du)__a | (__v4du)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sad_epu8(__m256i __a, __m256i __b) -{ - return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_shuffle_epi8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b); -} - -#define _mm256_shuffle_epi32(a, imm) \ - ((__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm))) - -#define _mm256_shufflehi_epi16(a, imm) \ - ((__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm))) - -#define _mm256_shufflelo_epi16(a, imm) \ - ((__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm))) - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sign_epi8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sign_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sign_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b); -} - -#define _mm256_slli_si256(a, imm) \ - ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))) - -#define _mm256_bslli_epi128(a, imm) \ - ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))) - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_slli_epi16(__m256i __a, int __count) -{ - return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sll_epi16(__m256i __a, __m128i __count) -{ - return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_slli_epi32(__m256i __a, int __count) -{ - return 
(__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sll_epi32(__m256i __a, __m128i __count) -{ - return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_slli_epi64(__m256i __a, int __count) -{ - return __builtin_ia32_psllqi256((__v4di)__a, __count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sll_epi64(__m256i __a, __m128i __count) -{ - return __builtin_ia32_psllq256((__v4di)__a, __count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srai_epi16(__m256i __a, int __count) -{ - return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sra_epi16(__m256i __a, __m128i __count) -{ - return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srai_epi32(__m256i __a, int __count) -{ - return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sra_epi32(__m256i __a, __m128i __count) -{ - return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count); -} - -#define _mm256_srli_si256(a, imm) \ - ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))) - -#define _mm256_bsrli_epi128(a, imm) \ - ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))) - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srli_epi16(__m256i __a, int __count) -{ - return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srl_epi16(__m256i __a, __m128i __count) -{ - return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srli_epi32(__m256i __a, int __count) -{ - return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count); -} - 
-static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srl_epi32(__m256i __a, __m128i __count) -{ - return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srli_epi64(__m256i __a, int __count) -{ - return __builtin_ia32_psrlqi256((__v4di)__a, __count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srl_epi64(__m256i __a, __m128i __count) -{ - return __builtin_ia32_psrlq256((__v4di)__a, __count); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sub_epi8(__m256i __a, __m256i __b) -{ - return (__m256i)((__v32qu)__a - (__v32qu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sub_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)((__v16hu)__a - (__v16hu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sub_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)((__v8su)__a - (__v8su)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sub_epi64(__m256i __a, __m256i __b) -{ - return (__m256i)((__v4du)__a - (__v4du)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_subs_epi8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_subs_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_subs_epu8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_subs_epu16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_unpackhi_epi8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 
32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_unpackhi_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_unpackhi_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_unpackhi_epi64(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_unpacklo_epi8(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_unpacklo_epi16(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_unpacklo_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_unpacklo_epi64(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_xor_si256(__m256i __a, __m256i __b) -{ - return (__m256i)((__v4du)__a ^ (__v4du)__b); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 
-_mm256_stream_load_si256(__m256i const *__V) -{ - typedef __v4di __v4di_aligned __attribute__((aligned(32))); - return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V); -} - -static __inline__ __m128 __DEFAULT_FN_ATTRS128 -_mm_broadcastss_ps(__m128 __X) -{ - return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0); -} - -static __inline__ __m128d __DEFAULT_FN_ATTRS128 -_mm_broadcastsd_pd(__m128d __a) -{ - return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0); -} - -static __inline__ __m256 __DEFAULT_FN_ATTRS256 -_mm256_broadcastss_ps(__m128 __X) -{ - return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0); -} - -static __inline__ __m256d __DEFAULT_FN_ATTRS256 -_mm256_broadcastsd_pd(__m128d __X) -{ - return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_broadcastsi128_si256(__m128i __X) -{ - return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1); -} - -#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X) - -#define _mm_blend_epi32(V1, V2, M) \ - ((__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \ - (__v4si)(__m128i)(V2), (int)(M))) - -#define _mm256_blend_epi32(V1, V2, M) \ - ((__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \ - (__v8si)(__m256i)(V2), (int)(M))) - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_broadcastb_epi8(__m128i __X) -{ - return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_broadcastw_epi16(__m128i __X) -{ - return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_broadcastd_epi32(__m128i __X) -{ - return 
(__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_broadcastq_epi64(__m128i __X) -{ - return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_broadcastb_epi8(__m128i __X) -{ - return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_broadcastw_epi16(__m128i __X) -{ - return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0); -} - - -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_broadcastd_epi32(__m128i __X) -{ - return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_broadcastq_epi64(__m128i __X) -{ - return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b) -{ - return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b); -} - -#define _mm256_permute4x64_pd(V, M) \ - ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M))) - -static __inline__ __m256 __DEFAULT_FN_ATTRS256 -_mm256_permutevar8x32_ps(__m256 __a, __m256i __b) -{ - return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b); -} - -#define _mm256_permute4x64_epi64(V, M) \ - ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M))) - -#define _mm256_permute2x128_si256(V1, V2, M) \ - ((__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M))) - -#define _mm256_extracti128_si256(V, M) \ - ((__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M))) - -#define _mm256_inserti128_si256(V1, V2, M) \ - ((__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \ - (__v2di)(__m128i)(V2), (int)(M))) - -static 
__inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_maskload_epi32(int const *__X, __m256i __M) -{ - return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_maskload_epi64(long long const *__X, __m256i __M) -{ - return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_maskload_epi32(int const *__X, __m128i __M) -{ - return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_maskload_epi64(long long const *__X, __m128i __M) -{ - return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M); -} - -static __inline__ void __DEFAULT_FN_ATTRS256 -_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y) -{ - __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y); -} - -static __inline__ void __DEFAULT_FN_ATTRS256 -_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y) -{ - __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y); -} - -static __inline__ void __DEFAULT_FN_ATTRS128 -_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y) -{ - __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y); -} - -static __inline__ void __DEFAULT_FN_ATTRS128 -_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y) -{ - __builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sllv_epi32(__m256i __X, __m256i __Y) -{ - return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_sllv_epi32(__m128i __X, __m128i __Y) -{ - return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_sllv_epi64(__m256i __X, __m256i __Y) -{ - return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, 
(__v4di)__Y); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_sllv_epi64(__m128i __X, __m128i __Y) -{ - return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srav_epi32(__m256i __X, __m256i __Y) -{ - return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_srav_epi32(__m128i __X, __m128i __Y) -{ - return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srlv_epi32(__m256i __X, __m256i __Y) -{ - return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_srlv_epi32(__m128i __X, __m128i __Y) -{ - return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y); -} - -static __inline__ __m256i __DEFAULT_FN_ATTRS256 -_mm256_srlv_epi64(__m256i __X, __m256i __Y) -{ - return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS128 -_mm_srlv_epi64(__m128i __X, __m128i __Y) -{ - return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y); -} - -#define _mm_mask_i32gather_pd(a, m, i, mask, s) \ - ((__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \ - (double const *)(m), \ - (__v4si)(__m128i)(i), \ - (__v2df)(__m128d)(mask), (s))) - -#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \ - ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \ - (double const *)(m), \ - (__v4si)(__m128i)(i), \ - (__v4df)(__m256d)(mask), (s))) - -#define _mm_mask_i64gather_pd(a, m, i, mask, s) \ - ((__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \ - (double const *)(m), \ - (__v2di)(__m128i)(i), \ - (__v2df)(__m128d)(mask), (s))) - -#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \ - ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \ - (double const *)(m), \ - (__v4di)(__m256i)(i), \ - 
(__v4df)(__m256d)(mask), (s))) - -#define _mm_mask_i32gather_ps(a, m, i, mask, s) \ - ((__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \ - (float const *)(m), \ - (__v4si)(__m128i)(i), \ - (__v4sf)(__m128)(mask), (s))) - -#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \ - ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \ - (float const *)(m), \ - (__v8si)(__m256i)(i), \ - (__v8sf)(__m256)(mask), (s))) - -#define _mm_mask_i64gather_ps(a, m, i, mask, s) \ - ((__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \ - (float const *)(m), \ - (__v2di)(__m128i)(i), \ - (__v4sf)(__m128)(mask), (s))) - -#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \ - ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \ - (float const *)(m), \ - (__v4di)(__m256i)(i), \ - (__v4sf)(__m128)(mask), (s))) - -#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \ - ((__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \ - (int const *)(m), \ - (__v4si)(__m128i)(i), \ - (__v4si)(__m128i)(mask), (s))) - -#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \ - ((__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \ - (int const *)(m), \ - (__v8si)(__m256i)(i), \ - (__v8si)(__m256i)(mask), (s))) - -#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \ - ((__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \ - (int const *)(m), \ - (__v2di)(__m128i)(i), \ - (__v4si)(__m128i)(mask), (s))) - -#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \ - ((__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \ - (int const *)(m), \ - (__v4di)(__m256i)(i), \ - (__v4si)(__m128i)(mask), (s))) - -#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \ - ((__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \ - (long long const *)(m), \ - (__v4si)(__m128i)(i), \ - (__v2di)(__m128i)(mask), (s))) - -#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \ - ((__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \ - (long long const *)(m), \ - 
(__v4si)(__m128i)(i), \ - (__v4di)(__m256i)(mask), (s))) - -#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \ - ((__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \ - (long long const *)(m), \ - (__v2di)(__m128i)(i), \ - (__v2di)(__m128i)(mask), (s))) - -#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \ - ((__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \ - (long long const *)(m), \ - (__v4di)(__m256i)(i), \ - (__v4di)(__m256i)(mask), (s))) - -#define _mm_i32gather_pd(m, i, s) \ - ((__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \ - (double const *)(m), \ - (__v4si)(__m128i)(i), \ - (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \ - _mm_setzero_pd()), \ - (s))) - -#define _mm256_i32gather_pd(m, i, s) \ - ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \ - (double const *)(m), \ - (__v4si)(__m128i)(i), \ - (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \ - _mm256_setzero_pd(), \ - _CMP_EQ_OQ), \ - (s))) - -#define _mm_i64gather_pd(m, i, s) \ - ((__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \ - (double const *)(m), \ - (__v2di)(__m128i)(i), \ - (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \ - _mm_setzero_pd()), \ - (s))) - -#define _mm256_i64gather_pd(m, i, s) \ - ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \ - (double const *)(m), \ - (__v4di)(__m256i)(i), \ - (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \ - _mm256_setzero_pd(), \ - _CMP_EQ_OQ), \ - (s))) - -#define _mm_i32gather_ps(m, i, s) \ - ((__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \ - (float const *)(m), \ - (__v4si)(__m128i)(i), \ - (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \ - _mm_setzero_ps()), \ - (s))) - -#define _mm256_i32gather_ps(m, i, s) \ - ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \ - (float const *)(m), \ - (__v8si)(__m256i)(i), \ - (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \ - _mm256_setzero_ps(), \ - _CMP_EQ_OQ), \ - (s))) - -#define _mm_i64gather_ps(m, i, s) \ - 
((__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \ - (float const *)(m), \ - (__v2di)(__m128i)(i), \ - (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \ - _mm_setzero_ps()), \ - (s))) - -#define _mm256_i64gather_ps(m, i, s) \ - ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \ - (float const *)(m), \ - (__v4di)(__m256i)(i), \ - (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \ - _mm_setzero_ps()), \ - (s))) - -#define _mm_i32gather_epi32(m, i, s) \ - ((__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \ - (int const *)(m), (__v4si)(__m128i)(i), \ - (__v4si)_mm_set1_epi32(-1), (s))) - -#define _mm256_i32gather_epi32(m, i, s) \ - ((__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \ - (int const *)(m), (__v8si)(__m256i)(i), \ - (__v8si)_mm256_set1_epi32(-1), (s))) - -#define _mm_i64gather_epi32(m, i, s) \ - ((__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \ - (int const *)(m), (__v2di)(__m128i)(i), \ - (__v4si)_mm_set1_epi32(-1), (s))) - -#define _mm256_i64gather_epi32(m, i, s) \ - ((__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \ - (int const *)(m), (__v4di)(__m256i)(i), \ - (__v4si)_mm_set1_epi32(-1), (s))) - -#define _mm_i32gather_epi64(m, i, s) \ - ((__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \ - (long long const *)(m), \ - (__v4si)(__m128i)(i), \ - (__v2di)_mm_set1_epi64x(-1), (s))) - -#define _mm256_i32gather_epi64(m, i, s) \ - ((__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \ - (long long const *)(m), \ - (__v4si)(__m128i)(i), \ - (__v4di)_mm256_set1_epi64x(-1), (s))) - -#define _mm_i64gather_epi64(m, i, s) \ - ((__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \ - (long long const *)(m), \ - (__v2di)(__m128i)(i), \ - (__v2di)_mm_set1_epi64x(-1), (s))) - -#define _mm256_i64gather_epi64(m, i, s) \ - ((__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \ - (long long const *)(m), \ - 
(__v4di)(__m256i)(i), \ - (__v4di)_mm256_set1_epi64x(-1), (s))) - -#undef __DEFAULT_FN_ATTRS256 -#undef __DEFAULT_FN_ATTRS128 - -#endif /* __AVX2INTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/bmi2intrin.h b/third_party/clang/lib/clang/16.0.0/include/bmi2intrin.h deleted file mode 100644 index 0b56aed5f4..0000000000 --- a/third_party/clang/lib/clang/16.0.0/include/bmi2intrin.h +++ /dev/null @@ -1,81 +0,0 @@ -/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H -#error "Never use directly; include instead." -#endif - -#ifndef __BMI2INTRIN_H -#define __BMI2INTRIN_H - -/* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2"))) - -static __inline__ unsigned int __DEFAULT_FN_ATTRS -_bzhi_u32(unsigned int __X, unsigned int __Y) -{ - return __builtin_ia32_bzhi_si(__X, __Y); -} - -static __inline__ unsigned int __DEFAULT_FN_ATTRS -_pdep_u32(unsigned int __X, unsigned int __Y) -{ - return __builtin_ia32_pdep_si(__X, __Y); -} - -static __inline__ unsigned int __DEFAULT_FN_ATTRS -_pext_u32(unsigned int __X, unsigned int __Y) -{ - return __builtin_ia32_pext_si(__X, __Y); -} - -#ifdef __x86_64__ - -static __inline__ unsigned long long __DEFAULT_FN_ATTRS -_bzhi_u64(unsigned long long __X, unsigned long long __Y) -{ - return __builtin_ia32_bzhi_di(__X, __Y); -} - -static __inline__ unsigned long long __DEFAULT_FN_ATTRS -_pdep_u64(unsigned long long __X, unsigned long long __Y) -{ - return __builtin_ia32_pdep_di(__X, __Y); -} - -static __inline__ unsigned long long __DEFAULT_FN_ATTRS -_pext_u64(unsigned long long __X, unsigned long long __Y) -{ - return __builtin_ia32_pext_di(__X, __Y); -} - -static __inline__ unsigned long long __DEFAULT_FN_ATTRS -_mulx_u64 (unsigned long long __X, unsigned long long __Y, - unsigned long long *__P) -{ - unsigned __int128 __res = (unsigned __int128) __X * __Y; - *__P = (unsigned long long) (__res >> 64); - return (unsigned long long) __res; -} - -#else /* !__x86_64__ */ - -static __inline__ unsigned int __DEFAULT_FN_ATTRS -_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P) -{ - unsigned long long __res = (unsigned long long) __X * __Y; - *__P = (unsigned int) (__res >> 32); - return (unsigned int) __res; -} - -#endif /* !__x86_64__ */ - -#undef __DEFAULT_FN_ATTRS - -#endif /* __BMI2INTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/fmaintrin.h b/third_party/clang/lib/clang/16.0.0/include/fmaintrin.h deleted file mode 100644 index d889b7c5e2..0000000000 --- a/third_party/clang/lib/clang/16.0.0/include/fmaintrin.h +++ /dev/null @@ -1,216 
+0,0 @@ -/*===---- fmaintrin.h - FMA intrinsics -------------------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -#ifndef __IMMINTRIN_H -#error "Never use directly; include instead." -#endif - -#ifndef __FMAINTRIN_H -#define __FMAINTRIN_H - -/* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(128))) -#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(256))) - -static __inline__ __m128 __DEFAULT_FN_ATTRS128 -_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C) -{ - return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); -} - -static __inline__ __m128d __DEFAULT_FN_ATTRS128 -_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C) -{ - return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); -} - -static __inline__ __m128 __DEFAULT_FN_ATTRS128 -_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C) -{ - return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); -} - -static __inline__ __m128d __DEFAULT_FN_ATTRS128 -_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C) -{ - return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C); -} - -static __inline__ __m128 __DEFAULT_FN_ATTRS128 -_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C) -{ - return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); -} - -static __inline__ __m128d __DEFAULT_FN_ATTRS128 -_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C) -{ - return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); -} - -static __inline__ 
__m128 __DEFAULT_FN_ATTRS128 -_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C) -{ - return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); -} - -static __inline__ __m128d __DEFAULT_FN_ATTRS128 -_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C) -{ - return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C); -} - -static __inline__ __m128 __DEFAULT_FN_ATTRS128 -_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C) -{ - return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); -} - -static __inline__ __m128d __DEFAULT_FN_ATTRS128 -_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C) -{ - return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); -} - -static __inline__ __m128 __DEFAULT_FN_ATTRS128 -_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C) -{ - return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C); -} - -static __inline__ __m128d __DEFAULT_FN_ATTRS128 -_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C) -{ - return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C); -} - -static __inline__ __m128 __DEFAULT_FN_ATTRS128 -_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C) -{ - return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); -} - -static __inline__ __m128d __DEFAULT_FN_ATTRS128 -_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C) -{ - return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); -} - -static __inline__ __m128 __DEFAULT_FN_ATTRS128 -_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C) -{ - return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C); -} - -static __inline__ __m128d __DEFAULT_FN_ATTRS128 -_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C) -{ - return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C); -} - -static __inline__ __m128 __DEFAULT_FN_ATTRS128 -_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 
__C) -{ - return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); -} - -static __inline__ __m128d __DEFAULT_FN_ATTRS128 -_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C) -{ - return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); -} - -static __inline__ __m128 __DEFAULT_FN_ATTRS128 -_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C) -{ - return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); -} - -static __inline__ __m128d __DEFAULT_FN_ATTRS128 -_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C) -{ - return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); -} - -static __inline__ __m256 __DEFAULT_FN_ATTRS256 -_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C) -{ - return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); -} - -static __inline__ __m256d __DEFAULT_FN_ATTRS256 -_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C) -{ - return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); -} - -static __inline__ __m256 __DEFAULT_FN_ATTRS256 -_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C) -{ - return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); -} - -static __inline__ __m256d __DEFAULT_FN_ATTRS256 -_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C) -{ - return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); -} - -static __inline__ __m256 __DEFAULT_FN_ATTRS256 -_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C) -{ - return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C); -} - -static __inline__ __m256d __DEFAULT_FN_ATTRS256 -_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C) -{ - return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C); -} - -static __inline__ __m256 __DEFAULT_FN_ATTRS256 -_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C) -{ - return 
(__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); -} - -static __inline__ __m256d __DEFAULT_FN_ATTRS256 -_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C) -{ - return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C); -} - -static __inline__ __m256 __DEFAULT_FN_ATTRS256 -_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C) -{ - return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); -} - -static __inline__ __m256d __DEFAULT_FN_ATTRS256 -_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C) -{ - return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); -} - -static __inline__ __m256 __DEFAULT_FN_ATTRS256 -_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C) -{ - return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); -} - -static __inline__ __m256d __DEFAULT_FN_ATTRS256 -_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C) -{ - return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); -} - -#undef __DEFAULT_FN_ATTRS128 -#undef __DEFAULT_FN_ATTRS256 - -#endif /* __FMAINTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/hlsl/hlsl_intrinsics.h b/third_party/clang/lib/clang/16.0.0/include/hlsl/hlsl_intrinsics.h deleted file mode 100644 index d811a28a43..0000000000 --- a/third_party/clang/lib/clang/16.0.0/include/hlsl/hlsl_intrinsics.h +++ /dev/null @@ -1,223 +0,0 @@ -//===----- hlsl_intrinsics.h - HLSL definitions for intrinsics ----------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _HLSL_HLSL_INTRINSICS_H_ -#define _HLSL_HLSL_INTRINSICS_H_ - -namespace hlsl { - -__attribute__((availability(shadermodel, introduced = 6.0))) -__attribute__((clang_builtin_alias(__builtin_hlsl_wave_active_count_bits))) uint -WaveActiveCountBits(bool bBit); - -// abs builtins -#ifdef __HLSL_ENABLE_16_BIT -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -int16_t abs(int16_t); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -int16_t2 abs(int16_t2); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -int16_t3 abs(int16_t3); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -int16_t4 abs(int16_t4); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) half abs(half); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -half2 abs(half2); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -half3 abs(half3); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -half4 abs(half4); -#endif - -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int abs(int); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int2 abs(int2); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int3 abs(int3); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int4 abs(int4); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) float -abs(float); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -float2 abs(float2); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -float3 abs(float3); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -float4 abs(float4); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -int64_t abs(int64_t); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -int64_t2 
abs(int64_t2); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -int64_t3 abs(int64_t3); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -int64_t4 abs(int64_t4); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) double -abs(double); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -double2 abs(double2); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -double3 abs(double3); -__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) -double4 abs(double4); - -// sqrt builtins -__attribute__((clang_builtin_alias(__builtin_sqrt))) double sqrt(double In); -__attribute__((clang_builtin_alias(__builtin_sqrtf))) float sqrt(float In); - -#ifdef __HLSL_ENABLE_16_BIT -__attribute__((clang_builtin_alias(__builtin_sqrtf16))) half sqrt(half In); -#endif - -// ceil builtins -#ifdef __HLSL_ENABLE_16_BIT -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) -half ceil(half); -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) -half2 ceil(half2); -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) -half3 ceil(half3); -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) -half4 ceil(half4); -#endif - -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) float -ceil(float); -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) -float2 ceil(float2); -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) -float3 ceil(float3); -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) -float4 ceil(float4); - -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) double -ceil(double); -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) -double2 ceil(double2); -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) -double3 ceil(double3); -__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) -double4 ceil(double4); - -// floor builtins -#ifdef __HLSL_ENABLE_16_BIT 
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) -half floor(half); -__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) -half2 floor(half2); -__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) -half3 floor(half3); -__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) -half4 floor(half4); -#endif - -__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) float -floor(float); -__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) -float2 floor(float2); -__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) -float3 floor(float3); -__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) -float4 floor(float4); - -__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) double -floor(double); -__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) -double2 floor(double2); -__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) -double3 floor(double3); -__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) -double4 floor(double4); - -// cos builtins -#ifdef __HLSL_ENABLE_16_BIT -__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) half cos(half); -__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) -half2 cos(half2); -__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) -half3 cos(half3); -__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) -half4 cos(half4); -#endif - -__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) float -cos(float); -__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) -float2 cos(float2); -__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) -float3 cos(float3); -__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) -float4 cos(float4); - -__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) double -cos(double); -__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) -double2 cos(double2); 
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) -double3 cos(double3); -__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) -double4 cos(double4); - -// sin builtins -#ifdef __HLSL_ENABLE_16_BIT -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) half sin(half); -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) -half2 sin(half2); -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) -half3 sin(half3); -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) -half4 sin(half4); -#endif - -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) float -sin(float); -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) -float2 sin(float2); -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) -float3 sin(float3); -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) -float4 sin(float4); - -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) double -sin(double); -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) -double2 sin(double2); -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) -double3 sin(double3); -__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) -double4 sin(double4); - -// trunc builtins -#ifdef __HLSL_ENABLE_16_BIT -__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) -half trunc(half); -__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) -half2 trunc(half2); -__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) -half3 trunc(half3); -__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) -half4 trunc(half4); -#endif - -__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) float -trunc(float); -__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) -float2 trunc(float2); -__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) -float3 trunc(float3); 
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) -float4 trunc(float4); - -__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) double -trunc(double); -__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) -double2 trunc(double2); -__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) -double3 trunc(double3); -__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) -double4 trunc(double4); - -} // namespace hlsl -#endif //_HLSL_HLSL_INTRINSICS_H_ diff --git a/third_party/clang/lib/clang/16.0.0/include/mwaitxintrin.h b/third_party/clang/lib/clang/16.0.0/include/mwaitxintrin.h deleted file mode 100644 index ed485380af..0000000000 --- a/third_party/clang/lib/clang/16.0.0/include/mwaitxintrin.h +++ /dev/null @@ -1,33 +0,0 @@ -/*===---- mwaitxintrin.h - MONITORX/MWAITX intrinsics ----------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -#ifndef __X86INTRIN_H -#error "Never use directly; include instead." -#endif - -#ifndef __MWAITXINTRIN_H -#define __MWAITXINTRIN_H - -/* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mwaitx"))) -static __inline__ void __DEFAULT_FN_ATTRS -_mm_monitorx(void * __p, unsigned __extensions, unsigned __hints) -{ - __builtin_ia32_monitorx(__p, __extensions, __hints); -} - -static __inline__ void __DEFAULT_FN_ATTRS -_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock) -{ - __builtin_ia32_mwaitx(__extensions, __hints, __clock); -} - -#undef __DEFAULT_FN_ATTRS - -#endif /* __MWAITXINTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/rdseedintrin.h b/third_party/clang/lib/clang/16.0.0/include/rdseedintrin.h deleted file mode 100644 index 405bc2451e..0000000000 --- a/third_party/clang/lib/clang/16.0.0/include/rdseedintrin.h +++ /dev/null @@ -1,42 +0,0 @@ -/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H -#error "Never use directly; include instead." -#endif - -#ifndef __RDSEEDINTRIN_H -#define __RDSEEDINTRIN_H - -/* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed"))) - -static __inline__ int __DEFAULT_FN_ATTRS -_rdseed16_step(unsigned short *__p) -{ - return (int) __builtin_ia32_rdseed16_step(__p); -} - -static __inline__ int __DEFAULT_FN_ATTRS -_rdseed32_step(unsigned int *__p) -{ - return (int) __builtin_ia32_rdseed32_step(__p); -} - -#ifdef __x86_64__ -static __inline__ int __DEFAULT_FN_ATTRS -_rdseed64_step(unsigned long long *__p) -{ - return (int) __builtin_ia32_rdseed64_step(__p); -} -#endif - -#undef __DEFAULT_FN_ATTRS - -#endif /* __RDSEEDINTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/shaintrin.h b/third_party/clang/lib/clang/16.0.0/include/shaintrin.h deleted file mode 100644 index 08b1fb1dc1..0000000000 --- a/third_party/clang/lib/clang/16.0.0/include/shaintrin.h +++ /dev/null @@ -1,61 +0,0 @@ -/*===---- shaintrin.h - SHA intrinsics -------------------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -#ifndef __IMMINTRIN_H -#error "Never use directly; include instead." -#endif - -#ifndef __SHAINTRIN_H -#define __SHAINTRIN_H - -/* Define the default attributes for the functions in this file. 
*/ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha"), __min_vector_width__(128))) - -#define _mm_sha1rnds4_epu32(V1, V2, M) \ - __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M)) - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_sha1nexte_epu32(__m128i __X, __m128i __Y) -{ - return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_sha1msg1_epu32(__m128i __X, __m128i __Y) -{ - return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_sha1msg2_epu32(__m128i __X, __m128i __Y) -{ - return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z) -{ - return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_sha256msg1_epu32(__m128i __X, __m128i __Y) -{ - return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y); -} - -static __inline__ __m128i __DEFAULT_FN_ATTRS -_mm_sha256msg2_epu32(__m128i __X, __m128i __Y) -{ - return (__m128i)__builtin_ia32_sha256msg2((__v4si)__X, (__v4si)__Y); -} - -#undef __DEFAULT_FN_ATTRS - -#endif /* __SHAINTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/xsavecintrin.h b/third_party/clang/lib/clang/16.0.0/include/xsavecintrin.h deleted file mode 100644 index 5524947fa9..0000000000 --- a/third_party/clang/lib/clang/16.0.0/include/xsavecintrin.h +++ /dev/null @@ -1,34 +0,0 @@ -/*===---- xsavecintrin.h - XSAVEC intrinsic --------------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. 
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ - -#ifndef __IMMINTRIN_H -#error "Never use directly; include instead." -#endif - -#ifndef __XSAVECINTRIN_H -#define __XSAVECINTRIN_H - -/* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec"))) - -static __inline__ void __DEFAULT_FN_ATTRS -_xsavec(void *__p, unsigned long long __m) { - __builtin_ia32_xsavec(__p, __m); -} - -#ifdef __x86_64__ -static __inline__ void __DEFAULT_FN_ATTRS -_xsavec64(void *__p, unsigned long long __m) { - __builtin_ia32_xsavec64(__p, __m); -} -#endif - -#undef __DEFAULT_FN_ATTRS - -#endif diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_builtin_vars.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_builtin_vars.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_builtin_vars.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_builtin_vars.h diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_cmath.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_cmath.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_cmath.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_cmath.h diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_complex_builtins.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_complex_builtins.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_complex_builtins.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_complex_builtins.h diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_device_functions.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_device_functions.h similarity index 100% rename from 
third_party/clang/lib/clang/16.0.0/include/__clang_cuda_device_functions.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_device_functions.h diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_intrinsics.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_intrinsics.h similarity index 76% rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_intrinsics.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_intrinsics.h index b87413e12a..3c3948863c 100644 --- a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_intrinsics.h +++ b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_intrinsics.h @@ -513,6 +513,197 @@ __device__ inline cuuint32_t __nvvm_get_smem_pointer(void *__ptr) { return __nv_cvta_generic_to_shared_impl(__ptr); } } // extern "C" + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 +__device__ inline unsigned __reduce_add_sync(unsigned __mask, + unsigned __value) { + return __nvvm_redux_sync_add(__mask, __value); +} +__device__ inline unsigned __reduce_min_sync(unsigned __mask, + unsigned __value) { + return __nvvm_redux_sync_umin(__mask, __value); +} +__device__ inline unsigned __reduce_max_sync(unsigned __mask, + unsigned __value) { + return __nvvm_redux_sync_umax(__mask, __value); +} +__device__ inline int __reduce_min_sync(unsigned __mask, int __value) { + return __nvvm_redux_sync_min(__mask, __value); +} +__device__ inline int __reduce_max_sync(unsigned __mask, int __value) { + return __nvvm_redux_sync_max(__mask, __value); +} +__device__ inline unsigned __reduce_or_sync(unsigned __mask, unsigned __value) { + return __nvvm_redux_sync_or(__mask, __value); +} +__device__ inline unsigned __reduce_and_sync(unsigned __mask, + unsigned __value) { + return __nvvm_redux_sync_and(__mask, __value); +} +__device__ inline unsigned __reduce_xor_sync(unsigned __mask, + unsigned __value) { + return __nvvm_redux_sync_xor(__mask, __value); +} + +__device__ inline void 
__nv_memcpy_async_shared_global_4(void *__dst, + const void *__src, + unsigned __src_size) { + __nvvm_cp_async_ca_shared_global_4( + (void __attribute__((address_space(3))) *)__dst, + (const void __attribute__((address_space(1))) *)__src, __src_size); +} +__device__ inline void __nv_memcpy_async_shared_global_8(void *__dst, + const void *__src, + unsigned __src_size) { + __nvvm_cp_async_ca_shared_global_8( + (void __attribute__((address_space(3))) *)__dst, + (const void __attribute__((address_space(1))) *)__src, __src_size); +} +__device__ inline void __nv_memcpy_async_shared_global_16(void *__dst, + const void *__src, + unsigned __src_size) { + __nvvm_cp_async_ca_shared_global_16( + (void __attribute__((address_space(3))) *)__dst, + (const void __attribute__((address_space(1))) *)__src, __src_size); +} + +__device__ inline void * +__nv_associate_access_property(const void *__ptr, unsigned long long __prop) { + // TODO: it appears to provide compiler with some sort of a hint. We do not + // know what exactly it is supposed to do. However, CUDA headers suggest that + // just passing through __ptr should not affect correctness. They do so on + // pre-sm80 GPUs where this builtin is not available. 
+ return (void*)__ptr; +} +#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900 +__device__ inline unsigned __isCtaShared(const void *ptr) { + return __isShared(ptr); +} + +__device__ inline unsigned __isClusterShared(const void *__ptr) { + return __nvvm_isspacep_shared_cluster(__ptr); +} + +__device__ inline void *__cluster_map_shared_rank(const void *__ptr, + unsigned __rank) { + return __nvvm_mapa((void *)__ptr, __rank); +} + +__device__ inline unsigned __cluster_query_shared_rank(const void *__ptr) { + return __nvvm_getctarank((void *)__ptr); +} + +__device__ inline uint2 +__cluster_map_shared_multicast(const void *__ptr, + unsigned int __cluster_cta_mask) { + return make_uint2((unsigned)__cvta_generic_to_shared(__ptr), + __cluster_cta_mask); +} + +__device__ inline unsigned __clusterDimIsSpecified() { + return __nvvm_is_explicit_cluster(); +} + +__device__ inline dim3 __clusterDim() { + return dim3(__nvvm_read_ptx_sreg_cluster_nctaid_x(), + __nvvm_read_ptx_sreg_cluster_nctaid_y(), + __nvvm_read_ptx_sreg_cluster_nctaid_z()); +} + +__device__ inline dim3 __clusterRelativeBlockIdx() { + return dim3(__nvvm_read_ptx_sreg_cluster_ctaid_x(), + __nvvm_read_ptx_sreg_cluster_ctaid_y(), + __nvvm_read_ptx_sreg_cluster_ctaid_z()); +} + +__device__ inline dim3 __clusterGridDimInClusters() { + return dim3(__nvvm_read_ptx_sreg_nclusterid_x(), + __nvvm_read_ptx_sreg_nclusterid_y(), + __nvvm_read_ptx_sreg_nclusterid_z()); +} + +__device__ inline dim3 __clusterIdx() { + return dim3(__nvvm_read_ptx_sreg_clusterid_x(), + __nvvm_read_ptx_sreg_clusterid_y(), + __nvvm_read_ptx_sreg_clusterid_z()); +} + +__device__ inline unsigned __clusterRelativeBlockRank() { + return __nvvm_read_ptx_sreg_cluster_ctarank(); +} + +__device__ inline unsigned __clusterSizeInBlocks() { + return __nvvm_read_ptx_sreg_cluster_nctarank(); +} + +__device__ inline void __cluster_barrier_arrive() { + __nvvm_barrier_cluster_arrive(); +} + +__device__ 
inline void __cluster_barrier_arrive_relaxed() { + __nvvm_barrier_cluster_arrive_relaxed(); +} + +__device__ inline void __cluster_barrier_wait() { + __nvvm_barrier_cluster_wait(); +} + +__device__ inline void __threadfence_cluster() { __nvvm_fence_sc_cluster(); } + +__device__ inline float2 atomicAdd(float2 *__ptr, float2 __val) { + float2 __ret; + __asm__("atom.add.v2.f32 {%0, %1}, [%2], {%3, %4};" + : "=f"(__ret.x), "=f"(__ret.y) + : "l"(__ptr), "f"(__val.x), "f"(__val.y)); + return __ret; +} + +__device__ inline float2 atomicAdd_block(float2 *__ptr, float2 __val) { + float2 __ret; + __asm__("atom.cta.add.v2.f32 {%0, %1}, [%2], {%3, %4};" + : "=f"(__ret.x), "=f"(__ret.y) + : "l"(__ptr), "f"(__val.x), "f"(__val.y)); + return __ret; +} + +__device__ inline float2 atomicAdd_system(float2 *__ptr, float2 __val) { + float2 __ret; + __asm__("atom.sys.add.v2.f32 {%0, %1}, [%2], {%3, %4};" + : "=f"(__ret.x), "=f"(__ret.y) + : "l"(__ptr), "f"(__val.x), "f"(__val.y)); + return __ret; +} + +__device__ inline float4 atomicAdd(float4 *__ptr, float4 __val) { + float4 __ret; + __asm__("atom.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};" + : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w) + : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w)); + return __ret; +} + +__device__ inline float4 atomicAdd_block(float4 *__ptr, float4 __val) { + float4 __ret; + __asm__( + "atom.cta.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};" + : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w) + : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w)); + return __ret; +} + +__device__ inline float4 atomicAdd_system(float4 *__ptr, float4 __val) { + float4 __ret; + __asm__( + "atom.sys.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};" + : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w) + : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w) + :); + return __ret; +} + +#endif // !defined(__CUDA_ARCH__) || 
__CUDA_ARCH__ >= 900 #endif // CUDA_VERSION >= 11000 #endif // defined(__CLANG_CUDA_INTRINSICS_H__) diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_libdevice_declares.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_libdevice_declares.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_libdevice_declares.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_libdevice_declares.h diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_math.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_math.h similarity index 99% rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_math.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_math.h index e447590393..6166317f8f 100644 --- a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_math.h +++ b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_math.h @@ -36,7 +36,7 @@ // because the OpenMP overlay requires constexpr functions here but prior to // c++14 void return functions could not be constexpr. 
#pragma push_macro("__DEVICE_VOID__") -#ifdef __OPENMP_NVPTX__ && defined(__cplusplus) && __cplusplus < 201402L +#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L #define __DEVICE_VOID__ static __attribute__((always_inline, nothrow)) #else #define __DEVICE_VOID__ __DEVICE__ diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_math_forward_declares.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_math_forward_declares.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_math_forward_declares.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_math_forward_declares.h diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_runtime_wrapper.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_runtime_wrapper.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_runtime_wrapper.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_runtime_wrapper.h diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_texture_intrinsics.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_texture_intrinsics.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_texture_intrinsics.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_texture_intrinsics.h diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_cmath.h b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_cmath.h similarity index 99% rename from third_party/clang/lib/clang/16.0.0/include/__clang_hip_cmath.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_hip_cmath.h index d488db0a94..b52d6b7816 100644 --- a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_cmath.h +++ b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_cmath.h @@ -171,7 +171,7 @@ __DEVICE__ __CONSTEXPR__ bool signbit(double __x) { return ::__signbit(__x); } // Other 
functions. __DEVICE__ __CONSTEXPR__ _Float16 fma(_Float16 __x, _Float16 __y, _Float16 __z) { - return __ocml_fma_f16(__x, __y, __z); + return __builtin_fmaf16(__x, __y, __z); } __DEVICE__ __CONSTEXPR__ _Float16 pow(_Float16 __base, int __iexp) { return __ocml_pown_f16(__base, __iexp); diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_libdevice_declares.h b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_libdevice_declares.h similarity index 95% rename from third_party/clang/lib/clang/16.0.0/include/__clang_hip_libdevice_declares.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_hip_libdevice_declares.h index be25f4b4a0..f15198b3d9 100644 --- a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_libdevice_declares.h +++ b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_libdevice_declares.h @@ -10,6 +10,10 @@ #ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__ #define __CLANG_HIP_LIBDEVICE_DECLARES_H__ +#if !defined(__HIPCC_RTC__) && __has_include("hip/hip_version.h") +#include "hip/hip_version.h" +#endif // __has_include("hip/hip_version.h") + #ifdef __cplusplus extern "C" { #endif @@ -137,23 +141,6 @@ __device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float); __device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float); __device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float); __device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float); - -__device__ inline __attribute__((const)) float -__llvm_amdgcn_cos_f32(float __x) { - return __builtin_amdgcn_cosf(__x); -} -__device__ inline __attribute__((const)) float -__llvm_amdgcn_rcp_f32(float __x) { - return __builtin_amdgcn_rcpf(__x); -} -__device__ inline __attribute__((const)) float -__llvm_amdgcn_rsq_f32(float __x) { - return __builtin_amdgcn_rsqf(__x); -} -__device__ inline __attribute__((const)) float -__llvm_amdgcn_sin_f32(float __x) { - return __builtin_amdgcn_sinf(__x); -} // END INTRINSICS 
// END FLOAT @@ -277,15 +264,6 @@ __device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double, __device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double, double); -__device__ inline __attribute__((const)) double -__llvm_amdgcn_rcp_f64(double __x) { - return __builtin_amdgcn_rcp(__x); -} -__device__ inline __attribute__((const)) double -__llvm_amdgcn_rsq_f64(double __x) { - return __builtin_amdgcn_rsq(__x); -} - __device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16); __device__ _Float16 __ocml_cos_f16(_Float16); __device__ __attribute__((const)) _Float16 __ocml_cvtrtn_f16_f32(float); @@ -305,7 +283,6 @@ __device__ __attribute__((const)) int __ocml_isnan_f16(_Float16); __device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16); __device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16); __device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16); -__device__ __attribute__((const)) _Float16 __llvm_amdgcn_rcp_f16(_Float16); __device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16); __device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16); __device__ _Float16 __ocml_sin_f16(_Float16); @@ -316,8 +293,15 @@ __device__ __attribute__((pure)) _Float16 __ocml_pown_f16(_Float16, int); typedef _Float16 __2f16 __attribute__((ext_vector_type(2))); typedef short __2i16 __attribute__((ext_vector_type(2))); +// We need to match C99's bool and get an i1 in the IR. 
+#ifdef __cplusplus +typedef bool __ockl_bool; +#else +typedef _Bool __ockl_bool; +#endif + __device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b, - float c, bool s); + float c, __ockl_bool s); __device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16); __device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16); __device__ __2f16 __ocml_cos_2f16(__2f16); @@ -332,11 +316,29 @@ __device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16); __device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16); __device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16); __device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16); + +#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 560 +#define __DEPRECATED_SINCE_HIP_560(X) __attribute__((deprecated(X))) +#else +#define __DEPRECATED_SINCE_HIP_560(X) +#endif + +// Deprecated, should be removed when rocm releases using it are no longer +// relevant. +__DEPRECATED_SINCE_HIP_560("use ((_Float16)1.0) / ") +__device__ inline _Float16 __llvm_amdgcn_rcp_f16(_Float16 x) { + return ((_Float16)1.0f) / x; +} + +__DEPRECATED_SINCE_HIP_560("use ((__2f16)1.0) / ") __device__ inline __2f16 -__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL. 
+__llvm_amdgcn_rcp_2f16(__2f16 __x) { - return (__2f16)(__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y)); + return ((__2f16)1.0f) / __x; } + +#undef __DEPRECATED_SINCE_HIP_560 + __device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16); __device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16); __device__ __2f16 __ocml_sin_2f16(__2f16); diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_math.h b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_math.h similarity index 88% rename from third_party/clang/lib/clang/16.0.0/include/__clang_hip_math.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_hip_math.h index 537dd0fca8..a47dda3327 100644 --- a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_math.h +++ b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_math.h @@ -182,10 +182,10 @@ __DEVICE__ float cbrtf(float __x) { return __ocml_cbrt_f32(__x); } __DEVICE__ -float ceilf(float __x) { return __ocml_ceil_f32(__x); } +float ceilf(float __x) { return __builtin_ceilf(__x); } __DEVICE__ -float copysignf(float __x, float __y) { return __ocml_copysign_f32(__x, __y); } +float copysignf(float __x, float __y) { return __builtin_copysignf(__x, __y); } __DEVICE__ float cosf(float __x) { return __ocml_cos_f32(__x); } @@ -221,10 +221,10 @@ __DEVICE__ float exp10f(float __x) { return __ocml_exp10_f32(__x); } __DEVICE__ -float exp2f(float __x) { return __ocml_exp2_f32(__x); } +float exp2f(float __x) { return __builtin_exp2f(__x); } __DEVICE__ -float expf(float __x) { return __ocml_exp_f32(__x); } +float expf(float __x) { return __builtin_expf(__x); } __DEVICE__ float expm1f(float __x) { return __ocml_expm1_f32(__x); } @@ -239,33 +239,25 @@ __DEVICE__ float fdividef(float __x, float __y) { return __x / __y; } __DEVICE__ -float floorf(float __x) { return __ocml_floor_f32(__x); } +float floorf(float __x) { return __builtin_floorf(__x); } __DEVICE__ float fmaf(float __x, float __y, float __z) { - return 
__ocml_fma_f32(__x, __y, __z); + return __builtin_fmaf(__x, __y, __z); } __DEVICE__ -float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); } +float fmaxf(float __x, float __y) { return __builtin_fmaxf(__x, __y); } __DEVICE__ -float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); } +float fminf(float __x, float __y) { return __builtin_fminf(__x, __y); } __DEVICE__ float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); } __DEVICE__ float frexpf(float __x, int *__nptr) { - int __tmp; -#ifdef __OPENMP_AMDGCN__ -#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc) -#endif - float __r = - __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp); - *__nptr = __tmp; - - return __r; + return __builtin_frexpf(__x, __nptr); } __DEVICE__ @@ -275,13 +267,13 @@ __DEVICE__ int ilogbf(float __x) { return __ocml_ilogb_f32(__x); } __DEVICE__ -__RETURN_TYPE __finitef(float __x) { return __ocml_isfinite_f32(__x); } +__RETURN_TYPE __finitef(float __x) { return __builtin_isfinite(__x); } __DEVICE__ -__RETURN_TYPE __isinff(float __x) { return __ocml_isinf_f32(__x); } +__RETURN_TYPE __isinff(float __x) { return __builtin_isinf(__x); } __DEVICE__ -__RETURN_TYPE __isnanf(float __x) { return __ocml_isnan_f32(__x); } +__RETURN_TYPE __isnanf(float __x) { return __builtin_isnan(__x); } __DEVICE__ float j0f(float __x) { return __ocml_j0_f32(__x); } @@ -311,37 +303,37 @@ float jnf(int __n, float __x) { // TODO: we could use Ahmes multiplication } __DEVICE__ -float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); } +float ldexpf(float __x, int __e) { return __builtin_amdgcn_ldexpf(__x, __e); } __DEVICE__ float lgammaf(float __x) { return __ocml_lgamma_f32(__x); } __DEVICE__ -long long int llrintf(float __x) { return __ocml_rint_f32(__x); } +long long int llrintf(float __x) { return __builtin_rintf(__x); } __DEVICE__ -long long int llroundf(float __x) { return __ocml_round_f32(__x); } +long long int llroundf(float 
__x) { return __builtin_roundf(__x); } __DEVICE__ -float log10f(float __x) { return __ocml_log10_f32(__x); } +float log10f(float __x) { return __builtin_log10f(__x); } __DEVICE__ float log1pf(float __x) { return __ocml_log1p_f32(__x); } __DEVICE__ -float log2f(float __x) { return __ocml_log2_f32(__x); } +float log2f(float __x) { return __builtin_log2f(__x); } __DEVICE__ float logbf(float __x) { return __ocml_logb_f32(__x); } __DEVICE__ -float logf(float __x) { return __ocml_log_f32(__x); } +float logf(float __x) { return __builtin_logf(__x); } __DEVICE__ -long int lrintf(float __x) { return __ocml_rint_f32(__x); } +long int lrintf(float __x) { return __builtin_rintf(__x); } __DEVICE__ -long int lroundf(float __x) { return __ocml_round_f32(__x); } +long int lroundf(float __x) { return __builtin_roundf(__x); } __DEVICE__ float modff(float __x, float *__iptr) { @@ -377,7 +369,7 @@ float nanf(const char *__tagp __attribute__((nonnull))) { } __DEVICE__ -float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); } +float nearbyintf(float __x) { return __builtin_nearbyintf(__x); } __DEVICE__ float nextafterf(float __x, float __y) { @@ -443,7 +435,7 @@ __DEVICE__ float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); } __DEVICE__ -float rintf(float __x) { return __ocml_rint_f32(__x); } +float rintf(float __x) { return __builtin_rintf(__x); } __DEVICE__ float rnorm3df(float __x, float __y, float __z) { @@ -468,22 +460,22 @@ float rnormf(int __dim, } __DEVICE__ -float roundf(float __x) { return __ocml_round_f32(__x); } +float roundf(float __x) { return __builtin_roundf(__x); } __DEVICE__ float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); } __DEVICE__ float scalblnf(float __x, long int __n) { - return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n) + return (__n < INT_MAX) ? 
__builtin_amdgcn_ldexpf(__x, __n) : __ocml_scalb_f32(__x, __n); } __DEVICE__ -float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); } +float scalbnf(float __x, int __n) { return __builtin_amdgcn_ldexpf(__x, __n); } __DEVICE__ -__RETURN_TYPE __signbitf(float __x) { return __ocml_signbit_f32(__x); } +__RETURN_TYPE __signbitf(float __x) { return __builtin_signbitf(__x); } __DEVICE__ void sincosf(float __x, float *__sinptr, float *__cosptr) { @@ -529,7 +521,7 @@ __DEVICE__ float tgammaf(float __x) { return __ocml_tgamma_f32(__x); } __DEVICE__ -float truncf(float __x) { return __ocml_trunc_f32(__x); } +float truncf(float __x) { return __builtin_truncf(__x); } __DEVICE__ float y0f(float __x) { return __ocml_y0_f32(__x); } @@ -621,7 +613,7 @@ float __fmaf_rz(float __x, float __y, float __z) { #else __DEVICE__ float __fmaf_rn(float __x, float __y, float __z) { - return __ocml_fma_f32(__x, __y, __z); + return __builtin_fmaf(__x, __y, __z); } #endif @@ -654,7 +646,7 @@ float __frcp_rn(float __x) { return 1.0f / __x; } #endif __DEVICE__ -float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); } +float __frsqrt_rn(float __x) { return __builtin_amdgcn_rsqf(__x); } #if defined OCML_BASIC_ROUNDED_OPERATIONS __DEVICE__ @@ -739,11 +731,11 @@ __DEVICE__ double cbrt(double __x) { return __ocml_cbrt_f64(__x); } __DEVICE__ -double ceil(double __x) { return __ocml_ceil_f64(__x); } +double ceil(double __x) { return __builtin_ceil(__x); } __DEVICE__ double copysign(double __x, double __y) { - return __ocml_copysign_f64(__x, __y); + return __builtin_copysign(__x, __y); } __DEVICE__ @@ -795,32 +787,25 @@ __DEVICE__ double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); } __DEVICE__ -double floor(double __x) { return __ocml_floor_f64(__x); } +double floor(double __x) { return __builtin_floor(__x); } __DEVICE__ double fma(double __x, double __y, double __z) { - return __ocml_fma_f64(__x, __y, __z); + return __builtin_fma(__x, __y, __z); } 
__DEVICE__ -double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); } +double fmax(double __x, double __y) { return __builtin_fmax(__x, __y); } __DEVICE__ -double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); } +double fmin(double __x, double __y) { return __builtin_fmin(__x, __y); } __DEVICE__ double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); } __DEVICE__ double frexp(double __x, int *__nptr) { - int __tmp; -#ifdef __OPENMP_AMDGCN__ -#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc) -#endif - double __r = - __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp); - *__nptr = __tmp; - return __r; + return __builtin_frexp(__x, __nptr); } __DEVICE__ @@ -830,13 +815,13 @@ __DEVICE__ int ilogb(double __x) { return __ocml_ilogb_f64(__x); } __DEVICE__ -__RETURN_TYPE __finite(double __x) { return __ocml_isfinite_f64(__x); } +__RETURN_TYPE __finite(double __x) { return __builtin_isfinite(__x); } __DEVICE__ -__RETURN_TYPE __isinf(double __x) { return __ocml_isinf_f64(__x); } +__RETURN_TYPE __isinf(double __x) { return __builtin_isinf(__x); } __DEVICE__ -__RETURN_TYPE __isnan(double __x) { return __ocml_isnan_f64(__x); } +__RETURN_TYPE __isnan(double __x) { return __builtin_isnan(__x); } __DEVICE__ double j0(double __x) { return __ocml_j0_f64(__x); } @@ -866,16 +851,16 @@ double jn(int __n, double __x) { // TODO: we could use Ahmes multiplication } __DEVICE__ -double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); } +double ldexp(double __x, int __e) { return __builtin_amdgcn_ldexp(__x, __e); } __DEVICE__ double lgamma(double __x) { return __ocml_lgamma_f64(__x); } __DEVICE__ -long long int llrint(double __x) { return __ocml_rint_f64(__x); } +long long int llrint(double __x) { return __builtin_rint(__x); } __DEVICE__ -long long int llround(double __x) { return __ocml_round_f64(__x); } +long long int llround(double __x) { return __builtin_round(__x); } __DEVICE__ double 
log(double __x) { return __ocml_log_f64(__x); } @@ -893,10 +878,10 @@ __DEVICE__ double logb(double __x) { return __ocml_logb_f64(__x); } __DEVICE__ -long int lrint(double __x) { return __ocml_rint_f64(__x); } +long int lrint(double __x) { return __builtin_rint(__x); } __DEVICE__ -long int lround(double __x) { return __ocml_round_f64(__x); } +long int lround(double __x) { return __builtin_round(__x); } __DEVICE__ double modf(double __x, double *__iptr) { @@ -940,7 +925,7 @@ double nan(const char *__tagp) { } __DEVICE__ -double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); } +double nearbyint(double __x) { return __builtin_nearbyint(__x); } __DEVICE__ double nextafter(double __x, double __y) { @@ -1006,7 +991,7 @@ __DEVICE__ double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); } __DEVICE__ -double rint(double __x) { return __ocml_rint_f64(__x); } +double rint(double __x) { return __builtin_rint(__x); } __DEVICE__ double rnorm(int __dim, @@ -1031,21 +1016,21 @@ double rnorm4d(double __x, double __y, double __z, double __w) { } __DEVICE__ -double round(double __x) { return __ocml_round_f64(__x); } +double round(double __x) { return __builtin_round(__x); } __DEVICE__ double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); } __DEVICE__ double scalbln(double __x, long int __n) { - return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n) + return (__n < INT_MAX) ? 
__builtin_amdgcn_ldexp(__x, __n) : __ocml_scalb_f64(__x, __n); } __DEVICE__ -double scalbn(double __x, int __n) { return __ocml_scalbn_f64(__x, __n); } +double scalbn(double __x, int __n) { return __builtin_amdgcn_ldexp(__x, __n); } __DEVICE__ -__RETURN_TYPE __signbit(double __x) { return __ocml_signbit_f64(__x); } +__RETURN_TYPE __signbit(double __x) { return __builtin_signbit(__x); } __DEVICE__ double sin(double __x) { return __ocml_sin_f64(__x); } @@ -1091,7 +1076,7 @@ __DEVICE__ double tgamma(double __x) { return __ocml_tgamma_f64(__x); } __DEVICE__ -double trunc(double __x) { return __ocml_trunc_f64(__x); } +double trunc(double __x) { return __builtin_trunc(__x); } __DEVICE__ double y0(double __x) { return __ocml_y0_f64(__x); } @@ -1258,7 +1243,7 @@ double __fma_rz(double __x, double __y, double __z) { #else __DEVICE__ double __fma_rn(double __x, double __y, double __z) { - return __ocml_fma_f64(__x, __y, __z); + return __builtin_fma(__x, __y, __z); } #endif // END INTRINSICS @@ -1290,16 +1275,16 @@ __DEVICE__ int max(int __arg1, int __arg2) { } __DEVICE__ -float max(float __x, float __y) { return fmaxf(__x, __y); } +float max(float __x, float __y) { return __builtin_fmaxf(__x, __y); } __DEVICE__ -double max(double __x, double __y) { return fmax(__x, __y); } +double max(double __x, double __y) { return __builtin_fmax(__x, __y); } __DEVICE__ -float min(float __x, float __y) { return fminf(__x, __y); } +float min(float __x, float __y) { return __builtin_fminf(__x, __y); } __DEVICE__ -double min(double __x, double __y) { return fmin(__x, __y); } +double min(double __x, double __y) { return __builtin_fmin(__x, __y); } #if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__) __host__ inline static int min(int __arg1, int __arg2) { diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_runtime_wrapper.h b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_runtime_wrapper.h similarity index 85% rename from 
third_party/clang/lib/clang/16.0.0/include/__clang_hip_runtime_wrapper.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_hip_runtime_wrapper.h index 0508731de1..e8817073ef 100644 --- a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_runtime_wrapper.h +++ b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_runtime_wrapper.h @@ -80,12 +80,25 @@ extern "C" { #if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 405 extern "C" __device__ unsigned long long __ockl_dm_alloc(unsigned long long __size); extern "C" __device__ void __ockl_dm_dealloc(unsigned long long __addr); +#if __has_feature(address_sanitizer) +extern "C" __device__ unsigned long long __asan_malloc_impl(unsigned long long __size, unsigned long long __pc); +extern "C" __device__ void __asan_free_impl(unsigned long long __addr, unsigned long long __pc); +__attribute__((noinline, weak)) __device__ void *malloc(__hip_size_t __size) { + unsigned long long __pc = (unsigned long long)__builtin_return_address(0); + return (void *)__asan_malloc_impl(__size, __pc); +} +__attribute__((noinline, weak)) __device__ void free(void *__ptr) { + unsigned long long __pc = (unsigned long long)__builtin_return_address(0); + __asan_free_impl((unsigned long long)__ptr, __pc); +} +#else __attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) { return (void *) __ockl_dm_alloc(__size); } __attribute__((weak)) inline __device__ void free(void *__ptr) { __ockl_dm_dealloc((unsigned long long)__ptr); } +#endif // __has_feature(address_sanitizer) #else // HIP version check #if __HIP_ENABLE_DEVICE_MALLOC__ __device__ void *__hip_malloc(__hip_size_t __size); diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_stdlib.h b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_stdlib.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/__clang_hip_stdlib.h rename to third_party/clang/lib/clang/17.0.1/include/__clang_hip_stdlib.h diff --git 
a/third_party/clang/lib/clang/16.0.0/include/__stddef_max_align_t.h b/third_party/clang/lib/clang/17.0.1/include/__stddef_max_align_t.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/__stddef_max_align_t.h rename to third_party/clang/lib/clang/17.0.1/include/__stddef_max_align_t.h diff --git a/third_party/clang/lib/clang/16.0.0/include/__wmmintrin_aes.h b/third_party/clang/lib/clang/17.0.1/include/__wmmintrin_aes.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/__wmmintrin_aes.h rename to third_party/clang/lib/clang/17.0.1/include/__wmmintrin_aes.h diff --git a/third_party/clang/lib/clang/16.0.0/include/__wmmintrin_pclmul.h b/third_party/clang/lib/clang/17.0.1/include/__wmmintrin_pclmul.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/__wmmintrin_pclmul.h rename to third_party/clang/lib/clang/17.0.1/include/__wmmintrin_pclmul.h diff --git a/third_party/clang/lib/clang/17.0.1/include/adxintrin.h b/third_party/clang/lib/clang/17.0.1/include/adxintrin.h new file mode 100644 index 0000000000..20f6211e56 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/adxintrin.h @@ -0,0 +1,227 @@ +/*===---- adxintrin.h - ADX intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __ADXINTRIN_H +#define __ADXINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) + +/* Use C++ inline semantics in C++, GNU inline for C mode. 
*/ +#if defined(__cplusplus) +#define __INLINE __inline +#else +#define __INLINE static __inline +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* Intrinsics that are available only if __ADX__ is defined. */ + +/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated +/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory +/// at \a __p, and returns the 8-bit carry-out (carry flag). +/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store32(__p, __x + __y + temp) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ADCX instruction. +/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// A 32-bit unsigned addend. +/// \param __y +/// A 32-bit unsigned addend. +/// \param __p +/// Pointer to memory for storing the sum. +/// \returns The 8-bit unsigned carry-out value. +__INLINE unsigned char + __attribute__((__always_inline__, __nodebug__, __target__("adx"))) + _addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y, + unsigned int *__p) { + return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p); +} + +#ifdef __x86_64__ +/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated +/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory +/// at \a __p, and returns the 8-bit carry-out (carry flag). +/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store64(__p, __x + __y + temp) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ADCX instruction. +/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// A 64-bit unsigned addend. +/// \param __y +/// A 64-bit unsigned addend. +/// \param __p +/// Pointer to memory for storing the sum. +/// \returns The 8-bit unsigned carry-out value. 
+__INLINE unsigned char + __attribute__((__always_inline__, __nodebug__, __target__("adx"))) + _addcarryx_u64(unsigned char __cf, unsigned long long __x, + unsigned long long __y, unsigned long long *__p) { + return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p); +} +#endif + +/* Intrinsics that are also available if __ADX__ is undefined. */ + +/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated +/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory +/// at \a __p, and returns the 8-bit carry-out (carry flag). +/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store32(__p, __x + __y + temp) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ADC instruction. +/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// A 32-bit unsigned addend. +/// \param __y +/// A 32-bit unsigned addend. +/// \param __p +/// Pointer to memory for storing the sum. +/// \returns The 8-bit unsigned carry-out value. +__INLINE unsigned char __DEFAULT_FN_ATTRS _addcarry_u32(unsigned char __cf, + unsigned int __x, + unsigned int __y, + unsigned int *__p) { + return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p); +} + +#ifdef __x86_64__ +/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated +/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory +/// at \a __p, and returns the 8-bit carry-out (carry flag). +/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store64(__p, __x + __y + temp) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ADC instruction. +/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// A 64-bit unsigned addend. +/// \param __y +/// A 64-bit unsigned addend. +/// \param __p +/// Pointer to memory for storing the sum. 
+/// \returns The 8-bit unsigned carry-out value. +__INLINE unsigned char __DEFAULT_FN_ATTRS +_addcarry_u64(unsigned char __cf, unsigned long long __x, + unsigned long long __y, unsigned long long *__p) { + return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p); +} +#endif + +/// Adds unsigned 32-bit integer \a __y to 0 or 1 as indicated by the carry +/// flag \a __cf, and subtracts the result from unsigned 32-bit integer +/// \a __x. Stores the unsigned 32-bit difference in the memory at \a __p, +/// and returns the 8-bit carry-out (carry or overflow flag). +/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store32(__p, __x - (__y + temp)) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SBB instruction. +/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// The 32-bit unsigned minuend. +/// \param __y +/// The 32-bit unsigned subtrahend. +/// \param __p +/// Pointer to memory for storing the difference. +/// \returns The 8-bit unsigned carry-out value. +__INLINE unsigned char __DEFAULT_FN_ATTRS _subborrow_u32(unsigned char __cf, + unsigned int __x, + unsigned int __y, + unsigned int *__p) { + return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p); +} + +#ifdef __x86_64__ +/// Adds unsigned 64-bit integer \a __y to 0 or 1 as indicated by the carry +/// flag \a __cf, and subtracts the result from unsigned 64-bit integer +/// \a __x. Stores the unsigned 64-bit difference in the memory at \a __p, +/// and returns the 8-bit carry-out (carry or overflow flag). +/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store64(__p, __x - (__y + temp)) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ADC instruction. +/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// The 64-bit unsigned minuend. 
+/// \param __y +/// The 64-bit unsigned subtrahend. +/// \param __p +/// Pointer to memory for storing the difference. +/// \returns The 8-bit unsigned carry-out value. +__INLINE unsigned char __DEFAULT_FN_ATTRS +_subborrow_u64(unsigned char __cf, unsigned long long __x, + unsigned long long __y, unsigned long long *__p) { + return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p); +} +#endif + +#if defined(__cplusplus) +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif /* __ADXINTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/altivec.h b/third_party/clang/lib/clang/17.0.1/include/altivec.h similarity index 98% rename from third_party/clang/lib/clang/16.0.0/include/altivec.h rename to third_party/clang/lib/clang/17.0.1/include/altivec.h index f50466ec96..c036f5ebba 100644 --- a/third_party/clang/lib/clang/16.0.0/include/altivec.h +++ b/third_party/clang/lib/clang/17.0.1/include/altivec.h @@ -3202,71 +3202,79 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a, // the XL-compatible signatures are used for those functions. 
#ifdef __XL_COMPAT_ALTIVEC__ #define vec_ctf(__a, __b) \ - _Generic( \ - (__a), vector int \ - : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \ - vector unsigned int \ - : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \ - (__b)), \ - vector unsigned long long \ - : (vector float)(__builtin_vsx_xvcvuxdsp( \ - (vector unsigned long long)(__a)) * \ - (vector float)(vector unsigned)((0x7f - (__b)) << 23)), \ - vector signed long long \ - : (vector float)(__builtin_vsx_xvcvsxdsp( \ - (vector signed long long)(__a)) * \ - (vector float)(vector unsigned)((0x7f - (__b)) << 23))) + _Generic((__a), \ + vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \ + ((__b)&0x1F)), \ + vector unsigned int: (vector float)__builtin_altivec_vcfux( \ + (vector unsigned int)(__a), ((__b)&0x1F)), \ + vector unsigned long long: ( \ + vector float)(__builtin_vsx_xvcvuxdsp( \ + (vector unsigned long long)(__a)) * \ + (vector float)(vector unsigned)((0x7f - \ + ((__b)&0x1F)) \ + << 23)), \ + vector signed long long: ( \ + vector float)(__builtin_vsx_xvcvsxdsp( \ + (vector signed long long)(__a)) * \ + (vector float)(vector unsigned)((0x7f - \ + ((__b)&0x1F)) \ + << 23))) #else // __XL_COMPAT_ALTIVEC__ -#define vec_ctf(__a, __b) \ - _Generic( \ - (__a), vector int \ - : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \ - vector unsigned int \ - : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \ - (__b)), \ - vector unsigned long long \ - : (vector float)(__builtin_convertvector( \ - (vector unsigned long long)(__a), vector double) * \ - (vector double)(vector unsigned long long)((0x3ffULL - \ - (__b)) \ - << 52)), \ - vector signed long long \ - : (vector float)(__builtin_convertvector((vector signed long long)(__a), \ - vector double) * \ - (vector double)(vector unsigned long long)((0x3ffULL - \ - (__b)) \ - << 52))) +#define vec_ctf(__a, __b) \ + _Generic( \ + (__a), \ + vector int: (vector 
float)__builtin_altivec_vcfsx((vector int)(__a), \ + ((__b)&0x1F)), \ + vector unsigned int: (vector float)__builtin_altivec_vcfux( \ + (vector unsigned int)(__a), ((__b)&0x1F)), \ + vector unsigned long long: ( \ + vector float)(__builtin_convertvector( \ + (vector unsigned long long)(__a), vector double) * \ + (vector double)(vector unsigned long long)((0x3ffULL - \ + ((__b)&0x1F)) \ + << 52)), \ + vector signed long long: ( \ + vector float)(__builtin_convertvector( \ + (vector signed long long)(__a), vector double) * \ + (vector double)(vector unsigned long long)((0x3ffULL - \ + ((__b)&0x1F)) \ + << 52))) #endif // __XL_COMPAT_ALTIVEC__ #else #define vec_ctf(__a, __b) \ - _Generic((__a), vector int \ - : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \ - vector unsigned int \ - : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \ - (__b))) + _Generic((__a), \ + vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \ + ((__b)&0x1F)), \ + vector unsigned int: (vector float)__builtin_altivec_vcfux( \ + (vector unsigned int)(__a), ((__b)&0x1F))) #endif /* vec_ctd */ #ifdef __VSX__ #define vec_ctd(__a, __b) \ - _Generic((__a), vector signed int \ - : (vec_doublee((vector signed int)(__a)) * \ - (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \ - << 52)), \ - vector unsigned int \ - : (vec_doublee((vector unsigned int)(__a)) * \ - (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \ - << 52)), \ - vector unsigned long long \ - : (__builtin_convertvector((vector unsigned long long)(__a), \ - vector double) * \ - (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \ - << 52)), \ - vector signed long long \ - : (__builtin_convertvector((vector signed long long)(__a), \ - vector double) * \ - (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \ - << 52))) + _Generic((__a), \ + vector signed int: ( \ + vec_doublee((vector signed int)(__a)) * \ + (vector double)(vector unsigned 
long long)((0x3ffULL - \ + ((__b)&0x1F)) \ + << 52)), \ + vector unsigned int: ( \ + vec_doublee((vector unsigned int)(__a)) * \ + (vector double)(vector unsigned long long)((0x3ffULL - \ + ((__b)&0x1F)) \ + << 52)), \ + vector unsigned long long: ( \ + __builtin_convertvector((vector unsigned long long)(__a), \ + vector double) * \ + (vector double)(vector unsigned long long)((0x3ffULL - \ + ((__b)&0x1F)) \ + << 52)), \ + vector signed long long: ( \ + __builtin_convertvector((vector signed long long)(__a), \ + vector double) * \ + (vector double)(vector unsigned long long)((0x3ffULL - \ + ((__b)&0x1F)) \ + << 52))) #endif // __VSX__ /* vec_vcfsx */ @@ -3281,27 +3289,27 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a, #ifdef __VSX__ #ifdef __XL_COMPAT_ALTIVEC__ #define vec_cts(__a, __b) \ - _Generic((__a), vector float \ - : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a), \ - (__b)), \ - vector double \ - : __extension__({ \ + _Generic((__a), \ + vector float: (vector signed int)__builtin_altivec_vctsxs( \ + (vector float)(__a), ((__b)&0x1F)), \ + vector double: __extension__({ \ vector double __ret = \ (vector double)(__a) * \ - (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \ + (vector double)(vector unsigned long long)((0x3ffULL + \ + ((__b)&0x1F)) \ << 52); \ (vector signed long long)__builtin_vsx_xvcvdpsxws(__ret); \ })) #else // __XL_COMPAT_ALTIVEC__ #define vec_cts(__a, __b) \ - _Generic((__a), vector float \ - : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a), \ - (__b)), \ - vector double \ - : __extension__({ \ + _Generic((__a), \ + vector float: (vector signed int)__builtin_altivec_vctsxs( \ + (vector float)(__a), ((__b)&0x1F)), \ + vector double: __extension__({ \ vector double __ret = \ (vector double)(__a) * \ - (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \ + (vector double)(vector unsigned long long)((0x3ffULL + \ + ((__b)&0x1F)) \ << 52); \ (vector signed 
long long)__builtin_convertvector( \ __ret, vector signed long long); \ @@ -3320,27 +3328,27 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a, #ifdef __VSX__ #ifdef __XL_COMPAT_ALTIVEC__ #define vec_ctu(__a, __b) \ - _Generic((__a), vector float \ - : (vector unsigned int)__builtin_altivec_vctuxs( \ - (vector float)(__a), (__b)), \ - vector double \ - : __extension__({ \ + _Generic((__a), \ + vector float: (vector unsigned int)__builtin_altivec_vctuxs( \ + (vector float)(__a), ((__b)&0x1F)), \ + vector double: __extension__({ \ vector double __ret = \ (vector double)(__a) * \ - (vector double)(vector unsigned long long)((0x3ffULL + __b) \ + (vector double)(vector unsigned long long)((0x3ffULL + \ + ((__b)&0x1F)) \ << 52); \ (vector unsigned long long)__builtin_vsx_xvcvdpuxws(__ret); \ })) #else // __XL_COMPAT_ALTIVEC__ #define vec_ctu(__a, __b) \ - _Generic((__a), vector float \ - : (vector unsigned int)__builtin_altivec_vctuxs( \ - (vector float)(__a), (__b)), \ - vector double \ - : __extension__({ \ + _Generic((__a), \ + vector float: (vector unsigned int)__builtin_altivec_vctuxs( \ + (vector float)(__a), ((__b)&0x1F)), \ + vector double: __extension__({ \ vector double __ret = \ (vector double)(__a) * \ - (vector double)(vector unsigned long long)((0x3ffULL + __b) \ + (vector double)(vector unsigned long long)((0x3ffULL + \ + ((__b)&0x1F)) \ << 52); \ (vector unsigned long long)__builtin_convertvector( \ __ret, vector unsigned long long); \ @@ -3355,60 +3363,62 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a, #ifdef __VSX__ #define vec_ctsl(__a, __b) \ - _Generic((__a), vector float \ - : __extension__({ \ - vector float __ret = \ - (vector float)(__a) * \ - (vector float)(vector unsigned)((0x7f + (__b)) << 23); \ - __builtin_vsx_xvcvspsxds( \ - __builtin_vsx_xxsldwi(__ret, __ret, 1)); \ - }), \ - vector double \ - : __extension__({ \ - vector double __ret = \ - (vector double)(__a) * \ - (vector 
double)(vector unsigned long long)((0x3ffULL + __b) \ - << 52); \ - __builtin_convertvector(__ret, vector signed long long); \ - })) + _Generic( \ + (__a), vector float \ + : __extension__({ \ + vector float __ret = \ + (vector float)(__a) * \ + (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \ + __builtin_vsx_xvcvspsxds(__builtin_vsx_xxsldwi(__ret, __ret, 1)); \ + }), \ + vector double \ + : __extension__({ \ + vector double __ret = \ + (vector double)(__a) * \ + (vector double)(vector unsigned long long)((0x3ffULL + \ + ((__b)&0x1F)) \ + << 52); \ + __builtin_convertvector(__ret, vector signed long long); \ + })) /* vec_ctul */ #define vec_ctul(__a, __b) \ - _Generic((__a), vector float \ - : __extension__({ \ - vector float __ret = \ - (vector float)(__a) * \ - (vector float)(vector unsigned)((0x7f + (__b)) << 23); \ - __builtin_vsx_xvcvspuxds( \ - __builtin_vsx_xxsldwi(__ret, __ret, 1)); \ - }), \ - vector double \ - : __extension__({ \ - vector double __ret = \ - (vector double)(__a) * \ - (vector double)(vector unsigned long long)((0x3ffULL + __b) \ - << 52); \ - __builtin_convertvector(__ret, vector unsigned long long); \ - })) + _Generic( \ + (__a), vector float \ + : __extension__({ \ + vector float __ret = \ + (vector float)(__a) * \ + (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \ + __builtin_vsx_xvcvspuxds(__builtin_vsx_xxsldwi(__ret, __ret, 1)); \ + }), \ + vector double \ + : __extension__({ \ + vector double __ret = \ + (vector double)(__a) * \ + (vector double)(vector unsigned long long)((0x3ffULL + \ + ((__b)&0x1F)) \ + << 52); \ + __builtin_convertvector(__ret, vector unsigned long long); \ + })) #endif #else // __LITTLE_ENDIAN__ /* vec_ctsl */ #ifdef __VSX__ #define vec_ctsl(__a, __b) \ - _Generic((__a), vector float \ - : __extension__({ \ - vector float __ret = \ - (vector float)(__a) * \ - (vector float)(vector unsigned)((0x7f + (__b)) << 23); \ - __builtin_vsx_xvcvspsxds(__ret); \ - }), \ - vector double \ - : 
__extension__({ \ + _Generic((__a), \ + vector float: __extension__({ \ + vector float __ret = \ + (vector float)(__a) * \ + (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \ + __builtin_vsx_xvcvspsxds(__ret); \ + }), \ + vector double: __extension__({ \ vector double __ret = \ (vector double)(__a) * \ - (vector double)(vector unsigned long long)((0x3ffULL + __b) \ + (vector double)(vector unsigned long long)((0x3ffULL + \ + ((__b)&0x1F)) \ << 52); \ __builtin_convertvector(__ret, vector signed long long); \ })) @@ -3420,14 +3430,16 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a, : __extension__({ \ vector float __ret = \ (vector float)(__a) * \ - (vector float)(vector unsigned)((0x7f + (__b)) << 23); \ + (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) \ + << 23); \ __builtin_vsx_xvcvspuxds(__ret); \ }), \ vector double \ : __extension__({ \ vector double __ret = \ (vector double)(__a) * \ - (vector double)(vector unsigned long long)((0x3ffULL + __b) \ + (vector double)(vector unsigned long long)((0x3ffULL + \ + ((__b)&0x1F)) \ << 52); \ __builtin_convertvector(__ret, vector unsigned long long); \ })) diff --git a/third_party/clang/lib/clang/16.0.0/include/ammintrin.h b/third_party/clang/lib/clang/17.0.1/include/ammintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ammintrin.h rename to third_party/clang/lib/clang/17.0.1/include/ammintrin.h diff --git a/third_party/clang/lib/clang/17.0.1/include/amxcomplexintrin.h b/third_party/clang/lib/clang/17.0.1/include/amxcomplexintrin.h new file mode 100644 index 0000000000..84ef972fca --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/amxcomplexintrin.h @@ -0,0 +1,169 @@ +/*===--------- amxcomplexintrin.h - AMXCOMPLEX intrinsics -*- C++ -*---------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===------------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif // __IMMINTRIN_H + +#ifndef __AMX_COMPLEXINTRIN_H +#define __AMX_COMPLEXINTRIN_H +#ifdef __x86_64__ + +#define __DEFAULT_FN_ATTRS_COMPLEX \ + __attribute__((__always_inline__, __nodebug__, __target__("amx-complex"))) + +/// Perform matrix multiplication of two tiles containing complex elements and +/// accumulate the results into a packed single precision tile. Each dword +/// element in input tiles \a a and \a b is interpreted as a complex number +/// with FP16 real part and FP16 imaginary part. +/// Calculates the imaginary part of the result. For each possible combination +/// of (row of \a a, column of \a b), it performs a set of multiplication +/// and accumulations on all corresponding complex numbers (one from \a a +/// and one from \a b). The imaginary part of the \a a element is multiplied +/// with the real part of the corresponding \a b element, and the real part +/// of the \a a element is multiplied with the imaginary part of the +/// corresponding \a b elements. The two accumulated results are added, and +/// then accumulated into the corresponding row and column of \a dst. +/// +/// \headerfile +/// +/// \code +/// void _tile_cmmimfp16ps(__tile dst, __tile a, __tile b); +/// \endcode +/// +/// \code{.operation} +/// FOR m := 0 TO dst.rows - 1 +/// tmp := dst.row[m] +/// FOR k := 0 TO (a.colsb / 4) - 1 +/// FOR n := 0 TO (dst.colsb / 4) - 1 +/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+1]) +/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+0]) +/// ENDFOR +/// ENDFOR +/// write_row_and_zero(dst, m, tmp, dst.colsb) +/// ENDFOR +/// zero_upper_rows(dst, dst.rows) +/// zero_tileconfig_start() +/// \endcode +/// +/// This intrinsic corresponds to the \c TCMMIMFP16PS instruction. 
+/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param a +/// The 1st source tile. Max size is 1024 Bytes. +/// \param b +/// The 2nd source tile. Max size is 1024 Bytes. +#define _tile_cmmimfp16ps(dst, a, b) __builtin_ia32_tcmmimfp16ps(dst, a, b) + +/// Perform matrix multiplication of two tiles containing complex elements and +/// accumulate the results into a packed single precision tile. Each dword +/// element in input tiles \a a and \a b is interpreted as a complex number +/// with FP16 real part and FP16 imaginary part. +/// Calculates the real part of the result. For each possible combination +/// of (row of \a a, column of \a b), it performs a set of multiplication +/// and accumulations on all corresponding complex numbers (one from \a a +/// and one from \a b). The real part of the \a a element is multiplied +/// with the real part of the corresponding \a b element, and the negated +/// imaginary part of the \a a element is multiplied with the imaginary +/// part of the corresponding \a b elements. The two accumulated results +/// are added, and then accumulated into the corresponding row and column +/// of \a dst. +/// +/// \headerfile +/// +/// \code +/// void _tile_cmmrlfp16ps(__tile dst, __tile a, __tile b); +/// \endcode +/// +/// \code{.operation} +/// FOR m := 0 TO dst.rows - 1 +/// tmp := dst.row[m] +/// FOR k := 0 TO (a.colsb / 4) - 1 +/// FOR n := 0 TO (dst.colsb / 4) - 1 +/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+0]) +/// tmp.fp32[n] += FP32(-a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+1]) +/// ENDFOR +/// ENDFOR +/// write_row_and_zero(dst, m, tmp, dst.colsb) +/// ENDFOR +/// zero_upper_rows(dst, dst.rows) +/// zero_tileconfig_start() +/// \endcode +/// +/// This intrinsic corresponds to the \c TCMMIMFP16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param a +/// The 1st source tile. Max size is 1024 Bytes. 
+/// \param b +/// The 2nd source tile. Max size is 1024 Bytes. +#define _tile_cmmrlfp16ps(dst, a, b) __builtin_ia32_tcmmrlfp16ps(dst, a, b) + +static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX +_tile_cmmimfp16ps_internal(unsigned short m, unsigned short n, unsigned short k, + _tile1024i dst, _tile1024i src1, _tile1024i src2) { + return __builtin_ia32_tcmmimfp16ps_internal(m, n, k, dst, src1, src2); +} + +static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX +_tile_cmmrlfp16ps_internal(unsigned short m, unsigned short n, unsigned short k, + _tile1024i dst, _tile1024i src1, _tile1024i src2) { + return __builtin_ia32_tcmmrlfp16ps_internal(m, n, k, dst, src1, src2); +} + +/// Perform matrix multiplication of two tiles containing complex elements and +/// accumulate the results into a packed single precision tile. Each dword +/// element in input tiles src0 and src1 is interpreted as a complex number with +/// FP16 real part and FP16 imaginary part. +/// This function calculates the imaginary part of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TCMMIMFP16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +__DEFAULT_FN_ATTRS_COMPLEX +static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0, + __tile1024i src1) { + dst->tile = _tile_cmmimfp16ps_internal(src0.row, src1.col, src0.col, + dst->tile, src0.tile, src1.tile); +} + +/// Perform matrix multiplication of two tiles containing complex elements and +/// accumulate the results into a packed single precision tile. Each dword +/// element in input tiles src0 and src1 is interpreted as a complex number with +/// FP16 real part and FP16 imaginary part. +/// This function calculates the real part of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TCMMRLFP16PS instruction. 
+/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +__DEFAULT_FN_ATTRS_COMPLEX +static void __tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0, + __tile1024i src1) { + dst->tile = _tile_cmmrlfp16ps_internal(src0.row, src1.col, src0.col, + dst->tile, src0.tile, src1.tile); +} + +#endif // __x86_64__ +#endif // __AMX_COMPLEXINTRIN_H diff --git a/third_party/clang/lib/clang/16.0.0/include/amxfp16intrin.h b/third_party/clang/lib/clang/17.0.1/include/amxfp16intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/amxfp16intrin.h rename to third_party/clang/lib/clang/17.0.1/include/amxfp16intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/amxintrin.h b/third_party/clang/lib/clang/17.0.1/include/amxintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/amxintrin.h rename to third_party/clang/lib/clang/17.0.1/include/amxintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/arm64intr.h b/third_party/clang/lib/clang/17.0.1/include/arm64intr.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/arm64intr.h rename to third_party/clang/lib/clang/17.0.1/include/arm64intr.h diff --git a/third_party/clang/lib/clang/16.0.0/include/arm_acle.h b/third_party/clang/lib/clang/17.0.1/include/arm_acle.h similarity index 97% rename from third_party/clang/lib/clang/16.0.0/include/arm_acle.h rename to third_party/clang/lib/clang/17.0.1/include/arm_acle.h index e086f1f02d..c208512bab 100644 --- a/third_party/clang/lib/clang/16.0.0/include/arm_acle.h +++ b/third_party/clang/lib/clang/17.0.1/include/arm_acle.h @@ -138,28 +138,32 @@ __rorl(unsigned long __x, uint32_t __y) { /* CLZ */ -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ unsigned int __attribute__((__always_inline__, 
__nodebug__)) __clz(uint32_t __t) { - return (uint32_t)__builtin_clz(__t); + return __builtin_arm_clz(__t); } -static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) __clzl(unsigned long __t) { - return (unsigned long)__builtin_clzl(__t); +#if __SIZEOF_LONG__ == 4 + return __builtin_arm_clz(__t); +#else + return __builtin_arm_clz64(__t); +#endif } -static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) __clzll(uint64_t __t) { - return (uint64_t)__builtin_clzll(__t); + return __builtin_arm_clz64(__t); } /* CLS */ -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) __cls(uint32_t __t) { return __builtin_arm_cls(__t); } -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) __clsl(unsigned long __t) { #if __SIZEOF_LONG__ == 4 return __builtin_arm_cls(__t); @@ -168,7 +172,7 @@ __clsl(unsigned long __t) { #endif } -static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) __clsll(uint64_t __t) { return __builtin_arm_cls64(__t); } diff --git a/third_party/clang/lib/clang/16.0.0/include/arm_cmse.h b/third_party/clang/lib/clang/17.0.1/include/arm_cmse.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/arm_cmse.h rename to third_party/clang/lib/clang/17.0.1/include/arm_cmse.h diff --git a/third_party/clang/lib/clang/16.0.0/include/arm_neon_sve_bridge.h b/third_party/clang/lib/clang/17.0.1/include/arm_neon_sve_bridge.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/arm_neon_sve_bridge.h rename to 
third_party/clang/lib/clang/17.0.1/include/arm_neon_sve_bridge.h diff --git a/third_party/clang/lib/clang/16.0.0/include/armintr.h b/third_party/clang/lib/clang/17.0.1/include/armintr.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/armintr.h rename to third_party/clang/lib/clang/17.0.1/include/armintr.h diff --git a/third_party/clang/lib/clang/17.0.1/include/avx2intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx2intrin.h new file mode 100644 index 0000000000..8f2de05674 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/avx2intrin.h @@ -0,0 +1,5263 @@ +/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX2INTRIN_H +#define __AVX2INTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128))) + +/* SSE4 Multiple Packed Sums of Absolute Difference. */ +/// Computes sixteen sum of absolute difference (SAD) operations on sets of +/// four unsigned 8-bit integers from the 256-bit integer vectors \a X and +/// \a Y. +/// +/// Eight SAD results are computed using the lower half of the input +/// vectors, and another eight using the upper half. These 16-bit values +/// are returned in the lower and upper halves of the 256-bit result, +/// respectively. 
+/// +/// A single SAD operation selects four bytes from \a X and four bytes from +/// \a Y as input. It computes the differences between each \a X byte and +/// the corresponding \a Y byte, takes the absolute value of each +/// difference, and sums these four values to form one 16-bit result. The +/// intrinsic computes 16 of these results with different sets of input +/// bytes. +/// +/// For each set of eight results, the SAD operations use the same four +/// bytes from \a Y; the starting bit position for these four bytes is +/// specified by \a M[1:0] times 32. The eight operations use successive +/// sets of four bytes from \a X; the starting bit position for the first +/// set of four bytes is specified by \a M[2] times 32. These bit positions +/// are all relative to the 128-bit lane for each set of eight operations. +/// +/// \code{.operation} +/// r := 0 +/// FOR i := 0 TO 1 +/// j := i*3 +/// Ybase := M[j+1:j]*32 + i*128 +/// Xbase := M[j+2]*32 + i*128 +/// FOR k := 0 TO 3 +/// temp0 := ABS(X[Xbase+7:Xbase] - Y[Ybase+7:Ybase]) +/// temp1 := ABS(X[Xbase+15:Xbase+8] - Y[Ybase+15:Ybase+8]) +/// temp2 := ABS(X[Xbase+23:Xbase+16] - Y[Ybase+23:Ybase+16]) +/// temp3 := ABS(X[Xbase+31:Xbase+24] - Y[Ybase+31:Ybase+24]) +/// result[r+15:r] := temp0 + temp1 + temp2 + temp3 +/// Xbase := Xbase + 8 +/// r := r + 16 +/// ENDFOR +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_mpsadbw_epu8(__m256i X, __m256i Y, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VMPSADBW instruction. +/// +/// \param X +/// A 256-bit integer vector containing one of the inputs. +/// \param Y +/// A 256-bit integer vector containing one of the inputs. +/// \param M +/// An unsigned immediate value specifying the starting positions of the +/// bytes to operate on. +/// \returns A 256-bit vector of [16 x i16] containing the result. 
+#define _mm256_mpsadbw_epu8(X, Y, M) \ + ((__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \ + (__v32qi)(__m256i)(Y), (int)(M))) + +/// Computes the absolute value of each signed byte in the 256-bit integer +/// vector \a __a and returns each value in the corresponding byte of +/// the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_abs_epi8(__m256i __a) +{ + return (__m256i)__builtin_elementwise_abs((__v32qs)__a); +} + +/// Computes the absolute value of each signed 16-bit element in the 256-bit +/// vector of [16 x i16] in \a __a and returns each value in the +/// corresponding element of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_abs_epi16(__m256i __a) +{ + return (__m256i)__builtin_elementwise_abs((__v16hi)__a); +} + +/// Computes the absolute value of each signed 32-bit element in the 256-bit +/// vector of [8 x i32] in \a __a and returns each value in the +/// corresponding element of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_abs_epi32(__m256i __a) +{ + return (__m256i)__builtin_elementwise_abs((__v8si)__a); +} + +/// Converts the elements of two 256-bit vectors of [16 x i16] to 8-bit +/// integers using signed saturation, and returns the 256-bit result. 
+/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*16 +/// k := i*8 +/// result[7+k:k] := SATURATE8(__a[15+j:j]) +/// result[71+k:64+k] := SATURATE8(__b[15+j:j]) +/// result[135+k:128+k] := SATURATE8(__a[143+j:128+j]) +/// result[199+k:192+k] := SATURATE8(__b[143+j:128+j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPACKSSWB instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] used to generate result[63:0] and +/// result[191:128]. +/// \param __b +/// A 256-bit vector of [16 x i16] used to generate result[127:64] and +/// result[255:192]. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_packs_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b); +} + +/// Converts the elements of two 256-bit vectors of [8 x i32] to 16-bit +/// integers using signed saturation, and returns the resulting 256-bit +/// vector of [16 x i16]. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*32 +/// k := i*16 +/// result[15+k:k] := SATURATE16(__a[31+j:j]) +/// result[79+k:64+k] := SATURATE16(__b[31+j:j]) +/// result[143+k:128+k] := SATURATE16(__a[159+j:128+j]) +/// result[207+k:192+k] := SATURATE16(__b[159+j:128+j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPACKSSDW instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] used to generate result[63:0] and +/// result[191:128]. +/// \param __b +/// A 256-bit vector of [8 x i32] used to generate result[127:64] and +/// result[255:192]. +/// \returns A 256-bit vector of [16 x i16] containing the result. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_packs_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b); +} + +/// Converts elements from two 256-bit vectors of [16 x i16] to 8-bit integers +/// using unsigned saturation, and returns the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*16 +/// k := i*8 +/// result[7+k:k] := SATURATE8U(__a[15+j:j]) +/// result[71+k:64+k] := SATURATE8U(__b[15+j:j]) +/// result[135+k:128+k] := SATURATE8U(__a[143+j:128+j]) +/// result[199+k:192+k] := SATURATE8U(__b[143+j:128+j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPACKUSWB instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] used to generate result[63:0] and +/// result[191:128]. +/// \param __b +/// A 256-bit vector of [16 x i16] used to generate result[127:64] and +/// result[255:192]. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_packus_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b); +} + +/// Converts elements from two 256-bit vectors of [8 x i32] to 16-bit integers +/// using unsigned saturation, and returns the resulting 256-bit vector of +/// [16 x i16]. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*32 +/// k := i*16 +/// result[15+k:k] := SATURATE16U(__V1[31+j:j]) +/// result[79+k:64+k] := SATURATE16U(__V2[31+j:j]) +/// result[143+k:128+k] := SATURATE16U(__V1[159+j:128+j]) +/// result[207+k:192+k] := SATURATE16U(__V2[159+j:128+j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPACKUSDW instruction. +/// +/// \param __V1 +/// A 256-bit vector of [8 x i32] used to generate result[63:0] and +/// result[191:128]. +/// \param __V2 +/// A 256-bit vector of [8 x i32] used to generate result[127:64] and +/// result[255:192]. 
+/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_packus_epi32(__m256i __V1, __m256i __V2) +{ + return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2); +} + +/// Adds 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors and returns the lower 8 bits of each sum in the corresponding +/// byte of the 256-bit integer vector result (overflow is ignored). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing one of the source operands. +/// \param __b +/// A 256-bit integer vector containing one of the source operands. +/// \returns A 256-bit integer vector containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_add_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)((__v32qu)__a + (__v32qu)__b); +} + +/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of +/// [16 x i16] and returns the lower 16 bits of each sum in the +/// corresponding element of the [16 x i16] result (overflow is ignored). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_add_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)((__v16hu)__a + (__v16hu)__b); +} + +/// Adds 32-bit integers from corresponding elements of two 256-bit vectors of +/// [8 x i32] and returns the lower 32 bits of each sum in the corresponding +/// element of the [8 x i32] result (overflow is ignored). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDD instruction. 
+/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \returns A 256-bit vector of [8 x i32] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_add_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a + (__v8su)__b); +} + +/// Adds 64-bit integers from corresponding elements of two 256-bit vectors of +/// [4 x i64] and returns the lower 64 bits of each sum in the corresponding +/// element of the [4 x i64] result (overflow is ignored). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x i64] containing one of the source operands. +/// \returns A 256-bit vector of [4 x i64] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_add_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a + (__v4du)__b); +} + +/// Adds 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors using signed saturation, and returns each sum in the +/// corresponding byte of the 256-bit integer vector result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDSB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing one of the source operands. +/// \param __b +/// A 256-bit integer vector containing one of the source operands. +/// \returns A 256-bit integer vector containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_adds_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b); +} + +/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of +/// [16 x i16] using signed saturation, and returns the [16 x i16] result. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_adds_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b); +} + +/// Adds 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors using unsigned saturation, and returns each sum in the +/// corresponding byte of the 256-bit integer vector result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDUSB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing one of the source operands. +/// \param __b +/// A 256-bit integer vector containing one of the source operands. +/// \returns A 256-bit integer vector containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_adds_epu8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b); +} + +/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of +/// [16 x i16] using unsigned saturation, and returns the [16 x i16] result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDUSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the sums. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_adds_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b); +} + +/// Uses the lower half of the 256-bit vector \a a as the upper half of a +/// temporary 256-bit value, and the lower half of the 256-bit vector \a b +/// as the lower half of the temporary value. Right-shifts the temporary +/// value by \a n bytes, and uses the lower 16 bytes of the shifted value +/// as the lower 16 bytes of the result. Uses the upper halves of \a a and +/// \a b to make another temporary value, right shifts by \a n, and uses +/// the lower 16 bytes of the shifted value as the upper 16 bytes of the +/// result. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_alignr_epi8(__m256i a, __m256i b, const int n); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPALIGNR instruction. +/// +/// \param a +/// A 256-bit integer vector containing source values. +/// \param b +/// A 256-bit integer vector containing source values. +/// \param n +/// An immediate value specifying the number of bytes to shift. +/// \returns A 256-bit integer vector containing the result. +#define _mm256_alignr_epi8(a, b, n) \ + ((__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \ + (__v32qi)(__m256i)(b), (n))) + +/// Computes the bitwise AND of the 256-bit integer vectors in \a __a and +/// \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPAND instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_and_si256(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a & (__v4du)__b); +} + +/// Computes the bitwise AND of the 256-bit integer vector in \a __b with +/// the bitwise NOT of the 256-bit integer vector in \a __a. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPANDN instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_andnot_si256(__m256i __a, __m256i __b) +{ + return (__m256i)(~(__v4du)__a & (__v4du)__b); +} + +/// Computes the averages of the corresponding unsigned bytes in the two +/// 256-bit integer vectors in \a __a and \a __b and returns each +/// average in the corresponding byte of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := (__a[j+7:j] + __b[j+7:j] + 1) >> 1 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPAVGB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_avg_epu8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b); +} + +/// Computes the averages of the corresponding unsigned 16-bit integers in +/// the two 256-bit vectors of [16 x i16] in \a __a and \a __b and returns +/// each average in the corresponding element of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+15:j] := (__a[j+15:j] + __b[j+15:j] + 1) >> 1 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPAVGW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_avg_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b); +} + +/// Merges 8-bit integer values from either of the two 256-bit vectors +/// \a __V1 or \a __V2, as specified by the 256-bit mask \a __M and returns +/// the resulting 256-bit integer vector. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// IF __M[7+i] == 0 +/// result[7+j:j] := __V1[7+j:j] +/// ELSE +/// result[7+j:j] := __V2[7+j:j] +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBLENDVB instruction. +/// +/// \param __V1 +/// A 256-bit integer vector containing source values. +/// \param __V2 +/// A 256-bit integer vector containing source values. +/// \param __M +/// A 256-bit integer vector, with bit [7] of each byte specifying the +/// source for each corresponding byte of the result. When the mask bit +/// is 0, the byte is copied from \a __V1; otherwise, it is copied from +/// \a __V2. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M) +{ + return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2, + (__v32qi)__M); +} + +/// Merges 16-bit integer values from either of the two 256-bit vectors +/// \a V1 or \a V2, as specified by the immediate integer operand \a M, +/// and returns the resulting 256-bit vector of [16 x i16]. 
+/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*16 +/// IF M[i] == 0 +/// result[7+j:j] := V1[7+j:j] +/// result[135+j:128+j] := V1[135+j:128+j] +/// ELSE +/// result[7+j:j] := V2[7+j:j] +/// result[135+j:128+j] := V2[135+j:128+j] +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_blend_epi16(__m256i V1, __m256i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPBLENDW instruction. +/// +/// \param V1 +/// A 256-bit vector of [16 x i16] containing source values. +/// \param V2 +/// A 256-bit vector of [16 x i16] containing source values. +/// \param M +/// An immediate 8-bit integer operand, with bits [7:0] specifying the +/// source for each element of the result. The position of the mask bit +/// corresponds to the index of a copied value. When a mask bit is 0, the +/// element is copied from \a V1; otherwise, it is copied from \a V2. +/// \a M[0] determines the source for elements 0 and 8, \a M[1] for +/// elements 1 and 9, and so forth. +/// \returns A 256-bit vector of [16 x i16] containing the result. +#define _mm256_blend_epi16(V1, V2, M) \ + ((__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \ + (__v16hi)(__m256i)(V2), (int)(M))) + +/// Compares corresponding bytes in the 256-bit integer vectors in \a __a and +/// \a __b for equality and returns the outcomes in the corresponding +/// bytes of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := (__a[j+7:j] == __b[j+7:j]) ? 0xFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPEQB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing one of the inputs. +/// \param __b +/// A 256-bit integer vector containing one of the inputs. +/// \returns A 256-bit integer vector containing the result. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpeq_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)((__v32qi)__a == (__v32qi)__b); +} + +/// Compares corresponding elements in the 256-bit vectors of [16 x i16] in +/// \a __a and \a __b for equality and returns the outcomes in the +/// corresponding elements of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+15:j] := (__a[j+15:j] == __b[j+15:j]) ? 0xFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPEQW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the inputs. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpeq_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)((__v16hi)__a == (__v16hi)__b); +} + +/// Compares corresponding elements in the 256-bit vectors of [8 x i32] in +/// \a __a and \a __b for equality and returns the outcomes in the +/// corresponding elements of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// result[j+31:j] := (__a[j+31:j] == __b[j+31:j]) ? 0xFFFFFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPEQD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the inputs. +/// \returns A 256-bit vector of [8 x i32] containing the result. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpeq_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8si)__a == (__v8si)__b); +} + +/// Compares corresponding elements in the 256-bit vectors of [4 x i64] in +/// \a __a and \a __b for equality and returns the outcomes in the +/// corresponding elements of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// result[j+63:j] := (__a[j+63:j] == __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPEQQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [4 x i64] containing one of the inputs. +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpeq_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4di)__a == (__v4di)__b); +} + +/// Compares corresponding signed bytes in the 256-bit integer vectors in +/// \a __a and \a __b for greater-than and returns the outcomes in the +/// corresponding bytes of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := (__a[j+7:j] > __b[j+7:j]) ? 0xFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPGTB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing one of the inputs. +/// \param __b +/// A 256-bit integer vector containing one of the inputs. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpgt_epi8(__m256i __a, __m256i __b) +{ + /* This function always performs a signed comparison, but __v32qi is a char + which may be signed or unsigned, so use __v32qs. 
*/ + return (__m256i)((__v32qs)__a > (__v32qs)__b); +} + +/// Compares corresponding signed elements in the 256-bit vectors of +/// [16 x i16] in \a __a and \a __b for greater-than and returns the +/// outcomes in the corresponding elements of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+15:j] := (__a[j+15:j] > __b[j+15:j]) ? 0xFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPGTW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the inputs. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpgt_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)((__v16hi)__a > (__v16hi)__b); +} + +/// Compares corresponding signed elements in the 256-bit vectors of +/// [8 x i32] in \a __a and \a __b for greater-than and returns the +/// outcomes in the corresponding elements of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// result[j+31:j] := (__a[j+31:j] > __b[j+31:j]) ? 0xFFFFFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPGTD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the inputs. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpgt_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8si)__a > (__v8si)__b); +} + +/// Compares corresponding signed elements in the 256-bit vectors of +/// [4 x i64] in \a __a and \a __b for greater-than and returns the +/// outcomes in the corresponding elements of the 256-bit result. 
+/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// result[j+63:j] := (__a[j+63:j] > __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPGTQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [4 x i64] containing one of the inputs. +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpgt_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4di)__a > (__v4di)__b); +} + +/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit +/// vectors of [16 x i16] and returns the lower 16 bits of each sum in an +/// element of the [16 x i16] result (overflow is ignored). Sums from +/// \a __a are returned in the lower 64 bits of each 128-bit half of the +/// result; sums from \a __b are returned in the upper 64 bits of each +/// 128-bit half of the result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+15:j] := __a[j+15:j] + __a[j+31:j+16] +/// result[j+31:j+16] := __a[j+47:j+32] + __a[j+63:j+48] +/// result[j+47:j+32] := __a[j+79:j+64] + __a[j+95:j+80] +/// result[j+63:j+48] := __a[j+111:j+96] + __a[j+127:j+112] +/// result[j+79:j+64] := __b[j+15:j] + __b[j+31:j+16] +/// result[j+95:j+80] := __b[j+47:j+32] + __b[j+63:j+48] +/// result[j+111:j+96] := __b[j+79:j+64] + __b[j+95:j+80] +/// result[j+127:j+112] := __b[j+111:j+96] + __b[j+127:j+112] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the sums. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hadd_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b); +} + +/// Horizontally adds the adjacent pairs of 32-bit integers from two 256-bit +/// vectors of [8 x i32] and returns the lower 32 bits of each sum in an +/// element of the [8 x i32] result (overflow is ignored). Sums from \a __a +/// are returned in the lower 64 bits of each 128-bit half of the result; +/// sums from \a __b are returned in the upper 64 bits of each 128-bit half +/// of the result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+31:j] := __a[j+31:j] + __a[j+63:j+32] +/// result[j+63:j+32] := __a[j+95:j+64] + __a[j+127:j+96] +/// result[j+95:j+64] := __b[j+31:j] + __b[j+63:j+32] +/// result[j+127:j+96] := __b[j+95:j+64] + __b[j+127:j+96] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \returns A 256-bit vector of [8 x i32] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hadd_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b); +} + +/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit +/// vectors of [16 x i16] using signed saturation and returns each sum in +/// an element of the [16 x i16] result. Sums from \a __a are returned in +/// the lower 64 bits of each 128-bit half of the result; sums from \a __b +/// are returned in the upper 64 bits of each 128-bit half of the result. 
+/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+15:j] := SATURATE16(__a[j+15:j] + __a[j+31:j+16]) +/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] + __a[j+63:j+48]) +/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] + __a[j+95:j+80]) +/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] + __a[j+127:j+112]) +/// result[j+79:j+64] := SATURATE16(__b[j+15:j] + __b[j+31:j+16]) +/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] + __b[j+63:j+48]) +/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] + __b[j+95:j+80]) +/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] + __b[j+127:j+112]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hadds_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b); +} + +/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit +/// vectors of [16 x i16] and returns the lower 16 bits of each difference +/// in an element of the [16 x i16] result (overflow is ignored). +/// Differences from \a __a are returned in the lower 64 bits of each +/// 128-bit half of the result; differences from \a __b are returned in the +/// upper 64 bits of each 128-bit half of the result. 
+/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+15:j] := __a[j+15:j] - __a[j+31:j+16] +/// result[j+31:j+16] := __a[j+47:j+32] - __a[j+63:j+48] +/// result[j+47:j+32] := __a[j+79:j+64] - __a[j+95:j+80] +/// result[j+63:j+48] := __a[j+111:j+96] - __a[j+127:j+112] +/// result[j+79:j+64] := __b[j+15:j] - __b[j+31:j+16] +/// result[j+95:j+80] := __b[j+47:j+32] - __b[j+63:j+48] +/// result[j+111:j+96] := __b[j+79:j+64] - __b[j+95:j+80] +/// result[j+127:j+112] := __b[j+111:j+96] - __b[j+127:j+112] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hsub_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b); +} + +/// Horizontally subtracts adjacent pairs of 32-bit integers from two 256-bit +/// vectors of [8 x i32] and returns the lower 32 bits of each difference in +/// an element of the [8 x i32] result (overflow is ignored). Differences +/// from \a __a are returned in the lower 64 bits of each 128-bit half of +/// the result; differences from \a __b are returned in the upper 64 bits +/// of each 128-bit half of the result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+31:j] := __a[j+31:j] - __a[j+63:j+32] +/// result[j+63:j+32] := __a[j+95:j+64] - __a[j+127:j+96] +/// result[j+95:j+64] := __b[j+31:j] - __b[j+63:j+32] +/// result[j+127:j+96] := __b[j+95:j+64] - __b[j+127:j+96] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBD instruction. 
+/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \returns A 256-bit vector of [8 x i32] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hsub_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b); +} + +/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit +/// vectors of [16 x i16] using signed saturation and returns each sum in +/// an element of the [16 x i16] result. Differences from \a __a are +/// returned in the lower 64 bits of each 128-bit half of the result; +/// differences from \a __b are returned in the upper 64 bits of each +/// 128-bit half of the result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+15:j] := SATURATE16(__a[j+15:j] - __a[j+31:j+16]) +/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] - __a[j+63:j+48]) +/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] - __a[j+95:j+80]) +/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] - __a[j+127:j+112]) +/// result[j+79:j+64] := SATURATE16(__b[j+15:j] - __b[j+31:j+16]) +/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] - __b[j+63:j+48]) +/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] - __b[j+95:j+80]) +/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] - __b[j+127:j+112]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the differences. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hsubs_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b); +} + +/// Multiplies each unsigned byte from the 256-bit integer vector in \a __a +/// with the corresponding signed byte from the 256-bit integer vector in +/// \a __b, forming signed 16-bit intermediate products. Adds adjacent +/// pairs of those products using signed saturation to form 16-bit sums +/// returned as elements of the [16 x i16] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// temp1 := __a[j+7:j] * __b[j+7:j] +/// temp2 := __a[j+15:j+8] * __b[j+15:j+8] +/// result[j+15:j] := SATURATE16(temp1 + temp2) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMADDUBSW instruction. +/// +/// \param __a +/// A 256-bit vector containing one of the source operands. +/// \param __b +/// A 256-bit vector containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maddubs_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b); +} + +/// Multiplies corresponding 16-bit elements of two 256-bit vectors of +/// [16 x i16], forming 32-bit intermediate products, and adds pairs of +/// those products to form 32-bit sums returned as elements of the +/// [8 x i32] result. +/// +/// There is only one wraparound case: when all four of the 16-bit sources +/// are \c 0x8000, the result will be \c 0x80000000. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// temp1 := __a[j+15:j] * __b[j+15:j] +/// temp2 := __a[j+31:j+16] * __b[j+31:j+16] +/// result[j+31:j] := temp1 + temp2 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMADDWD instruction. 
+/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_madd_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b); +} + +/// Compares the corresponding signed bytes in the two 256-bit integer vectors +/// in \a __a and \a __b and returns the larger of each pair in the +/// corresponding byte of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXSB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b); +} + +/// Compares the corresponding signed 16-bit integers in the two 256-bit +/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b); +} + +/// Compares the corresponding signed 32-bit integers in the two 256-bit +/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of +/// each pair in the corresponding element of the 256-bit result. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXSD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __b +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b); +} + +/// Compares the corresponding unsigned bytes in the two 256-bit integer +/// vectors in \a __a and \a __b and returns the larger of each pair in +/// the corresponding byte of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXUB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epu8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b); +} + +/// Compares the corresponding unsigned 16-bit integers in the two 256-bit +/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXUW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b); +} + +/// Compares the corresponding unsigned 32-bit integers in the two 256-bit +/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of +/// each pair in the corresponding element of the 256-bit result. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXUD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __b +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epu32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b); +} + +/// Compares the corresponding signed bytes in the two 256-bit integer vectors +/// in \a __a and \a __b and returns the smaller of each pair in the +/// corresponding byte of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINSB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b); +} + +/// Compares the corresponding signed 16-bit integers in the two 256-bit +/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b); +} + +/// Compares the corresponding signed 32-bit integers in the two 256-bit +/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of +/// each pair in the corresponding element of the 256-bit result. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINSD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __b +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b); +} + +/// Compares the corresponding unsigned bytes in the two 256-bit integer +/// vectors in \a __a and \a __b and returns the smaller of each pair in +/// the corresponding byte of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINUB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epu8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b); +} + +/// Compares the corresponding unsigned 16-bit integers in the two 256-bit +/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINUW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b); +} + +/// Compares the corresponding unsigned 32-bit integers in the two 256-bit +/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of +/// each pair in the corresponding element of the 256-bit result. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINUD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __b +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epu32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b); +} + +static __inline__ int __DEFAULT_FN_ATTRS256 +_mm256_movemask_epi8(__m256i __a) +{ + return __builtin_ia32_pmovmskb256((__v32qi)__a); +} + +/// Sign-extends bytes from the 128-bit integer vector in \a __V and returns +/// the 16-bit values in the corresponding elements of a 256-bit vector +/// of [16 x i16]. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*8 +/// k := i*16 +/// result[k+15:k] := SignExtend(__V[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVSXBW instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the source bytes. +/// \returns A 256-bit vector of [16 x i16] containing the sign-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi8_epi16(__m128i __V) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi); +} + +/// Sign-extends bytes from the lower half of the 128-bit integer vector in +/// \a __V and returns the 32-bit values in the corresponding elements of a +/// 256-bit vector of [8 x i32]. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*8 +/// k := i*32 +/// result[k+31:k] := SignExtend(__V[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVSXBD instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the source bytes. 
+/// \returns A 256-bit vector of [8 x i32] containing the sign-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi8_epi32(__m128i __V) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si); +} + +/// Sign-extends the first four bytes from the 128-bit integer vector in +/// \a __V and returns the 64-bit values in the corresponding elements of a +/// 256-bit vector of [4 x i64]. +/// +/// \code{.operation} +/// result[63:0] := SignExtend(__V[7:0]) +/// result[127:64] := SignExtend(__V[15:8]) +/// result[191:128] := SignExtend(__V[23:16]) +/// result[255:192] := SignExtend(__V[31:24]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVSXBQ instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the source bytes. +/// \returns A 256-bit vector of [4 x i64] containing the sign-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi8_epi64(__m128i __V) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di); +} + +/// Sign-extends 16-bit elements from the 128-bit vector of [8 x i16] in +/// \a __V and returns the 32-bit values in the corresponding elements of a +/// 256-bit vector of [8 x i32]. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*16 +/// k := i*32 +/// result[k+31:k] := SignExtend(__V[j+15:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVSXWD instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16] containing the source values. 
+/// \returns A 256-bit vector of [8 x i32] containing the sign-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
+}
+
+/// Sign-extends 16-bit elements from the lower half of the 128-bit vector of
+/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding
+/// elements of a 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := SignExtend(__V[15:0])
+/// result[127:64] := SignExtend(__V[31:16])
+/// result[191:128] := SignExtend(__V[47:32])
+/// result[255:192] := SignExtend(__V[63:48])
+/// \endcode
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPMOVSXWQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
+}
+
+/// Sign-extends 32-bit elements from the 128-bit vector of [4 x i32] in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := SignExtend(__V[31:0])
+/// result[127:64] := SignExtend(__V[63:32])
+/// result[191:128] := SignExtend(__V[95:64])
+/// result[255:192] := SignExtend(__V[127:96])
+/// \endcode
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPMOVSXDQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [4 x i32] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi32_epi64(__m128i __V) +{ + return (__m256i)__builtin_convertvector((__v4si)__V, __v4di); +} + +/// Zero-extends bytes from the 128-bit integer vector in \a __V and returns +/// the 16-bit values in the corresponding elements of a 256-bit vector +/// of [16 x i16]. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*8 +/// k := i*16 +/// result[k+15:k] := ZeroExtend(__V[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVZXBW instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the source bytes. +/// \returns A 256-bit vector of [16 x i16] containing the zero-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu8_epi16(__m128i __V) +{ + return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi); +} + +/// Zero-extends bytes from the lower half of the 128-bit integer vector in +/// \a __V and returns the 32-bit values in the corresponding elements of a +/// 256-bit vector of [8 x i32]. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*8 +/// k := i*32 +/// result[k+31:k] := ZeroExtend(__V[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVZXBD instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the source bytes. +/// \returns A 256-bit vector of [8 x i32] containing the zero-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu8_epi32(__m128i __V) +{ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si); +} + +/// Zero-extends the first four bytes from the 128-bit integer vector in +/// \a __V and returns the 64-bit values in the corresponding elements of a +/// 256-bit vector of [4 x i64]. 
+///
+/// \code{.operation}
+/// result[63:0] := ZeroExtend(__V[7:0])
+/// result[127:64] := ZeroExtend(__V[15:8])
+/// result[191:128] := ZeroExtend(__V[23:16])
+/// result[255:192] := ZeroExtend(__V[31:24])
+/// \endcode
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPMOVZXBQ instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
+}
+
+/// Zero-extends 16-bit elements from the 128-bit vector of [8 x i16] in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*32
+/// result[k+31:k] := ZeroExtend(__V[j+15:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPMOVZXWD instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [8 x i32] containing the zero-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
+}
+
+/// Zero-extends 16-bit elements from the lower half of the 128-bit vector of
+/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding
+/// elements of a 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := ZeroExtend(__V[15:0])
+/// result[127:64] := ZeroExtend(__V[31:16])
+/// result[191:128] := ZeroExtend(__V[47:32])
+/// result[255:192] := ZeroExtend(__V[63:48])
+/// \endcode
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPMOVZXWQ instruction.
+/// +/// \param __V +/// A 128-bit vector of [8 x i16] containing the source values. +/// \returns A 256-bit vector of [4 x i64] containing the zero-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu16_epi64(__m128i __V) +{ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di); +} + +/// Zero-extends 32-bit elements from the 128-bit vector of [4 x i32] in +/// \a __V and returns the 64-bit values in the corresponding elements of a +/// 256-bit vector of [4 x i64]. +/// +/// \code{.operation} +/// result[63:0] := ZeroExtend(__V[31:0]) +/// result[127:64] := ZeroExtend(__V[63:32]) +/// result[191:128] := ZeroExtend(__V[95:64]) +/// result[255:192] := ZeroExtend(__V[127:96]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVZXDQ instruction. +/// +/// \param __V +/// A 128-bit vector of [4 x i32] containing the source values. +/// \returns A 256-bit vector of [4 x i64] containing the zero-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu32_epi64(__m128i __V) +{ + return (__m256i)__builtin_convertvector((__v4su)__V, __v4di); +} + +/// Multiplies signed 32-bit integers from even-numbered elements of two +/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the +/// [4 x i64] result. +/// +/// \code{.operation} +/// result[63:0] := __a[31:0] * __b[31:0] +/// result[127:64] := __a[95:64] * __b[95:64] +/// result[191:128] := __a[159:128] * __b[159:128] +/// result[255:192] := __a[223:192] * __b[223:192] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \returns A 256-bit vector of [4 x i64] containing the products. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mul_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b); +} + +/// Multiplies signed 16-bit integer elements of two 256-bit vectors of +/// [16 x i16], truncates the 32-bit results to the most significant 18 +/// bits, rounds by adding 1, and returns bits [16:1] of each rounded +/// product in the [16 x i16] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// temp := ((__a[j+15:j] * __b[j+15:j]) >> 14) + 1 +/// result[j+15:j] := temp[16:1] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULHRSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the rounded products. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mulhrs_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b); +} + +/// Multiplies unsigned 16-bit integer elements of two 256-bit vectors of +/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the +/// [16 x i16] result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULHUW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the products. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mulhi_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b); +} + +/// Multiplies signed 16-bit integer elements of two 256-bit vectors of +/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the +/// [16 x i16] result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULHW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the products. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mulhi_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b); +} + +/// Multiplies signed 16-bit integer elements of two 256-bit vectors of +/// [16 x i16], and returns the lower 16 bits of each 32-bit product in the +/// [16 x i16] result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULLW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the products. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mullo_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)((__v16hu)__a * (__v16hu)__b); +} + +/// Multiplies signed 32-bit integer elements of two 256-bit vectors of +/// [8 x i32], and returns the lower 32 bits of each 64-bit product in the +/// [8 x i32] result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULLD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the source operands. 
+/// \returns A 256-bit vector of [8 x i32] containing the products.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mullo_epi32 (__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a * (__v8su)__b);
+}
+
+/// Multiplies unsigned 32-bit integers from even-numbered elements of two
+/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the
+/// [4 x i64] result.
+///
+/// \code{.operation}
+/// result[63:0] := __a[31:0] * __b[31:0]
+/// result[127:64] := __a[95:64] * __b[95:64]
+/// result[191:128] := __a[159:128] * __b[159:128]
+/// result[255:192] := __a[223:192] * __b[223:192]
+/// \endcode
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPMULUDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x i64] containing the products.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mul_epu32(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
+}
+
+/// Computes the bitwise OR of the 256-bit integer vectors in \a __a and
+/// \a __b.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPOR instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_or_si256(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a | (__v4du)__b);
+}
+
+/// Computes four sum of absolute difference (SAD) operations on sets of eight
+/// unsigned 8-bit integers from the 256-bit integer vectors \a __a and
+/// \a __b.
+///
+/// One SAD result is computed for each set of eight bytes from \a __a and
+/// eight bytes from \a __b. The zero-extended SAD value is returned in the
+/// corresponding 64-bit element of the result.
+/// +/// A single SAD operation takes the differences between the corresponding +/// bytes of \a __a and \a __b, takes the absolute value of each difference, +/// and sums these eight values to form one 16-bit result. This operation +/// is repeated four times with successive sets of eight bytes. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// temp0 := ABS(__a[j+7:j] - __b[j+7:j]) +/// temp1 := ABS(__a[j+15:j+8] - __b[j+15:j+8]) +/// temp2 := ABS(__a[j+23:j+16] - __b[j+23:j+16]) +/// temp3 := ABS(__a[j+31:j+24] - __b[j+31:j+24]) +/// temp4 := ABS(__a[j+39:j+32] - __b[j+39:j+32]) +/// temp5 := ABS(__a[j+47:j+40] - __b[j+47:j+40]) +/// temp6 := ABS(__a[j+55:j+48] - __b[j+55:j+48]) +/// temp7 := ABS(__a[j+63:j+56] - __b[j+63:j+56]) +/// result[j+15:j] := temp0 + temp1 + temp2 + temp3 + +/// temp4 + temp5 + temp6 + temp7 +/// result[j+63:j+16] := 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSADBW instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sad_epu8(__m256i __a, __m256i __b) +{ + return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b); +} + +/// Shuffles 8-bit integers in the 256-bit integer vector \a __a according +/// to control information in the 256-bit integer vector \a __b, and +/// returns the 256-bit result. In effect there are two separate 128-bit +/// shuffles in the lower and upper halves. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// IF __b[j+7] == 1 +/// result[j+7:j] := 0 +/// ELSE +/// k := __b[j+3:j] * 8 +/// IF i > 15 +/// k := k + 128 +/// FI +/// result[j+7:j] := __a[k+7:k] +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSHUFB instruction. 
+///
+/// \param __a
+/// A 256-bit integer vector containing source values.
+/// \param __b
+/// A 256-bit integer vector containing control information to determine
+/// what goes into the corresponding byte of the result. If bit 7 of the
+/// control byte is 1, the result byte is 0; otherwise, bits 3:0 of the
+/// control byte specify the index (within the same 128-bit half) of \a __a
+/// to copy to the result byte.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_shuffle_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
+}
+
+/// Shuffles 32-bit integers from the 256-bit vector of [8 x i32] in \a a
+/// according to control information in the integer literal \a imm, and
+/// returns the 256-bit result. In effect there are two parallel 128-bit
+/// shuffles in the lower and upper halves.
+///
+/// \code{.operation}
+/// FOR i := 0 to 3
+/// j := i*32
+/// k := (imm >> i*2)[1:0] * 32
+/// result[j+31:j] := a[k+31:k]
+/// result[128+j+31:128+j] := a[128+k+31:128+k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile
+///
+/// \code
+/// __m256i _mm256_shuffle_epi32(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSHUFD instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param imm
+/// An immediate 8-bit value specifying which elements to copy from \a a.
+/// \a imm[1:0] specifies the index in \a a for elements 0 and 4 of the
+/// result, \a imm[3:2] specifies the index for elements 1 and 5, and so
+/// forth.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+#define _mm256_shuffle_epi32(a, imm) \ + ((__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm))) + +/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] in \a a +/// according to control information in the integer literal \a imm, and +/// returns the 256-bit result. The upper 64 bits of each 128-bit half +/// are shuffled in parallel; the lower 64 bits of each 128-bit half are +/// copied from \a a unchanged. +/// +/// \code{.operation} +/// result[63:0] := a[63:0] +/// result[191:128] := a[191:128] +/// FOR i := 0 TO 3 +/// j := i * 16 + 64 +/// k := (imm >> i*2)[1:0] * 16 + 64 +/// result[j+15:j] := a[k+15:k] +/// result[128+j+15:128+j] := a[128+k+15:128+k] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_shufflehi_epi16(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSHUFHW instruction. +/// +/// \param a +/// A 256-bit vector of [16 x i16] containing source values. +/// \param imm +/// An immediate 8-bit value specifying which elements to copy from \a a. +/// \a imm[1:0] specifies the index in \a a for elements 4 and 8 of the +/// result, \a imm[3:2] specifies the index for elements 5 and 9, and so +/// forth. Indexes are offset by 4 (so 0 means index 4, and so forth). +/// \returns A 256-bit vector of [16 x i16] containing the result. +#define _mm256_shufflehi_epi16(a, imm) \ + ((__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm))) + +/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] \a a +/// according to control information in the integer literal \a imm, and +/// returns the 256-bit [16 x i16] result. The lower 64 bits of each +/// 128-bit half are shuffled; the upper 64 bits of each 128-bit half are +/// copied from \a a unchanged. 
+///
+/// \code{.operation}
+/// result[127:64] := a[127:64]
+/// result[255:192] := a[255:192]
+/// FOR i := 0 TO 3
+/// j := i * 16
+/// k := (imm >> i*2)[1:0] * 16
+/// result[j+15:j] := a[k+15:k]
+/// result[128+j+15:128+j] := a[128+k+15:128+k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile
+///
+/// \code
+/// __m256i _mm256_shufflelo_epi16(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSHUFLW instruction.
+///
+/// \param a
+/// A 256-bit vector of [16 x i16] to use as a source of data for the
+/// result.
+/// \param imm
+/// An immediate 8-bit value specifying which elements to copy from \a a.
+/// \a imm[1:0] specifies the index in \a a for elements 0 and 8 of the
+/// result, \a imm[3:2] specifies the index for elements 1 and 9, and so
+/// forth.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+#define _mm256_shufflelo_epi16(a, imm) \
+ ((__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm)))
+
+/// Sets each byte of the result to the corresponding byte of the 256-bit
+/// integer vector in \a __a, the negative of that byte, or zero, depending
+/// on whether the corresponding byte of the 256-bit integer vector in
+/// \a __b is greater than zero, less than zero, or equal to zero,
+/// respectively.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPSIGNB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sign_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b); +} + +/// Sets each element of the result to the corresponding element of the +/// 256-bit vector of [16 x i16] in \a __a, the negative of that element, +/// or zero, depending on whether the corresponding element of the 256-bit +/// vector of [16 x i16] in \a __b is greater than zero, less than zero, or +/// equal to zero, respectively. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGNW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sign_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b); +} + +/// Sets each element of the result to the corresponding element of the +/// 256-bit vector of [8 x i32] in \a __a, the negative of that element, or +/// zero, depending on whether the corresponding element of the 256-bit +/// vector of [8 x i32] in \a __b is greater than zero, less than zero, or +/// equal to zero, respectively. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGND instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __b +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sign_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b); +} + +/// Shifts each 128-bit half of the 256-bit integer vector \a a left by +/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm +/// is greater than 15, the returned result is all zeroes. 
+/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_slli_si256(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSLLDQ instruction. +/// +/// \param a +/// A 256-bit integer vector to be shifted. +/// \param imm +/// An unsigned immediate value specifying the shift count (in bytes). +/// \returns A 256-bit integer vector containing the result. +#define _mm256_slli_si256(a, imm) \ + ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))) + +/// Shifts each 128-bit half of the 256-bit integer vector \a a left by +/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm +/// is greater than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_bslli_epi128(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSLLDQ instruction. +/// +/// \param a +/// A 256-bit integer vector to be shifted. +/// \param imm +/// An unsigned immediate value specifying the shift count (in bytes). +/// \returns A 256-bit integer vector containing the result. +#define _mm256_bslli_epi128(a, imm) \ + ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))) + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// left by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [16 x i16] containing the result. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_slli_epi16(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count); +} + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// left by the number of bits specified by the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sll_epi16(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// left by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 31, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_slli_epi32(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// left by the number of bits given in the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 31, the returned result is all zeroes. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sll_epi32(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count); +} + +/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a +/// left by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 63, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_slli_epi64(__m256i __a, int __count) +{ + return __builtin_ia32_psllqi256((__v4di)__a, __count); +} + +/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a +/// left by the number of bits given in the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 63, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [4 x i64] containing the result. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sll_epi64(__m256i __a, __m128i __count) +{ + return __builtin_ia32_psllq256((__v4di)__a, __count); +} + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// right by \a __count bits, shifting in sign bits, and returns the result. +/// If \a __count is greater than 15, each element of the result is either +/// 0 or -1 according to the corresponding input sign bit. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srai_epi16(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count); +} + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// right by the number of bits given in the lower 64 bits of \a __count, +/// shifting in sign bits, and returns the result. If \a __count is greater +/// than 15, each element of the result is either 0 or -1 according to the +/// corresponding input sign bit. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sra_epi16(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// right by \a __count bits, shifting in sign bits, and returns the result. 
+/// If \a __count is greater than 31, each element of the result is either +/// 0 or -1 according to the corresponding input sign bit. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srai_epi32(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// right by the number of bits given in the lower 64 bits of \a __count, +/// shifting in sign bits, and returns the result. If \a __count is greater +/// than 31, each element of the result is either 0 or -1 according to the +/// corresponding input sign bit. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sra_epi32(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count); +} + +/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by +/// \a imm bytes, shifting in zero bytes, and returns the result. If +/// \a imm is greater than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_srli_si256(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSRLDQ instruction. +/// +/// \param a +/// A 256-bit integer vector to be shifted. 
+/// \param imm +/// An unsigned immediate value specifying the shift count (in bytes). +/// \returns A 256-bit integer vector containing the result. +#define _mm256_srli_si256(a, imm) \ + ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))) + +/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by +/// \a imm bytes, shifting in zero bytes, and returns the result. If +/// \a imm is greater than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_bsrli_epi128(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSRLDQ instruction. +/// +/// \param a +/// A 256-bit integer vector to be shifted. +/// \param imm +/// An unsigned immediate value specifying the shift count (in bytes). +/// \returns A 256-bit integer vector containing the result. +#define _mm256_bsrli_epi128(a, imm) \ + ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))) + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// right by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srli_epi16(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count); +} + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// right by the number of bits given in the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 15, the returned result is all zeroes. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srl_epi16(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// right by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 31, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srli_epi32(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// right by the number of bits given in the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 31, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [8 x i32] containing the result. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srl_epi32(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count); +} + +/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a +/// right by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 63, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srli_epi64(__m256i __a, int __count) +{ + return __builtin_ia32_psrlqi256((__v4di)__a, __count); +} + +/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a +/// right by the number of bits given in the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 63, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srl_epi64(__m256i __a, __m128i __count) +{ + return __builtin_ia32_psrlq256((__v4di)__a, __count); +} + +/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors. Returns the lower 8 bits of each difference in the +/// corresponding byte of the 256-bit integer vector result (overflow is +/// ignored). 
+/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := __a[j+7:j] - __b[j+7:j] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing the minuends. +/// \param __b +/// A 256-bit integer vector containing the subtrahends. +/// \returns A 256-bit integer vector containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sub_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)((__v32qu)__a - (__v32qu)__b); +} + +/// Subtracts 16-bit integers from corresponding elements of two 256-bit +/// vectors of [16 x i16]. Returns the lower 16 bits of each difference in +/// the corresponding element of the [16 x i16] result (overflow is +/// ignored). +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+15:j] := __a[j+15:j] - __b[j+15:j] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing the minuends. +/// \param __b +/// A 256-bit vector of [16 x i16] containing the subtrahends. +/// \returns A 256-bit vector of [16 x i16] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sub_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)((__v16hu)__a - (__v16hu)__b); +} + +/// Subtracts 32-bit integers from corresponding elements of two 256-bit +/// vectors of [8 x i32]. Returns the lower 32 bits of each difference in +/// the corresponding element of the [8 x i32] result (overflow is ignored). +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// result[j+31:j] := __a[j+31:j] - __b[j+31:j] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing the minuends. 
+/// \param __b +/// A 256-bit vector of [8 x i32] containing the subtrahends. +/// \returns A 256-bit vector of [8 x i32] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sub_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a - (__v8su)__b); +} + +/// Subtracts 64-bit integers from corresponding elements of two 256-bit +/// vectors of [4 x i64]. Returns the lower 64 bits of each difference in +/// the corresponding element of the [4 x i64] result (overflow is ignored). +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// result[j+63:j] := __a[j+63:j] - __b[j+63:j] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] containing the minuends. +/// \param __b +/// A 256-bit vector of [4 x i64] containing the subtrahends. +/// \returns A 256-bit vector of [4 x i64] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sub_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a - (__v4du)__b); +} + +/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors using signed saturation, and returns each differences in the +/// corresponding byte of the 256-bit integer vector result. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := SATURATE8(__a[j+7:j] - __b[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBSB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing the minuends. +/// \param __b +/// A 256-bit integer vector containing the subtrahends. +/// \returns A 256-bit integer vector containing the differences. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_subs_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b); +} + +/// Subtracts 16-bit integers from corresponding elements of two 256-bit +/// vectors of [16 x i16] using signed saturation, and returns each +/// difference in the corresponding element of the [16 x i16] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+15:j] := SATURATE16(__a[j+15:j] - __b[j+15:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing the minuends. +/// \param __b +/// A 256-bit vector of [16 x i16] containing the subtrahends. +/// \returns A 256-bit vector of [16 x i16] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_subs_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b); +} + +/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors using unsigned saturation, and returns each difference in the +/// corresponding byte of the 256-bit integer vector result. For each byte, +/// computes result = __a - __b . +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := SATURATE8U(__a[j+7:j] - __b[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBUSB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing the minuends. +/// \param __b +/// A 256-bit integer vector containing the subtrahends. +/// \returns A 256-bit integer vector containing the differences. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_subs_epu8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b); +} + +/// Subtracts 16-bit integers from corresponding elements of two 256-bit +/// vectors of [16 x i16] using unsigned saturation, and returns each +/// difference in the corresponding element of the [16 x i16] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+15:j] := SATURATE16U(__a[j+15:j] - __b[j+15:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBUSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing the minuends. +/// \param __b +/// A 256-bit vector of [16 x i16] containing the subtrahends. +/// \returns A 256-bit vector of [16 x i16] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_subs_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b); +} + +/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer +/// vectors in \a __a and \a __b to form the 256-bit result. Specifically, +/// uses the upper 64 bits of each 128-bit half of \a __a and \a __b as +/// input; other bits in these parameters are ignored. +/// +/// \code{.operation} +/// result[7:0] := __a[71:64] +/// result[15:8] := __b[71:64] +/// result[23:16] := __a[79:72] +/// result[31:24] := __b[79:72] +/// . . . +/// result[127:120] := __b[127:120] +/// result[135:128] := __a[199:192] +/// . . . +/// result[255:248] := __b[255:248] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKHBW instruction. +/// +/// \param __a +/// A 256-bit integer vector used as the source for the even-numbered bytes +/// of the result. +/// \param __b +/// A 256-bit integer vector used as the source for the odd-numbered bytes +/// of the result. 
+/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpackhi_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31); +} + +/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors +/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit +/// vector of [16 x i16]. Specifically, uses the upper 64 bits of each +/// 128-bit half of \a __a and \a __b as input; other bits in these +/// parameters are ignored. +/// +/// \code{.operation} +/// result[15:0] := __a[79:64] +/// result[31:16] := __b[79:64] +/// result[47:32] := __a[95:80] +/// result[63:48] := __b[95:80] +/// . . . +/// result[127:112] := __b[127:112] +/// result[143:128] := __a[207:192] +/// . . . +/// result[255:240] := __b[255:240] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKHWD instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpackhi_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15); +} + +/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors +/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector +/// of [8 x i32]. 
Specifically, uses the upper 64 bits of each 128-bit half +/// of \a __a and \a __b as input; other bits in these parameters are +/// ignored. +/// +/// \code{.operation} +/// result[31:0] := __a[95:64] +/// result[63:32] := __b[95:64] +/// result[95:64] := __a[127:96] +/// result[127:96] := __b[127:96] +/// result[159:128] := __a[223:192] +/// result[191:160] := __b[223:192] +/// result[223:192] := __a[255:224] +/// result[255:224] := __b[255:224] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKHDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpackhi_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7); +} + +/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors +/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector +/// of [4 x i64]. Specifically, uses the upper 64 bits of each 128-bit half +/// of \a __a and \a __b as input; other bits in these parameters are +/// ignored. +/// +/// \code{.operation} +/// result[63:0] := __a[127:64] +/// result[127:64] := __b[127:64] +/// result[191:128] := __a[255:192] +/// result[255:192] := __b[255:192] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKHQDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [4 x i64] containing the result. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpackhi_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3); +} + +/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer +/// vectors in \a __a and \a __b to form the 256-bit result. Specifically, +/// uses the lower 64 bits of each 128-bit half of \a __a and \a __b as +/// input; other bits in these parameters are ignored. +/// +/// \code{.operation} +/// result[7:0] := __a[7:0] +/// result[15:8] := __b[7:0] +/// result[23:16] := __a[15:8] +/// result[31:24] := __b[15:8] +/// . . . +/// result[127:120] := __b[63:56] +/// result[135:128] := __a[135:128] +/// . . . +/// result[255:248] := __b[191:184] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKLBW instruction. +/// +/// \param __a +/// A 256-bit integer vector used as the source for the even-numbered bytes +/// of the result. +/// \param __b +/// A 256-bit integer vector used as the source for the odd-numbered bytes +/// of the result. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpacklo_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23); +} + +/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors +/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit +/// vector of [16 x i16]. Specifically, uses the lower 64 bits of each +/// 128-bit half of \a __a and \a __b as input; other bits in these +/// parameters are ignored. +/// +/// \code{.operation} +/// result[15:0] := __a[15:0] +/// result[31:16] := __b[15:0] +/// result[47:32] := __a[31:16] +/// result[63:48] := __b[31:16] +/// . . . 
+/// result[127:112] := __b[63:48] +/// result[143:128] := __a[143:128] +/// . . . +/// result[255:240] := __b[191:176] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKLWD instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpacklo_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11); +} + +/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors +/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector +/// of [8 x i32]. Specifically, uses the lower 64 bits of each 128-bit half +/// of \a __a and \a __b as input; other bits in these parameters are +/// ignored. +/// +/// \code{.operation} +/// result[31:0] := __a[31:0] +/// result[63:32] := __b[31:0] +/// result[95:64] := __a[63:32] +/// result[127:96] := __b[63:32] +/// result[159:128] := __a[159:128] +/// result[191:160] := __b[159:128] +/// result[223:192] := __a[191:160] +/// result[255:224] := __b[191:160] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKLDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [8 x i32] containing the result. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpacklo_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5); +} + +/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors +/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector +/// of [4 x i64]. Specifically, uses the lower 64 bits of each 128-bit half +/// of \a __a and \a __b as input; other bits in these parameters are +/// ignored. +/// +/// \code{.operation} +/// result[63:0] := __a[63:0] +/// result[127:64] := __b[63:0] +/// result[191:128] := __a[191:128] +/// result[255:192] := __b[191:128] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKLQDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpacklo_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2); +} + +/// Computes the bitwise XOR of the 256-bit integer vectors in \a __a and +/// \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPXOR instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_xor_si256(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a ^ (__v4du)__b); +} + +/// Loads the 256-bit integer vector from memory \a __V using a non-temporal +/// memory hint and returns the vector. \a __V must be aligned on a 32-byte +/// boundary. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VMOVNTDQA instruction. +/// +/// \param __V +/// A pointer to the 32-byte aligned memory containing the vector to load. +/// \returns A 256-bit integer vector loaded from memory. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_stream_load_si256(__m256i const *__V) +{ + typedef __v4di __v4di_aligned __attribute__((aligned(32))); + return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V); +} + +/// Broadcasts the 32-bit floating-point value from the low element of the +/// 128-bit vector of [4 x float] in \a __X to all elements of the result's +/// 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VBROADCASTSS instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x float] whose low element will be broadcast. +/// \returns A 128-bit vector of [4 x float] containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_broadcastss_ps(__m128 __X) +{ + return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0); +} + +/// Broadcasts the 64-bit floating-point value from the low element of the +/// 128-bit vector of [2 x double] in \a __a to both elements of the +/// result's 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c MOVDDUP instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] whose low element will be broadcast. +/// \returns A 128-bit vector of [2 x double] containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_broadcastsd_pd(__m128d __a) +{ + return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0); +} + +/// Broadcasts the 32-bit floating-point value from the low element of the +/// 128-bit vector of [4 x float] in \a __X to all elements of the +/// result's 256-bit vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VBROADCASTSS instruction. 
+/// +/// \param __X +/// A 128-bit vector of [4 x float] whose low element will be broadcast. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_broadcastss_ps(__m128 __X) +{ + return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0); +} + +/// Broadcasts the 64-bit floating-point value from the low element of the +/// 128-bit vector of [2 x double] in \a __X to all elements of the +/// result's 256-bit vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VBROADCASTSD instruction. +/// +/// \param __X +/// A 128-bit vector of [2 x double] whose low element will be broadcast. +/// \returns A 256-bit vector of [4 x double] containing the result. +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_broadcastsd_pd(__m128d __X) +{ + return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0); +} + +/// Broadcasts the 128-bit integer data from \a __X to both the lower and +/// upper halves of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VBROADCASTI128 instruction. +/// +/// \param __X +/// A 128-bit integer vector to be broadcast. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcastsi128_si256(__m128i __X) +{ + return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1); +} + +#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X) + +/// Merges 32-bit integer elements from either of the two 128-bit vectors of +/// [4 x i32] in \a V1 or \a V2 to the result's 128-bit vector of [4 x i32], +/// as specified by the immediate integer operand \a M. 
+/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*32 +/// IF M[i] == 0 +/// result[31+j:j] := V1[31+j:j] +/// ELSE +/// result[31+j:j] := V2[31+j:j] +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_blend_epi32(__m128i V1, __m128i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPBLENDD instruction. +/// +/// \param V1 +/// A 128-bit vector of [4 x i32] containing source values. +/// \param V2 +/// A 128-bit vector of [4 x i32] containing source values. +/// \param M +/// An immediate 8-bit integer operand, with bits [3:0] specifying the +/// source for each element of the result. The position of the mask bit +/// corresponds to the index of a copied value. When a mask bit is 0, the +/// element is copied from \a V1; otherwise, it is copied from \a V2. +/// \returns A 128-bit vector of [4 x i32] containing the result. +#define _mm_blend_epi32(V1, V2, M) \ + ((__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \ + (__v4si)(__m128i)(V2), (int)(M))) + +/// Merges 32-bit integer elements from either of the two 256-bit vectors of +/// [8 x i32] in \a V1 or \a V2 to return a 256-bit vector of [8 x i32], +/// as specified by the immediate integer operand \a M. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// IF M[i] == 0 +/// result[31+j:j] := V1[31+j:j] +/// ELSE +/// result[31+j:j] := V2[31+j:j] +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_blend_epi32(__m256i V1, __m256i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPBLENDD instruction. +/// +/// \param V1 +/// A 256-bit vector of [8 x i32] containing source values. +/// \param V2 +/// A 256-bit vector of [8 x i32] containing source values. +/// \param M +/// An immediate 8-bit integer operand, with bits [7:0] specifying the +/// source for each element of the result. 
The position of the mask bit
+///    corresponds to the index of a copied value. When a mask bit is 0, the
+///    element is copied from \a V1; otherwise, it is copied from \a V2.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+#define _mm256_blend_epi32(V1, V2, M) \
+  ((__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
+                                      (__v8si)(__m256i)(V2), (int)(M)))
+
+/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all
+///    bytes of the 256-bit result.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPBROADCASTB instruction.
+///
+/// \param __X
+///    A 128-bit integer vector whose low byte will be broadcast.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastb_epi8(__m128i __X)
+{
+  return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/// Broadcasts the low element from the 128-bit vector of [8 x i16] in \a __X
+///    to all elements of the result's 256-bit vector of [16 x i16].
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPBROADCASTW instruction.
+///
+/// \param __X
+///    A 128-bit vector of [8 x i16] whose low element will be broadcast.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastw_epi16(__m128i __X)
+{
+  return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X
+///    to all elements of the result's 256-bit vector of [8 x i32].
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPBROADCASTD instruction.
+///
+/// \param __X
+///    A 128-bit vector of [4 x i32] whose low element will be broadcast.
+/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcastd_epi32(__m128i __X) +{ + return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0); +} + +/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X +/// to all elements of the result's 256-bit vector of [4 x i64]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBROADCASTQ instruction. +/// +/// \param __X +/// A 128-bit vector of [2 x i64] whose low element will be broadcast. +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcastq_epi64(__m128i __X) +{ + return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0); +} + +/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all +/// bytes of the 128-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBROADCASTB instruction. +/// +/// \param __X +/// A 128-bit integer vector whose low byte will be broadcast. +/// \returns A 128-bit integer vector containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcastb_epi8(__m128i __X) +{ + return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +/// Broadcasts the low element from the 128-bit vector of [8 x i16] in +/// \a __X to all elements of the result's 128-bit vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBROADCASTW instruction. +/// +/// \param __X +/// A 128-bit vector of [8 x i16] whose low element will be broadcast. +/// \returns A 128-bit vector of [8 x i16] containing the result. 
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcastw_epi16(__m128i __X) +{ + return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0); +} + +/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X +/// to all elements of the result's vector of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBROADCASTD instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] whose low element will be broadcast. +/// \returns A 128-bit vector of [4 x i32] containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcastd_epi32(__m128i __X) +{ + return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0); +} + +/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X +/// to both elements of the result's 128-bit vector of [2 x i64]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBROADCASTQ instruction. +/// +/// \param __X +/// A 128-bit vector of [2 x i64] whose low element will be broadcast. +/// \returns A 128-bit vector of [2 x i64] containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcastq_epi64(__m128i __X) +{ + return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0); +} + +/// Sets the result's 256-bit vector of [8 x i32] to copies of elements of the +/// 256-bit vector of [8 x i32] in \a __a as specified by indexes in the +/// elements of the 256-bit vector of [8 x i32] in \a __b. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// k := __b[j+2:j] * 32 +/// result[j+31:j] := __a[k+31:k] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPERMD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing the source values. +/// \param __b +/// A 256-bit vector of [8 x i32] containing indexes of values to use from +/// \a __a. 
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
+{
+  return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
+}
+
+/// Sets the result's 256-bit vector of [4 x double] to copies of elements of
+///    the 256-bit vector of [4 x double] in \a V as specified by the
+///    immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+///   j := i*64
+///   k := (M >> i*2)[1:0] * 64
+///   result[j+63:j] := V[k+63:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile
+///
+/// \code
+/// __m256d _mm256_permute4x64_pd(__m256d V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMPD instruction.
+///
+/// \param V
+///    A 256-bit vector of [4 x double] containing the source values.
+/// \param M
+///    An immediate 8-bit value specifying which elements to copy from \a V.
+///    \a M[1:0] specifies the index in \a V for element 0 of the result,
+///    \a M[3:2] specifies the index for element 1, and so forth.
+/// \returns A 256-bit vector of [4 x double] containing the result.
+#define _mm256_permute4x64_pd(V, M) \
+  ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M)))
+
+/// Sets the result's 256-bit vector of [8 x float] to copies of elements of
+///    the 256-bit vector of [8 x float] in \a __a as specified by indexes in
+///    the elements of the 256-bit vector of [8 x i32] in \a __b.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+///   j := i*32
+///   k := __b[j+2:j] * 32
+///   result[j+31:j] := __a[k+31:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPERMPS instruction.
+///
+/// \param __a
+///    A 256-bit vector of [8 x float] containing the source values.
+/// \param __b
+///    A 256-bit vector of [8 x i32] containing indexes of values to use from
+///    \a __a.
+/// \returns A 256-bit vector of [8 x float] containing the result.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
+{
+  return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
+}
+
+/// Sets the result's 256-bit vector of [4 x i64] to copies of elements
+///    of the 256-bit vector of [4 x i64] in \a V as specified by the
+///    immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+///   j := i*64
+///   k := (M >> i*2)[1:0] * 64
+///   result[j+63:j] := V[k+63:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile
+///
+/// \code
+/// __m256i _mm256_permute4x64_epi64(__m256i V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMQ instruction.
+///
+/// \param V
+///    A 256-bit vector of [4 x i64] containing the source values.
+/// \param M
+///    An immediate 8-bit value specifying which elements to copy from \a V.
+///    \a M[1:0] specifies the index in \a V for element 0 of the result,
+///    \a M[3:2] specifies the index for element 1, and so forth.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+#define _mm256_permute4x64_epi64(V, M) \
+  ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M)))
+
+/// Sets each half of the 256-bit result either to zero or to one of the
+///    four possible 128-bit halves of the 256-bit vectors \a V1 and \a V2,
+///    as specified by the immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+///   j := i*128
+///   k := M >> (i*4)
+///   IF k[3] == 0
+///     CASE (k[1:0]) OF
+///       0: result[127+j:j] := V1[127:0]
+///       1: result[127+j:j] := V1[255:128]
+///       2: result[127+j:j] := V2[127:0]
+///       3: result[127+j:j] := V2[255:128]
+///     ESAC
+///   ELSE
+///     result[127+j:j] := 0
+///   FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile
+///
+/// \code
+/// __m256i _mm256_permute2x128_si256(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERM2I128 instruction.
+/// +/// \param V1 +/// A 256-bit integer vector containing source values. +/// \param V2 +/// A 256-bit integer vector containing source values. +/// \param M +/// An immediate value specifying how to form the result. Bits [3:0] +/// control the lower half of the result, bits [7:4] control the upper half. +/// Within each 4-bit control value, if bit 3 is 1, the result is zero, +/// otherwise bits [1:0] determine the source as follows. \n +/// 0: the lower half of \a V1 \n +/// 1: the upper half of \a V1 \n +/// 2: the lower half of \a V2 \n +/// 3: the upper half of \a V2 +/// \returns A 256-bit integer vector containing the result. +#define _mm256_permute2x128_si256(V1, V2, M) \ + ((__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M))) + +/// Extracts half of the 256-bit vector \a V to the 128-bit result. If bit 0 +/// of the immediate \a M is zero, extracts the lower half of the result; +/// otherwise, extracts the upper half. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm256_extracti128_si256(__m256i V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VEXTRACTI128 instruction. +/// +/// \param V +/// A 256-bit integer vector containing the source values. +/// \param M +/// An immediate value specifying which half of \a V to extract. +/// \returns A 128-bit integer vector containing the result. +#define _mm256_extracti128_si256(V, M) \ + ((__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M))) + +/// Copies the 256-bit vector \a V1 to the result, then overwrites half of the +/// result with the 128-bit vector \a V2. If bit 0 of the immediate \a M +/// is zero, overwrites the lower half of the result; otherwise, +/// overwrites the upper half. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_inserti128_si256(__m256i V1, __m128i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VINSERTI128 instruction. 
+/// +/// \param V1 +/// A 256-bit integer vector containing a source value. +/// \param V2 +/// A 128-bit integer vector containing a source value. +/// \param M +/// An immediate value specifying where to put \a V2 in the result. +/// \returns A 256-bit integer vector containing the result. +#define _mm256_inserti128_si256(V1, V2, M) \ + ((__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \ + (__v2di)(__m128i)(V2), (int)(M))) + +/// Conditionally loads eight 32-bit integer elements from memory \a __X, if +/// the most significant bit of the corresponding element in the mask +/// \a __M is set; otherwise, sets that element of the result to zero. +/// Returns the 256-bit [8 x i32] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// IF __M[j+31] == 1 +/// result[j+31:j] := Load32(__X+(i*4)) +/// ELSE +/// result[j+31:j] := 0 +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVD instruction. +/// +/// \param __X +/// A pointer to the memory used for loading values. +/// \param __M +/// A 256-bit vector of [8 x i32] containing the mask bits. +/// \returns A 256-bit vector of [8 x i32] containing the loaded or zeroed +/// elements. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskload_epi32(int const *__X, __m256i __M) +{ + return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M); +} + +/// Conditionally loads four 64-bit integer elements from memory \a __X, if +/// the most significant bit of the corresponding element in the mask +/// \a __M is set; otherwise, sets that element of the result to zero. +/// Returns the 256-bit [4 x i64] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// IF __M[j+63] == 1 +/// result[j+63:j] := Load64(__X+(i*8)) +/// ELSE +/// result[j+63:j] := 0 +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVQ instruction. 
+/// +/// \param __X +/// A pointer to the memory used for loading values. +/// \param __M +/// A 256-bit vector of [4 x i64] containing the mask bits. +/// \returns A 256-bit vector of [4 x i64] containing the loaded or zeroed +/// elements. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskload_epi64(long long const *__X, __m256i __M) +{ + return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M); +} + +/// Conditionally loads four 32-bit integer elements from memory \a __X, if +/// the most significant bit of the corresponding element in the mask +/// \a __M is set; otherwise, sets that element of the result to zero. +/// Returns the 128-bit [4 x i32] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*32 +/// IF __M[j+31] == 1 +/// result[j+31:j] := Load32(__X+(i*4)) +/// ELSE +/// result[j+31:j] := 0 +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVD instruction. +/// +/// \param __X +/// A pointer to the memory used for loading values. +/// \param __M +/// A 128-bit vector of [4 x i32] containing the mask bits. +/// \returns A 128-bit vector of [4 x i32] containing the loaded or zeroed +/// elements. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskload_epi32(int const *__X, __m128i __M) +{ + return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M); +} + +/// Conditionally loads two 64-bit integer elements from memory \a __X, if +/// the most significant bit of the corresponding element in the mask +/// \a __M is set; otherwise, sets that element of the result to zero. +/// Returns the 128-bit [2 x i64] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*64 +/// IF __M[j+63] == 1 +/// result[j+63:j] := Load64(__X+(i*8)) +/// ELSE +/// result[j+63:j] := 0 +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVQ instruction. 
+/// +/// \param __X +/// A pointer to the memory used for loading values. +/// \param __M +/// A 128-bit vector of [2 x i64] containing the mask bits. +/// \returns A 128-bit vector of [2 x i64] containing the loaded or zeroed +/// elements. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskload_epi64(long long const *__X, __m128i __M) +{ + return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M); +} + +/// Conditionally stores eight 32-bit integer elements from the 256-bit vector +/// of [8 x i32] in \a __Y to memory \a __X, if the most significant bit of +/// the corresponding element in the mask \a __M is set; otherwise, the +/// memory element is unchanged. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// IF __M[j+31] == 1 +/// Store32(__X+(i*4), __Y[j+31:j]) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVD instruction. +/// +/// \param __X +/// A pointer to the memory used for storing values. +/// \param __M +/// A 256-bit vector of [8 x i32] containing the mask bits. +/// \param __Y +/// A 256-bit vector of [8 x i32] containing the values to store. +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y) +{ + __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y); +} + +/// Conditionally stores four 64-bit integer elements from the 256-bit vector +/// of [4 x i64] in \a __Y to memory \a __X, if the most significant bit of +/// the corresponding element in the mask \a __M is set; otherwise, the +/// memory element is unchanged. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// IF __M[j+63] == 1 +/// Store64(__X+(i*8), __Y[j+63:j]) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVQ instruction. +/// +/// \param __X +/// A pointer to the memory used for storing values. 
+/// \param __M +/// A 256-bit vector of [4 x i64] containing the mask bits. +/// \param __Y +/// A 256-bit vector of [4 x i64] containing the values to store. +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y) +{ + __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y); +} + +/// Conditionally stores four 32-bit integer elements from the 128-bit vector +/// of [4 x i32] in \a __Y to memory \a __X, if the most significant bit of +/// the corresponding element in the mask \a __M is set; otherwise, the +/// memory element is unchanged. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*32 +/// IF __M[j+31] == 1 +/// Store32(__X+(i*4), __Y[j+31:j]) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVD instruction. +/// +/// \param __X +/// A pointer to the memory used for storing values. +/// \param __M +/// A 128-bit vector of [4 x i32] containing the mask bits. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing the values to store. +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y) +{ + __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y); +} + +/// Conditionally stores two 64-bit integer elements from the 128-bit vector +/// of [2 x i64] in \a __Y to memory \a __X, if the most significant bit of +/// the corresponding element in the mask \a __M is set; otherwise, the +/// memory element is unchanged. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*64 +/// IF __M[j+63] == 1 +/// Store64(__X+(i*8), __Y[j+63:j]) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVQ instruction. +/// +/// \param __X +/// A pointer to the memory used for storing values. +/// \param __M +/// A 128-bit vector of [2 x i64] containing the mask bits. 
+/// \param __Y +/// A 128-bit vector of [2 x i64] containing the values to store. +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y) +{ + __builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X +/// left by the number of bits given in the corresponding element of the +/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLVD instruction. +/// +/// \param __X +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __Y +/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sllv_epi32(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y); +} + +/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X +/// left by the number of bits given in the corresponding element of the +/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLVD instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] to be shifted. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 128-bit vector of [4 x i32] containing the result. 
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_sllv_epi32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
+}
+
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X
+///    left by the number of bits given in the corresponding element of the
+///    256-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and
+///    returns the result. If the shift count for any element is greater than
+///    63, the result for that element is zero.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPSLLVQ instruction.
+///
+/// \param __X
+///    A 256-bit vector of [4 x i64] to be shifted.
+/// \param __Y
+///    A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
+///    bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sllv_epi64(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
+}
+
+/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X
+///    left by the number of bits given in the corresponding element of the
+///    128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and
+///    returns the result. If the shift count for any element is greater than
+///    63, the result for that element is zero.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPSLLVQ instruction.
+///
+/// \param __X
+///    A 128-bit vector of [2 x i64] to be shifted.
+/// \param __Y
+///    A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
+///    bits).
+/// \returns A 128-bit vector of [2 x i64] containing the result.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_sllv_epi64(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X +/// right by the number of bits given in the corresponding element of the +/// 256-bit vector of [8 x i32] in \a __Y, shifting in sign bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is 0 or -1 according to the sign bit +/// for that element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAVD instruction. +/// +/// \param __X +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __Y +/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srav_epi32(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y); +} + +/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X +/// right by the number of bits given in the corresponding element of the +/// 128-bit vector of [4 x i32] in \a __Y, shifting in sign bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is 0 or -1 according to the sign bit +/// for that element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAVD instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] to be shifted. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 128-bit vector of [4 x i32] containing the result. 
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_srav_epi32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X +/// right by the number of bits given in the corresponding element of the +/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLVD instruction. +/// +/// \param __X +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __Y +/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srlv_epi32(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y); +} + +/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X +/// right by the number of bits given in the corresponding element of the +/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLVD instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] to be shifted. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 128-bit vector of [4 x i32] containing the result. 
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_srlv_epi32(__m128i __X, __m128i __Y)
+{
+  return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
+}
+
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X
+///    right by the number of bits given in the corresponding element of the
+///    256-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and
+///    returns the result. If the shift count for any element is greater than
+///    63, the result for that element is zero.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPSRLVQ instruction.
+///
+/// \param __X
+///    A 256-bit vector of [4 x i64] to be shifted.
+/// \param __Y
+///    A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
+///    bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srlv_epi64(__m256i __X, __m256i __Y)
+{
+  return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
+}
+
+/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X
+///    right by the number of bits given in the corresponding element of the
+///    128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and
+///    returns the result. If the shift count for any element is greater than
+///    63, the result for that element is zero.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPSRLVQ instruction.
+///
+/// \param __X
+///    A 128-bit vector of [2 x i64] to be shifted.
+/// \param __Y
+///    A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
+///    bits).
+/// \returns A 128-bit vector of [2 x i64] containing the result.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_srlv_epi64(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y); +} + +/// Conditionally gathers two 64-bit floating-point values, either from the +/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector +/// of [2 x double] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*32 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_mask_i32gather_pd(__m128d a, const double *m, __m128i i, +/// __m128d mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only +/// the first two elements are used. +/// \param mask +/// A 128-bit vector of [2 x double] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x double] containing the gathered values. 
+#define _mm_mask_i32gather_pd(a, m, i, mask, s) \
+  ((__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128d)(a), \
+                                      (double const *)(m), \
+                                      (__v4si)(__m128i)(i), \
+                                      (__v2df)(__m128d)(mask), (s)))
+
+/// Conditionally gathers four 64-bit floating-point values, either from the
+///    256-bit vector of [4 x double] in \a a, or from memory \a m using scaled
+///    indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector
+///    of [4 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+///   j := element*64
+///   k := element*32
+///   IF mask[j+63] == 0
+///     result[j+63:j] := a[j+63:j]
+///   ELSE
+///     result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+///   FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile
+///
+/// \code
+/// __m256d _mm256_mask_i32gather_pd(__m256d a, const double *m, __m128i i,
+///                                  __m256d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param a
+///    A 256-bit vector of [4 x double] used as the source when a mask bit is
+///    zero.
+/// \param m
+///    A pointer to the memory used for loading values.
+/// \param i
+///    A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+///    A 256-bit vector of [4 x double] containing the mask. The most
+///    significant bit of each element in the mask vector represents the mask
+///    bits. If a mask bit is zero, the corresponding value from vector \a a
+///    is gathered; otherwise the value is loaded from memory.
+/// \param s
+///    A literal constant scale factor for the indexes in \a i. Must be
+///    1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
+#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \ + ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \ + (double const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4df)(__m256d)(mask), (s))) + +/// Conditionally gathers two 64-bit floating-point values, either from the +/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector +/// of [2 x double] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*64 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_mask_i64gather_pd(__m128d a, const double *m, __m128i i, +/// __m128d mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [2 x double] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x double] containing the gathered values. 
+#define _mm_mask_i64gather_pd(a, m, i, mask, s) \ + ((__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \ + (double const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2df)(__m128d)(mask), (s))) + +/// Conditionally gathers four 64-bit floating-point values, either from the +/// 256-bit vector of [4 x double] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector +/// of [4 x double] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*64 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_mask_i64gather_pd(__m256d a, const double *m, __m256i i, +/// __m256d mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPD instruction. +/// +/// \param a +/// A 256-bit vector of [4 x double] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param mask +/// A 256-bit vector of [4 x double] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x double] containing the gathered values. 
+#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \ + ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \ + (double const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4df)(__m256d)(mask), (s))) + +/// Conditionally gathers four 32-bit floating-point values, either from the +/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector +/// of [4 x float] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*32 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_mask_i32gather_ps(__m128 a, const float *m, __m128i i, +/// __m128 mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x float] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x float] containing the gathered values. 
+#define _mm_mask_i32gather_ps(a, m, i, mask, s) \ + ((__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \ + (float const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4sf)(__m128)(mask), (s))) + +/// Conditionally gathers eight 32-bit floating-point values, either from the +/// 256-bit vector of [8 x float] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector +/// of [8 x float] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 7 +/// j := element*32 +/// k := element*32 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_mask_i32gather_ps(__m256 a, const float *m, __m256i i, +/// __m256 mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPS instruction. +/// +/// \param a +/// A 256-bit vector of [8 x float] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [8 x i32] containing signed indexes into \a m. +/// \param mask +/// A 256-bit vector of [8 x float] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [8 x float] containing the gathered values. 
+#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \ + ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \ + (float const *)(m), \ + (__v8si)(__m256i)(i), \ + (__v8sf)(__m256)(mask), (s))) + +/// Conditionally gathers two 32-bit floating-point values, either from the +/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector +/// of [4 x float] in \a mask determines the source for the lower two +/// elements. The upper two elements of the result are zeroed. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*32 +/// k := element*64 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// result[127:64] := 0 +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_mask_i64gather_ps(__m128 a, const float *m, __m128i i, +/// __m128 mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float] used as the source when a mask bit is +/// zero. Only the first two elements are used. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x float] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. Only the first +/// two elements are used. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x float] containing the gathered values. 
+#define _mm_mask_i64gather_ps(a, m, i, mask, s) \ + ((__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \ + (float const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v4sf)(__m128)(mask), (s))) + +/// Conditionally gathers four 32-bit floating-point values, either from the +/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector +/// of [4 x float] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*64 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128 _mm256_mask_i64gather_ps(__m128 a, const float *m, __m256i i, +/// __m128 mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x float] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x float] containing the gathered values. 
+#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \ + ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \ + (float const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4sf)(__m128)(mask), (s))) + +/// Conditionally gathers four 32-bit integer values, either from the +/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector +/// of [4 x i32] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*32 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_mask_i32gather_epi32(__m128i a, const int *m, __m128i i, +/// __m128i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDD instruction. +/// +/// \param a +/// A 128-bit vector of [4 x i32] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x i32] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x i32] containing the gathered values. 
+#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \ + ((__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \ + (int const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4si)(__m128i)(mask), (s))) + +/// Conditionally gathers eight 32-bit integer values, either from the +/// 256-bit vector of [8 x i32] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector +/// of [8 x i32] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 7 +/// j := element*32 +/// k := element*32 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_mask_i32gather_epi32(__m256i a, const int *m, __m256i i, +/// __m256i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDD instruction. +/// +/// \param a +/// A 256-bit vector of [8 x i32] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [8 x i32] containing signed indexes into \a m. +/// \param mask +/// A 256-bit vector of [8 x i32] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [8 x i32] containing the gathered values. 
+#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \ + ((__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \ + (int const *)(m), \ + (__v8si)(__m256i)(i), \ + (__v8si)(__m256i)(mask), (s))) + +/// Conditionally gathers two 32-bit integer values, either from the +/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector +/// of [4 x i32] in \a mask determines the source for the lower two +/// elements. The upper two elements of the result are zeroed. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*32 +/// k := element*64 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// result[127:64] := 0 +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_mask_i64gather_epi32(__m128i a, const int *m, __m128i i, +/// __m128i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQD instruction. +/// +/// \param a +/// A 128-bit vector of [4 x i32] used as the source when a mask bit is +/// zero. Only the first two elements are used. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x i32] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. Only the first two elements +/// are used. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x i32] containing the gathered values. 
+#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \ + ((__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \ + (int const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v4si)(__m128i)(mask), (s))) + +/// Conditionally gathers four 32-bit integer values, either from the +/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector +/// of [4 x i32] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*64 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm256_mask_i64gather_epi32(__m128i a, const int *m, __m256i i, +/// __m128i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQD instruction. +/// +/// \param a +/// A 128-bit vector of [4 x i32] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x i32] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x i32] containing the gathered values. 
+#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \ + ((__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \ + (int const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4si)(__m128i)(mask), (s))) + +/// Conditionally gathers two 64-bit integer values, either from the +/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector +/// of [2 x i64] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*32 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_mask_i32gather_epi64(__m128i a, const long long *m, __m128i i, +/// __m128i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDQ instruction. +/// +/// \param a +/// A 128-bit vector of [2 x i64] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only +/// the first two elements are used. +/// \param mask +/// A 128-bit vector of [2 x i64] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x i64] containing the gathered values. 
+#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \ + ((__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v2di)(__m128i)(mask), (s))) + +/// Conditionally gathers four 64-bit integer values, either from the +/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector +/// of [4 x i64] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*32 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_mask_i32gather_epi64(__m256i a, const long long *m, +/// __m128i i, __m256i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDQ instruction. +/// +/// \param a +/// A 256-bit vector of [4 x i64] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param mask +/// A 256-bit vector of [4 x i64] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x i64] containing the gathered values. 
+#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \ + ((__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4di)(__m256i)(mask), (s))) + +/// Conditionally gathers two 64-bit integer values, either from the +/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector +/// of [2 x i64] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*64 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_mask_i64gather_epi64(__m128i a, const long long *m, __m128i i, +/// __m128i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQQ instruction. +/// +/// \param a +/// A 128-bit vector of [2 x i64] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [2 x i64] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x i64] containing the gathered values. 
+#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \ + ((__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \ + (long long const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2di)(__m128i)(mask), (s))) + +/// Conditionally gathers four 64-bit integer values, either from the +/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector +/// of [4 x i64] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*64 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_mask_i64gather_epi64(__m256i a, const long long *m, +/// __m256i i, __m256i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQQ instruction. +/// +/// \param a +/// A 256-bit vector of [4 x i64] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param mask +/// A 256-bit vector of [4 x i64] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x i64] containing the gathered values. 
+#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \ + ((__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \ + (long long const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4di)(__m256i)(mask), (s))) + +/// Gathers two 64-bit floating-point values from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*32 +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_i32gather_pd(const double *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only +/// the first two elements are used. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x double] containing the gathered values. +#define _mm_i32gather_pd(m, i, s) \ + ((__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \ + (double const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \ + _mm_setzero_pd()), \ + (s))) + +/// Gathers four 64-bit floating-point values from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*32 +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_i32gather_pd(const double *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. 
+/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x double] containing the gathered values. +#define _mm256_i32gather_pd(m, i, s) \ + ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \ + (double const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \ + _mm256_setzero_pd(), \ + _CMP_EQ_OQ), \ + (s))) + +/// Gathers two 64-bit floating-point values from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*64 +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_i64gather_pd(const double *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x double] containing the gathered values. +#define _mm_i64gather_pd(m, i, s) \ + ((__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \ + (double const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \ + _mm_setzero_pd()), \ + (s))) + +/// Gathers four 64-bit floating-point values from memory \a m using scaled +/// indexes from the 256-bit vector of [4 x i64] in \a i. 
+/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*64 +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_i64gather_pd(const double *m, __m256i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x double] containing the gathered values. +#define _mm256_i64gather_pd(m, i, s) \ + ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \ + (double const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \ + _mm256_setzero_pd(), \ + _CMP_EQ_OQ), \ + (s))) + +/// Gathers four 32-bit floating-point values from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*32 +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_i32gather_ps(const float *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPS instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x float] containing the gathered values. 
+#define _mm_i32gather_ps(m, i, s) \ + ((__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \ + (float const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \ + _mm_setzero_ps()), \ + (s))) + +/// Gathers eight 32-bit floating-point values from memory \a m using scaled +/// indexes from the 256-bit vector of [8 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 7 +/// j := element*32 +/// k := element*32 +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_i32gather_ps(const float *m, __m256i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPS instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [8 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [8 x float] containing the gathered values. +#define _mm256_i32gather_ps(m, i, s) \ + ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \ + (float const *)(m), \ + (__v8si)(__m256i)(i), \ + (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \ + _mm256_setzero_ps(), \ + _CMP_EQ_OQ), \ + (s))) + +/// Gathers two 32-bit floating-point values from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. The upper two +/// elements of the result are zeroed. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*32 +/// k := element*64 +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// result[127:64] := 0 +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_i64gather_ps(const float *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPS instruction. 
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
+#define _mm_i64gather_ps(m, i, s) \
+ ((__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)))
+
+/// Gathers four 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile
+///
+/// \code
+/// __m128 _mm256_i64gather_ps(const float *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
+#define _mm256_i64gather_ps(m, i, s) \
+ ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)))
+
+/// Gathers four 32-bit integer values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*32 +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_i32gather_epi32(const int *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x i32] containing the gathered values. +#define _mm_i32gather_epi32(m, i, s) \ + ((__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \ + (int const *)(m), (__v4si)(__m128i)(i), \ + (__v4si)_mm_set1_epi32(-1), (s))) + +/// Gathers eight 32-bit floating-point values from memory \a m using scaled +/// indexes from the 256-bit vector of [8 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 7 +/// j := element*32 +/// k := element*32 +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_i32gather_epi32(const int *m, __m256i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [8 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [8 x i32] containing the gathered values. 
+#define _mm256_i32gather_epi32(m, i, s) \ + ((__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \ + (int const *)(m), (__v8si)(__m256i)(i), \ + (__v8si)_mm256_set1_epi32(-1), (s))) + +/// Gathers two 32-bit integer values from memory \a m using scaled indexes +/// from the 128-bit vector of [2 x i64] in \a i. The upper two elements +/// of the result are zeroed. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*32 +/// k := element*64 +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// result[127:64] := 0 +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_i64gather_epi32(const int *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x i32] containing the gathered values. +#define _mm_i64gather_epi32(m, i, s) \ + ((__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \ + (int const *)(m), (__v2di)(__m128i)(i), \ + (__v4si)_mm_set1_epi32(-1), (s))) + +/// Gathers four 32-bit integer values from memory \a m using scaled indexes +/// from the 256-bit vector of [4 x i64] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*64 +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm256_i64gather_epi32(const int *m, __m256i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. 
+/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x i32] containing the gathered values. +#define _mm256_i64gather_epi32(m, i, s) \ + ((__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \ + (int const *)(m), (__v4di)(__m256i)(i), \ + (__v4si)_mm_set1_epi32(-1), (s))) + +/// Gathers two 64-bit integer values from memory \a m using scaled indexes +/// from the 128-bit vector of [4 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*32 +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_i32gather_epi64(const long long *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDQ instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only +/// the first two elements are used. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x i64] containing the gathered values. +#define _mm_i32gather_epi64(m, i, s) \ + ((__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v2di)_mm_set1_epi64x(-1), (s))) + +/// Gathers four 64-bit integer values from memory \a m using scaled indexes +/// from the 128-bit vector of [4 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*32 +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_i32gather_epi64(const long long *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDQ instruction. 
+/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x i64] containing the gathered values. +#define _mm256_i32gather_epi64(m, i, s) \ + ((__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4di)_mm256_set1_epi64x(-1), (s))) + +/// Gathers two 64-bit integer values from memory \a m using scaled indexes +/// from the 128-bit vector of [2 x i64] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*64 +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_i64gather_epi64(const long long *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQQ instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x i64] containing the gathered values. +#define _mm_i64gather_epi64(m, i, s) \ + ((__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \ + (long long const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2di)_mm_set1_epi64x(-1), (s))) + +/// Gathers four 64-bit integer values from memory \a m using scaled indexes +/// from the 256-bit vector of [4 x i64] in \a i. 
+/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*64 +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_i64gather_epi64(const long long *m, __m256i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQQ instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x i64] containing the gathered values. +#define _mm256_i64gather_epi64(m, i, s) \ + ((__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \ + (long long const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4di)_mm256_set1_epi64x(-1), (s))) + +#undef __DEFAULT_FN_ATTRS256 +#undef __DEFAULT_FN_ATTRS128 + +#endif /* __AVX2INTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512bf16intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512bf16intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512bf16intrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512bf16intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512bitalgintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512bitalgintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512bitalgintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512bitalgintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512bwintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512bwintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512bwintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512bwintrin.h diff --git 
a/third_party/clang/lib/clang/16.0.0/include/avx512cdintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512cdintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512cdintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512cdintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512dqintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512dqintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512dqintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512dqintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512erintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512erintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512erintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512erintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512fintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512fintrin.h similarity index 99% rename from third_party/clang/lib/clang/16.0.0/include/avx512fintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512fintrin.h index b19d2fb90f..88a8cebbee 100644 --- a/third_party/clang/lib/clang/16.0.0/include/avx512fintrin.h +++ b/third_party/clang/lib/clang/17.0.1/include/avx512fintrin.h @@ -397,14 +397,15 @@ _mm512_broadcastsd_pd(__m128d __A) static __inline __m512d __DEFAULT_FN_ATTRS512 _mm512_castpd256_pd512(__m256d __a) { - return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1); + return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0, + 1, 2, 3, 4, 5, 6, 7); } static __inline __m512 __DEFAULT_FN_ATTRS512 _mm512_castps256_ps512(__m256 __a) { - return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, - -1, -1, -1, -1, -1, -1, -1, -1); + return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); } static 
__inline __m128d __DEFAULT_FN_ATTRS512 @@ -446,7 +447,10 @@ _mm512_castpd_si512 (__m512d __A) static __inline__ __m512d __DEFAULT_FN_ATTRS512 _mm512_castpd128_pd512 (__m128d __A) { - return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1); + __m256d __B = __builtin_nondeterministic_value(__B); + return __builtin_shufflevector( + __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3), + __B, 0, 1, 2, 3, 4, 5, 6, 7); } static __inline __m512d __DEFAULT_FN_ATTRS512 @@ -464,19 +468,25 @@ _mm512_castps_si512 (__m512 __A) static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_castps128_ps512 (__m128 __A) { - return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1); + __m256 __B = __builtin_nondeterministic_value(__B); + return __builtin_shufflevector( + __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7), + __B, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); } static __inline__ __m512i __DEFAULT_FN_ATTRS512 _mm512_castsi128_si512 (__m128i __A) { - return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1); + __m256i __B = __builtin_nondeterministic_value(__B); + return __builtin_shufflevector( + __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3), + __B, 0, 1, 2, 3, 4, 5, 6, 7); } static __inline__ __m512i __DEFAULT_FN_ATTRS512 _mm512_castsi256_si512 (__m256i __A) { - return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1); + return __builtin_shufflevector( __A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7); } static __inline __m512 __DEFAULT_FN_ATTRS512 diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512fp16intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512fp16intrin.h similarity index 99% rename from third_party/clang/lib/clang/16.0.0/include/avx512fp16intrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512fp16intrin.h index 
5cdc37fde6..d326586578 100644 --- a/third_party/clang/lib/clang/16.0.0/include/avx512fp16intrin.h +++ b/third_party/clang/lib/clang/17.0.1/include/avx512fp16intrin.h @@ -192,22 +192,26 @@ _mm512_castph512_ph256(__m512h __a) { static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_castph128_ph256(__m128h __a) { - return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1, - -1, -1, -1, -1, -1); + return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); } static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_castph128_ph512(__m128h __a) { - return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1); + __m256h __b = __builtin_nondeterministic_value(__b); + return __builtin_shufflevector( + __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), + __b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31); } static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_castph256_ph512(__m256h __a) { - return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1); + return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31); } /// Constructs a 256-bit floating-point vector of [16 x half] from a diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512ifmaintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512ifmaintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512ifmaintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512ifmaintrin.h diff --git 
a/third_party/clang/lib/clang/16.0.0/include/avx512ifmavlintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512ifmavlintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512ifmavlintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512ifmavlintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512pfintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512pfintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512pfintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512pfintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vbmi2intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vbmi2intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vbmi2intrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vbmi2intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vbmiintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vbmiintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vbmiintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vbmiintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vbmivlintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vbmivlintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vbmivlintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vbmivlintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlbf16intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlbf16intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vlbf16intrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vlbf16intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlbitalgintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlbitalgintrin.h 
similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vlbitalgintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vlbitalgintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlbwintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlbwintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vlbwintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vlbwintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlcdintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlcdintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vlcdintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vlcdintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vldqintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vldqintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vldqintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vldqintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlfp16intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlfp16intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vlfp16intrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vlfp16intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vlintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vlintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlvbmi2intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlvbmi2intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vlvbmi2intrin.h rename to 
third_party/clang/lib/clang/17.0.1/include/avx512vlvbmi2intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlvnniintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlvnniintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vlvnniintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vlvnniintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlvp2intersectintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlvp2intersectintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vlvp2intersectintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vlvp2intersectintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vnniintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vnniintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vnniintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vnniintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vp2intersectintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vp2intersectintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vp2intersectintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vp2intersectintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vpopcntdqintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vpopcntdqintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vpopcntdqintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avx512vpopcntdqintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vpopcntdqvlintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vpopcntdqvlintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avx512vpopcntdqvlintrin.h rename to 
third_party/clang/lib/clang/17.0.1/include/avx512vpopcntdqvlintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avxifmaintrin.h b/third_party/clang/lib/clang/17.0.1/include/avxifmaintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avxifmaintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avxifmaintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avxintrin.h b/third_party/clang/lib/clang/17.0.1/include/avxintrin.h similarity index 99% rename from third_party/clang/lib/clang/16.0.0/include/avxintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avxintrin.h index ee31569c16..94fac5e6c9 100644 --- a/third_party/clang/lib/clang/16.0.0/include/avxintrin.h +++ b/third_party/clang/lib/clang/17.0.1/include/avxintrin.h @@ -3017,8 +3017,11 @@ _mm256_zeroupper(void) static __inline __m128 __DEFAULT_FN_ATTRS128 _mm_broadcast_ss(float const *__a) { - float __f = *__a; - return __extension__ (__m128)(__v4sf){ __f, __f, __f, __f }; + struct __mm_broadcast_ss_struct { + float __f; + } __attribute__((__packed__, __may_alias__)); + float __f = ((const struct __mm_broadcast_ss_struct*)__a)->__f; + return __extension__ (__m128){ __f, __f, __f, __f }; } /// Loads a scalar double-precision floating point value from the @@ -3036,7 +3039,10 @@ _mm_broadcast_ss(float const *__a) static __inline __m256d __DEFAULT_FN_ATTRS _mm256_broadcast_sd(double const *__a) { - double __d = *__a; + struct __mm256_broadcast_sd_struct { + double __d; + } __attribute__((__packed__, __may_alias__)); + double __d = ((const struct __mm256_broadcast_sd_struct*)__a)->__d; return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d }; } @@ -3055,7 +3061,10 @@ _mm256_broadcast_sd(double const *__a) static __inline __m256 __DEFAULT_FN_ATTRS _mm256_broadcast_ss(float const *__a) { - float __f = *__a; + struct __mm256_broadcast_ss_struct { + float __f; + } __attribute__((__packed__, __may_alias__)); + float __f = ((const struct 
__mm256_broadcast_ss_struct*)__a)->__f; return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f }; } @@ -4499,7 +4508,8 @@ _mm256_castsi256_si128(__m256i __a) static __inline __m256d __DEFAULT_FN_ATTRS _mm256_castpd128_pd256(__m128d __a) { - return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1); + return __builtin_shufflevector( + (__v2df)__a, (__v2df)__builtin_nondeterministic_value(__a), 0, 1, 2, 3); } /// Constructs a 256-bit floating-point vector of [8 x float] from a @@ -4520,7 +4530,9 @@ _mm256_castpd128_pd256(__m128d __a) static __inline __m256 __DEFAULT_FN_ATTRS _mm256_castps128_ps256(__m128 __a) { - return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1); + return __builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_nondeterministic_value(__a), + 0, 1, 2, 3, 4, 5, 6, 7); } /// Constructs a 256-bit integer vector from a 128-bit integer vector. @@ -4539,7 +4551,8 @@ _mm256_castps128_ps256(__m128 __a) static __inline __m256i __DEFAULT_FN_ATTRS _mm256_castsi128_si256(__m128i __a) { - return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1); + return __builtin_shufflevector( + (__v2di)__a, (__v2di)__builtin_nondeterministic_value(__a), 0, 1, 2, 3); } /// Constructs a 256-bit floating-point vector of [4 x double] from a diff --git a/third_party/clang/lib/clang/16.0.0/include/avxneconvertintrin.h b/third_party/clang/lib/clang/17.0.1/include/avxneconvertintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avxneconvertintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avxneconvertintrin.h diff --git a/third_party/clang/lib/clang/17.0.1/include/avxvnniint16intrin.h b/third_party/clang/lib/clang/17.0.1/include/avxvnniint16intrin.h new file mode 100644 index 0000000000..e4d342a8b4 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/avxvnniint16intrin.h @@ -0,0 +1,473 @@ +/*===----------- avxvnniint16intrin.h - AVXVNNIINT16 
intrinsics-------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error \ + "Never use directly; include instead." +#endif // __IMMINTRIN_H + +#ifndef __AVXVNNIINT16INTRIN_H +#define __AVXVNNIINT16INTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \ + __min_vector_width__(256))) + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpwsud_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUD instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x int]. +/// \param __A +/// A 128-bit vector of [8 x short]. +/// \param __B +/// A 128-bit vector of [8 x unsigned short]. +/// \returns +/// A 128-bit vector of [4 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsud_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwsud128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUD instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x int]. +/// \param __A +/// A 256-bit vector of [16 x short]. +/// \param __B +/// A 256-bit vector of [16 x unsigned short]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwsud256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. 
Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpwsuds_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x int]. +/// \param __A +/// A 128-bit vector of [8 x short]. +/// \param __B +/// A 128-bit vector of [8 x unsigned short]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsuds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwsuds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x int]. +/// \param __A +/// A 256-bit vector of [16 x short]. +/// \param __B +/// A 256-bit vector of [16 x unsigned short]. +/// \returns +/// A 256-bit vector of [8 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwsuds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpbusd_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWUSD instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x int]. +/// \param __A +/// A 128-bit vector of [8 x unsigned short]. +/// \param __B +/// A 128-bit vector of [8 x short]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusd_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwusd128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. 
Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWUSD instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x int]. +/// \param __A +/// A 256-bit vector of [16 x unsigned short]. +/// \param __B +/// A 256-bit vector of [16 x short]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwusd256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpwusds_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x int]. +/// \param __A +/// A 128-bit vector of [8 x unsigned short]. +/// \param __B +/// A 128-bit vector of [8 x short]. +/// \returns +/// A 128-bit vector of [4 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwusds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x int]. +/// \param __A +/// A 256-bit vector of [16 x unsigned short]. +/// \param __B +/// A 256-bit vector of [16 x short]. +/// \returns +/// A 256-bit vector of [8 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwusds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwusds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpwuud_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWUUD instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x unsigned int]. +/// \param __A +/// A 128-bit vector of [8 x unsigned short]. +/// \param __B +/// A 128-bit vector of [8 x unsigned short]. +/// \returns +/// A 128-bit vector of [4 x unsigned int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuud_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwuud128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. 
Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWUUD instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x unsigned int]. +/// \param __A +/// A 256-bit vector of [16 x unsigned short]. +/// \param __B +/// A 256-bit vector of [16 x unsigned short]. +/// \returns +/// A 256-bit vector of [8 x unsigned int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwuud256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpwsuds_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x unsigned int]. +/// \param __A +/// A 128-bit vector of [8 x unsigned short]. +/// \param __B +/// A 128-bit vector of [8 x unsigned short]. +/// \returns +/// A 128-bit vector of [4 x unsigned int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuuds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwuuds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x unsigned int]. +/// \param __A +/// A 256-bit vector of [16 x unsigned short]. +/// \param __B +/// A 256-bit vector of [16 x unsigned short]. +/// \returns +/// A 256-bit vector of [8 x unsigned int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwuuds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif // __AVXVNNIINT16INTRIN_H diff --git a/third_party/clang/lib/clang/16.0.0/include/avxvnniint8intrin.h b/third_party/clang/lib/clang/17.0.1/include/avxvnniint8intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avxvnniint8intrin.h rename to third_party/clang/lib/clang/17.0.1/include/avxvnniint8intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/avxvnniintrin.h b/third_party/clang/lib/clang/17.0.1/include/avxvnniintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/avxvnniintrin.h rename to third_party/clang/lib/clang/17.0.1/include/avxvnniintrin.h diff --git a/third_party/clang/lib/clang/17.0.1/include/bmi2intrin.h b/third_party/clang/lib/clang/17.0.1/include/bmi2intrin.h new file mode 100644 index 0000000000..f0a3343bef --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/bmi2intrin.h @@ -0,0 +1,255 @@ +/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." 
+#endif + +#ifndef __BMI2INTRIN_H +#define __BMI2INTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2"))) + +/// Copies the unsigned 32-bit integer \a __X and zeroes the upper bits +/// starting at bit number \a __Y. +/// +/// \code{.operation} +/// i := __Y[7:0] +/// result := __X +/// IF i < 32 +/// result[31:i] := 0 +/// FI +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BZHI instruction. +/// +/// \param __X +/// The 32-bit source value to copy. +/// \param __Y +/// The lower 8 bits specify the bit number of the lowest bit to zero. +/// \returns The partially zeroed 32-bit value. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_bzhi_u32(unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_bzhi_si(__X, __Y); +} + +/// Deposit (scatter) low-order bits from the unsigned 32-bit integer \a __X +/// into the 32-bit result, according to the mask in the unsigned 32-bit +/// integer \a __Y. All other bits of the result are zero. +/// +/// \code{.operation} +/// i := 0 +/// result := 0 +/// FOR m := 0 TO 31 +/// IF __Y[m] == 1 +/// result[m] := __X[i] +/// i := i + 1 +/// ENDIF +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PDEP instruction. +/// +/// \param __X +/// The 32-bit source value to copy. +/// \param __Y +/// The 32-bit mask specifying where to deposit source bits. +/// \returns The 32-bit result. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_pdep_u32(unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_pdep_si(__X, __Y); +} + +/// Extract (gather) bits from the unsigned 32-bit integer \a __X into the +/// low-order bits of the 32-bit result, according to the mask in the +/// unsigned 32-bit integer \a __Y. All other bits of the result are zero. 
+/// +/// \code{.operation} +/// i := 0 +/// result := 0 +/// FOR m := 0 TO 31 +/// IF __Y[m] == 1 +/// result[i] := __X[m] +/// i := i + 1 +/// ENDIF +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PEXT instruction. +/// +/// \param __X +/// The 32-bit source value to copy. +/// \param __Y +/// The 32-bit mask specifying which source bits to extract. +/// \returns The 32-bit result. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_pext_u32(unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_pext_si(__X, __Y); +} + +/// Multiplies the unsigned 32-bit integers \a __X and \a __Y to form a +/// 64-bit product. Stores the upper 32 bits of the product in the +/// memory at \a __P and returns the lower 32 bits. +/// +/// \code{.operation} +/// Store32(__P, (__X * __Y)[63:32]) +/// result := (__X * __Y)[31:0] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c MULX instruction. +/// +/// \param __X +/// An unsigned 32-bit multiplicand. +/// \param __Y +/// An unsigned 32-bit multiplicand. +/// \param __P +/// A pointer to memory for storing the upper half of the product. +/// \returns The lower half of the product. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mulx_u32(unsigned int __X, unsigned int __Y, unsigned int *__P) +{ + unsigned long long __res = (unsigned long long) __X * __Y; + *__P = (unsigned int)(__res >> 32); + return (unsigned int)__res; +} + +#ifdef __x86_64__ + +/// Copies the unsigned 64-bit integer \a __X and zeroes the upper bits +/// starting at bit number \a __Y. +/// +/// \code{.operation} +/// i := __Y[7:0] +/// result := __X +/// IF i < 64 +/// result[63:i] := 0 +/// FI +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BZHI instruction. +/// +/// \param __X +/// The 64-bit source value to copy. +/// \param __Y +/// The lower 8 bits specify the bit number of the lowest bit to zero. 
+/// \returns The partially zeroed 64-bit value. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_bzhi_u64(unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_bzhi_di(__X, __Y); +} + +/// Deposit (scatter) low-order bits from the unsigned 64-bit integer \a __X +/// into the 64-bit result, according to the mask in the unsigned 64-bit +/// integer \a __Y. All other bits of the result are zero. +/// +/// \code{.operation} +/// i := 0 +/// result := 0 +/// FOR m := 0 TO 63 +/// IF __Y[m] == 1 +/// result[m] := __X[i] +/// i := i + 1 +/// ENDIF +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PDEP instruction. +/// +/// \param __X +/// The 64-bit source value to copy. +/// \param __Y +/// The 64-bit mask specifying where to deposit source bits. +/// \returns The 64-bit result. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_pdep_u64(unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_pdep_di(__X, __Y); +} + +/// Extract (gather) bits from the unsigned 64-bit integer \a __X into the +/// low-order bits of the 64-bit result, according to the mask in the +/// unsigned 64-bit integer \a __Y. All other bits of the result are zero. +/// +/// \code{.operation} +/// i := 0 +/// result := 0 +/// FOR m := 0 TO 63 +/// IF __Y[m] == 1 +/// result[i] := __X[m] +/// i := i + 1 +/// ENDIF +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PEXT instruction. +/// +/// \param __X +/// The 64-bit source value to copy. +/// \param __Y +/// The 64-bit mask specifying which source bits to extract. +/// \returns The 64-bit result. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_pext_u64(unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_pext_di(__X, __Y); +} + +/// Multiplies the unsigned 64-bit integers \a __X and \a __Y to form a +/// 128-bit product. 
Stores the upper 64 bits of the product to the +/// memory addressed by \a __P and returns the lower 64 bits. +/// +/// \code{.operation} +/// Store64(__P, (__X * __Y)[127:64]) +/// result := (__X * __Y)[63:0] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c MULX instruction. +/// +/// \param __X +/// An unsigned 64-bit multiplicand. +/// \param __Y +/// An unsigned 64-bit multiplicand. +/// \param __P +/// A pointer to memory for storing the upper half of the product. +/// \returns The lower half of the product. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_mulx_u64 (unsigned long long __X, unsigned long long __Y, + unsigned long long *__P) +{ + unsigned __int128 __res = (unsigned __int128) __X * __Y; + *__P = (unsigned long long) (__res >> 64); + return (unsigned long long) __res; +} + +#endif /* __x86_64__ */ + +#undef __DEFAULT_FN_ATTRS + +#endif /* __BMI2INTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/bmiintrin.h b/third_party/clang/lib/clang/17.0.1/include/bmiintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/bmiintrin.h rename to third_party/clang/lib/clang/17.0.1/include/bmiintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/builtins.h b/third_party/clang/lib/clang/17.0.1/include/builtins.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/builtins.h rename to third_party/clang/lib/clang/17.0.1/include/builtins.h diff --git a/third_party/clang/lib/clang/16.0.0/include/cet.h b/third_party/clang/lib/clang/17.0.1/include/cet.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/cet.h rename to third_party/clang/lib/clang/17.0.1/include/cet.h diff --git a/third_party/clang/lib/clang/16.0.0/include/cetintrin.h b/third_party/clang/lib/clang/17.0.1/include/cetintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/cetintrin.h rename to 
third_party/clang/lib/clang/17.0.1/include/cetintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/cldemoteintrin.h b/third_party/clang/lib/clang/17.0.1/include/cldemoteintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/cldemoteintrin.h rename to third_party/clang/lib/clang/17.0.1/include/cldemoteintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/clflushoptintrin.h b/third_party/clang/lib/clang/17.0.1/include/clflushoptintrin.h similarity index 72% rename from third_party/clang/lib/clang/16.0.0/include/clflushoptintrin.h rename to third_party/clang/lib/clang/17.0.1/include/clflushoptintrin.h index 060eb36f30..ae0a0244c4 100644 --- a/third_party/clang/lib/clang/16.0.0/include/clflushoptintrin.h +++ b/third_party/clang/lib/clang/17.0.1/include/clflushoptintrin.h @@ -17,6 +17,15 @@ /* Define the default attributes for the functions in this file. */ #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clflushopt"))) +/// Invalidates all levels of the cache hierarchy and flushes modified data to +/// memory for the cache line specified by the address \a __m. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c CLFLUSHOPT instruction. +/// +/// \param __m +/// An address within the cache line to flush and invalidate. 
static __inline__ void __DEFAULT_FN_ATTRS _mm_clflushopt(void const * __m) { __builtin_ia32_clflushopt(__m); diff --git a/third_party/clang/lib/clang/16.0.0/include/clwbintrin.h b/third_party/clang/lib/clang/17.0.1/include/clwbintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/clwbintrin.h rename to third_party/clang/lib/clang/17.0.1/include/clwbintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/clzerointrin.h b/third_party/clang/lib/clang/17.0.1/include/clzerointrin.h similarity index 72% rename from third_party/clang/lib/clang/16.0.0/include/clzerointrin.h rename to third_party/clang/lib/clang/17.0.1/include/clzerointrin.h index a180984a3f..acccfe94ff 100644 --- a/third_party/clang/lib/clang/16.0.0/include/clzerointrin.h +++ b/third_party/clang/lib/clang/17.0.1/include/clzerointrin.h @@ -6,7 +6,7 @@ * *===-----------------------------------------------------------------------=== */ -#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#ifndef __X86INTRIN_H #error "Never use directly; include instead." #endif @@ -17,14 +17,16 @@ #define __DEFAULT_FN_ATTRS \ __attribute__((__always_inline__, __nodebug__, __target__("clzero"))) -/// Loads the cache line address and zero's out the cacheline +/// Zeroes out the cache line for the address \a __line. This uses a +/// non-temporal store. Calling \c _mm_sfence() afterward might be needed +/// to enforce ordering. /// -/// \headerfile +/// \headerfile /// -/// This intrinsic corresponds to the CLZERO instruction. +/// This intrinsic corresponds to the \c CLZERO instruction. /// /// \param __line -/// A pointer to a cacheline which needs to be zeroed out. +/// An address within the cache line to zero out. 
static __inline__ void __DEFAULT_FN_ATTRS _mm_clzero (void * __line) { diff --git a/third_party/clang/lib/clang/16.0.0/include/cmpccxaddintrin.h b/third_party/clang/lib/clang/17.0.1/include/cmpccxaddintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/cmpccxaddintrin.h rename to third_party/clang/lib/clang/17.0.1/include/cmpccxaddintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/cpuid.h b/third_party/clang/lib/clang/17.0.1/include/cpuid.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/cpuid.h rename to third_party/clang/lib/clang/17.0.1/include/cpuid.h diff --git a/third_party/clang/lib/clang/16.0.0/include/crc32intrin.h b/third_party/clang/lib/clang/17.0.1/include/crc32intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/crc32intrin.h rename to third_party/clang/lib/clang/17.0.1/include/crc32intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/algorithm b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/algorithm similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/algorithm rename to third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/algorithm diff --git a/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/bits/shared_ptr_base.h b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/bits/shared_ptr_base.h new file mode 100644 index 0000000000..10028dd7bd --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/bits/shared_ptr_base.h @@ -0,0 +1,9 @@ +// CUDA headers define __noinline__ which interferes with libstdc++'s use of +// `__attribute((__noinline__))`. In order to avoid compilation error, +// temporarily unset __noinline__ when we include affected libstdc++ header. 
+ +#pragma push_macro("__noinline__") +#undef __noinline__ +#include_next "bits/shared_ptr_base.h" + +#pragma pop_macro("__noinline__") diff --git a/third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/cmath b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/cmath similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/cmath rename to third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/cmath diff --git a/third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/complex b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/complex similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/complex rename to third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/complex diff --git a/third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/new b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/new similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/new rename to third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/new diff --git a/third_party/clang/lib/clang/16.0.0/include/emmintrin.h b/third_party/clang/lib/clang/17.0.1/include/emmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/emmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/emmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/enqcmdintrin.h b/third_party/clang/lib/clang/17.0.1/include/enqcmdintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/enqcmdintrin.h rename to third_party/clang/lib/clang/17.0.1/include/enqcmdintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/f16cintrin.h b/third_party/clang/lib/clang/17.0.1/include/f16cintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/f16cintrin.h rename to third_party/clang/lib/clang/17.0.1/include/f16cintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/float.h 
b/third_party/clang/lib/clang/17.0.1/include/float.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/float.h rename to third_party/clang/lib/clang/17.0.1/include/float.h diff --git a/third_party/clang/lib/clang/16.0.0/include/fma4intrin.h b/third_party/clang/lib/clang/17.0.1/include/fma4intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/fma4intrin.h rename to third_party/clang/lib/clang/17.0.1/include/fma4intrin.h diff --git a/third_party/clang/lib/clang/17.0.1/include/fmaintrin.h b/third_party/clang/lib/clang/17.0.1/include/fmaintrin.h new file mode 100644 index 0000000000..ea832fac4f --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/fmaintrin.h @@ -0,0 +1,780 @@ +/*===---- fmaintrin.h - FMA intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __FMAINTRIN_H +#define __FMAINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(256))) + +/// Computes a multiply-add of 128-bit vectors of [4 x float]. +/// For each element, computes (__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213PS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. 
+/// \param __C +/// A 128-bit vector of [4 x float] containing the addend. +/// \returns A 128-bit vector of [4 x float] containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +/// Computes a multiply-add of 128-bit vectors of [2 x double]. +/// For each element, computes (__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend. +/// \returns A 128-bit [2 x double] vector containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +/// Computes a scalar multiply-add of the single-precision values in the +/// low 32 bits of 128-bit vectors of [4 x float]. +/// \code +/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] +/// result[127:32] = __A[127:32] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213SS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand in the low +/// 32 bits. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier in the low +/// 32 bits. +/// \param __C +/// A 128-bit vector of [4 x float] containing the addend in the low +/// 32 bits. +/// \returns A 128-bit vector of [4 x float] containing the result in the low +/// 32 bits and a copy of \a __A[127:32] in the upper 96 bits. 
+static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +/// Computes a scalar multiply-add of the double-precision values in the +/// low 64 bits of 128-bit vectors of [2 x double]. +/// \code +/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] +/// result[127:64] = __A[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213SD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand in the low +/// 64 bits. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier in the low +/// 64 bits. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend in the low +/// 64 bits. +/// \returns A 128-bit vector of [2 x double] containing the result in the low +/// 64 bits and a copy of \a __A[127:64] in the upper 64 bits. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +/// Computes a multiply-subtract of 128-bit vectors of [4 x float]. +/// For each element, computes (__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213PS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. +/// \param __C +/// A 128-bit vector of [4 x float] containing the subtrahend. +/// \returns A 128-bit vector of [4 x float] containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +/// Computes a multiply-subtract of 128-bit vectors of [2 x double]. 
+/// For each element, computes (__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend. +/// \returns A 128-bit vector of [2 x double] containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +/// Computes a scalar multiply-subtract of the single-precision values in +/// the low 32 bits of 128-bit vectors of [4 x float]. +/// \code +/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] +/// result[127:32] = __A[127:32] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213SS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand in the low +/// 32 bits. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier in the low +/// 32 bits. +/// \param __C +/// A 128-bit vector of [4 x float] containing the subtrahend in the low +/// 32 bits. +/// \returns A 128-bit vector of [4 x float] containing the result in the low +/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +/// Computes a scalar multiply-subtract of the double-precision values in +/// the low 64 bits of 128-bit vectors of [2 x double]. +/// \code +/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] +/// result[127:64] = __A[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213SD instruction. 
+/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand in the low +/// 64 bits. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier in the low +/// 64 bits. +/// \param __C +/// A 128-bit vector of [2 x double] containing the subtrahend in the low +/// 64 bits. +/// \returns A 128-bit vector of [2 x double] containing the result in the low +/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +/// Computes a negated multiply-add of 128-bit vectors of [4 x float]. +/// For each element, computes -(__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213DPS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. +/// \param __C +/// A 128-bit vector of [4 x float] containing the addend. +/// \returns A 128-bit [4 x float] vector containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +/// Computes a negated multiply-add of 128-bit vectors of [2 x double]. +/// For each element, computes -(__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend. +/// \returns A 128-bit vector of [2 x double] containing the result. 
+static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +/// Computes a scalar negated multiply-add of the single-precision values in +/// the low 32 bits of 128-bit vectors of [4 x float]. +/// \code +/// result[31:0] = -(__A[31:0] * __B[31:0]) + __C[31:0] +/// result[127:32] = __A[127:32] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213SS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand in the low +/// 32 bits. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier in the low +/// 32 bits. +/// \param __C +/// A 128-bit vector of [4 x float] containing the addend in the low +/// 32 bits. +/// \returns A 128-bit vector of [4 x float] containing the result in the low +/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C); +} + +/// Computes a scalar negated multiply-add of the double-precision values +/// in the low 64 bits of 128-bit vectors of [2 x double]. +/// \code +/// result[63:0] = -(__A[63:0] * __B[63:0]) + __C[63:0] +/// result[127:64] = __A[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213SD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand in the low +/// 64 bits. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier in the low +/// 64 bits. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend in the low +/// 64 bits. +/// \returns A 128-bit vector of [2 x double] containing the result in the low +/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits. 
+static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C); +} + +/// Computes a negated multiply-subtract of 128-bit vectors of [4 x float]. +/// For each element, computes -(__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213PS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. +/// \param __C +/// A 128-bit vector of [4 x float] containing the subtrahend. +/// \returns A 128-bit vector of [4 x float] containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +/// Computes a negated multiply-subtract of 128-bit vectors of [2 x double]. +/// For each element, computes -(__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the subtrahend. +/// \returns A 128-bit vector of [2 x double] containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +/// Computes a scalar negated multiply-subtract of the single-precision +/// values in the low 32 bits of 128-bit vectors of [4 x float]. 
+/// \code +/// result[31:0] = -(__A[31:0] * __B[31:0]) - __C[31:0] +/// result[127:32] = __A[127:32] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213SS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand in the low +/// 32 bits. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier in the low +/// 32 bits. +/// \param __C +/// A 128-bit vector of [4 x float] containing the subtrahend in the low +/// 32 bits. +/// \returns A 128-bit vector of [4 x float] containing the result in the low +/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C); +} + +/// Computes a scalar negated multiply-subtract of the double-precision +/// values in the low 64 bits of 128-bit vectors of [2 x double]. +/// \code +/// result[63:0] = -(__A[63:0] * __B[63:0]) - __C[63:0] +/// result[127:64] = __A[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213SD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand in the low +/// 64 bits. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier in the low +/// 64 bits. +/// \param __C +/// A 128-bit vector of [2 x double] containing the subtrahend in the low +/// 64 bits. +/// \returns A 128-bit vector of [2 x double] containing the result in the low +/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C); +} + +/// Computes a multiply with alternating add/subtract of 128-bit vectors of +/// [4 x float]. 
+/// \code +/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] +/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32] +/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64] +/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADDSUB213PS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. +/// \param __C +/// A 128-bit vector of [4 x float] containing the addend/subtrahend. +/// \returns A 128-bit vector of [4 x float] containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +/// Computes a multiply with alternating add/subtract of 128-bit vectors of +/// [2 x double]. +/// \code +/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] +/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend/subtrahend. +/// \returns A 128-bit vector of [2 x double] containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +/// Computes a multiply with alternating add/subtract of 128-bit vectors of +/// [4 x float]. 
+/// \code +/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] +/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32] +/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64] +/// result[127:96 = (__A[127:96] * __B[127:96]) - __C[127:96] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUBADD213PS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. +/// \param __C +/// A 128-bit vector of [4 x float] containing the addend/subtrahend. +/// \returns A 128-bit vector of [4 x float] containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +/// Computes a multiply with alternating add/subtract of 128-bit vectors of +/// [2 x double]. +/// \code +/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] +/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend/subtrahend. +/// \returns A 128-bit vector of [2 x double] containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +/// Computes a multiply-add of 256-bit vectors of [8 x float]. +/// For each element, computes (__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213PS instruction. 
+/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the addend. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +/// Computes a multiply-add of 256-bit vectors of [4 x double]. +/// For each element, computes (__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213PD instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the addend. +/// \returns A 256-bit vector of [4 x double] containing the result. +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +/// Computes a multiply-subtract of 256-bit vectors of [8 x float]. +/// For each element, computes (__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213PS instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the result. 
+static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +/// Computes a multiply-subtract of 256-bit vectors of [4 x double]. +/// For each element, computes (__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213PD instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the result. +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +/// Computes a negated multiply-add of 256-bit vectors of [8 x float]. +/// For each element, computes -(__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213PS instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the addend. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +/// Computes a negated multiply-add of 256-bit vectors of [4 x double]. +/// For each element, computes -(__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213PD instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. 
+/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the addend. +/// \returns A 256-bit vector of [4 x double] containing the result. +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +/// Computes a negated multiply-subtract of 256-bit vectors of [8 x float]. +/// For each element, computes -(__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213PS instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +/// Computes a negated multiply-subtract of 256-bit vectors of [4 x double]. +/// For each element, computes -(__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213PD instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the result. 
+static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +/// Computes a multiply with alternating add/subtract of 256-bit vectors of +/// [8 x float]. +/// \code +/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] +/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32] +/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64] +/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96] +/// result[159:128] = (__A[159:128] * __B[159:128]) - __C[159:128] +/// result[191:160] = (__A[191:160] * __B[191:160]) + __C[191:160] +/// result[223:192] = (__A[223:192] * __B[223:192]) - __C[223:192] +/// result[255:224] = (__A[255:224] * __B[255:224]) + __C[255:224] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADDSUB213PS instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the addend/subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +/// Computes a multiply with alternating add/subtract of 256-bit vectors of +/// [4 x double]. +/// \code +/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] +/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64] +/// result[191:128] = (__A[191:128] * __B[191:128]) - __C[191:128] +/// result[255:192] = (__A[255:192] * __B[255:192]) + __C[255:192] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction. 
+/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the addend/subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the result. +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +/// Computes a vector multiply with alternating add/subtract of 256-bit +/// vectors of [8 x float]. +/// \code +/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] +/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32] +/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64] +/// result[127:96] = (__A[127:96] * __B[127:96]) - __C[127:96] +/// result[159:128] = (__A[159:128] * __B[159:128]) + __C[159:128] +/// result[191:160] = (__A[191:160] * __B[191:160]) - __C[191:160] +/// result[223:192] = (__A[223:192] * __B[223:192]) + __C[223:192] +/// result[255:224] = (__A[255:224] * __B[255:224]) - __C[255:224] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUBADD213PS instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the addend/subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +/// Computes a vector multiply with alternating add/subtract of 256-bit +/// vectors of [4 x double]. 
+/// \code +/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] +/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64] +/// result[191:128] = (__A[191:128] * __B[191:128]) + __C[191:128] +/// result[255:192] = (__A[255:192] * __B[255:192]) - __C[255:192] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUBADD213PD instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the addend/subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the result. +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __FMAINTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/fxsrintrin.h b/third_party/clang/lib/clang/17.0.1/include/fxsrintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/fxsrintrin.h rename to third_party/clang/lib/clang/17.0.1/include/fxsrintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/gfniintrin.h b/third_party/clang/lib/clang/17.0.1/include/gfniintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/gfniintrin.h rename to third_party/clang/lib/clang/17.0.1/include/gfniintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/hexagon_circ_brev_intrinsics.h b/third_party/clang/lib/clang/17.0.1/include/hexagon_circ_brev_intrinsics.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/hexagon_circ_brev_intrinsics.h rename to third_party/clang/lib/clang/17.0.1/include/hexagon_circ_brev_intrinsics.h diff --git a/third_party/clang/lib/clang/16.0.0/include/hexagon_protos.h 
b/third_party/clang/lib/clang/17.0.1/include/hexagon_protos.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/hexagon_protos.h rename to third_party/clang/lib/clang/17.0.1/include/hexagon_protos.h diff --git a/third_party/clang/lib/clang/16.0.0/include/hexagon_types.h b/third_party/clang/lib/clang/17.0.1/include/hexagon_types.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/hexagon_types.h rename to third_party/clang/lib/clang/17.0.1/include/hexagon_types.h diff --git a/third_party/clang/lib/clang/16.0.0/include/hlsl.h b/third_party/clang/lib/clang/17.0.1/include/hlsl.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/hlsl.h rename to third_party/clang/lib/clang/17.0.1/include/hlsl.h diff --git a/third_party/clang/lib/clang/16.0.0/include/hlsl/hlsl_basic_types.h b/third_party/clang/lib/clang/17.0.1/include/hlsl/hlsl_basic_types.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/hlsl/hlsl_basic_types.h rename to third_party/clang/lib/clang/17.0.1/include/hlsl/hlsl_basic_types.h diff --git a/third_party/clang/lib/clang/17.0.1/include/hlsl/hlsl_intrinsics.h b/third_party/clang/lib/clang/17.0.1/include/hlsl/hlsl_intrinsics.h new file mode 100644 index 0000000000..1a34e1626e --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/hlsl/hlsl_intrinsics.h @@ -0,0 +1,480 @@ +//===----- hlsl_intrinsics.h - HLSL definitions for intrinsics ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _HLSL_HLSL_INTRINSICS_H_ +#define _HLSL_HLSL_INTRINSICS_H_ + +namespace hlsl { + +__attribute__((availability(shadermodel, introduced = 6.0))) +__attribute__((clang_builtin_alias(__builtin_hlsl_wave_active_count_bits))) uint +WaveActiveCountBits(bool bBit); + +// abs builtins +#ifdef __HLSL_ENABLE_16_BIT +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +int16_t abs(int16_t); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +int16_t2 abs(int16_t2); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +int16_t3 abs(int16_t3); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +int16_t4 abs(int16_t4); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) half abs(half); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +half2 abs(half2); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +half3 abs(half3); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +half4 abs(half4); +#endif + +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int abs(int); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int2 abs(int2); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int3 abs(int3); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int4 abs(int4); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) float +abs(float); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +float2 abs(float2); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +float3 abs(float3); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +float4 abs(float4); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +int64_t abs(int64_t); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +int64_t2 
abs(int64_t2); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +int64_t3 abs(int64_t3); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +int64_t4 abs(int64_t4); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) double +abs(double); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +double2 abs(double2); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +double3 abs(double3); +__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) +double4 abs(double4); + +// sqrt builtins +__attribute__((clang_builtin_alias(__builtin_sqrt))) double sqrt(double In); +__attribute__((clang_builtin_alias(__builtin_sqrtf))) float sqrt(float In); + +#ifdef __HLSL_ENABLE_16_BIT +__attribute__((clang_builtin_alias(__builtin_sqrtf16))) half sqrt(half In); +#endif + +// ceil builtins +#ifdef __HLSL_ENABLE_16_BIT +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) +half ceil(half); +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) +half2 ceil(half2); +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) +half3 ceil(half3); +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) +half4 ceil(half4); +#endif + +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) float +ceil(float); +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) +float2 ceil(float2); +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) +float3 ceil(float3); +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) +float4 ceil(float4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) double +ceil(double); +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) +double2 ceil(double2); +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) +double3 ceil(double3); +__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) +double4 ceil(double4); + +// floor builtins +#ifdef __HLSL_ENABLE_16_BIT 
+__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) +half floor(half); +__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) +half2 floor(half2); +__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) +half3 floor(half3); +__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) +half4 floor(half4); +#endif + +__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) float +floor(float); +__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) +float2 floor(float2); +__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) +float3 floor(float3); +__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) +float4 floor(float4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) double +floor(double); +__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) +double2 floor(double2); +__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) +double3 floor(double3); +__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) +double4 floor(double4); + +// cos builtins +#ifdef __HLSL_ENABLE_16_BIT +__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) half cos(half); +__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) +half2 cos(half2); +__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) +half3 cos(half3); +__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) +half4 cos(half4); +#endif + +__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) float +cos(float); +__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) +float2 cos(float2); +__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) +float3 cos(float3); +__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) +float4 cos(float4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) double +cos(double); +__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) +double2 cos(double2); 
+__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) +double3 cos(double3); +__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) +double4 cos(double4); + +// sin builtins +#ifdef __HLSL_ENABLE_16_BIT +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) half sin(half); +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) +half2 sin(half2); +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) +half3 sin(half3); +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) +half4 sin(half4); +#endif + +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) float +sin(float); +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) +float2 sin(float2); +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) +float3 sin(float3); +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) +float4 sin(float4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) double +sin(double); +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) +double2 sin(double2); +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) +double3 sin(double3); +__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) +double4 sin(double4); + +// trunc builtins +#ifdef __HLSL_ENABLE_16_BIT +__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) +half trunc(half); +__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) +half2 trunc(half2); +__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) +half3 trunc(half3); +__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) +half4 trunc(half4); +#endif + +__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) float +trunc(float); +__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) +float2 trunc(float2); +__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) +float3 trunc(float3); 
+__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) +float4 trunc(float4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) double +trunc(double); +__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) +double2 trunc(double2); +__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) +double3 trunc(double3); +__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) +double4 trunc(double4); + +// log builtins +#ifdef __HLSL_ENABLE_16_BIT +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) half log(half); +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) +half2 log(half2); +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) +half3 log(half3); +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) +half4 log(half4); +#endif + +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) float +log(float); +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) +float2 log(float2); +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) +float3 log(float3); +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) +float4 log(float4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) double +log(double); +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) +double2 log(double2); +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) +double3 log(double3); +__attribute__((clang_builtin_alias(__builtin_elementwise_log))) +double4 log(double4); + +// log2 builtins +#ifdef __HLSL_ENABLE_16_BIT +__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) +half log2(half); +__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) +half2 log2(half2); +__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) +half3 log2(half3); +__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) +half4 log2(half4); +#endif + 
+__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) float +log2(float); +__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) +float2 log2(float2); +__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) +float3 log2(float3); +__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) +float4 log2(float4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) double +log2(double); +__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) +double2 log2(double2); +__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) +double3 log2(double3); +__attribute__((clang_builtin_alias(__builtin_elementwise_log2))) +double4 log2(double4); + +// log10 builtins +#ifdef __HLSL_ENABLE_16_BIT +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) +half log10(half); +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) +half2 log10(half2); +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) +half3 log10(half3); +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) +half4 log10(half4); +#endif + +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) float +log10(float); +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) +float2 log10(float2); +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) +float3 log10(float3); +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) +float4 log10(float4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) double +log10(double); +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) +double2 log10(double2); +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) +double3 log10(double3); +__attribute__((clang_builtin_alias(__builtin_elementwise_log10))) +double4 log10(double4); + +// max builtins +#ifdef __HLSL_ENABLE_16_BIT +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +half max(half, half); 
+__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +half2 max(half2, half2); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +half3 max(half3, half3); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +half4 max(half4, half4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +int16_t max(int16_t, int16_t); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +int16_t2 max(int16_t2, int16_t2); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +int16_t3 max(int16_t3, int16_t3); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +int16_t4 max(int16_t4, int16_t4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint16_t max(uint16_t, uint16_t); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint16_t2 max(uint16_t2, uint16_t2); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint16_t3 max(uint16_t3, uint16_t3); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint16_t4 max(uint16_t4, uint16_t4); +#endif + +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) int max(int, + int); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +int2 max(int2, int2); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +int3 max(int3, int3); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +int4 max(int4, int4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint max(uint, uint); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint2 max(uint2, uint2); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint3 max(uint3, uint3); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint4 max(uint4, uint4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +int64_t max(int64_t, int64_t); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +int64_t2 max(int64_t2, 
int64_t2); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +int64_t3 max(int64_t3, int64_t3); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +int64_t4 max(int64_t4, int64_t4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint64_t max(uint64_t, uint64_t); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint64_t2 max(uint64_t2, uint64_t2); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint64_t3 max(uint64_t3, uint64_t3); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +uint64_t4 max(uint64_t4, uint64_t4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) float +max(float, float); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +float2 max(float2, float2); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +float3 max(float3, float3); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +float4 max(float4, float4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) double +max(double, double); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +double2 max(double2, double2); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +double3 max(double3, double3); +__attribute__((clang_builtin_alias(__builtin_elementwise_max))) +double4 max(double4, double4); + +// min builtins +#ifdef __HLSL_ENABLE_16_BIT +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +half min(half, half); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +half2 min(half2, half2); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +half3 min(half3, half3); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +half4 min(half4, half4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +int16_t min(int16_t, int16_t); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +int16_t2 min(int16_t2, int16_t2); 
+__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +int16_t3 min(int16_t3, int16_t3); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +int16_t4 min(int16_t4, int16_t4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint16_t min(uint16_t, uint16_t); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint16_t2 min(uint16_t2, uint16_t2); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint16_t3 min(uint16_t3, uint16_t3); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint16_t4 min(uint16_t4, uint16_t4); +#endif + +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) int min(int, + int); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +int2 min(int2, int2); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +int3 min(int3, int3); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +int4 min(int4, int4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint min(uint, uint); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint2 min(uint2, uint2); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint3 min(uint3, uint3); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint4 min(uint4, uint4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +int64_t min(int64_t, int64_t); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +int64_t2 min(int64_t2, int64_t2); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +int64_t3 min(int64_t3, int64_t3); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +int64_t4 min(int64_t4, int64_t4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint64_t min(uint64_t, uint64_t); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint64_t2 min(uint64_t2, uint64_t2); 
+__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint64_t3 min(uint64_t3, uint64_t3); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +uint64_t4 min(uint64_t4, uint64_t4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) float +min(float, float); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +float2 min(float2, float2); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +float3 min(float3, float3); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +float4 min(float4, float4); + +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) double +min(double, double); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +double2 min(double2, double2); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +double3 min(double3, double3); +__attribute__((clang_builtin_alias(__builtin_elementwise_min))) +double4 min(double4, double4); + +} // namespace hlsl +#endif //_HLSL_HLSL_INTRINSICS_H_ diff --git a/third_party/clang/lib/clang/16.0.0/include/hresetintrin.h b/third_party/clang/lib/clang/17.0.1/include/hresetintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/hresetintrin.h rename to third_party/clang/lib/clang/17.0.1/include/hresetintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/htmintrin.h b/third_party/clang/lib/clang/17.0.1/include/htmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/htmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/htmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/htmxlintrin.h b/third_party/clang/lib/clang/17.0.1/include/htmxlintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/htmxlintrin.h rename to third_party/clang/lib/clang/17.0.1/include/htmxlintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/hvx_hexagon_protos.h 
b/third_party/clang/lib/clang/17.0.1/include/hvx_hexagon_protos.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/hvx_hexagon_protos.h rename to third_party/clang/lib/clang/17.0.1/include/hvx_hexagon_protos.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ia32intrin.h b/third_party/clang/lib/clang/17.0.1/include/ia32intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ia32intrin.h rename to third_party/clang/lib/clang/17.0.1/include/ia32intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/immintrin.h b/third_party/clang/lib/clang/17.0.1/include/immintrin.h similarity index 85% rename from third_party/clang/lib/clang/16.0.0/include/immintrin.h rename to third_party/clang/lib/clang/17.0.1/include/immintrin.h index 0d2e8be6e4..642602be14 100644 --- a/third_party/clang/lib/clang/16.0.0/include/immintrin.h +++ b/third_party/clang/lib/clang/17.0.1/include/immintrin.h @@ -269,6 +269,26 @@ #include #endif +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__SHA512__) +#include +#endif + +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__SM3__) +#include +#endif + +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__SM4__) +#include +#endif + +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__AVXVNNIINT16__) +#include +#endif + #if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ defined(__RDPID__) /// Returns the value of the IA32_TSC_AUX MSR (0xc0000103). @@ -284,30 +304,53 @@ _rdpid_u32(void) { #if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ defined(__RDRND__) +/// Returns a 16-bit hardware-generated random value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDRAND instruction. +/// +/// \param __p +/// A pointer to a 16-bit memory location to place the random value. 
+/// \returns 1 if the value was successfully generated, 0 otherwise. static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) _rdrand16_step(unsigned short *__p) { return (int)__builtin_ia32_rdrand16_step(__p); } +/// Returns a 32-bit hardware-generated random value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDRAND instruction. +/// +/// \param __p +/// A pointer to a 32-bit memory location to place the random value. +/// \returns 1 if the value was successfully generated, 0 otherwise. static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) _rdrand32_step(unsigned int *__p) { return (int)__builtin_ia32_rdrand32_step(__p); } -#ifdef __x86_64__ +/// Returns a 64-bit hardware-generated random value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDRAND instruction. +/// +/// \param __p +/// A pointer to a 64-bit memory location to place the random value. +/// \returns 1 if the value was successfully generated, 0 otherwise. static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) _rdrand64_step(unsigned long long *__p) { +#ifdef __x86_64__ return (int)__builtin_ia32_rdrand64_step(__p); -} #else -// We need to emulate the functionality of 64-bit rdrand with 2 32-bit -// rdrand instructions. -static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) -_rdrand64_step(unsigned long long *__p) -{ + // We need to emulate the functionality of 64-bit rdrand with 2 32-bit + // rdrand instructions. 
unsigned int __lo, __hi; unsigned int __res_lo = __builtin_ia32_rdrand32_step(&__lo); unsigned int __res_hi = __builtin_ia32_rdrand32_step(&__hi); @@ -318,55 +361,115 @@ _rdrand64_step(unsigned long long *__p) *__p = 0; return 0; } -} #endif +} #endif /* __RDRND__ */ #if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ defined(__FSGSBASE__) #ifdef __x86_64__ +/// Reads the FS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDFSBASE instruction. +/// +/// \returns The lower 32 bits of the FS base register. static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _readfsbase_u32(void) { return __builtin_ia32_rdfsbase32(); } +/// Reads the FS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDFSBASE instruction. +/// +/// \returns The contents of the FS base register. static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _readfsbase_u64(void) { return __builtin_ia32_rdfsbase64(); } +/// Reads the GS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDGSBASE instruction. +/// +/// \returns The lower 32 bits of the GS base register. static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _readgsbase_u32(void) { return __builtin_ia32_rdgsbase32(); } +/// Reads the GS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDGSBASE instruction. +/// +/// \returns The contents of the GS base register. static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _readgsbase_u64(void) { return __builtin_ia32_rdgsbase64(); } +/// Modifies the FS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the WRFSBASE instruction. +/// +/// \param __V +/// Value to use for the lower 32 bits of the FS base register. 
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _writefsbase_u32(unsigned int __V) { __builtin_ia32_wrfsbase32(__V); } +/// Modifies the FS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the WRFSBASE instruction. +/// +/// \param __V +/// Value to use for the FS base register. static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _writefsbase_u64(unsigned long long __V) { __builtin_ia32_wrfsbase64(__V); } +/// Modifies the GS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the WRGSBASE instruction. +/// +/// \param __V +/// Value to use for the lower 32 bits of the GS base register. static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _writegsbase_u32(unsigned int __V) { __builtin_ia32_wrgsbase32(__V); } +/// Modifies the GS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the WRFSBASE instruction. +/// +/// \param __V +/// Value to use for GS base register. 
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) _writegsbase_u64(unsigned long long __V) { @@ -538,6 +641,11 @@ _storebe_i64(void * __P, long long __D) { #include #endif +#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ + defined(__AMX_COMPLEX__) +#include +#endif + #if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ defined(__AVX512VP2INTERSECT__) #include diff --git a/third_party/clang/lib/clang/16.0.0/include/intrin.h b/third_party/clang/lib/clang/17.0.1/include/intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/intrin.h rename to third_party/clang/lib/clang/17.0.1/include/intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/inttypes.h b/third_party/clang/lib/clang/17.0.1/include/inttypes.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/inttypes.h rename to third_party/clang/lib/clang/17.0.1/include/inttypes.h diff --git a/third_party/clang/lib/clang/16.0.0/include/invpcidintrin.h b/third_party/clang/lib/clang/17.0.1/include/invpcidintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/invpcidintrin.h rename to third_party/clang/lib/clang/17.0.1/include/invpcidintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/iso646.h b/third_party/clang/lib/clang/17.0.1/include/iso646.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/iso646.h rename to third_party/clang/lib/clang/17.0.1/include/iso646.h diff --git a/third_party/clang/lib/clang/16.0.0/include/keylockerintrin.h b/third_party/clang/lib/clang/17.0.1/include/keylockerintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/keylockerintrin.h rename to third_party/clang/lib/clang/17.0.1/include/keylockerintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/larchintrin.h b/third_party/clang/lib/clang/17.0.1/include/larchintrin.h 
similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/larchintrin.h rename to third_party/clang/lib/clang/17.0.1/include/larchintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/limits.h b/third_party/clang/lib/clang/17.0.1/include/limits.h similarity index 96% rename from third_party/clang/lib/clang/16.0.0/include/limits.h rename to third_party/clang/lib/clang/17.0.1/include/limits.h index 32cc901b26..354e031a9d 100644 --- a/third_party/clang/lib/clang/16.0.0/include/limits.h +++ b/third_party/clang/lib/clang/17.0.1/include/limits.h @@ -52,7 +52,11 @@ #define LONG_MIN (-__LONG_MAX__ -1L) #define UCHAR_MAX (__SCHAR_MAX__*2 +1) -#define USHRT_MAX (__SHRT_MAX__ *2 +1) +#if __SHRT_WIDTH__ < __INT_WIDTH__ +#define USHRT_MAX (__SHRT_MAX__ * 2 + 1) +#else +#define USHRT_MAX (__SHRT_MAX__ * 2U + 1U) +#endif #define UINT_MAX (__INT_MAX__ *2U +1U) #define ULONG_MAX (__LONG_MAX__ *2UL+1UL) diff --git a/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/ctype.h b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/ctype.h new file mode 100644 index 0000000000..e20b7bb58f --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/ctype.h @@ -0,0 +1,85 @@ +//===-- Wrapper for C standard ctype.h declarations on the GPU ------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__ +#define __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__ + +#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__) +#error "This file is for GPU offloading compilation only" +#endif + +#include_next + +#if __has_include() + +#if defined(__HIP__) || defined(__CUDA__) +#define __LIBC_ATTRS __attribute__((device)) +#endif + +// The GNU headers like to provide these as macros, we need to undefine them so +// they do not conflict with the following definitions for the GPU. + +#pragma push_macro("isalnum") +#pragma push_macro("isalpha") +#pragma push_macro("isblank") +#pragma push_macro("iscntrl") +#pragma push_macro("isdigit") +#pragma push_macro("isgraph") +#pragma push_macro("islower") +#pragma push_macro("isprint") +#pragma push_macro("ispunct") +#pragma push_macro("isspace") +#pragma push_macro("isupper") +#pragma push_macro("isxdigit") +#pragma push_macro("tolower") +#pragma push_macro("toupper") + +#undef isalnum +#undef isalpha +#undef iscntrl +#undef isdigit +#undef islower +#undef isgraph +#undef isprint +#undef ispunct +#undef isspace +#undef isupper +#undef isblank +#undef isxdigit +#undef tolower +#undef toupper + +#pragma omp begin declare target + +#include + +#pragma omp end declare target + +// Restore the original macros when compiling on the host. 
+#if !defined(__NVPTX__) && !defined(__AMDGPU__) +#pragma pop_macro("isalnum") +#pragma pop_macro("isalpha") +#pragma pop_macro("isblank") +#pragma pop_macro("iscntrl") +#pragma pop_macro("isdigit") +#pragma pop_macro("isgraph") +#pragma pop_macro("islower") +#pragma pop_macro("isprint") +#pragma pop_macro("ispunct") +#pragma pop_macro("isspace") +#pragma pop_macro("isupper") +#pragma pop_macro("isxdigit") +#pragma pop_macro("tolower") +#pragma pop_macro("toupper") +#endif + +#undef __LIBC_ATTRS + +#endif + +#endif // __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__ diff --git a/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/inttypes.h b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/inttypes.h new file mode 100644 index 0000000000..415f1e4b7b --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/inttypes.h @@ -0,0 +1,34 @@ +//===-- Wrapper for C standard inttypes.h declarations on the GPU ---------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__ +#define __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__ + +#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__) +#error "This file is for GPU offloading compilation only" +#endif + +#include_next + +#if __has_include() + +#if defined(__HIP__) || defined(__CUDA__) +#define __LIBC_ATTRS __attribute__((device)) +#endif + +#pragma omp begin declare target + +#include + +#pragma omp end declare target + +#undef __LIBC_ATTRS + +#endif + +#endif // __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__ diff --git a/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/llvm-libc-decls/README.txt b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/llvm-libc-decls/README.txt new file mode 100644 index 0000000000..e012cd9e29 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/llvm-libc-decls/README.txt @@ -0,0 +1,6 @@ +LLVM libc declarations +====================== + +This directory will be filled by the `libc` project with declarations that are +availible on the device. Each declaration will use the `__LIBC_ATTRS` attribute +to control emission on the device side. diff --git a/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/stdio.h b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/stdio.h new file mode 100644 index 0000000000..51b0f0e330 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/stdio.h @@ -0,0 +1,34 @@ +//===-- Wrapper for C standard stdio.h declarations on the GPU ------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__ +#define __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__ + +#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__) +#error "This file is for GPU offloading compilation only" +#endif + +#include_next + +#if __has_include() + +#if defined(__HIP__) || defined(__CUDA__) +#define __LIBC_ATTRS __attribute__((device)) +#endif + +#pragma omp begin declare target + +#include + +#pragma omp end declare target + +#undef __LIBC_ATTRS + +#endif + +#endif // __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__ diff --git a/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/stdlib.h b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/stdlib.h new file mode 100644 index 0000000000..9cb2b4e64a --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/stdlib.h @@ -0,0 +1,42 @@ +//===-- Wrapper for C standard stdlib.h declarations on the GPU -----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__ +#define __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__ + +#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__) +#error "This file is for GPU offloading compilation only" +#endif + +#include_next + +#if __has_include() + +#if defined(__HIP__) || defined(__CUDA__) +#define __LIBC_ATTRS __attribute__((device)) +#endif + +#pragma omp begin declare target + +// The LLVM C library uses this type so we forward declare it. +typedef void (*__atexithandler_t)(void); + +// Enforce ABI compatibility with the structs used by the LLVM C library. 
+_Static_assert(__builtin_offsetof(div_t, quot) == 0, "ABI mismatch!"); +_Static_assert(__builtin_offsetof(ldiv_t, quot) == 0, "ABI mismatch!"); +_Static_assert(__builtin_offsetof(lldiv_t, quot) == 0, "ABI mismatch!"); + +#include + +#pragma omp end declare target + +#undef __LIBC_ATTRS + +#endif + +#endif // __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__ diff --git a/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/string.h b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/string.h new file mode 100644 index 0000000000..027c415c1d --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/llvm_libc_wrappers/string.h @@ -0,0 +1,37 @@ +//===-- Wrapper for C standard string.h declarations on the GPU -----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__ +#define __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__ + +#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__) +#error "This file is for GPU offloading compilation only" +#endif + +// FIXME: The GNU headers provide C++ standard compliant headers when in C++ +// mode and the LLVM libc does not. We cannot enable memchr, strchr, strchrnul, +// strpbrk, strrchr, strstr, or strcasestr until this is addressed. 
+#include_next + +#if __has_include() + +#if defined(__HIP__) || defined(__CUDA__) +#define __LIBC_ATTRS __attribute__((device)) +#endif + +#pragma omp begin declare target + +#include + +#pragma omp end declare target + +#undef __LIBC_ATTRS + +#endif + +#endif // __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__ diff --git a/third_party/clang/lib/clang/16.0.0/include/lwpintrin.h b/third_party/clang/lib/clang/17.0.1/include/lwpintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/lwpintrin.h rename to third_party/clang/lib/clang/17.0.1/include/lwpintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/lzcntintrin.h b/third_party/clang/lib/clang/17.0.1/include/lzcntintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/lzcntintrin.h rename to third_party/clang/lib/clang/17.0.1/include/lzcntintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/mm3dnow.h b/third_party/clang/lib/clang/17.0.1/include/mm3dnow.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/mm3dnow.h rename to third_party/clang/lib/clang/17.0.1/include/mm3dnow.h diff --git a/third_party/clang/lib/clang/16.0.0/include/mm_malloc.h b/third_party/clang/lib/clang/17.0.1/include/mm_malloc.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/mm_malloc.h rename to third_party/clang/lib/clang/17.0.1/include/mm_malloc.h diff --git a/third_party/clang/lib/clang/16.0.0/include/mmintrin.h b/third_party/clang/lib/clang/17.0.1/include/mmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/mmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/mmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/module.modulemap b/third_party/clang/lib/clang/17.0.1/include/module.modulemap similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/module.modulemap rename to third_party/clang/lib/clang/17.0.1/include/module.modulemap diff 
--git a/third_party/clang/lib/clang/16.0.0/include/movdirintrin.h b/third_party/clang/lib/clang/17.0.1/include/movdirintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/movdirintrin.h rename to third_party/clang/lib/clang/17.0.1/include/movdirintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/msa.h b/third_party/clang/lib/clang/17.0.1/include/msa.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/msa.h rename to third_party/clang/lib/clang/17.0.1/include/msa.h diff --git a/third_party/clang/lib/clang/17.0.1/include/mwaitxintrin.h b/third_party/clang/lib/clang/17.0.1/include/mwaitxintrin.h new file mode 100644 index 0000000000..65f427105b --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/mwaitxintrin.h @@ -0,0 +1,62 @@ +/*===---- mwaitxintrin.h - MONITORX/MWAITX intrinsics ----------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86INTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __MWAITXINTRIN_H +#define __MWAITXINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mwaitx"))) + +/// Establishes a linear address memory range to be monitored and puts +/// the processor in the monitor event pending state. Data stored in the +/// monitored address range causes the processor to exit the pending state. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c MONITORX instruction. +/// +/// \param __p +/// The memory range to be monitored. The size of the range is determined by +/// CPUID function 0000_0005h. 
+/// \param __extensions +/// Optional extensions for the monitoring state. +/// \param __hints +/// Optional hints for the monitoring state. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_monitorx(void * __p, unsigned __extensions, unsigned __hints) +{ + __builtin_ia32_monitorx(__p, __extensions, __hints); +} + +/// Used with the \c MONITORX instruction to wait while the processor is in +/// the monitor event pending state. Data stored in the monitored address +/// range, or an interrupt, causes the processor to exit the pending state. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c MWAITX instruction. +/// +/// \param __extensions +/// Optional extensions for the monitoring state, which can vary by +/// processor. +/// \param __hints +/// Optional hints for the monitoring state, which can vary by processor. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock) +{ + __builtin_ia32_mwaitx(__extensions, __hints, __clock); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __MWAITXINTRIN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/nmmintrin.h b/third_party/clang/lib/clang/17.0.1/include/nmmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/nmmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/nmmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/opencl-c-base.h b/third_party/clang/lib/clang/17.0.1/include/opencl-c-base.h similarity index 99% rename from third_party/clang/lib/clang/16.0.0/include/opencl-c-base.h rename to third_party/clang/lib/clang/17.0.1/include/opencl-c-base.h index fad2f9c027..af3deae892 100644 --- a/third_party/clang/lib/clang/16.0.0/include/opencl-c-base.h +++ b/third_party/clang/lib/clang/17.0.1/include/opencl-c-base.h @@ -474,6 +474,9 @@ typedef enum memory_order #define CLK_HALF_FLOAT 0x10DD #define CLK_FLOAT 0x10DE #define CLK_UNORM_INT24 0x10DF +#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0 
+#define CLK_UNORM_INT_101010_2 0x10E0 +#endif // __OPENCL_C_VERSION__ >= CL_VERSION_3_0 // Channel order, numbering must be aligned with cl_channel_order in cl.h // diff --git a/third_party/clang/lib/clang/16.0.0/include/opencl-c.h b/third_party/clang/lib/clang/17.0.1/include/opencl-c.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/opencl-c.h rename to third_party/clang/lib/clang/17.0.1/include/opencl-c.h diff --git a/third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/__clang_openmp_device_functions.h b/third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/__clang_openmp_device_functions.h similarity index 99% rename from third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/__clang_openmp_device_functions.h rename to third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/__clang_openmp_device_functions.h index 279fb26fba..d5b6846b03 100644 --- a/third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/__clang_openmp_device_functions.h +++ b/third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/__clang_openmp_device_functions.h @@ -40,7 +40,6 @@ extern "C" { // Import types which will be used by __clang_hip_libdevice_declares.h #ifndef __cplusplus -#include #include #endif diff --git a/third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/cmath b/third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/cmath similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/cmath rename to third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/cmath diff --git a/third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/complex b/third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/complex similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/complex rename to third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/complex diff --git a/third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/complex.h 
b/third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/complex.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/complex.h rename to third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/complex.h diff --git a/third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/complex_cmath.h b/third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/complex_cmath.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/complex_cmath.h rename to third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/complex_cmath.h diff --git a/third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/math.h b/third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/math.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/math.h rename to third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/math.h diff --git a/third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/new b/third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/new similarity index 95% rename from third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/new rename to third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/new index 985ddc567f..8bad3f19d6 100644 --- a/third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/new +++ b/third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/new @@ -13,7 +13,7 @@ // which do not use nothrow_t are provided without the header. 
#include_next -#if defined(__NVPTX__) && defined(_OPENMP) +#if (defined(__NVPTX__) || defined(__AMDGPU__)) && defined(_OPENMP) #include diff --git a/third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/stdlib.h b/third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/stdlib.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/stdlib.h rename to third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/stdlib.h diff --git a/third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/time.h b/third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/time.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/openmp_wrappers/time.h rename to third_party/clang/lib/clang/17.0.1/include/openmp_wrappers/time.h diff --git a/third_party/clang/lib/clang/16.0.0/include/pconfigintrin.h b/third_party/clang/lib/clang/17.0.1/include/pconfigintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/pconfigintrin.h rename to third_party/clang/lib/clang/17.0.1/include/pconfigintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/pkuintrin.h b/third_party/clang/lib/clang/17.0.1/include/pkuintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/pkuintrin.h rename to third_party/clang/lib/clang/17.0.1/include/pkuintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/pmmintrin.h b/third_party/clang/lib/clang/17.0.1/include/pmmintrin.h similarity index 93% rename from third_party/clang/lib/clang/16.0.0/include/pmmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/pmmintrin.h index ee660e95d2..203c0aa0f8 100644 --- a/third_party/clang/lib/clang/16.0.0/include/pmmintrin.h +++ b/third_party/clang/lib/clang/17.0.1/include/pmmintrin.h @@ -253,9 +253,12 @@ _mm_movedup_pd(__m128d __a) /// the processor in the monitor event pending state. Data stored in the /// monitored address range causes the processor to exit the pending state. 
/// +/// The \c MONITOR instruction can be used in kernel mode, and in other modes +/// if MSR C001_0015h[MonMwaitUserEn] is set. +/// /// \headerfile /// -/// This intrinsic corresponds to the MONITOR instruction. +/// This intrinsic corresponds to the \c MONITOR instruction. /// /// \param __p /// The memory range to be monitored. The size of the range is determined by @@ -270,19 +273,22 @@ _mm_monitor(void const *__p, unsigned __extensions, unsigned __hints) __builtin_ia32_monitor(__p, __extensions, __hints); } -/// Used with the MONITOR instruction to wait while the processor is in +/// Used with the \c MONITOR instruction to wait while the processor is in /// the monitor event pending state. Data stored in the monitored address -/// range causes the processor to exit the pending state. +/// range, or an interrupt, causes the processor to exit the pending state. +/// +/// The \c MWAIT instruction can be used in kernel mode, and in other modes if +/// MSR C001_0015h[MonMwaitUserEn] is set. /// /// \headerfile /// -/// This intrinsic corresponds to the MWAIT instruction. +/// This intrinsic corresponds to the \c MWAIT instruction. /// /// \param __extensions -/// Optional extensions for the monitoring state, which may vary by +/// Optional extensions for the monitoring state, which can vary by /// processor. /// \param __hints -/// Optional hints for the monitoring state, which may vary by processor. +/// Optional hints for the monitoring state, which can vary by processor. 
static __inline__ void __DEFAULT_FN_ATTRS _mm_mwait(unsigned __extensions, unsigned __hints) { diff --git a/third_party/clang/lib/clang/16.0.0/include/popcntintrin.h b/third_party/clang/lib/clang/17.0.1/include/popcntintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/popcntintrin.h rename to third_party/clang/lib/clang/17.0.1/include/popcntintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/bmi2intrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/bmi2intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/bmi2intrin.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/bmi2intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/bmiintrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/bmiintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/bmiintrin.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/bmiintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/emmintrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/emmintrin.h similarity index 99% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/emmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/emmintrin.h index 0814ea5593..fc18ab9d43 100644 --- a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/emmintrin.h +++ b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/emmintrin.h @@ -46,6 +46,7 @@ /* SSE2 */ typedef __vector double __v2df; +typedef __vector float __v4f; typedef __vector long long __v2di; typedef __vector unsigned long long __v2du; typedef __vector int __v4si; @@ -951,7 +952,7 @@ extern __inline __m128d _mm_cvtpi32_pd(__m64 __A) { __v4si __temp; __v2di __tmp2; - __v2df __result; + __v4f __result; __temp = (__v4si)vec_splats(__A); __tmp2 = (__v2di)vec_unpackl(__temp); diff --git 
a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/immintrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/immintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/immintrin.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/immintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/mm_malloc.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/mm_malloc.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/mm_malloc.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/mm_malloc.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/mmintrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/mmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/mmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/mmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/nmmintrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/nmmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/nmmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/nmmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/pmmintrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/pmmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/pmmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/pmmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/smmintrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/smmintrin.h similarity index 99% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/smmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/smmintrin.h index 6fe6d2a157..349b395c4f 
100644 --- a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/smmintrin.h +++ b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/smmintrin.h @@ -305,9 +305,9 @@ extern __inline int extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) _mm_blend_epi16(__m128i __A, __m128i __B, const int __imm8) { - __v16qi __charmask = vec_splats((signed char)__imm8); + __v16qu __charmask = vec_splats((unsigned char)__imm8); __charmask = vec_gb(__charmask); - __v8hu __shortmask = (__v8hu)vec_unpackh(__charmask); + __v8hu __shortmask = (__v8hu)vec_unpackh((__v16qi)__charmask); #ifdef __BIG_ENDIAN__ __shortmask = vec_reve(__shortmask); #endif diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/tmmintrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/tmmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/tmmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/tmmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/x86gprintrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/x86gprintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/x86gprintrin.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/x86gprintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/x86intrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/x86intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/x86intrin.h rename to third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/x86intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/xmmintrin.h b/third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/xmmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ppc_wrappers/xmmintrin.h rename to 
third_party/clang/lib/clang/17.0.1/include/ppc_wrappers/xmmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/prfchiintrin.h b/third_party/clang/lib/clang/17.0.1/include/prfchiintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/prfchiintrin.h rename to third_party/clang/lib/clang/17.0.1/include/prfchiintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/prfchwintrin.h b/third_party/clang/lib/clang/17.0.1/include/prfchwintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/prfchwintrin.h rename to third_party/clang/lib/clang/17.0.1/include/prfchwintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/ptwriteintrin.h b/third_party/clang/lib/clang/17.0.1/include/ptwriteintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/ptwriteintrin.h rename to third_party/clang/lib/clang/17.0.1/include/ptwriteintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/raointintrin.h b/third_party/clang/lib/clang/17.0.1/include/raointintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/raointintrin.h rename to third_party/clang/lib/clang/17.0.1/include/raointintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/rdpruintrin.h b/third_party/clang/lib/clang/17.0.1/include/rdpruintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/rdpruintrin.h rename to third_party/clang/lib/clang/17.0.1/include/rdpruintrin.h diff --git a/third_party/clang/lib/clang/17.0.1/include/rdseedintrin.h b/third_party/clang/lib/clang/17.0.1/include/rdseedintrin.h new file mode 100644 index 0000000000..8a4fe09305 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/rdseedintrin.h @@ -0,0 +1,105 @@ +/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+ * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __RDSEEDINTRIN_H +#define __RDSEEDINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed"))) + +/// Stores a hardware-generated 16-bit random value in the memory at \a __p. +/// +/// The random number generator complies with NIST SP800-90B and SP800-90C. +/// +/// \code{.operation} +/// IF HW_NRND_GEN.ready == 1 +/// Store16(__p, HW_NRND_GEN.data) +/// result := 1 +/// ELSE +/// Store16(__p, 0) +/// result := 0 +/// END +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c RDSEED instruction. +/// +/// \param __p +/// Pointer to memory for storing the 16-bit random number. +/// \returns 1 if a random number was generated, 0 if not. +static __inline__ int __DEFAULT_FN_ATTRS +_rdseed16_step(unsigned short *__p) +{ + return (int) __builtin_ia32_rdseed16_step(__p); +} + +/// Stores a hardware-generated 32-bit random value in the memory at \a __p. +/// +/// The random number generator complies with NIST SP800-90B and SP800-90C. +/// +/// \code{.operation} +/// IF HW_NRND_GEN.ready == 1 +/// Store32(__p, HW_NRND_GEN.data) +/// result := 1 +/// ELSE +/// Store32(__p, 0) +/// result := 0 +/// END +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c RDSEED instruction. +/// +/// \param __p +/// Pointer to memory for storing the 32-bit random number. +/// \returns 1 if a random number was generated, 0 if not. 
+static __inline__ int __DEFAULT_FN_ATTRS +_rdseed32_step(unsigned int *__p) +{ + return (int) __builtin_ia32_rdseed32_step(__p); +} + +#ifdef __x86_64__ +/// Stores a hardware-generated 64-bit random value in the memory at \a __p. +/// +/// The random number generator complies with NIST SP800-90B and SP800-90C. +/// +/// \code{.operation} +/// IF HW_NRND_GEN.ready == 1 +/// Store64(__p, HW_NRND_GEN.data) +/// result := 1 +/// ELSE +/// Store64(__p, 0) +/// result := 0 +/// END +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c RDSEED instruction. +/// +/// \param __p +/// Pointer to memory for storing the 64-bit random number. +/// \returns 1 if a random number was generated, 0 if not. +static __inline__ int __DEFAULT_FN_ATTRS +_rdseed64_step(unsigned long long *__p) +{ + return (int) __builtin_ia32_rdseed64_step(__p); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif /* __RDSEEDINTRIN_H */ diff --git a/third_party/clang/lib/clang/17.0.1/include/riscv_ntlh.h b/third_party/clang/lib/clang/17.0.1/include/riscv_ntlh.h new file mode 100644 index 0000000000..9ce1709205 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/riscv_ntlh.h @@ -0,0 +1,28 @@ +/*===---- riscv_ntlh.h - RISC-V NTLH intrinsics ----------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __RISCV_NTLH_H +#define __RISCV_NTLH_H + +#ifndef __riscv_zihintntl +#error "NTLH intrinsics require the NTLH extension." 
+#endif + +enum { + __RISCV_NTLH_INNERMOST_PRIVATE = 2, + __RISCV_NTLH_ALL_PRIVATE, + __RISCV_NTLH_INNERMOST_SHARED, + __RISCV_NTLH_ALL +}; + +#define __riscv_ntl_load(PTR, DOMAIN) __builtin_riscv_ntl_load((PTR), (DOMAIN)) +#define __riscv_ntl_store(PTR, VAL, DOMAIN) \ + __builtin_riscv_ntl_store((PTR), (VAL), (DOMAIN)) + +#endif \ No newline at end of file diff --git a/third_party/clang/lib/clang/16.0.0/include/rtmintrin.h b/third_party/clang/lib/clang/17.0.1/include/rtmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/rtmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/rtmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/s390intrin.h b/third_party/clang/lib/clang/17.0.1/include/s390intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/s390intrin.h rename to third_party/clang/lib/clang/17.0.1/include/s390intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/serializeintrin.h b/third_party/clang/lib/clang/17.0.1/include/serializeintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/serializeintrin.h rename to third_party/clang/lib/clang/17.0.1/include/serializeintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/sgxintrin.h b/third_party/clang/lib/clang/17.0.1/include/sgxintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/sgxintrin.h rename to third_party/clang/lib/clang/17.0.1/include/sgxintrin.h diff --git a/third_party/clang/lib/clang/17.0.1/include/sha512intrin.h b/third_party/clang/lib/clang/17.0.1/include/sha512intrin.h new file mode 100644 index 0000000000..065ef5dac2 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/sha512intrin.h @@ -0,0 +1,200 @@ +/*===--------------- sha512intrin.h - SHA512 intrinsics -----------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+ * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif // __IMMINTRIN_H + +#ifndef __SHA512INTRIN_H +#define __SHA512INTRIN_H + +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("sha512"), \ + __min_vector_width__(256))) + +/// This intrinisc is one of the two SHA512 message scheduling instructions. +/// The intrinsic performs an intermediate calculation for the next four +/// SHA512 message qwords. The calculated results are stored in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_sha512msg1_epi64(__m256i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSHA512MSG1 instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x long long]. +/// \param __B +/// A 128-bit vector of [2 x long long]. +/// \returns +/// A 256-bit vector of [4 x long long]. +/// +/// \code{.operation} +/// DEFINE ROR64(qword, n) { +/// count := n % 64 +/// dest := (qword >> count) | (qword << (64 - count)) +/// RETURN dest +/// } +/// DEFINE SHR64(qword, n) { +/// RETURN qword >> n +/// } +/// DEFINE s0(qword): +/// RETURN ROR64(qword,1) ^ ROR64(qword, 8) ^ SHR64(qword, 7) +/// } +/// W[4] := __B.qword[0] +/// W[3] := __A.qword[3] +/// W[2] := __A.qword[2] +/// W[1] := __A.qword[1] +/// W[0] := __A.qword[0] +/// dst.qword[3] := W[3] + s0(W[4]) +/// dst.qword[2] := W[2] + s0(W[3]) +/// dst.qword[1] := W[1] + s0(W[2]) +/// dst.qword[0] := W[0] + s0(W[1]) +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sha512msg1_epi64(__m256i __A, __m128i __B) { + return (__m256i)__builtin_ia32_vsha512msg1((__v4du)__A, (__v2du)__B); +} + +/// This intrinisc is one of the two SHA512 message scheduling instructions. 
+/// The intrinsic performs the final calculation for the next four SHA512 +/// message qwords. The calculated results are stored in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_sha512msg2_epi64(__m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSHA512MSG2 instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x long long]. +/// \param __B +/// A 256-bit vector of [4 x long long]. +/// \returns +/// A 256-bit vector of [4 x long long]. +/// +/// \code{.operation} +/// DEFINE ROR64(qword, n) { +/// count := n % 64 +/// dest := (qword >> count) | (qword << (64 - count)) +/// RETURN dest +/// } +/// DEFINE SHR64(qword, n) { +/// RETURN qword >> n +/// } +/// DEFINE s1(qword) { +/// RETURN ROR64(qword,19) ^ ROR64(qword, 61) ^ SHR64(qword, 6) +/// } +/// W[14] := __B.qword[2] +/// W[15] := __B.qword[3] +/// W[16] := __A.qword[0] + s1(W[14]) +/// W[17] := __A.qword[1] + s1(W[15]) +/// W[18] := __A.qword[2] + s1(W[16]) +/// W[19] := __A.qword[3] + s1(W[17]) +/// dst.qword[3] := W[19] +/// dst.qword[2] := W[18] +/// dst.qword[1] := W[17] +/// dst.qword[0] := W[16] +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sha512msg2_epi64(__m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vsha512msg2((__v4du)__A, (__v4du)__B); +} + +/// This intrinisc performs two rounds of SHA512 operation using initial SHA512 +/// state (C,D,G,H) from \a __A, an initial SHA512 state (A,B,E,F) from +/// \a __A, and a pre-computed sum of the next two round message qwords and +/// the corresponding round constants from \a __C (only the two lower qwords +/// of the third operand). The updated SHA512 state (A,B,E,F) is written to +/// \a __A, and \a __A can be used as the updated state (C,D,G,H) in later +/// rounds. 
+/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSHA512RNDS2 instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x long long]. +/// \param __B +/// A 256-bit vector of [4 x long long]. +/// \param __C +/// A 128-bit vector of [2 x long long]. +/// \returns +/// A 256-bit vector of [4 x long long]. +/// +/// \code{.operation} +/// DEFINE ROR64(qword, n) { +/// count := n % 64 +/// dest := (qword >> count) | (qword << (64 - count)) +/// RETURN dest +/// } +/// DEFINE SHR64(qword, n) { +/// RETURN qword >> n +/// } +/// DEFINE cap_sigma0(qword) { +/// RETURN ROR64(qword,28) ^ ROR64(qword, 34) ^ ROR64(qword, 39) +/// } +/// DEFINE cap_sigma1(qword) { +/// RETURN ROR64(qword,14) ^ ROR64(qword, 18) ^ ROR64(qword, 41) +/// } +/// DEFINE MAJ(a,b,c) { +/// RETURN (a & b) ^ (a & c) ^ (b & c) +/// } +/// DEFINE CH(e,f,g) { +/// RETURN (e & f) ^ (g & ~e) +/// } +/// A[0] := __B.qword[3] +/// B[0] := __B.qword[2] +/// C[0] := __C.qword[3] +/// D[0] := __C.qword[2] +/// E[0] := __B.qword[1] +/// F[0] := __B.qword[0] +/// G[0] := __C.qword[1] +/// H[0] := __C.qword[0] +/// WK[0]:= __A.qword[0] +/// WK[1]:= __A.qword[1] +/// FOR i := 0 to 1: +/// A[i+1] := CH(E[i], F[i], G[i]) + +/// cap_sigma1(E[i]) + WK[i] + H[i] + +/// MAJ(A[i], B[i], C[i]) + +/// cap_sigma0(A[i]) +/// B[i+1] := A[i] +/// C[i+1] := B[i] +/// D[i+1] := C[i] +/// E[i+1] := CH(E[i], F[i], G[i]) + +/// cap_sigma1(E[i]) + WK[i] + H[i] + D[i] +/// F[i+1] := E[i] +/// G[i+1] := F[i] +/// H[i+1] := G[i] +/// ENDFOR +/// dst.qword[3] := A[2] +/// dst.qword[2] := B[2] +/// dst.qword[1] := E[2] +/// dst.qword[0] := F[2] +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C) { + return (__m256i)__builtin_ia32_vsha512rnds2((__v4du)__A, (__v4du)__B, + (__v2du)__C); +} + +#undef 
__DEFAULT_FN_ATTRS256 + +#endif // __SHA512INTRIN_H diff --git a/third_party/clang/lib/clang/17.0.1/include/shaintrin.h b/third_party/clang/lib/clang/17.0.1/include/shaintrin.h new file mode 100644 index 0000000000..232e1fa298 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/shaintrin.h @@ -0,0 +1,189 @@ +/*===---- shaintrin.h - SHA intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __SHAINTRIN_H +#define __SHAINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha"), __min_vector_width__(128))) + +/// Performs four iterations of the inner loop of the SHA-1 message digest +/// algorithm using the starting SHA-1 state (A, B, C, D) from the 128-bit +/// vector of [4 x i32] in \a V1 and the next four 32-bit elements of the +/// message from the 128-bit vector of [4 x i32] in \a V2. Note that the +/// SHA-1 state variable E must have already been added to \a V2 +/// (\c _mm_sha1nexte_epu32() can perform this step). Returns the updated +/// SHA-1 state (A, B, C, D) as a 128-bit vector of [4 x i32]. +/// +/// The SHA-1 algorithm has an inner loop of 80 iterations, twenty each +/// with a different combining function and rounding constant. This +/// intrinsic performs four iterations using a combining function and +/// rounding constant selected by \a M[1:0]. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_sha1rnds4_epu32(__m128i V1, __m128i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c SHA1RNDS4 instruction. 
+/// +/// \param V1 +/// A 128-bit vector of [4 x i32] containing the initial SHA-1 state. +/// \param V2 +/// A 128-bit vector of [4 x i32] containing the next four elements of +/// the message, plus SHA-1 state variable E. +/// \param M +/// An immediate value where bits [1:0] select among four possible +/// combining functions and rounding constants (not specified here). +/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state. +#define _mm_sha1rnds4_epu32(V1, V2, M) \ + __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M)) + +/// Calculates the SHA-1 state variable E from the SHA-1 state variables in +/// the 128-bit vector of [4 x i32] in \a __X, adds that to the next set of +/// four message elements in the 128-bit vector of [4 x i32] in \a __Y, and +/// returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA1NEXTE instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing the current SHA-1 state. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing the next four elements of the +/// message. +/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 +/// values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha1nexte_epu32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y); +} + +/// Performs an intermediate calculation for deriving the next four SHA-1 +/// message elements using previous message elements from the 128-bit +/// vectors of [4 x i32] in \a __X and \a __Y, and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA1MSG1 instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing previous message elements. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing previous message elements. +/// \returns A 128-bit vector of [4 x i32] containing the derived SHA-1 +/// elements. 
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha1msg1_epu32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y); +} + +/// Performs the final calculation for deriving the next four SHA-1 message +/// elements using previous message elements from the 128-bit vectors of +/// [4 x i32] in \a __X and \a __Y, and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA1MSG2 instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing an intermediate result. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing previous message values. +/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 +/// values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha1msg2_epu32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y); +} + +/// Performs two rounds of SHA-256 operation using the following inputs: a +/// starting SHA-256 state (C, D, G, H) from the 128-bit vector of +/// [4 x i32] in \a __X; a starting SHA-256 state (A, B, E, F) from the +/// 128-bit vector of [4 x i32] in \a __Y; and a pre-computed sum of the +/// next two message elements (unsigned 32-bit integers) and corresponding +/// rounding constants from the 128-bit vector of [4 x i32] in \a __Z. +/// Returns the updated SHA-256 state (A, B, E, F) as a 128-bit vector of +/// [4 x i32]. +/// +/// The SHA-256 algorithm has a core loop of 64 iterations. This intrinsic +/// performs two of those iterations. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA256RNDS2 instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256 +/// state. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256 +/// state. +/// \param __Z +/// A 128-bit vector of [4 x i32] containing additional input to the +/// SHA-256 operation. 
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z); +} + +/// Performs an intermediate calculation for deriving the next four SHA-256 +/// message elements using previous message elements from the 128-bit +/// vectors of [4 x i32] in \a __X and \a __Y, and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA256MSG1 instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing previous message elements. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing previous message elements. +/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-256 +/// values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha256msg1_epu32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y); +} + +/// Performs the final calculation for deriving the next four SHA-256 message +/// elements using previous message elements from the 128-bit vectors of +/// [4 x i32] in \a __X and \a __Y, and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA256MSG2 instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing an intermediate result. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing previous message values. +/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-256 +/// values. 
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha256msg2_epu32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_sha256msg2((__v4si)__X, (__v4si)__Y); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __SHAINTRIN_H */ diff --git a/third_party/clang/lib/clang/17.0.1/include/sifive_vector.h b/third_party/clang/lib/clang/17.0.1/include/sifive_vector.h new file mode 100644 index 0000000000..42d7224db6 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/sifive_vector.h @@ -0,0 +1,16 @@ +//===----- sifive_vector.h - SiFive Vector definitions --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _SIFIVE_VECTOR_H_ +#define _SIFIVE_VECTOR_H_ + +#include "riscv_vector.h" + +#pragma clang riscv intrinsic sifive_vector + +#endif //_SIFIVE_VECTOR_H_ diff --git a/third_party/clang/lib/clang/17.0.1/include/sm3intrin.h b/third_party/clang/lib/clang/17.0.1/include/sm3intrin.h new file mode 100644 index 0000000000..8a3d8bc9ef --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/sm3intrin.h @@ -0,0 +1,238 @@ +/*===-------------------- sm3intrin.h - SM3 intrinsics ---------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." 
+#endif // __IMMINTRIN_H + +#ifndef __SM3INTRIN_H +#define __SM3INTRIN_H + +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("sm3"), \ + __min_vector_width__(128))) + +/// This intrinisc is one of the two SM3 message scheduling intrinsics. The +/// intrinsic performs an initial calculation for the next four SM3 message +/// words. The calculated results are stored in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_sm3msg1_epi32(__m128i __A, __m128i __B, __m128i __C) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM3MSG1 instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x int]. +/// \param __B +/// A 128-bit vector of [4 x int]. +/// \param __C +/// A 128-bit vector of [4 x int]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32 - count)) +/// RETURN dest +/// } +/// DEFINE P1(x) { +/// RETURN x ^ ROL32(x, 15) ^ ROL32(x, 23) +/// } +/// W[0] := __C.dword[0] +/// W[1] := __C.dword[1] +/// W[2] := __C.dword[2] +/// W[3] := __C.dword[3] +/// W[7] := __A.dword[0] +/// W[8] := __A.dword[1] +/// W[9] := __A.dword[2] +/// W[10] := __A.dword[3] +/// W[13] := __B.dword[0] +/// W[14] := __B.dword[1] +/// W[15] := __B.dword[2] +/// TMP0 := W[7] ^ W[0] ^ ROL32(W[13], 15) +/// TMP1 := W[8] ^ W[1] ^ ROL32(W[14], 15) +/// TMP2 := W[9] ^ W[2] ^ ROL32(W[15], 15) +/// TMP3 := W[10] ^ W[3] +/// dst.dword[0] := P1(TMP0) +/// dst.dword[1] := P1(TMP1) +/// dst.dword[2] := P1(TMP2) +/// dst.dword[3] := P1(TMP3) +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg1_epi32(__m128i __A, + __m128i __B, + __m128i __C) { + return (__m128i)__builtin_ia32_vsm3msg1((__v4su)__A, (__v4su)__B, + (__v4su)__C); +} + +/// This intrinisc is one of the two SM3 message scheduling intrinsics. 
The +/// intrinsic performs the final calculation for the next four SM3 message +/// words. The calculated results are stored in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_sm3msg2_epi32(__m128i __A, __m128i __B, __m128i __C) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM3MSG2 instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x int]. +/// \param __B +/// A 128-bit vector of [4 x int]. +/// \param __C +/// A 128-bit vector of [4 x int]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// WTMP[0] := __A.dword[0] +/// WTMP[1] := __A.dword[1] +/// WTMP[2] := __A.dword[2] +/// WTMP[3] := __A.dword[3] +/// W[3] := __B.dword[0] +/// W[4] := __B.dword[1] +/// W[5] := __B.dword[2] +/// W[6] := __B.dword[3] +/// W[10] := __C.dword[0] +/// W[11] := __C.dword[1] +/// W[12] := __C.dword[2] +/// W[13] := __C.dword[3] +/// W[16] := ROL32(W[3], 7) ^ W[10] ^ WTMP[0] +/// W[17] := ROL32(W[4], 7) ^ W[11] ^ WTMP[1] +/// W[18] := ROL32(W[5], 7) ^ W[12] ^ WTMP[2] +/// W[19] := ROL32(W[6], 7) ^ W[13] ^ WTMP[3] +/// W[19] := W[19] ^ ROL32(W[16], 6) ^ ROL32(W[16], 15) ^ ROL32(W[16], 30) +/// dst.dword[0] := W[16] +/// dst.dword[1] := W[17] +/// dst.dword[2] := W[18] +/// dst.dword[3] := W[19] +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg2_epi32(__m128i __A, + __m128i __B, + __m128i __C) { + return (__m128i)__builtin_ia32_vsm3msg2((__v4su)__A, (__v4su)__B, + (__v4su)__C); +} + +/// This intrinsic performs two rounds of SM3 operation using initial SM3 state +/// (C, D, G, H) from \a __A, an initial SM3 states (A, B, E, F) +/// from \a __B and a pre-computed words from the \a __C. \a __A with +/// initial SM3 state of (C, D, G, H) assumes input of non-rotated left +/// variables from previous state. 
The updated SM3 state (A, B, E, F) is +/// written to \a __A. The \a imm8 should contain the even round number +/// for the first of the two rounds computed by this instruction. The +/// computation masks the \a imm8 value by AND’ing it with 0x3E so that only +/// even round numbers from 0 through 62 are used for this operation. The +/// calculated results are stored in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_sm3rnds2_epi32(__m128i __A, __m128i __B, __m128i __C, const int +/// imm8) \endcode +/// +/// This intrinsic corresponds to the \c VSM3RNDS2 instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x int]. +/// \param __B +/// A 128-bit vector of [4 x int]. +/// \param __C +/// A 128-bit vector of [4 x int]. +/// \param imm8 +/// A 8-bit constant integer. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// DEFINE P0(dword) { +/// RETURN dword ^ ROL32(dword, 9) ^ ROL32(dword, 17) +/// } +/// DEFINE FF(x,y,z, round){ +/// IF round < 16 +/// RETURN (x ^ y ^ z) +/// ELSE +/// RETURN (x & y) | (x & z) | (y & z) +/// FI +/// } +/// DEFINE GG(x, y, z, round){ +/// IF round < 16 +/// RETURN (x ^ y ^ z) +/// ELSE +/// RETURN (x & y) | (~x & z) +/// FI +/// } +/// A[0] := __B.dword[3] +/// B[0] := __B.dword[2] +/// C[0] := __A.dword[3] +/// D[0] := __A.dword[2] +/// E[0] := __B.dword[1] +/// F[0] := __B.dword[0] +/// G[0] := __A.dword[1] +/// H[0] := __A.dword[0] +/// W[0] := __C.dword[0] +/// W[1] := __C.dword[1] +/// W[4] := __C.dword[2] +/// W[5] := __C.dword[3] +/// C[0] := ROL32(C[0], 9) +/// D[0] := ROL32(D[0], 9) +/// G[0] := ROL32(G[0], 19) +/// H[0] := ROL32(H[0], 19) +/// ROUND := __D & 0x3E +/// IF ROUND < 16 +/// CONST := 0x79CC4519 +/// ELSE +/// CONST := 0x7A879D8A +/// FI +/// CONST := ROL32(CONST,ROUND) +/// FOR i:= 0 to 1 +/// S1 := ROL32((ROL32(A[i], 12) + E[i] + 
CONST), 7) +/// S2 := S1 ^ ROL32(A[i], 12) +/// T1 := FF(A[i], B[i], C[i], ROUND) + D[i] + S2 + (W[i] ^ W[i+4]) +/// T2 := GG(E[i], F[i], G[i], ROUND) + H[i] + S1 + W[i] +/// D[i+1] := C[i] +/// C[i+1] := ROL32(B[i],9) +/// B[i+1] := A[i] +/// A[i+1] := T1 +/// H[i+1] := G[i] +/// G[i+1] := ROL32(F[i], 19) +/// F[i+1] := E[i] +/// E[i+1] := P0(T2) +/// CONST := ROL32(CONST, 1) +/// ENDFOR +/// dst.dword[3] := A[2] +/// dst.dword[2] := B[2] +/// dst.dword[1] := E[2] +/// dst.dword[0] := F[2] +/// dst[MAX:128] := 0 +/// \endcode +#define _mm_sm3rnds2_epi32(A, B, C, D) \ + (__m128i) __builtin_ia32_vsm3rnds2((__v4su)A, (__v4su)B, (__v4su)C, (int)D) + +#undef __DEFAULT_FN_ATTRS128 + +#endif // __SM3INTRIN_H diff --git a/third_party/clang/lib/clang/17.0.1/include/sm4intrin.h b/third_party/clang/lib/clang/17.0.1/include/sm4intrin.h new file mode 100644 index 0000000000..47aeec46a6 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/sm4intrin.h @@ -0,0 +1,269 @@ +/*===--------------- sm4intrin.h - SM4 intrinsics -----------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif // __IMMINTRIN_H + +#ifndef __SM4INTRIN_H +#define __SM4INTRIN_H + +/// This intrinsic performs four rounds of SM4 key expansion. The intrinsic +/// operates on independent 128-bit lanes. The calculated results are +/// stored in \a dst. +/// \headerfile +/// +/// \code +/// __m128i _mm_sm4key4_epi32(__m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM4KEY4 instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x int]. +/// \param __B +/// A 128-bit vector of [4 x int]. +/// \returns +/// A 128-bit vector of [4 x int]. 
+/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// DEFINE SBOX_BYTE(dword, i) { +/// RETURN sbox[dword.byte[i]] +/// } +/// DEFINE lower_t(dword) { +/// tmp.byte[0] := SBOX_BYTE(dword, 0) +/// tmp.byte[1] := SBOX_BYTE(dword, 1) +/// tmp.byte[2] := SBOX_BYTE(dword, 2) +/// tmp.byte[3] := SBOX_BYTE(dword, 3) +/// RETURN tmp +/// } +/// DEFINE L_KEY(dword) { +/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23) +/// } +/// DEFINE T_KEY(dword) { +/// RETURN L_KEY(lower_t(dword)) +/// } +/// DEFINE F_KEY(X0, X1, X2, X3, round_key) { +/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key) +/// } +/// FOR i:= 0 to 0 +/// P[0] := __B.xmm[i].dword[0] +/// P[1] := __B.xmm[i].dword[1] +/// P[2] := __B.xmm[i].dword[2] +/// P[3] := __B.xmm[i].dword[3] +/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0]) +/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1]) +/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2]) +/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3]) +/// DEST.xmm[i].dword[0] := C[0] +/// DEST.xmm[i].dword[1] := C[1] +/// DEST.xmm[i].dword[2] := C[2] +/// DEST.xmm[i].dword[3] := C[3] +/// ENDFOR +/// DEST[MAX:128] := 0 +/// \endcode +#define _mm_sm4key4_epi32(A, B) \ + (__m128i) __builtin_ia32_vsm4key4128((__v4su)A, (__v4su)B) + +/// This intrinsic performs four rounds of SM4 key expansion. The intrinsic +/// operates on independent 128-bit lanes. The calculated results are +/// stored in \a dst. +/// \headerfile +/// +/// \code +/// __m256i _mm256_sm4key4_epi32(__m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM4KEY4 instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x int]. +/// \param __B +/// A 256-bit vector of [8 x int]. +/// \returns +/// A 256-bit vector of [8 x int]. 
+/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// DEFINE SBOX_BYTE(dword, i) { +/// RETURN sbox[dword.byte[i]] +/// } +/// DEFINE lower_t(dword) { +/// tmp.byte[0] := SBOX_BYTE(dword, 0) +/// tmp.byte[1] := SBOX_BYTE(dword, 1) +/// tmp.byte[2] := SBOX_BYTE(dword, 2) +/// tmp.byte[3] := SBOX_BYTE(dword, 3) +/// RETURN tmp +/// } +/// DEFINE L_KEY(dword) { +/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23) +/// } +/// DEFINE T_KEY(dword) { +/// RETURN L_KEY(lower_t(dword)) +/// } +/// DEFINE F_KEY(X0, X1, X2, X3, round_key) { +/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key) +/// } +/// FOR i:= 0 to 1 +/// P[0] := __B.xmm[i].dword[0] +/// P[1] := __B.xmm[i].dword[1] +/// P[2] := __B.xmm[i].dword[2] +/// P[3] := __B.xmm[i].dword[3] +/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0]) +/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1]) +/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2]) +/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3]) +/// DEST.xmm[i].dword[0] := C[0] +/// DEST.xmm[i].dword[1] := C[1] +/// DEST.xmm[i].dword[2] := C[2] +/// DEST.xmm[i].dword[3] := C[3] +/// ENDFOR +/// DEST[MAX:256] := 0 +/// \endcode +#define _mm256_sm4key4_epi32(A, B) \ + (__m256i) __builtin_ia32_vsm4key4256((__v8su)A, (__v8su)B) + +/// This intrinisc performs four rounds of SM4 encryption. The intrinisc +/// operates on independent 128-bit lanes. The calculated results are +/// stored in \a dst. +/// \headerfile +/// +/// \code +/// __m128i _mm_sm4rnds4_epi32(__m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM4RNDS4 instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x int]. +/// \param __B +/// A 128-bit vector of [4 x int]. +/// \returns +/// A 128-bit vector of [4 x int]. 
+/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// DEFINE lower_t(dword) { +/// tmp.byte[0] := SBOX_BYTE(dword, 0) +/// tmp.byte[1] := SBOX_BYTE(dword, 1) +/// tmp.byte[2] := SBOX_BYTE(dword, 2) +/// tmp.byte[3] := SBOX_BYTE(dword, 3) +/// RETURN tmp +/// } +/// DEFINE L_RND(dword) { +/// tmp := dword +/// tmp := tmp ^ ROL32(dword, 2) +/// tmp := tmp ^ ROL32(dword, 10) +/// tmp := tmp ^ ROL32(dword, 18) +/// tmp := tmp ^ ROL32(dword, 24) +/// RETURN tmp +/// } +/// DEFINE T_RND(dword) { +/// RETURN L_RND(lower_t(dword)) +/// } +/// DEFINE F_RND(X0, X1, X2, X3, round_key) { +/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key) +/// } +/// FOR i:= 0 to 0 +/// P[0] := __B.xmm[i].dword[0] +/// P[1] := __B.xmm[i].dword[1] +/// P[2] := __B.xmm[i].dword[2] +/// P[3] := __B.xmm[i].dword[3] +/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0]) +/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1]) +/// C[2] := F_RND(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2]) +/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3]) +/// DEST.xmm[i].dword[0] := C[0] +/// DEST.xmm[i].dword[1] := C[1] +/// DEST.xmm[i].dword[2] := C[2] +/// DEST.xmm[i].dword[3] := C[3] +/// ENDFOR +/// DEST[MAX:128] := 0 +/// \endcode +#define _mm_sm4rnds4_epi32(A, B) \ + (__m128i) __builtin_ia32_vsm4rnds4128((__v4su)A, (__v4su)B) + +/// This intrinisc performs four rounds of SM4 encryption. The intrinisc +/// operates on independent 128-bit lanes. The calculated results are +/// stored in \a dst. +/// \headerfile +/// +/// \code +/// __m256i _mm256_sm4rnds4_epi32(__m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM4RNDS4 instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x int]. +/// \param __B +/// A 256-bit vector of [8 x int]. +/// \returns +/// A 256-bit vector of [8 x int]. 
+/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// DEFINE lower_t(dword) { +/// tmp.byte[0] := SBOX_BYTE(dword, 0) +/// tmp.byte[1] := SBOX_BYTE(dword, 1) +/// tmp.byte[2] := SBOX_BYTE(dword, 2) +/// tmp.byte[3] := SBOX_BYTE(dword, 3) +/// RETURN tmp +/// } +/// DEFINE L_RND(dword) { +/// tmp := dword +/// tmp := tmp ^ ROL32(dword, 2) +/// tmp := tmp ^ ROL32(dword, 10) +/// tmp := tmp ^ ROL32(dword, 18) +/// tmp := tmp ^ ROL32(dword, 24) +/// RETURN tmp +/// } +/// DEFINE T_RND(dword) { +/// RETURN L_RND(lower_t(dword)) +/// } +/// DEFINE F_RND(X0, X1, X2, X3, round_key) { +/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key) +/// } +/// FOR i:= 0 to 0 +/// P[0] := __B.xmm[i].dword[0] +/// P[1] := __B.xmm[i].dword[1] +/// P[2] := __B.xmm[i].dword[2] +/// P[3] := __B.xmm[i].dword[3] +/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0]) +/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1]) +/// C[2] := F_RND(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2]) +/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3]) +/// DEST.xmm[i].dword[0] := C[0] +/// DEST.xmm[i].dword[1] := C[1] +/// DEST.xmm[i].dword[2] := C[2] +/// DEST.xmm[i].dword[3] := C[3] +/// ENDFOR +/// DEST[MAX:256] := 0 +/// \endcode +#define _mm256_sm4rnds4_epi32(A, B) \ + (__m256i) __builtin_ia32_vsm4rnds4256((__v8su)A, (__v8su)B) + +#endif // __SM4INTRIN_H diff --git a/third_party/clang/lib/clang/16.0.0/include/smmintrin.h b/third_party/clang/lib/clang/17.0.1/include/smmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/smmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/smmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/stdalign.h b/third_party/clang/lib/clang/17.0.1/include/stdalign.h similarity index 63% rename from third_party/clang/lib/clang/16.0.0/include/stdalign.h rename to 
third_party/clang/lib/clang/17.0.1/include/stdalign.h index 6ad25db453..8ae6e658dd 100644 --- a/third_party/clang/lib/clang/16.0.0/include/stdalign.h +++ b/third_party/clang/lib/clang/17.0.1/include/stdalign.h @@ -10,6 +10,10 @@ #ifndef __STDALIGN_H #define __STDALIGN_H +/* FIXME: This is using the placeholder dates Clang produces for these macros + in C2x mode; switch to the correct values once they've been published. */ +#if defined(__cplusplus) || \ + (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202000L) #ifndef __cplusplus #define alignas _Alignas #define alignof _Alignof @@ -17,5 +21,6 @@ #define __alignas_is_defined 1 #define __alignof_is_defined 1 +#endif /* __STDC_VERSION__ */ #endif /* __STDALIGN_H */ diff --git a/third_party/clang/lib/clang/16.0.0/include/stdarg.h b/third_party/clang/lib/clang/17.0.1/include/stdarg.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/stdarg.h rename to third_party/clang/lib/clang/17.0.1/include/stdarg.h diff --git a/third_party/clang/lib/clang/16.0.0/include/stdatomic.h b/third_party/clang/lib/clang/17.0.1/include/stdatomic.h similarity index 94% rename from third_party/clang/lib/clang/16.0.0/include/stdatomic.h rename to third_party/clang/lib/clang/17.0.1/include/stdatomic.h index 0f893beea6..aed33d4333 100644 --- a/third_party/clang/lib/clang/16.0.0/include/stdatomic.h +++ b/third_party/clang/lib/clang/17.0.1/include/stdatomic.h @@ -45,9 +45,16 @@ extern "C" { #define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE /* 7.17.2 Initialization */ - +/* FIXME: This is using the placeholder dates Clang produces for these macros + in C2x mode; switch to the correct values once they've been published. */ +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202000L) || \ + defined(__cplusplus) +/* ATOMIC_VAR_INIT was removed in C2x, but still remains in C++23. 
*/ #define ATOMIC_VAR_INIT(value) (value) -#if ((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201710L) || \ +#endif + +#if ((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201710L && \ + __STDC_VERSION__ < 202000L) || \ (defined(__cplusplus) && __cplusplus >= 202002L)) && \ !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS) /* ATOMIC_VAR_INIT was deprecated in C17 and C++20. */ diff --git a/third_party/clang/lib/clang/16.0.0/include/stdbool.h b/third_party/clang/lib/clang/17.0.1/include/stdbool.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/stdbool.h rename to third_party/clang/lib/clang/17.0.1/include/stdbool.h diff --git a/third_party/clang/lib/clang/16.0.0/include/stddef.h b/third_party/clang/lib/clang/17.0.1/include/stddef.h similarity index 94% rename from third_party/clang/lib/clang/16.0.0/include/stddef.h rename to third_party/clang/lib/clang/17.0.1/include/stddef.h index 42815176dc..539541f0ed 100644 --- a/third_party/clang/lib/clang/16.0.0/include/stddef.h +++ b/third_party/clang/lib/clang/17.0.1/include/stddef.h @@ -103,6 +103,11 @@ using ::std::nullptr_t; typedef typeof(nullptr) nullptr_t; #endif /* defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000L */ +#if defined(__need_STDDEF_H_misc) && defined(__STDC_VERSION__) && \ + __STDC_VERSION__ >= 202000L +#define unreachable() __builtin_unreachable() +#endif /* defined(__need_STDDEF_H_misc) && >= C23 */ + #if defined(__need_STDDEF_H_misc) #if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \ (defined(__cplusplus) && __cplusplus >= 201103L) diff --git a/third_party/clang/lib/clang/16.0.0/include/stdint.h b/third_party/clang/lib/clang/17.0.1/include/stdint.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/stdint.h rename to third_party/clang/lib/clang/17.0.1/include/stdint.h diff --git a/third_party/clang/lib/clang/16.0.0/include/stdnoreturn.h b/third_party/clang/lib/clang/17.0.1/include/stdnoreturn.h similarity index 100% 
rename from third_party/clang/lib/clang/16.0.0/include/stdnoreturn.h rename to third_party/clang/lib/clang/17.0.1/include/stdnoreturn.h diff --git a/third_party/clang/lib/clang/16.0.0/include/tbmintrin.h b/third_party/clang/lib/clang/17.0.1/include/tbmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/tbmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/tbmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/tgmath.h b/third_party/clang/lib/clang/17.0.1/include/tgmath.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/tgmath.h rename to third_party/clang/lib/clang/17.0.1/include/tgmath.h diff --git a/third_party/clang/lib/clang/16.0.0/include/tmmintrin.h b/third_party/clang/lib/clang/17.0.1/include/tmmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/tmmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/tmmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/tsxldtrkintrin.h b/third_party/clang/lib/clang/17.0.1/include/tsxldtrkintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/tsxldtrkintrin.h rename to third_party/clang/lib/clang/17.0.1/include/tsxldtrkintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/uintrintrin.h b/third_party/clang/lib/clang/17.0.1/include/uintrintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/uintrintrin.h rename to third_party/clang/lib/clang/17.0.1/include/uintrintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/unwind.h b/third_party/clang/lib/clang/17.0.1/include/unwind.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/unwind.h rename to third_party/clang/lib/clang/17.0.1/include/unwind.h diff --git a/third_party/clang/lib/clang/16.0.0/include/vadefs.h b/third_party/clang/lib/clang/17.0.1/include/vadefs.h similarity index 100% rename from 
third_party/clang/lib/clang/16.0.0/include/vadefs.h rename to third_party/clang/lib/clang/17.0.1/include/vadefs.h diff --git a/third_party/clang/lib/clang/16.0.0/include/vaesintrin.h b/third_party/clang/lib/clang/17.0.1/include/vaesintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/vaesintrin.h rename to third_party/clang/lib/clang/17.0.1/include/vaesintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/varargs.h b/third_party/clang/lib/clang/17.0.1/include/varargs.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/varargs.h rename to third_party/clang/lib/clang/17.0.1/include/varargs.h diff --git a/third_party/clang/lib/clang/16.0.0/include/vecintrin.h b/third_party/clang/lib/clang/17.0.1/include/vecintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/vecintrin.h rename to third_party/clang/lib/clang/17.0.1/include/vecintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/velintrin.h b/third_party/clang/lib/clang/17.0.1/include/velintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/velintrin.h rename to third_party/clang/lib/clang/17.0.1/include/velintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/velintrin_approx.h b/third_party/clang/lib/clang/17.0.1/include/velintrin_approx.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/velintrin_approx.h rename to third_party/clang/lib/clang/17.0.1/include/velintrin_approx.h diff --git a/third_party/clang/lib/clang/16.0.0/include/velintrin_gen.h b/third_party/clang/lib/clang/17.0.1/include/velintrin_gen.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/velintrin_gen.h rename to third_party/clang/lib/clang/17.0.1/include/velintrin_gen.h diff --git a/third_party/clang/lib/clang/16.0.0/include/vpclmulqdqintrin.h b/third_party/clang/lib/clang/17.0.1/include/vpclmulqdqintrin.h similarity index 100% rename 
from third_party/clang/lib/clang/16.0.0/include/vpclmulqdqintrin.h rename to third_party/clang/lib/clang/17.0.1/include/vpclmulqdqintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/waitpkgintrin.h b/third_party/clang/lib/clang/17.0.1/include/waitpkgintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/waitpkgintrin.h rename to third_party/clang/lib/clang/17.0.1/include/waitpkgintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/wasm_simd128.h b/third_party/clang/lib/clang/17.0.1/include/wasm_simd128.h similarity index 93% rename from third_party/clang/lib/clang/16.0.0/include/wasm_simd128.h rename to third_party/clang/lib/clang/17.0.1/include/wasm_simd128.h index f93de129f9..2327bec525 100644 --- a/third_party/clang/lib/clang/16.0.0/include/wasm_simd128.h +++ b/third_party/clang/lib/clang/17.0.1/include/wasm_simd128.h @@ -961,17 +961,17 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_popcnt(v128_t __a) { static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a, uint32_t __b) { - return (v128_t)((__i8x16)__a << __b); + return (v128_t)((__i8x16)__a << (__b & 0x7)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a, uint32_t __b) { - return (v128_t)((__i8x16)__a >> __b); + return (v128_t)((__i8x16)__a >> (__b & 0x7)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a, uint32_t __b) { - return (v128_t)((__u8x16)__a >> __b); + return (v128_t)((__u8x16)__a >> (__b & 0x7)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a, @@ -1047,17 +1047,17 @@ static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i16x8_bitmask(v128_t __a) { static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a, uint32_t __b) { - return (v128_t)((__i16x8)__a << __b); + return (v128_t)((__i16x8)__a << (__b & 0xF)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a, uint32_t __b) { - return (v128_t)((__i16x8)__a >> __b); + 
return (v128_t)((__i16x8)__a >> (__b & 0xF)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a, uint32_t __b) { - return (v128_t)((__u16x8)__a >> __b); + return (v128_t)((__u16x8)__a >> (__b & 0xF)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a, @@ -1138,17 +1138,17 @@ static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i32x4_bitmask(v128_t __a) { static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a, uint32_t __b) { - return (v128_t)((__i32x4)__a << __b); + return (v128_t)((__i32x4)__a << (__b & 0x1F)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a, uint32_t __b) { - return (v128_t)((__i32x4)__a >> __b); + return (v128_t)((__i32x4)__a >> (__b & 0x1F)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a, uint32_t __b) { - return (v128_t)((__u32x4)__a >> __b); + return (v128_t)((__u32x4)__a >> (__b & 0x1F)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a, @@ -1209,17 +1209,17 @@ static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i64x2_bitmask(v128_t __a) { static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a, uint32_t __b) { - return (v128_t)((__i64x2)__a << (int64_t)__b); + return (v128_t)((__i64x2)__a << ((int64_t)__b & 0x3F)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a, uint32_t __b) { - return (v128_t)((__i64x2)__a >> (int64_t)__b); + return (v128_t)((__i64x2)__a >> ((int64_t)__b & 0x3F)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a, uint32_t __b) { - return (v128_t)((__u64x2)__a >> (int64_t)__b); + return (v128_t)((__u64x2)__a >> ((int64_t)__b & 0x3F)); } static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a, @@ -1760,6 +1760,126 @@ wasm_u64x2_load_32x2(const void *__mem) { __DEPRECATED_WASM_MACRO("wasm_v64x2_shuffle", "wasm_i64x2_shuffle") \ wasm_i64x2_shuffle(__a, __b, __c0, __c1) +// Relaxed SIMD intrinsics + +#define 
__RELAXED_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("relaxed-simd"), \ + __min_vector_width__(128))) + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_f32x4_relaxed_madd(v128_t __a, v128_t __b, v128_t __c) { + return (v128_t)__builtin_wasm_relaxed_madd_f32x4((__f32x4)__a, (__f32x4)__b, + (__f32x4)__c); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_f32x4_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c) { + return (v128_t)__builtin_wasm_relaxed_nmadd_f32x4((__f32x4)__a, (__f32x4)__b, + (__f32x4)__c); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_f64x2_relaxed_madd(v128_t __a, v128_t __b, v128_t __c) { + return (v128_t)__builtin_wasm_relaxed_madd_f64x2((__f64x2)__a, (__f64x2)__b, + (__f64x2)__c); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_f64x2_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c) { + return (v128_t)__builtin_wasm_relaxed_nmadd_f64x2((__f64x2)__a, (__f64x2)__b, + (__f64x2)__c); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_i8x16_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) { + return (v128_t)__builtin_wasm_relaxed_laneselect_i8x16( + (__i8x16)__a, (__i8x16)__b, (__i8x16)__m); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_i16x8_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) { + return (v128_t)__builtin_wasm_relaxed_laneselect_i16x8( + (__i16x8)__a, (__i16x8)__b, (__i16x8)__m); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_i32x4_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) { + return (v128_t)__builtin_wasm_relaxed_laneselect_i32x4( + (__i32x4)__a, (__i32x4)__b, (__i32x4)__m); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_i64x2_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) { + return (v128_t)__builtin_wasm_relaxed_laneselect_i64x2( + (__i64x2)__a, (__i64x2)__b, (__i64x2)__m); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_i8x16_relaxed_swizzle(v128_t __a, v128_t __s) { + return 
(v128_t)__builtin_wasm_relaxed_swizzle_i8x16((__i8x16)__a, + (__i8x16)__s); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_min(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_relaxed_min_f32x4((__f32x4)__a, (__f32x4)__b); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_max(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_relaxed_max_f32x4((__f32x4)__a, (__f32x4)__b); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_min(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_relaxed_min_f64x2((__f64x2)__a, (__f64x2)__b); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_max(v128_t __a, + v128_t __b) { + return (v128_t)__builtin_wasm_relaxed_max_f64x2((__f64x2)__a, (__f64x2)__b); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_i32x4_relaxed_trunc_f32x4(v128_t __a) { + return (v128_t)__builtin_wasm_relaxed_trunc_s_i32x4_f32x4((__f32x4)__a); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_u32x4_relaxed_trunc_f32x4(v128_t __a) { + return (v128_t)__builtin_wasm_relaxed_trunc_u_i32x4_f32x4((__f32x4)__a); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_i32x4_relaxed_trunc_f64x2_zero(v128_t __a) { + return (v128_t)__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2((__f64x2)__a); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_u32x4_relaxed_trunc_f64x2_zero(v128_t __a) { + return (v128_t)__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2((__f64x2)__a); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_i16x8_relaxed_q15mulr(v128_t __a, v128_t __b) { + return (v128_t)__builtin_wasm_relaxed_q15mulr_s_i16x8((__i16x8)__a, + (__i16x8)__b); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_i16x8_relaxed_dot_i8x16_i7x16(v128_t __a, v128_t __b) { + return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8((__i8x16)__a, + (__i8x16)__b); +} + +static __inline__ v128_t __RELAXED_FN_ATTRS +wasm_i32x4_relaxed_dot_i8x16_i7x16_add(v128_t __a, 
v128_t __b, v128_t __c) { + return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4( + (__i8x16)__a, (__i8x16)__b, (__i32x4)__c); +} + +// Deprecated intrinsics + static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_swizzle") wasm_v8x16_swizzle(v128_t __a, v128_t __b) { return wasm_i8x16_swizzle(__a, __b); diff --git a/third_party/clang/lib/clang/16.0.0/include/wbnoinvdintrin.h b/third_party/clang/lib/clang/17.0.1/include/wbnoinvdintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/wbnoinvdintrin.h rename to third_party/clang/lib/clang/17.0.1/include/wbnoinvdintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/wmmintrin.h b/third_party/clang/lib/clang/17.0.1/include/wmmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/wmmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/wmmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/x86gprintrin.h b/third_party/clang/lib/clang/17.0.1/include/x86gprintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/x86gprintrin.h rename to third_party/clang/lib/clang/17.0.1/include/x86gprintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/x86intrin.h b/third_party/clang/lib/clang/17.0.1/include/x86intrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/x86intrin.h rename to third_party/clang/lib/clang/17.0.1/include/x86intrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/xmmintrin.h b/third_party/clang/lib/clang/17.0.1/include/xmmintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/xmmintrin.h rename to third_party/clang/lib/clang/17.0.1/include/xmmintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/xopintrin.h b/third_party/clang/lib/clang/17.0.1/include/xopintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/xopintrin.h rename to 
third_party/clang/lib/clang/17.0.1/include/xopintrin.h diff --git a/third_party/clang/lib/clang/17.0.1/include/xsavecintrin.h b/third_party/clang/lib/clang/17.0.1/include/xsavecintrin.h new file mode 100644 index 0000000000..1f2d001207 --- /dev/null +++ b/third_party/clang/lib/clang/17.0.1/include/xsavecintrin.h @@ -0,0 +1,84 @@ +/*===---- xsavecintrin.h - XSAVEC intrinsic --------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __XSAVECINTRIN_H +#define __XSAVECINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec"))) + +/// Performs a full or partial save of processor state to the memory at +/// \a __p. The exact state saved depends on the 64-bit mask \a __m and +/// processor control register \c XCR0. +/// +/// \code{.operation} +/// mask[62:0] := __m[62:0] AND XCR0[62:0] +/// FOR i := 0 TO 62 +/// IF mask[i] == 1 +/// CASE (i) OF +/// 0: save X87 FPU state +/// 1: save SSE state +/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i] +/// FI +/// ENDFOR +/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c XSAVEC instruction. +/// +/// \param __p +/// Pointer to the save area; must be 64-byte aligned. +/// \param __m +/// A 64-bit mask indicating what state should be saved. 
+static __inline__ void __DEFAULT_FN_ATTRS +_xsavec(void *__p, unsigned long long __m) { + __builtin_ia32_xsavec(__p, __m); +} + +#ifdef __x86_64__ +/// Performs a full or partial save of processor state to the memory at +/// \a __p. The exact state saved depends on the 64-bit mask \a __m and +/// processor control register \c XCR0. +/// +/// \code{.operation} +/// mask[62:0] := __m[62:0] AND XCR0[62:0] +/// FOR i := 0 TO 62 +/// IF mask[i] == 1 +/// CASE (i) OF +/// 0: save X87 FPU state +/// 1: save SSE state +/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i] +/// FI +/// ENDFOR +/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c XSAVEC64 instruction. +/// +/// \param __p +/// Pointer to the save area; must be 64-byte aligned. +/// \param __m +/// A 64-bit mask indicating what state should be saved. +static __inline__ void __DEFAULT_FN_ATTRS +_xsavec64(void *__p, unsigned long long __m) { + __builtin_ia32_xsavec64(__p, __m); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/clang/lib/clang/16.0.0/include/xsaveintrin.h b/third_party/clang/lib/clang/17.0.1/include/xsaveintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/xsaveintrin.h rename to third_party/clang/lib/clang/17.0.1/include/xsaveintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/xsaveoptintrin.h b/third_party/clang/lib/clang/17.0.1/include/xsaveoptintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/xsaveoptintrin.h rename to third_party/clang/lib/clang/17.0.1/include/xsaveoptintrin.h diff --git a/third_party/clang/lib/clang/16.0.0/include/xsavesintrin.h b/third_party/clang/lib/clang/17.0.1/include/xsavesintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/xsavesintrin.h rename to third_party/clang/lib/clang/17.0.1/include/xsavesintrin.h diff --git 
a/third_party/clang/lib/clang/16.0.0/include/xtestintrin.h b/third_party/clang/lib/clang/17.0.1/include/xtestintrin.h similarity index 100% rename from third_party/clang/lib/clang/16.0.0/include/xtestintrin.h rename to third_party/clang/lib/clang/17.0.1/include/xtestintrin.h diff --git a/third_party/jedi_deps/jedi b/third_party/jedi_deps/jedi index eaab706038..4e175ca82b 160000 --- a/third_party/jedi_deps/jedi +++ b/third_party/jedi_deps/jedi @@ -1 +1 @@ -Subproject commit eaab7060388c8f438f1dbe6502e47c6639aa9bac +Subproject commit 4e175ca82bbd680cb90f462545c10a8b1c0b5f2e diff --git a/third_party/tern_runtime/package.json b/third_party/tern_runtime/package.json index 9c902edc8a..32513f924c 100644 --- a/third_party/tern_runtime/package.json +++ b/third_party/tern_runtime/package.json @@ -1,6 +1,6 @@ { "description": "ycmd tern runtime area with required tern version and plugins", "dependencies": { - "tern": "0.21.0" + "tern": "0.24.3" } } diff --git a/third_party/tsserver/package.json b/third_party/tsserver/package.json index 3dc6d6c18a..6e44a76e2d 100644 --- a/third_party/tsserver/package.json +++ b/third_party/tsserver/package.json @@ -1,6 +1,6 @@ { - "description": "ycmd tern runtime area with required typescript version and plugins", + "description": "ycmd tsserver runtime area with required typescript version and plugins", "dependencies": { - "typescript": "4.7.4" + "typescript": "5.1.6" } } diff --git a/ycmd/completers/cpp/clangd_completer.py b/ycmd/completers/cpp/clangd_completer.py index 2cfc3c33d9..2fe9d690e4 100644 --- a/ycmd/completers/cpp/clangd_completer.py +++ b/ycmd/completers/cpp/clangd_completer.py @@ -34,7 +34,7 @@ PathsToAllParentFolders, re ) -# NOTES: We currently bundle 14.0.0, but as this is very new, we still allow the +# NOTES: We currently bundle 17.0.1, but as this is very new, we still allow the # use of earlier version to avoid breaking users who have set # g:ycm_clangd_binary_path. 
In general, we should only update this if we make # changes to this CLangdCompleter that would not be backward compatible. @@ -313,6 +313,9 @@ def GetCustomSubcommands( self ): def GoToAlternateFile( self, request_data ): + if not self.ServerIsReady(): + raise RuntimeError( 'Server is initializing. Please wait.' ) + request_id = self.GetConnection().NextRequestId() uri = lsp.FilePathToUri( request_data[ 'filepath' ] ) request = lsp.BuildRequest( request_id, diff --git a/ycmd/completers/cs/solutiondetection.py b/ycmd/completers/cs/solutiondetection.py index e136306dd1..8b13035a00 100644 --- a/ycmd/completers/cs/solutiondetection.py +++ b/ycmd/completers/cs/solutiondetection.py @@ -93,7 +93,7 @@ def _SolutionTestCheckHeuristics( candidates, tokens, i ): # 1. is there a solution named just like the subdirectory with the source? if ( not selection and i < len( tokens ) - 1 and f'{ tokens[ i + 1 ] }.sln' in candidates ): - selection = os.path.join( path, f'{ tokens[ i + 1] }.sln' ) + selection = os.path.join( path, f'{ tokens[ i + 1 ] }.sln' ) LOGGER.info( 'Selected solution file %s as it matches source subfolder', selection ) diff --git a/ycmd/completers/go/go_completer.py b/ycmd/completers/go/go_completer.py index a415a3744b..50990a1df8 100644 --- a/ycmd/completers/go/go_completer.py +++ b/ycmd/completers/go/go_completer.py @@ -78,25 +78,6 @@ def SupportedFiletypes( self ): return [ 'go' ] - def _SetUpSemanticTokenAtlas( self, capabilities: dict ): - if 'semanticTokensProvider' not in capabilities: - # gopls is broken and doesn't provide a legend, instead assuming the - # tokens specified by the client are the legend. 
This is broken, but - # easily worked around: - # - # https://github.com/golang/go/issues/54531 - import ycmd.completers.language_server.language_server_protocol as lsp - capabilities[ 'semanticTokensProvider' ] = { - 'full': True, - 'legend': { - 'tokenTypes': lsp.TOKEN_TYPES, - 'tokenModifiers': lsp.TOKEN_MODIFIERS - } - } - - return super()._SetUpSemanticTokenAtlas( capabilities ) - - def GetDoc( self, request_data ): assert self._settings[ 'ls' ][ 'hoverKind' ] == 'Structured' try: diff --git a/ycmd/completers/language_server/language_server_completer.py b/ycmd/completers/language_server/language_server_completer.py index 0e1298f610..6e7004071d 100644 --- a/ycmd/completers/language_server/language_server_completer.py +++ b/ycmd/completers/language_server/language_server_completer.py @@ -1504,7 +1504,8 @@ def SignatureHelpAvailable( self ): if not self.ServerIsReady(): return responses.SignatureHelpAvailalability.PENDING - if bool( self._server_capabilities.get( 'signatureHelpProvider' ) ): + if _IsCapabilityProvided( self._server_capabilities, + 'signatureHelpProvider' ): return responses.SignatureHelpAvailalability.AVAILABLE else: return responses.SignatureHelpAvailalability.NOT_AVAILABLE @@ -1514,7 +1515,8 @@ def ComputeSignaturesInner( self, request_data ): if not self.ServerIsReady(): return {} - if not self._server_capabilities.get( 'signatureHelpProvider' ): + if not _IsCapabilityProvided( self._server_capabilities, + 'signatureHelpProvider' ): return {} self._UpdateServerWithCurrentFileContents( request_data ) @@ -1559,8 +1561,9 @@ def ComputeSemanticTokens( self, request_data ): if not self._semantic_token_atlas: return {} - range_supported = self._server_capabilities[ 'semanticTokensProvider' ].get( - 'range', False ) + range_supported = _IsCapabilityProvided( + self._server_capabilities[ 'semanticTokensProvider' ], + 'range' ) self._UpdateServerWithCurrentFileContents( request_data ) @@ -1596,7 +1599,8 @@ def ComputeInlayHints( self, 
request_data ): if not self._ServerIsInitialized(): return [] - if 'inlayHintProvider' not in self._server_capabilities: + if not _IsCapabilityProvided( self._server_capabilities, + 'inlayHintProvider' ): return [] self._UpdateServerWithCurrentFileContents( request_data ) @@ -1771,7 +1775,8 @@ def GetSubcommandsMap( self ): ) if ( self._server_capabilities and - 'callHierarchyProvider' in self._server_capabilities ): + _IsCapabilityProvided( self._server_capabilities, + 'callHierarchyProvider' ) ): commands[ 'GoToCallees' ] = ( lambda self, request_data, args: self.CallHierarchy( request_data, [ 'outgoing' ] ) @@ -1795,9 +1800,10 @@ def _GetSubcommandProvider( self, provider_list ): for providers in provider_list: if isinstance( providers, tuple ): - if all( capabilities.get( provider ) for provider in providers ): + if all( _IsCapabilityProvided( capabilities, provider ) + for provider in providers ): return providers - if capabilities.get( providers ): + if _IsCapabilityProvided( capabilities, providers ): return providers return None @@ -2210,9 +2216,9 @@ def OnFileSave( self, request_data ): if not self.ServerIsReady(): return - if 'textDocumentSync' in self._server_capabilities: - sync = self._server_capabilities[ 'textDocumentSync' ] - if isinstance( sync, dict ) and sync.get( 'save' ) not in [ None, False ]: + sync = self._server_capabilities.get( 'textDocumentSync' ) + if sync is not None: + if isinstance( sync, dict ) and _IsCapabilityProvided( sync, 'save' ): save = sync[ 'save' ] file_name = request_data[ 'filepath' ] contents = None @@ -2355,11 +2361,7 @@ def _SetUpSemanticTokenAtlas( self, capabilities: dict ): if server_config is None: return - server_full_support = server_config.get( 'full' ) - if server_full_support == {}: - server_full_support = True - - if not server_full_support: + if not _IsCapabilityProvided( server_config, 'full' ): return self._semantic_token_atlas = TokenAtlas( server_config[ 'legend' ] ) @@ -2384,8 +2386,8 @@ def 
_HandleInitializeInPollThread( self, response ): self._SetUpSemanticTokenAtlas( self._server_capabilities ) - if 'textDocumentSync' in self._server_capabilities: - sync = self._server_capabilities[ 'textDocumentSync' ] + sync = self._server_capabilities.get( 'textDocumentSync' ) + if sync is not None: SYNC_TYPE = [ 'None', 'Full', @@ -2851,6 +2853,9 @@ def ResolveFixit( self, request_data ): def ExecuteCommand( self, request_data, args ): + if not self.ServerIsReady(): + raise RuntimeError( 'Server is initializing. Please wait.' ) + if not args: raise ValueError( 'Must specify a command to execute' ) @@ -3609,6 +3614,11 @@ def DecodeModifiers( self, tokenModifiers ): return tokens +def _IsCapabilityProvided( capabilities, query ): + capability = capabilities.get( query ) + return bool( capability ) or capability == {} + + def RetryOnFailure( expected_error_codes, num_retries = 3 ): for i in range( num_retries ): try: diff --git a/ycmd/completers/typescript/typescript_completer.py b/ycmd/completers/typescript/typescript_completer.py index 6db904a577..99439421da 100644 --- a/ycmd/completers/typescript/typescript_completer.py +++ b/ycmd/completers/typescript/typescript_completer.py @@ -214,7 +214,7 @@ def _StartServerNoLock( self ): return self._logfile = utils.CreateLogfile( LOGFILE_FORMAT ) - tsserver_log = f'-file { self._logfile } -level {_LogLevel()}' + tsserver_log = f'-file { self._logfile } -level { _LogLevel() }' # TSServer gets the configuration for the log file through the # environment variable 'TSS_LOG'. 
This seems to be undocumented but # looking at the source code it seems like this is the way: @@ -911,7 +911,7 @@ def _GetDoc( self, request_data ): 'offset': request_data[ 'column_codepoint' ] } ) - message = f'{ info[ "displayString" ] }\n\n{info[ "documentation" ]}' + message = f'{ info[ "displayString" ] }\n\n{ info[ "documentation" ] }' return responses.BuildDetailedInfoResponse( message ) diff --git a/ycmd/tests/clangd/diagnostics_test.py b/ycmd/tests/clangd/diagnostics_test.py index 11d8be4058..452e69062a 100644 --- a/ycmd/tests/clangd/diagnostics_test.py +++ b/ycmd/tests/clangd/diagnostics_test.py @@ -532,7 +532,18 @@ def test_Diagnostics_UpdatedOnBufferVisit( self, app ): contains_string( source_file ), 2, 20 ), 'location_extent': RangeMatcher( contains_string( source_file ), ( 2, 20 ), ( 2, 21 ) ) - } ) + } ), + has_entries( { + 'kind': equal_to( 'WARNING' ), + 'text': "Included header header.h is not used directly " + "(fix available) [unused-includes]", + 'ranges': contains_exactly( RangeMatcher( + contains_string( source_file ), ( 1, 1 ), ( 1, 20 ) ) ), + 'location': LocationMatcher( + contains_string( source_file ), 1, 1 ), + 'location_extent': RangeMatcher( + contains_string( source_file ), ( 1, 1 ), ( 1, 20 ) ) + } ), ) } ) ) break diff --git a/ycmd/tests/clangd/subcommands_test.py b/ycmd/tests/clangd/subcommands_test.py index 1187669707..de37789ce4 100644 --- a/ycmd/tests/clangd/subcommands_test.py +++ b/ycmd/tests/clangd/subcommands_test.py @@ -42,6 +42,7 @@ LineColMatcher, LocationMatcher, ErrorMatcher, + UnixOnly, WithRetry, WaitUntilCompleterServerReady ) from ycmd.utils import ReadFile @@ -605,6 +606,7 @@ def test_Subcommands_DefinedSubcommands( self, app ): @SharedYcmd def test_Subcommands_ServerNotInitialized( self, app ): for cmd in [ + 'ExecuteCommand', 'FixIt', 'Format', 'GetDoc', @@ -616,10 +618,14 @@ def test_Subcommands_ServerNotInitialized( self, app ): 'GoToCallers', 'GoToDeclaration', 'GoToDefinition', - 'GoToInclude', + 
'GoToDocumentOutline', + 'GoToImprecise', 'GoToImplementation', + 'GoToInclude', 'GoToReferences', + 'GoToType', 'RefactorRename', + 'GoToAlternateFile', ]: with self.subTest( cmd = cmd ): completer = handlers._server_state.GetFiletypeCompleter( [ 'cpp' ] ) @@ -1073,7 +1079,8 @@ def test_Subcommands_GetType( self, app ): test[ 2 ] ) - @SharedYcmd + @UnixOnly + @IsolatedYcmd() def test_Subcommands_GetDoc( self, app ): for test, cmd in itertools.product( [ # from local file @@ -1085,7 +1092,8 @@ def test_Subcommands_GetDoc( self, app ): # from header [ { 'line_num': 6, 'column_num': 10 }, has_entry( 'detailed_info', equal_to( - 'function docstring_from_header_file\n\n→ void\ndocstring\n\n' + 'function docstring_from_header_file\nprovided by "docstring.h"' + '\n\n→ void\ndocstring\n\n' 'void docstring_from_header_file()' ) ), requests.codes.ok ], # no docstring