diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 020fadc2c7..9377bd7150 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -38,7 +38,7 @@ jobs:
submodules: recursive
fetch-depth: 0
- - uses: actions/cache@v2
+ - uses: actions/cache@v3
if: matrix.libclang == true && matrix.benchmark == false
with:
key: v3-libclang-${{ runner.os }}-${{ hashFiles( 'cpp/ycm/CMakeLists.txt' ) }}
@@ -51,7 +51,7 @@ jobs:
clang_archives
name: Cache libclang
- - uses: actions/cache@v2
+ - uses: actions/cache@v3
if: matrix.benchmark == false
with:
key: v2-deps-${{ runner.os }}-${{ hashFiles( 'build.py' ) }}
@@ -65,7 +65,7 @@ jobs:
third_party/omnisharp-roslyn/v[0-9]*
name: Cache dependencies
- - uses: actions/cache@v2
+ - uses: actions/cache@v3
if: matrix.benchmark == false
with:
key: v2-testdeps-${{ runner.os }}-${{ hashFiles( 'run_tests.py' ) }}
@@ -79,21 +79,22 @@ jobs:
- name: Install Java
if: matrix.benchmark == false
- uses: actions/setup-java@v2
+ uses: actions/setup-java@v3
with:
java-version: 17
distribution: 'adopt'
- name: Install Python
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: Install Go
if: matrix.benchmark == false
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v4
with:
- stable: true
+ go-version: stable
+ cache: false
- name: Install GCC
if: runner.os == 'Linux' && matrix.compiler != 'clang'
run: |
@@ -127,6 +128,7 @@ jobs:
if: matrix.benchmark == false
with:
name: "${{ matrix.runs-on }}-${{ matrix.name_suffix }}-tests"
+ token: ${{ secrets.CODECOV_TOKEN }}
gcov: true
linux_lint:
@@ -196,7 +198,7 @@ jobs:
submodules: recursive
fetch-depth: 0
- - uses: actions/cache@v2
+ - uses: actions/cache@v3
if: matrix.libclang == true && matrix.benchmark == false
with:
key: v3-libclang-${{ runner.os }}-${{ hashFiles( 'cpp/ycm/CMakeLists.txt' ) }}
@@ -209,7 +211,7 @@ jobs:
clang_archives
name: Cache libclang
- - uses: actions/cache@v2
+ - uses: actions/cache@v3
if: matrix.benchmark == false
with:
key: v2-deps-${{ runner.os }}-${{ hashFiles( 'build.py' ) }}
@@ -223,7 +225,7 @@ jobs:
third_party/omnisharp-roslyn/v[0-9]*
name: Cache dependencies
- - uses: actions/cache@v2
+ - uses: actions/cache@v3
if: matrix.benchmark == false
with:
key: v2-testdeps-${{ runner.os }}-${{ hashFiles( 'run_tests.py' ) }}
@@ -237,20 +239,21 @@ jobs:
- name: Install Java
if: matrix.benchmark == false
- uses: actions/setup-java@v2
+ uses: actions/setup-java@v3
with:
java-version: 17
distribution: 'temurin'
- name: Install Python
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: '3.8'
architecture: ${{ matrix.python-arch }}
- name: Install Go
if: matrix.benchmark == false
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v4
with:
- stable: true
+ go-version: stable
+ cache: false
- name: Run pip and prepare coverage
if: matrix.benchmark == false
run: |
@@ -271,4 +274,4 @@ jobs:
if: matrix.benchmark == false
with:
name: "${{ matrix.runs-on }}-${{ matrix.name_suffix }}-tests"
- gcov: true
+ token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.ycm_extra_conf.py b/.ycm_extra_conf.py
index aa5e153b5e..d97014841c 100644
--- a/.ycm_extra_conf.py
+++ b/.ycm_extra_conf.py
@@ -28,7 +28,7 @@
#
# For more information, please refer to <http://unlicense.org/>
-from distutils.sysconfig import get_python_inc
+from sysconfig import get_path
import platform
import os.path as p
import subprocess
@@ -71,7 +71,7 @@
'-isystem',
'cpp/BoostParts',
'-isystem',
-get_python_inc(),
+get_path( 'include' ),
'-isystem',
'cpp/llvm/include',
'-isystem',
diff --git a/build.py b/build.py
index bc9427a96b..f4a95deee8 100755
--- a/build.py
+++ b/build.py
@@ -95,7 +95,7 @@ def Exit( self ):
'ba5fe5ee3b2a8395287e24aef20ce6e17834cf8e877117e6caacac6a688a6c53'
)
-DEFAULT_RUST_TOOLCHAIN = 'nightly-2023-05-11'
+DEFAULT_RUST_TOOLCHAIN = 'nightly-2023-08-18'
RUST_ANALYZER_DIR = p.join( DIR_OF_THIRD_PARTY, 'rust-analyzer' )
BUILD_ERROR_MESSAGE = (
@@ -107,7 +107,7 @@ def Exit( self ):
'issue tracker, including the entire output of this script (with --verbose) '
'and the invocation line used to run it.' )
-CLANGD_VERSION = '16.0.1'
+CLANGD_VERSION = '17.0.1'
CLANGD_BINARIES_ERROR_MESSAGE = (
'No prebuilt Clang {version} binaries for {platform}. '
'You\'ll have to compile Clangd {version} from source '
@@ -132,23 +132,23 @@ def FindLatestMSVC( quiet ):
try:
latest_v = int( latest_full_v.split( '.' )[ 0 ] )
except ValueError:
- raise ValueError( f"{latest_full_v} is not a version number." )
+ raise ValueError( f"{ latest_full_v } is not a version number." )
if not quiet:
- print( f'vswhere -latest returned version {latest_full_v}' )
+ print( f'vswhere -latest returned version { latest_full_v }' )
if latest_v not in ACCEPTABLE_VERSIONS:
if latest_v > 17:
if not quiet:
- print( f'MSVC Version {latest_full_v} is newer than expected.' )
+ print( f'MSVC Version { latest_full_v } is newer than expected.' )
else:
raise ValueError(
- f'vswhere returned {latest_full_v} which is unexpected.'
+ f'vswhere returned { latest_full_v } which is unexpected. '
'Pass --msvc argument.' )
return latest_v
else:
if not quiet:
- print( f'vswhere returned nothing usable, {latest_full_v}' )
+ print( f'vswhere returned nothing usable, { latest_full_v }' )
# Fall back to registry parsing, which works at least until MSVC 2019 (16)
# but is likely failing on MSVC 2022 (17)
@@ -161,11 +161,11 @@ def FindLatestMSVC( quiet ):
for i in ACCEPTABLE_VERSIONS:
if not quiet:
print( 'Trying to find '
- rf'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\{i}.0' )
+ rf'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\{ i }.0' )
try:
- winreg.OpenKey( handle, rf'SOFTWARE\Microsoft\VisualStudio\{i}.0' )
+ winreg.OpenKey( handle, rf'SOFTWARE\Microsoft\VisualStudio\{ i }.0' )
if not quiet:
- print( f"Found MSVC version {i}" )
+ print( f"Found MSVC version { i }" )
msvc = i
break
except FileNotFoundError:
@@ -939,7 +939,7 @@ def EnableGoCompleter( args ):
new_env.pop( 'GOROOT', None )
new_env[ 'GOBIN' ] = p.join( new_env[ 'GOPATH' ], 'bin' )
- gopls = 'golang.org/x/tools/gopls@v0.9.4'
+ gopls = 'golang.org/x/tools/gopls@v0.13.2'
CheckCall( [ go, 'install', gopls ],
env = new_env,
quiet = args.quiet,
@@ -978,7 +978,7 @@ def EnableRustCompleter( switches ):
req_toolchain_version = switches.rust_toolchain_version
if switches.quiet:
- sys.stdout.write( f'Installing rust-analyzer "{req_toolchain_version}" '
+ sys.stdout.write( f'Installing rust-analyzer "{ req_toolchain_version }" '
'for Rust support...' )
sys.stdout.flush()
@@ -1145,30 +1145,30 @@ def GetClangdTarget():
if OnWindows():
return [
( 'clangd-{version}-win64',
- 'a0a7b16f6f92d545c84baff5e4bdb56897e955689ffc7407c915cc9d3c69a945' ),
+ '66a1e4d527b451d1e9f21183416fd53ef7f395266bbf7fd74b470ec326d19c98' ),
( 'clangd-{version}-win32',
- '870de4d2a45380eba7c6b6640e2cb870219dd2025ed3bcb58101fd1d17f51d75' ) ]
+ 'c4c351da9f528a2cfacbc669cfb656ef34791ed637aeed051274adf611f3ba5a' ) ]
if OnMac():
if OnArm():
return [
( 'clangd-{version}-arm64-apple-darwin',
- 'c5b0a314c00e4ce839ce1f4ee1ed46116f839949b7874affa759e10589340948' ) ]
+ '38b0335306193cfe7978af9b2bb9dffc48406739b23f19158e7f000f910df5b0' ) ]
return [
( 'clangd-{version}-x86_64-apple-darwin',
- '826c85889a1c288418e2c05b91e40158cde06f2e79f1e951d4983de2652a6d2c' ) ]
+ 'e3dcbefda4a10d7e1e2f8ce8db820219d78ac48ade247048fc0c6a821105ca26' ) ]
if OnAArch64():
return [
( 'clangd-{version}-aarch64-linux-gnu',
- '79f4a0a20342479c0e29573cf58810e0daabbf00178cf042edf6e1acb20a8602' ) ]
+ 'a3074a5d3c955b3326881617d36438e2cf36140d8de4b5f7d98e73eda92797a8' ) ]
if OnArm():
return [
None, # First list index is for 64bit archives. ARMv7 is 32bit only.
( 'clangd-{version}-armv7a-linux-gnueabihf',
- 'e521f21021885aaeb94e631949db6c0a65cc9c5c9c708afe4a42a058eb91ebca' ) ]
+ 'f167c13d3741ad7869a6ee57621af2cb9c2477bb300ab2fac91ea64c19f8df43' ) ]
if OnX86_64():
return [
( 'clangd-{version}-x86_64-unknown-linux-gnu',
- '51e69f6f5394ed6990cd7d938c53135ef2b5f8d2da1026eb291ffb3c81968847' ) ]
+ '70a9cf4c9e288941f0193dbfe0ab164e1805b622c2df522ea7319dabdeae3b4c' ) ]
raise InstallationFailed(
CLANGD_BINARIES_ERROR_MESSAGE.format( version = CLANGD_VERSION,
platform = 'this system' ) )
diff --git a/cpp/llvm/include/clang-c/Index.h b/cpp/llvm/include/clang-c/Index.h
index a3e54285f8..601b91f67d 100644
--- a/cpp/llvm/include/clang-c/Index.h
+++ b/cpp/llvm/include/clang-c/Index.h
@@ -34,7 +34,7 @@
* compatible, thus CINDEX_VERSION_MAJOR is expected to remain stable.
*/
#define CINDEX_VERSION_MAJOR 0
-#define CINDEX_VERSION_MINOR 63
+#define CINDEX_VERSION_MINOR 64
#define CINDEX_VERSION_ENCODE(major, minor) (((major)*10000) + ((minor)*1))
@@ -48,6 +48,10 @@
#define CINDEX_VERSION_STRING \
CINDEX_VERSION_STRINGIZE(CINDEX_VERSION_MAJOR, CINDEX_VERSION_MINOR)
+#ifndef __has_feature
+#define __has_feature(feature) 0
+#endif
+
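
Review note: stubbing out __has_feature means the later "#if __has_feature(blocks)" guards now preprocess cleanly on compilers that never define that macro (e.g. GCC, MSVC), instead of having to be hidden behind "#ifdef __has_feature". The pattern this enables, copied from the visitor typedefs further down in this diff:

```c
/* On a compiler without native __has_feature support, the stub makes the
   check evaluate to 0, so the opaque fallback typedef is selected and the
   block-based API declarations stay visible to every client. */
#if __has_feature(blocks)
typedef enum CXChildVisitResult (^CXCursorVisitorBlock)(CXCursor cursor,
                                                        CXCursor parent);
#else
typedef struct _CXChildVisitResult *CXCursorVisitorBlock;
#endif
```
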
LLVM_CLANG_C_EXTERN_C_BEGIN
/** \defgroup CINDEX libclang: C Interface to Clang
@@ -275,6 +279,22 @@ CINDEX_LINKAGE CXIndex clang_createIndex(int excludeDeclarationsFromPCH,
*/
CINDEX_LINKAGE void clang_disposeIndex(CXIndex index);
+typedef enum {
+ /**
+ * Use the default value of an option that may depend on the process
+ * environment.
+ */
+ CXChoice_Default = 0,
+ /**
+ * Enable the option.
+ */
+ CXChoice_Enabled = 1,
+ /**
+ * Disable the option.
+ */
+ CXChoice_Disabled = 2
+} CXChoice;
+
typedef enum {
/**
* Used to indicate that no special CXIndex options are needed.
@@ -309,9 +329,131 @@ typedef enum {
} CXGlobalOptFlags;
+/**
+ * Index initialization options.
+ *
+ * 0 is the default value of each member of this struct except for Size.
+ * Initialize the struct in one of the following three ways to avoid adapting
+ * code each time a new member is added to it:
+ * \code
+ * CXIndexOptions Opts;
+ * memset(&Opts, 0, sizeof(Opts));
+ * Opts.Size = sizeof(CXIndexOptions);
+ * \endcode
+ * or explicitly initialize the first data member and zero-initialize the rest:
+ * \code
+ * CXIndexOptions Opts = { sizeof(CXIndexOptions) };
+ * \endcode
+ * or to prevent the -Wmissing-field-initializers warning for the above version:
+ * \code
+ * CXIndexOptions Opts{};
+ * Opts.Size = sizeof(CXIndexOptions);
+ * \endcode
+ */
+typedef struct CXIndexOptions {
+ /**
+ * The size of struct CXIndexOptions used for option versioning.
+ *
+ * Always initialize this member to sizeof(CXIndexOptions), or assign
+ * sizeof(CXIndexOptions) to it right after creating a CXIndexOptions object.
+ */
+ unsigned Size;
+ /**
+ * A CXChoice enumerator that specifies the indexing priority policy.
+ * \sa CXGlobalOpt_ThreadBackgroundPriorityForIndexing
+ */
+ unsigned char ThreadBackgroundPriorityForIndexing;
+ /**
+ * A CXChoice enumerator that specifies the editing priority policy.
+ * \sa CXGlobalOpt_ThreadBackgroundPriorityForEditing
+ */
+ unsigned char ThreadBackgroundPriorityForEditing;
+ /**
+ * \see clang_createIndex()
+ */
+ unsigned ExcludeDeclarationsFromPCH : 1;
+ /**
+ * \see clang_createIndex()
+ */
+ unsigned DisplayDiagnostics : 1;
+ /**
+ * Store PCH in memory. If zero, PCH are stored in temporary files.
+ */
+ unsigned StorePreamblesInMemory : 1;
+ unsigned /*Reserved*/ : 13;
+
+ /**
+ * The path to a directory in which to store temporary PCH files. If null or
+ * empty, the default system temporary directory is used. These PCH files are
+ * deleted on clean exit but stay on disk if the program crashes or is killed.
+ *
+ * This option is ignored if \a StorePreamblesInMemory is non-zero.
+ *
+ * Libclang does not create the directory at the specified path in the file
+ * system. Therefore it must exist, or storing PCH files will fail.
+ */
+ const char *PreambleStoragePath;
+ /**
+ * Specifies a path which will contain log files for certain libclang
+ * invocations. A null value implies that libclang invocations are not logged.
+ */
+ const char *InvocationEmissionPath;
+} CXIndexOptions;
+
+/**
+ * Provides a shared context for creating translation units.
+ *
+ * Call this function instead of clang_createIndex() if you need to configure
+ * the additional options in CXIndexOptions.
+ *
+ * \returns The created index or null in case of error, such as an unsupported
+ * value of options->Size.
+ *
+ * For example:
+ * \code
+ * CXIndex createIndex(const char *ApplicationTemporaryPath) {
+ * const int ExcludeDeclarationsFromPCH = 1;
+ * const int DisplayDiagnostics = 1;
+ * CXIndex Idx;
+ * #if CINDEX_VERSION_MINOR >= 64
+ * CXIndexOptions Opts;
+ * memset(&Opts, 0, sizeof(Opts));
+ * Opts.Size = sizeof(CXIndexOptions);
+ * Opts.ThreadBackgroundPriorityForIndexing = 1;
+ * Opts.ExcludeDeclarationsFromPCH = ExcludeDeclarationsFromPCH;
+ * Opts.DisplayDiagnostics = DisplayDiagnostics;
+ * Opts.PreambleStoragePath = ApplicationTemporaryPath;
+ * Idx = clang_createIndexWithOptions(&Opts);
+ * if (Idx)
+ * return Idx;
+ * fprintf(stderr,
+ * "clang_createIndexWithOptions() failed. "
+ * "CINDEX_VERSION_MINOR = %d, sizeof(CXIndexOptions) = %u\n",
+ * CINDEX_VERSION_MINOR, Opts.Size);
+ * #else
+ * (void)ApplicationTemporaryPath;
+ * #endif
+ * Idx = clang_createIndex(ExcludeDeclarationsFromPCH, DisplayDiagnostics);
+ * clang_CXIndex_setGlobalOptions(
+ * Idx, clang_CXIndex_getGlobalOptions(Idx) |
+ * CXGlobalOpt_ThreadBackgroundPriorityForIndexing);
+ * return Idx;
+ * }
+ * \endcode
+ *
+ * \sa clang_createIndex()
+ */
+CINDEX_LINKAGE CXIndex
+clang_createIndexWithOptions(const CXIndexOptions *options);
+
/**
* Sets general options associated with a CXIndex.
*
+ * This function is DEPRECATED. Set
+ * CXIndexOptions::ThreadBackgroundPriorityForIndexing and/or
+ * CXIndexOptions::ThreadBackgroundPriorityForEditing and call
+ * clang_createIndexWithOptions() instead.
+ *
* For example:
* \code
* CXIndex idx = ...;
@@ -327,6 +469,9 @@ CINDEX_LINKAGE void clang_CXIndex_setGlobalOptions(CXIndex, unsigned options);
/**
* Gets the general options associated with a CXIndex.
*
+ * This function allows the caller to obtain the final option values used by
+ * libclang after specifying the option policies via CXChoice enumerators.
+ *
* \returns A bitmask of options, a bitwise OR of CXGlobalOpt_XXX flags that
* are associated with the given CXIndex object.
*/
@@ -335,6 +480,9 @@ CINDEX_LINKAGE unsigned clang_CXIndex_getGlobalOptions(CXIndex);
/**
* Sets the invocation emission path option in a CXIndex.
*
+ * This function is DEPRECATED. Set CXIndexOptions::InvocationEmissionPath and
+ * call clang_createIndexWithOptions() instead.
+ *
* The invocation emission path specifies a path which will contain log
* files for certain libclang invocations. A null value (default) implies that
* libclang invocations are not logged.
@@ -2787,10 +2935,15 @@ enum CXTypeKind {
CXType_OCLIntelSubgroupAVCImeResult = 169,
CXType_OCLIntelSubgroupAVCRefResult = 170,
CXType_OCLIntelSubgroupAVCSicResult = 171,
+ CXType_OCLIntelSubgroupAVCImeResultSingleReferenceStreamout = 172,
+ CXType_OCLIntelSubgroupAVCImeResultDualReferenceStreamout = 173,
+ CXType_OCLIntelSubgroupAVCImeSingleReferenceStreamin = 174,
+ CXType_OCLIntelSubgroupAVCImeDualReferenceStreamin = 175,
+
+ /* Old aliases for AVC OpenCL extension types. */
CXType_OCLIntelSubgroupAVCImeResultSingleRefStreamout = 172,
CXType_OCLIntelSubgroupAVCImeResultDualRefStreamout = 173,
CXType_OCLIntelSubgroupAVCImeSingleRefStreamin = 174,
-
CXType_OCLIntelSubgroupAVCImeDualRefStreamin = 175,
CXType_ExtVector = 176,
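
Review note: the renamed AVC enumerators keep their numeric values, and the old spellings survive as aliases, so the change is source-compatible. A compile-time check confirming this (hypothetical client code):

```c
#include <clang-c/Index.h>

/* Duplicate enumerator values are legal in C, so the old and new
   spellings compare equal and existing client code keeps working. */
_Static_assert(CXType_OCLIntelSubgroupAVCImeResultSingleRefStreamout ==
                   CXType_OCLIntelSubgroupAVCImeResultSingleReferenceStreamout,
               "old alias keeps its value");
```
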
@@ -2888,9 +3041,25 @@ CINDEX_LINKAGE unsigned long long
clang_getEnumConstantDeclUnsignedValue(CXCursor C);
/**
- * Retrieve the bit width of a bit field declaration as an integer.
+ * Returns non-zero if the cursor specifies a Record member that is a bit-field.
+ */
+CINDEX_LINKAGE unsigned clang_Cursor_isBitField(CXCursor C);
+
+/**
+ * Retrieve the bit width of a bit-field declaration as an integer.
+ *
+ * If the cursor does not reference a bit-field, or if the bit-field's width
+ * expression cannot be evaluated, -1 is returned.
*
- * If a cursor that is not a bit field declaration is passed in, -1 is returned.
+ * For example:
+ * \code
+ * if (clang_Cursor_isBitField(Cursor)) {
+ * int Width = clang_getFieldDeclBitWidth(Cursor);
+ * if (Width != -1) {
+ * // The bit-field width is not value-dependent.
+ * }
+ * }
+ * \endcode
*/
CINDEX_LINKAGE int clang_getFieldDeclBitWidth(CXCursor C);
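
Review note: under the reworded contract, -1 now also covers bit-fields whose width expression is value-dependent, not just non-bit-field cursors. A minimal field-visitor sketch against that contract (hypothetical client code, using clang_Type_visitFields declared later in this header):

```c
#include <clang-c/Index.h>
#include <stdio.h>

/* Print the width of every bit-field member of a record type. */
static enum CXVisitorResult PrintBitFields(CXCursor C, CXClientData Data) {
  (void)Data;
  if (clang_Cursor_isBitField(C)) {
    int Width = clang_getFieldDeclBitWidth(C);
    if (Width != -1) /* -1: width is value-dependent, e.g. in a template */
      printf("bit-field of width %d\n", Width);
  }
  return CXVisit_Continue;
}

/* Usage: clang_Type_visitFields(RecordType, PrintBitFields, NULL); */
```
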
@@ -3519,12 +3688,6 @@ CINDEX_LINKAGE CXType clang_Type_getTemplateArgumentAsType(CXType T,
*/
CINDEX_LINKAGE enum CXRefQualifierKind clang_Type_getCXXRefQualifier(CXType T);
-/**
- * Returns non-zero if the cursor specifies a Record member that is a
- * bitfield.
- */
-CINDEX_LINKAGE unsigned clang_Cursor_isBitField(CXCursor C);
-
/**
* Returns 1 if the base class specified by the cursor with kind
* CX_CXXBaseSpecifier is virtual.
@@ -3697,8 +3860,6 @@ typedef enum CXChildVisitResult (*CXCursorVisitor)(CXCursor cursor,
CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent,
CXCursorVisitor visitor,
CXClientData client_data);
-#ifdef __has_feature
-#if __has_feature(blocks)
/**
* Visitor invoked for each cursor found by a traversal.
*
@@ -3709,8 +3870,12 @@ CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent,
* The visitor should return one of the \c CXChildVisitResult values
* to direct clang_visitChildrenWithBlock().
*/
+#if __has_feature(blocks)
typedef enum CXChildVisitResult (^CXCursorVisitorBlock)(CXCursor cursor,
CXCursor parent);
+#else
+typedef struct _CXChildVisitResult *CXCursorVisitorBlock;
+#endif
/**
* Visits the children of a cursor using the specified block. Behaves
@@ -3718,8 +3883,6 @@ typedef enum CXChildVisitResult (^CXCursorVisitorBlock)(CXCursor cursor,
*/
CINDEX_LINKAGE unsigned
clang_visitChildrenWithBlock(CXCursor parent, CXCursorVisitorBlock block);
-#endif
-#endif
/**
* @}
@@ -4343,6 +4506,51 @@ CINDEX_LINKAGE unsigned clang_CXXMethod_isCopyAssignmentOperator(CXCursor C);
*/
CINDEX_LINKAGE unsigned clang_CXXMethod_isMoveAssignmentOperator(CXCursor C);
+/**
+ * Determines if a C++ constructor or conversion function was declared
+ * explicit, returning 1 if it was and 0 otherwise.
+ *
+ * Constructors or conversion functions are declared explicit through
+ * the use of the explicit specifier.
+ *
+ * For example, the following constructor and conversion function are
+ * not explicit as they lack the explicit specifier:
+ *
+ * class Foo {
+ * Foo();
+ * operator int();
+ * };
+ *
+ * While the following constructor and conversion function are
+ * explicit as they are declared with the explicit specifier:
+ *
+ * class Foo {
+ * explicit Foo();
+ * explicit operator int();
+ * };
+ *
+ * This function will return 0 when given a cursor pointing to one of
+ * the former declarations and it will return 1 for a cursor pointing
+ * to the latter declarations.
+ *
+ * The explicit specifier allows the user to specify a
+ * conditional compile-time expression whose value decides
+ * whether the marked element is explicit or not.
+ *
+ * For example:
+ *
+ * constexpr bool foo(int i) { return i % 2 == 0; }
+ *
+ * class Foo {
+ * explicit(foo(1)) Foo();
+ * explicit(foo(2)) operator int();
+ * };
+ *
+ * This function will return 0 for the constructor and 1 for
+ * the conversion function.
+ */
+CINDEX_LINKAGE unsigned clang_CXXMethod_isExplicit(CXCursor C);
+
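
Review note: a minimal usage sketch for the new query (hypothetical client code):

```c
#include <clang-c/Index.h>
#include <stdio.h>

/* Report whether a constructor or conversion function was declared
   with the explicit specifier. */
static void ReportExplicit(CXCursor C) {
  enum CXCursorKind Kind = clang_getCursorKind(C);
  if (Kind == CXCursor_Constructor || Kind == CXCursor_ConversionFunction)
    printf("explicit: %u\n", clang_CXXMethod_isExplicit(C));
}
```
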
/**
* Determine if a C++ record is abstract, i.e. whether a class or struct
* has a pure virtual member function.
@@ -5675,11 +5883,12 @@ CINDEX_LINKAGE CXResult clang_findReferencesInFile(
CINDEX_LINKAGE CXResult clang_findIncludesInFile(
CXTranslationUnit TU, CXFile file, CXCursorAndRangeVisitor visitor);
-#ifdef __has_feature
#if __has_feature(blocks)
-
typedef enum CXVisitorResult (^CXCursorAndRangeVisitorBlock)(CXCursor,
CXSourceRange);
+#else
+typedef struct _CXCursorAndRangeVisitorBlock *CXCursorAndRangeVisitorBlock;
+#endif
CINDEX_LINKAGE
CXResult clang_findReferencesInFileWithBlock(CXCursor, CXFile,
@@ -5689,9 +5898,6 @@ CINDEX_LINKAGE
CXResult clang_findIncludesInFileWithBlock(CXTranslationUnit, CXFile,
CXCursorAndRangeVisitorBlock);
-#endif
-#endif
-
/**
* The client's data object that is associated with a CXFile.
*/
@@ -6304,6 +6510,144 @@ typedef enum CXVisitorResult (*CXFieldVisitor)(CXCursor C,
CINDEX_LINKAGE unsigned clang_Type_visitFields(CXType T, CXFieldVisitor visitor,
CXClientData client_data);
+/**
+ * Describes the kind of binary operators.
+ */
+enum CXBinaryOperatorKind {
+ /** This value describes cursors which are not binary operators. */
+ CXBinaryOperator_Invalid,
+ /** C++ pointer-to-member operator (".*"). */
+ CXBinaryOperator_PtrMemD,
+ /** C++ pointer-to-member operator ("->*"). */
+ CXBinaryOperator_PtrMemI,
+ /** Multiplication operator. */
+ CXBinaryOperator_Mul,
+ /** Division operator. */
+ CXBinaryOperator_Div,
+ /** Remainder operator. */
+ CXBinaryOperator_Rem,
+ /** Addition operator. */
+ CXBinaryOperator_Add,
+ /** Subtraction operator. */
+ CXBinaryOperator_Sub,
+ /** Bitwise shift left operator. */
+ CXBinaryOperator_Shl,
+ /** Bitwise shift right operator. */
+ CXBinaryOperator_Shr,
+ /** C++ three-way comparison (spaceship) operator. */
+ CXBinaryOperator_Cmp,
+ /** Less than operator. */
+ CXBinaryOperator_LT,
+ /** Greater than operator. */
+ CXBinaryOperator_GT,
+ /** Less or equal operator. */
+ CXBinaryOperator_LE,
+ /** Greater or equal operator. */
+ CXBinaryOperator_GE,
+ /** Equal operator. */
+ CXBinaryOperator_EQ,
+ /** Not equal operator. */
+ CXBinaryOperator_NE,
+ /** Bitwise AND operator. */
+ CXBinaryOperator_And,
+ /** Bitwise XOR operator. */
+ CXBinaryOperator_Xor,
+ /** Bitwise OR operator. */
+ CXBinaryOperator_Or,
+ /** Logical AND operator. */
+ CXBinaryOperator_LAnd,
+ /** Logical OR operator. */
+ CXBinaryOperator_LOr,
+ /** Assignment operator. */
+ CXBinaryOperator_Assign,
+ /** Multiplication assignment operator. */
+ CXBinaryOperator_MulAssign,
+ /** Division assignment operator. */
+ CXBinaryOperator_DivAssign,
+ /** Remainder assignment operator. */
+ CXBinaryOperator_RemAssign,
+ /** Addition assignment operator. */
+ CXBinaryOperator_AddAssign,
+ /** Subtraction assignment operator. */
+ CXBinaryOperator_SubAssign,
+ /** Bitwise shift left assignment operator. */
+ CXBinaryOperator_ShlAssign,
+ /** Bitwise shift right assignment operator. */
+ CXBinaryOperator_ShrAssign,
+ /** Bitwise AND assignment operator. */
+ CXBinaryOperator_AndAssign,
+ /** Bitwise XOR assignment operator. */
+ CXBinaryOperator_XorAssign,
+ /** Bitwise OR assignment operator. */
+ CXBinaryOperator_OrAssign,
+ /** Comma operator. */
+ CXBinaryOperator_Comma
+};
+
+/**
+ * Retrieve the spelling of a given CXBinaryOperatorKind.
+ */
+CINDEX_LINKAGE CXString
+clang_getBinaryOperatorKindSpelling(enum CXBinaryOperatorKind kind);
+
+/**
+ * Retrieve the binary operator kind of this cursor.
+ *
+ * If this cursor is not a binary operator, Invalid is returned.
+ */
+CINDEX_LINKAGE enum CXBinaryOperatorKind
+clang_getCursorBinaryOperatorKind(CXCursor cursor);
+
+/**
+ * Describes the kind of unary operators.
+ */
+enum CXUnaryOperatorKind {
+ /** This value describes cursors which are not unary operators. */
+ CXUnaryOperator_Invalid,
+ /** Postfix increment operator. */
+ CXUnaryOperator_PostInc,
+ /** Postfix decrement operator. */
+ CXUnaryOperator_PostDec,
+ /** Prefix increment operator. */
+ CXUnaryOperator_PreInc,
+ /** Prefix decrement operator. */
+ CXUnaryOperator_PreDec,
+ /** Address of operator. */
+ CXUnaryOperator_AddrOf,
+ /** Dereference operator. */
+ CXUnaryOperator_Deref,
+ /** Plus operator. */
+ CXUnaryOperator_Plus,
+ /** Minus operator. */
+ CXUnaryOperator_Minus,
+ /** Bitwise not operator ("~"). */
+ CXUnaryOperator_Not,
+ /** Logical not operator ("!"). */
+ CXUnaryOperator_LNot,
+ /** "__real expr" operator. */
+ CXUnaryOperator_Real,
+ /** "__imag expr" operator. */
+ CXUnaryOperator_Imag,
+ /** __extension__ marker operator. */
+ CXUnaryOperator_Extension,
+ /** C++ co_await operator. */
+ CXUnaryOperator_Coawait
+};
+
+/**
+ * Retrieve the spelling of a given CXUnaryOperatorKind.
+ */
+CINDEX_LINKAGE CXString
+clang_getUnaryOperatorKindSpelling(enum CXUnaryOperatorKind kind);
+
+/**
+ * Retrieve the unary operator kind of this cursor.
+ *
+ * If this cursor is not a unary operator, Invalid is returned.
+ */
+CINDEX_LINKAGE enum CXUnaryOperatorKind
+clang_getCursorUnaryOperatorKind(CXCursor cursor);
+
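
Review note: a sketch combining both new operator-kind queries inside a child visitor (hypothetical client code):

```c
#include <clang-c/Index.h>
#include <stdio.h>

/* Print the spelling of every binary and unary operator below a cursor. */
static enum CXChildVisitResult PrintOps(CXCursor C, CXCursor Parent,
                                        CXClientData Data) {
  (void)Parent; (void)Data;
  enum CXBinaryOperatorKind B = clang_getCursorBinaryOperatorKind(C);
  if (B != CXBinaryOperator_Invalid) {
    CXString S = clang_getBinaryOperatorKindSpelling(B);
    printf("binary op: %s\n", clang_getCString(S));
    clang_disposeString(S);
  }
  enum CXUnaryOperatorKind U = clang_getCursorUnaryOperatorKind(C);
  if (U != CXUnaryOperator_Invalid) {
    CXString S = clang_getUnaryOperatorKindSpelling(U);
    printf("unary op: %s\n", clang_getCString(S));
    clang_disposeString(S);
  }
  return CXChildVisit_Recurse;
}

/* Usage: clang_visitChildren(Root, PrintOps, NULL); */
```
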
/**
* @}
*/
diff --git a/cpp/llvm/include/clang-c/module.modulemap b/cpp/llvm/include/clang-c/module.modulemap
deleted file mode 100644
index 95a59d6234..0000000000
--- a/cpp/llvm/include/clang-c/module.modulemap
+++ /dev/null
@@ -1,4 +0,0 @@
-module Clang_C {
- umbrella "."
- module * { export * }
-}
diff --git a/cpp/ycm/CMakeLists.txt b/cpp/ycm/CMakeLists.txt
index 0b6a3cd7b6..1bad2a6bef 100644
--- a/cpp/ycm/CMakeLists.txt
+++ b/cpp/ycm/CMakeLists.txt
@@ -30,41 +30,41 @@ if ( USE_CLANG_COMPLETER AND
NOT PATH_TO_LLVM_ROOT AND
NOT EXTERNAL_LIBCLANG_PATH )
- set( CLANG_VERSION 16.0.1 )
+ set( CLANG_VERSION 17.0.1 )
if ( APPLE )
if ( "${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "arm64" )
set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-arm64-apple-darwin" )
set( LIBCLANG_SHA256
- "3fd9230f591fc2cb081b3088d0b640b3692812adc59d03fb015441a65c68c328" )
+ "e90a409dc408214fc553e3b3df2a71f6d67fdd34d9441b6c2be1a043e9542f06" )
else()
set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-x86_64-apple-darwin" )
set( LIBCLANG_SHA256
- "43f7e4e72bc1d661eb01ee61666ee3a62a97d2993586c0b98efa6f46a96e768f" )
+ "b70786d68e71b5988fda8c7c377e301a0817ea280f425639e976a573ef266473" )
endif()
elseif ( WIN32 )
if( 64_BIT_PLATFORM )
set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-win64" )
set( LIBCLANG_SHA256
- "06280c023ff339af29d68ea66366507607b03d01061d3d7066875b2ff4f78c29" )
+ "7bbb980c2bc5a69ca1b93b8a6a671abb1ad8cab5a5b9f7fff6f7fa300fc1bf07" )
else()
set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-win32" )
set( LIBCLANG_SHA256
- "8f424ac12623638d8f8fa5f410499fe02a4a4ea9d1e02facdc484db9f1b0f4d8" )
+ "ef50790e2b01bfb701cd14ec315431a60da7921fc78ac893c0af0b956d6e2223" )
endif()
elseif ( CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)" )
set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-aarch64-linux-gnu" )
set( LIBCLANG_SHA256
- "518725f324e425cc3d0eafd1897cbf9cc35d9e442983b5efaa19112b73ae0ebf" )
+ "829e4b81d9fddd70ed8bcbeffd1feea909369434b225612148e833fb9b16265b" )
elseif ( CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)" )
set( LIBCLANG_DIRNAME "libclang-${CLANG_VERSION}-armv7a-linux-gnueabihf" )
set( LIBCLANG_SHA256
- "5b45929923ec241bec18b714765554eb31365c4c90bf58529555665edd79a2f1" )
+ "fdc3df9ef3fe15868340bc0dcd4d0c74814edd06be1d79796b8a402db8aee723" )
elseif ( CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64)" )
set( LIBCLANG_DIRNAME
"libclang-${CLANG_VERSION}-x86_64-unknown-linux-gnu" )
set( LIBCLANG_SHA256
- "e0c69d229f6dd91d0530508fa28250f658cb27d7b8825394bf539f8cc1db8c9c" )
+ "bd1ab9ab8e8ccdb46064178bc54a45e7e980b5451cff4fa468596a414e1f7b46" )
else()
message( FATAL_ERROR
"No prebuilt Clang ${CLANG_VERSION} binaries for this system. "
@@ -418,6 +418,12 @@ endif()
#############################################################################
+if ( CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 12 )
+ target_compile_options( ${PROJECT_NAME} PRIVATE "-Wno-bidi-chars" )
+endif()
+
+#############################################################################
+
if( SYSTEM_IS_SUNOS )
# SunOS needs this setting for thread support
target_compile_options( ${PROJECT_NAME} PUBLIC "-pthreads" )
diff --git a/third_party/clang/lib/clang/16.0.0/include/adxintrin.h b/third_party/clang/lib/clang/16.0.0/include/adxintrin.h
deleted file mode 100644
index 72b9ed08f4..0000000000
--- a/third_party/clang/lib/clang/16.0.0/include/adxintrin.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*===---- adxintrin.h - ADX intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use directly; include instead."
-#endif
-
-#ifndef __ADXINTRIN_H
-#define __ADXINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-
-/* Intrinsics that are available only if __ADX__ defined */
-static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
-_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
- unsigned int *__p)
-{
- return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
-}
-
-#ifdef __x86_64__
-static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
-_addcarryx_u64(unsigned char __cf, unsigned long long __x,
- unsigned long long __y, unsigned long long *__p)
-{
- return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
-}
-#endif
-
-/* Intrinsics that are also available if __ADX__ undefined */
-static __inline unsigned char __DEFAULT_FN_ATTRS
-_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
- unsigned int *__p)
-{
- return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
-}
-
-#ifdef __x86_64__
-static __inline unsigned char __DEFAULT_FN_ATTRS
-_addcarry_u64(unsigned char __cf, unsigned long long __x,
- unsigned long long __y, unsigned long long *__p)
-{
- return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
-}
-#endif
-
-static __inline unsigned char __DEFAULT_FN_ATTRS
-_subborrow_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
- unsigned int *__p)
-{
- return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);
-}
-
-#ifdef __x86_64__
-static __inline unsigned char __DEFAULT_FN_ATTRS
-_subborrow_u64(unsigned char __cf, unsigned long long __x,
- unsigned long long __y, unsigned long long *__p)
-{
- return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);
-}
-#endif
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __ADXINTRIN_H */
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx2intrin.h b/third_party/clang/lib/clang/16.0.0/include/avx2intrin.h
deleted file mode 100644
index f8521e7d72..0000000000
--- a/third_party/clang/lib/clang/16.0.0/include/avx2intrin.h
+++ /dev/null
@@ -1,1148 +0,0 @@
-/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use directly; include instead."
-#endif
-
-#ifndef __AVX2INTRIN_H
-#define __AVX2INTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(256)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128)))
-
-/* SSE4 Multiple Packed Sums of Absolute Difference. */
-#define _mm256_mpsadbw_epu8(X, Y, M) \
- ((__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
- (__v32qi)(__m256i)(Y), (int)(M)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi8(__m256i __a)
-{
- return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi16(__m256i __a)
-{
- return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_abs_epi32(__m256i __a)
-{
- return (__m256i)__builtin_elementwise_abs((__v8si)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packs_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packs_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packus_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_packus_epi32(__m256i __V1, __m256i __V2)
-{
- return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v32qu)__a + (__v32qu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v16hu)__a + (__v16hu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v8su)__a + (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_add_epi64(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v4du)__a + (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epu8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_adds_epu16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b);
-}
-
-#define _mm256_alignr_epi8(a, b, n) \
- ((__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
- (__v32qi)(__m256i)(b), (n)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_and_si256(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v4du)__a & (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_andnot_si256(__m256i __a, __m256i __b)
-{
- return (__m256i)(~(__v4du)__a & (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_avg_epu8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_avg_epu16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
-{
- return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
- (__v32qi)__M);
-}
-
-#define _mm256_blend_epi16(V1, V2, M) \
- ((__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \
- (__v16hi)(__m256i)(V2), (int)(M)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v32qi)__a == (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v16hi)__a == (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v8si)__a == (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v4di)__a == (__v4di)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
-{
- /* This function always performs a signed comparison, but __v32qi is a char
- which may be signed or unsigned, so use __v32qs. */
- return (__m256i)((__v32qs)__a > (__v32qs)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v16hi)__a > (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v8si)__a > (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v4di)__a > (__v4di)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadd_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadd_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadds_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsub_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsub_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsubs_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maddubs_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_madd_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epu8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epu16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_max_epu32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epu8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epu16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_min_epu32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS256
-_mm256_movemask_epi8(__m256i __a)
-{
- return __builtin_ia32_pmovmskb256((__v32qi)__a);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi16(__m128i __V)
-{
- /* This function always performs a signed extension, but __v16qi is a char
- which may be signed or unsigned, so use __v16qs. */
- return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi32(__m128i __V)
-{
- /* This function always performs a signed extension, but __v16qi is a char
- which may be signed or unsigned, so use __v16qs. */
- return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi8_epi64(__m128i __V)
-{
- /* This function always performs a signed extension, but __v16qi is a char
- which may be signed or unsigned, so use __v16qs. */
- return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi16_epi32(__m128i __V)
-{
- return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi16_epi64(__m128i __V)
-{
- return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepi32_epi64(__m128i __V)
-{
- return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi16(__m128i __V)
-{
- return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi32(__m128i __V)
-{
- return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu8_epi64(__m128i __V)
-{
- return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu16_epi32(__m128i __V)
-{
- return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu16_epi64(__m128i __V)
-{
- return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtepu32_epi64(__m128i __V)
-{
- return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mul_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhi_epu16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhi_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mullo_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v16hu)__a * (__v16hu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mullo_epi32 (__m256i __a, __m256i __b)
-{
- return (__m256i)((__v8su)__a * (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mul_epu32(__m256i __a, __m256i __b)
-{
- return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_or_si256(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v4du)__a | (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sad_epu8(__m256i __a, __m256i __b)
-{
- return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_shuffle_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
-}
-
-#define _mm256_shuffle_epi32(a, imm) \
- ((__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm)))
-
-#define _mm256_shufflehi_epi16(a, imm) \
- ((__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm)))
-
-#define _mm256_shufflelo_epi16(a, imm) \
- ((__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sign_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
-}
-
-#define _mm256_slli_si256(a, imm) \
- ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
-
-#define _mm256_bslli_epi128(a, imm) \
- ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi16(__m256i __a, int __count)
-{
- return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sll_epi16(__m256i __a, __m128i __count)
-{
- return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi32(__m256i __a, int __count)
-{
- return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sll_epi32(__m256i __a, __m128i __count)
-{
- return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_slli_epi64(__m256i __a, int __count)
-{
- return __builtin_ia32_psllqi256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sll_epi64(__m256i __a, __m128i __count)
-{
- return __builtin_ia32_psllq256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi16(__m256i __a, int __count)
-{
- return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sra_epi16(__m256i __a, __m128i __count)
-{
- return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srai_epi32(__m256i __a, int __count)
-{
- return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sra_epi32(__m256i __a, __m128i __count)
-{
- return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
-}
-
-#define _mm256_srli_si256(a, imm) \
- ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))
-
-#define _mm256_bsrli_epi128(a, imm) \
- ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi16(__m256i __a, int __count)
-{
- return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srl_epi16(__m256i __a, __m128i __count)
-{
- return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi32(__m256i __a, int __count)
-{
- return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srl_epi32(__m256i __a, __m128i __count)
-{
- return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srli_epi64(__m256i __a, int __count)
-{
- return __builtin_ia32_psrlqi256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srl_epi64(__m256i __a, __m128i __count)
-{
- return __builtin_ia32_psrlq256((__v4di)__a, __count);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v32qu)__a - (__v32qu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v16hu)__a - (__v16hu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v8su)__a - (__v8su)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sub_epi64(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v4du)__a - (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epu8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_subs_epu16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_xor_si256(__m256i __a, __m256i __b)
-{
- return (__m256i)((__v4du)__a ^ (__v4du)__b);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_stream_load_si256(__m256i const *__V)
-{
- typedef __v4di __v4di_aligned __attribute__((aligned(32)));
- return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_broadcastss_ps(__m128 __X)
-{
- return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_broadcastsd_pd(__m128d __a)
-{
- return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_broadcastss_ps(__m128 __X)
-{
- return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_broadcastsd_pd(__m128d __X)
-{
- return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastsi128_si256(__m128i __X)
-{
- return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
-}
-
-#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
-
-#define _mm_blend_epi32(V1, V2, M) \
- ((__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
- (__v4si)(__m128i)(V2), (int)(M)))
-
-#define _mm256_blend_epi32(V1, V2, M) \
- ((__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
- (__v8si)(__m256i)(V2), (int)(M)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastb_epi8(__m128i __X)
-{
- return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastw_epi16(__m128i __X)
-{
- return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastd_epi32(__m128i __X)
-{
- return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_broadcastq_epi64(__m128i __X)
-{
- return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastb_epi8(__m128i __X)
-{
- return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastw_epi16(__m128i __X)
-{
- return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastd_epi32(__m128i __X)
-{
- return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_broadcastq_epi64(__m128i __X)
-{
- return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
-}
-
-#define _mm256_permute4x64_pd(V, M) \
- ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M)))
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
-{
- return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
-}
-
-#define _mm256_permute4x64_epi64(V, M) \
- ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M)))
-
-#define _mm256_permute2x128_si256(V1, V2, M) \
- ((__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M)))
-
-#define _mm256_extracti128_si256(V, M) \
- ((__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M)))
-
-#define _mm256_inserti128_si256(V1, V2, M) \
- ((__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \
- (__v2di)(__m128i)(V2), (int)(M)))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskload_epi32(int const *__X, __m256i __M)
-{
- return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskload_epi64(long long const *__X, __m256i __M)
-{
- return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskload_epi32(int const *__X, __m128i __M)
-{
- return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskload_epi64(long long const *__X, __m128i __M)
-{
- return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
-{
- __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
-{
- __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
-{
- __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
-{
- __builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sllv_epi32(__m256i __X, __m256i __Y)
-{
- return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_sllv_epi32(__m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_sllv_epi64(__m256i __X, __m256i __Y)
-{
- return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_sllv_epi64(__m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srav_epi32(__m256i __X, __m256i __Y)
-{
- return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srav_epi32(__m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srlv_epi32(__m256i __X, __m256i __Y)
-{
- return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srlv_epi32(__m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_srlv_epi64(__m256i __X, __m256i __Y)
-{
- return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_srlv_epi64(__m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
-}
-
-#define _mm_mask_i32gather_pd(a, m, i, mask, s) \
- ((__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \
- (double const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v2df)(__m128d)(mask), (s)))
-
-#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \
- ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
- (double const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4df)(__m256d)(mask), (s)))
-
-#define _mm_mask_i64gather_pd(a, m, i, mask, s) \
- ((__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
- (double const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v2df)(__m128d)(mask), (s)))
-
-#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \
- ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
- (double const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4df)(__m256d)(mask), (s)))
-
-#define _mm_mask_i32gather_ps(a, m, i, mask, s) \
- ((__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
- (float const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4sf)(__m128)(mask), (s)))
-
-#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \
- ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
- (float const *)(m), \
- (__v8si)(__m256i)(i), \
- (__v8sf)(__m256)(mask), (s)))
-
-#define _mm_mask_i64gather_ps(a, m, i, mask, s) \
- ((__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
- (float const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v4sf)(__m128)(mask), (s)))
-
-#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \
- ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
- (float const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4sf)(__m128)(mask), (s)))
-
-#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \
- ((__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
- (int const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4si)(__m128i)(mask), (s)))
-
-#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \
- ((__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
- (int const *)(m), \
- (__v8si)(__m256i)(i), \
- (__v8si)(__m256i)(mask), (s)))
-
-#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \
- ((__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
- (int const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v4si)(__m128i)(mask), (s)))
-
-#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \
- ((__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
- (int const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4si)(__m128i)(mask), (s)))
-
-#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \
- ((__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
- (long long const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v2di)(__m128i)(mask), (s)))
-
-#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \
- ((__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
- (long long const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4di)(__m256i)(mask), (s)))
-
-#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \
- ((__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
- (long long const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v2di)(__m128i)(mask), (s)))
-
-#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \
- ((__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
- (long long const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4di)(__m256i)(mask), (s)))
-
-#define _mm_i32gather_pd(m, i, s) \
- ((__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
- (double const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
- _mm_setzero_pd()), \
- (s)))
-
-#define _mm256_i32gather_pd(m, i, s) \
- ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
- (double const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
- _mm256_setzero_pd(), \
- _CMP_EQ_OQ), \
- (s)))
-
-#define _mm_i64gather_pd(m, i, s) \
- ((__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
- (double const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
- _mm_setzero_pd()), \
- (s)))
-
-#define _mm256_i64gather_pd(m, i, s) \
- ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
- (double const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
- _mm256_setzero_pd(), \
- _CMP_EQ_OQ), \
- (s)))
-
-#define _mm_i32gather_ps(m, i, s) \
- ((__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
- (float const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
- _mm_setzero_ps()), \
- (s)))
-
-#define _mm256_i32gather_ps(m, i, s) \
- ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
- (float const *)(m), \
- (__v8si)(__m256i)(i), \
- (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \
- _mm256_setzero_ps(), \
- _CMP_EQ_OQ), \
- (s)))
-
-#define _mm_i64gather_ps(m, i, s) \
- ((__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
- (float const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
- _mm_setzero_ps()), \
- (s)))
-
-#define _mm256_i64gather_ps(m, i, s) \
- ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
- (float const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
- _mm_setzero_ps()), \
- (s)))
-
-#define _mm_i32gather_epi32(m, i, s) \
- ((__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
- (int const *)(m), (__v4si)(__m128i)(i), \
- (__v4si)_mm_set1_epi32(-1), (s)))
-
-#define _mm256_i32gather_epi32(m, i, s) \
- ((__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
- (int const *)(m), (__v8si)(__m256i)(i), \
- (__v8si)_mm256_set1_epi32(-1), (s)))
-
-#define _mm_i64gather_epi32(m, i, s) \
- ((__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
- (int const *)(m), (__v2di)(__m128i)(i), \
- (__v4si)_mm_set1_epi32(-1), (s)))
-
-#define _mm256_i64gather_epi32(m, i, s) \
- ((__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
- (int const *)(m), (__v4di)(__m256i)(i), \
- (__v4si)_mm_set1_epi32(-1), (s)))
-
-#define _mm_i32gather_epi64(m, i, s) \
- ((__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
- (long long const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v2di)_mm_set1_epi64x(-1), (s)))
-
-#define _mm256_i32gather_epi64(m, i, s) \
- ((__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
- (long long const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4di)_mm256_set1_epi64x(-1), (s)))
-
-#define _mm_i64gather_epi64(m, i, s) \
- ((__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
- (long long const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v2di)_mm_set1_epi64x(-1), (s)))
-
-#define _mm256_i64gather_epi64(m, i, s) \
- ((__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
- (long long const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4di)_mm256_set1_epi64x(-1), (s)))
-
-#undef __DEFAULT_FN_ATTRS256
-#undef __DEFAULT_FN_ATTRS128
-
-#endif /* __AVX2INTRIN_H */
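
For context on what the deleted definitions back, here is a minimal sketch, assuming an AVX2-capable build (e.g. clang -mavx2), of user code exercising the masked-load and gather intrinsics through <immintrin.h>; the buffer names and values are illustrative.

```c
/* Minimal sketch, assuming an AVX2 build (e.g. clang -mavx2). */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  int src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

  /* The sign bit of each mask lane decides whether that element is loaded;
     masked-off lanes read as zero. */
  __m256i mask = _mm256_setr_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
  __m256i v = _mm256_maskload_epi32(src, mask);

  /* Gather src[7], src[5], src[3], src[1]; the scale argument (4) is the
     element size in bytes and must be a compile-time constant. */
  __m256i idx = _mm256_setr_epi32(7, 5, 3, 1, 0, 0, 0, 0);
  __m256i g = _mm256_i32gather_epi32(src, idx, 4);

  int out[8];
  _mm256_storeu_si256((__m256i *)out, v);
  printf("maskload: %d %d\n", out[0], out[1]); /* 1 0 */
  _mm256_storeu_si256((__m256i *)out, g);
  printf("gather:   %d %d\n", out[0], out[1]); /* 8 6 */
  return 0;
}
```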
diff --git a/third_party/clang/lib/clang/16.0.0/include/bmi2intrin.h b/third_party/clang/lib/clang/16.0.0/include/bmi2intrin.h
deleted file mode 100644
index 0b56aed5f4..0000000000
--- a/third_party/clang/lib/clang/16.0.0/include/bmi2intrin.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
-#error "Never use directly; include instead."
-#endif
-
-#ifndef __BMI2INTRIN_H
-#define __BMI2INTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2")))
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_bzhi_u32(unsigned int __X, unsigned int __Y)
-{
- return __builtin_ia32_bzhi_si(__X, __Y);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_pdep_u32(unsigned int __X, unsigned int __Y)
-{
- return __builtin_ia32_pdep_si(__X, __Y);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_pext_u32(unsigned int __X, unsigned int __Y)
-{
- return __builtin_ia32_pext_si(__X, __Y);
-}
-
-#ifdef __x86_64__
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_bzhi_u64(unsigned long long __X, unsigned long long __Y)
-{
- return __builtin_ia32_bzhi_di(__X, __Y);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_pdep_u64(unsigned long long __X, unsigned long long __Y)
-{
- return __builtin_ia32_pdep_di(__X, __Y);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_pext_u64(unsigned long long __X, unsigned long long __Y)
-{
- return __builtin_ia32_pext_di(__X, __Y);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_mulx_u64 (unsigned long long __X, unsigned long long __Y,
- unsigned long long *__P)
-{
- unsigned __int128 __res = (unsigned __int128) __X * __Y;
- *__P = (unsigned long long) (__res >> 64);
- return (unsigned long long) __res;
-}
-
-#else /* !__x86_64__ */
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P)
-{
- unsigned long long __res = (unsigned long long) __X * __Y;
- *__P = (unsigned int) (__res >> 32);
- return (unsigned int) __res;
-}
-
-#endif /* !__x86_64__ */
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __BMI2INTRIN_H */
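
The pdep/pext pair above implements bit scatter and gather. A minimal sketch, assuming BMI2 hardware (e.g. clang -mbmi2), with illustrative values:

```c
/* Minimal sketch, assuming BMI2 support (e.g. clang -mbmi2).
   _pext_u32 compresses the bits of its first operand selected by the mask;
   _pdep_u32 scatters them back; _bzhi_u32 zeroes bits at and above an index. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  unsigned x = 0xF0u; /* bits 4..7 set */
  unsigned m = 0xAAu; /* mask selecting the odd bit positions */

  unsigned packed = _pext_u32(x, m);      /* 0xc: bits 5 and 7 of x were set */
  unsigned spread = _pdep_u32(packed, m); /* 0xa0: deposited back at 5 and 7 */

  printf("pext=%#x pdep=%#x bzhi=%#x\n", packed, spread, _bzhi_u32(x, 6));
  return 0; /* prints: pext=0xc pdep=0xa0 bzhi=0x30 */
}
```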
diff --git a/third_party/clang/lib/clang/16.0.0/include/fmaintrin.h b/third_party/clang/lib/clang/16.0.0/include/fmaintrin.h
deleted file mode 100644
index d889b7c5e2..0000000000
--- a/third_party/clang/lib/clang/16.0.0/include/fmaintrin.h
+++ /dev/null
@@ -1,216 +0,0 @@
-/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use directly; include instead."
-#endif
-
-#ifndef __FMAINTRIN_H
-#define __FMAINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(256)))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
-{
- return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
-{
- return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
-{
- return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
-{
- return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
-{
- return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
-{
- return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
-{
- return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
-{
- return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
-{
- return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
-{
- return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
-{
- return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
-{
- return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS256
-_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
-{
- return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
-}
-
-static __inline__ __m256d __DEFAULT_FN_ATTRS256
-_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
-{
- return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
-}
-
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS256
-
-#endif /* __FMAINTRIN_H */
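
The file above derives the subtract and negated variants from a single fused builtin by flipping operand signs. A minimal sketch, assuming FMA hardware (e.g. clang -mfma), checking that identity for one case:

```c
/* Minimal sketch, assuming FMA support (e.g. clang -mfma): fmsub(a, b, c)
   equals fmadd(a, b, -c), mirroring how the deleted header builds one from
   the other with a sign flip. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128 a = _mm_set1_ps(1.5f), b = _mm_set1_ps(2.0f), c = _mm_set1_ps(0.5f);

  __m128 direct = _mm_fmsub_ps(a, b, c); /* a*b - c, single rounding */
  __m128 manual = _mm_fmadd_ps(a, b, _mm_sub_ps(_mm_setzero_ps(), c));

  float r[4], s[4];
  _mm_storeu_ps(r, direct);
  _mm_storeu_ps(s, manual);
  printf("%g %g\n", r[0], s[0]); /* 2.5 2.5 */
  return 0;
}
```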
diff --git a/third_party/clang/lib/clang/16.0.0/include/hlsl/hlsl_intrinsics.h b/third_party/clang/lib/clang/16.0.0/include/hlsl/hlsl_intrinsics.h
deleted file mode 100644
index d811a28a43..0000000000
--- a/third_party/clang/lib/clang/16.0.0/include/hlsl/hlsl_intrinsics.h
+++ /dev/null
@@ -1,223 +0,0 @@
-//===----- hlsl_intrinsics.h - HLSL definitions for intrinsics ----------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _HLSL_HLSL_INTRINSICS_H_
-#define _HLSL_HLSL_INTRINSICS_H_
-
-namespace hlsl {
-
-__attribute__((availability(shadermodel, introduced = 6.0)))
-__attribute__((clang_builtin_alias(__builtin_hlsl_wave_active_count_bits))) uint
-WaveActiveCountBits(bool bBit);
-
-// abs builtins
-#ifdef __HLSL_ENABLE_16_BIT
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-int16_t abs(int16_t);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-int16_t2 abs(int16_t2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-int16_t3 abs(int16_t3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-int16_t4 abs(int16_t4);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) half abs(half);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-half2 abs(half2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-half3 abs(half3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-half4 abs(half4);
-#endif
-
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int abs(int);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int2 abs(int2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int3 abs(int3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) int4 abs(int4);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) float
-abs(float);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-float2 abs(float2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-float3 abs(float3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-float4 abs(float4);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-int64_t abs(int64_t);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-int64_t2 abs(int64_t2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-int64_t3 abs(int64_t3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-int64_t4 abs(int64_t4);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs))) double
-abs(double);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-double2 abs(double2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-double3 abs(double3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_abs)))
-double4 abs(double4);
-
-// sqrt builtins
-__attribute__((clang_builtin_alias(__builtin_sqrt))) double sqrt(double In);
-__attribute__((clang_builtin_alias(__builtin_sqrtf))) float sqrt(float In);
-
-#ifdef __HLSL_ENABLE_16_BIT
-__attribute__((clang_builtin_alias(__builtin_sqrtf16))) half sqrt(half In);
-#endif
-
-// ceil builtins
-#ifdef __HLSL_ENABLE_16_BIT
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
-half ceil(half);
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
-half2 ceil(half2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
-half3 ceil(half3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
-half4 ceil(half4);
-#endif
-
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) float
-ceil(float);
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
-float2 ceil(float2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
-float3 ceil(float3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
-float4 ceil(float4);
-
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil))) double
-ceil(double);
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
-double2 ceil(double2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
-double3 ceil(double3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_ceil)))
-double4 ceil(double4);
-
-// floor builtins
-#ifdef __HLSL_ENABLE_16_BIT
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor)))
-half floor(half);
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor)))
-half2 floor(half2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor)))
-half3 floor(half3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor)))
-half4 floor(half4);
-#endif
-
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) float
-floor(float);
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor)))
-float2 floor(float2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor)))
-float3 floor(float3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor)))
-float4 floor(float4);
-
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor))) double
-floor(double);
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor)))
-double2 floor(double2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor)))
-double3 floor(double3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_floor)))
-double4 floor(double4);
-
-// cos builtins
-#ifdef __HLSL_ENABLE_16_BIT
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) half cos(half);
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos)))
-half2 cos(half2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos)))
-half3 cos(half3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos)))
-half4 cos(half4);
-#endif
-
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) float
-cos(float);
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos)))
-float2 cos(float2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos)))
-float3 cos(float3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos)))
-float4 cos(float4);
-
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos))) double
-cos(double);
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos)))
-double2 cos(double2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos)))
-double3 cos(double3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_cos)))
-double4 cos(double4);
-
-// sin builtins
-#ifdef __HLSL_ENABLE_16_BIT
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) half sin(half);
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin)))
-half2 sin(half2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin)))
-half3 sin(half3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin)))
-half4 sin(half4);
-#endif
-
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) float
-sin(float);
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin)))
-float2 sin(float2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin)))
-float3 sin(float3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin)))
-float4 sin(float4);
-
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin))) double
-sin(double);
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin)))
-double2 sin(double2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin)))
-double3 sin(double3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_sin)))
-double4 sin(double4);
-
-// trunc builtins
-#ifdef __HLSL_ENABLE_16_BIT
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc)))
-half trunc(half);
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc)))
-half2 trunc(half2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc)))
-half3 trunc(half3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc)))
-half4 trunc(half4);
-#endif
-
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) float
-trunc(float);
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc)))
-float2 trunc(float2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc)))
-float3 trunc(float3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc)))
-float4 trunc(float4);
-
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc))) double
-trunc(double);
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc)))
-double2 trunc(double2);
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc)))
-double3 trunc(double3);
-__attribute__((clang_builtin_alias(__builtin_elementwise_trunc)))
-double4 trunc(double4);
-
-} // namespace hlsl
-#endif //_HLSL_HLSL_INTRINSICS_H_
diff --git a/third_party/clang/lib/clang/16.0.0/include/mwaitxintrin.h b/third_party/clang/lib/clang/16.0.0/include/mwaitxintrin.h
deleted file mode 100644
index ed485380af..0000000000
--- a/third_party/clang/lib/clang/16.0.0/include/mwaitxintrin.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*===---- mwaitxintrin.h - MONITORX/MWAITX intrinsics ----------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __X86INTRIN_H
-#error "Never use directly; include instead."
-#endif
-
-#ifndef __MWAITXINTRIN_H
-#define __MWAITXINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mwaitx")))
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_monitorx(void * __p, unsigned __extensions, unsigned __hints)
-{
- __builtin_ia32_monitorx(__p, __extensions, __hints);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock)
-{
- __builtin_ia32_mwaitx(__extensions, __hints, __clock);
-}
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __MWAITXINTRIN_H */
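
_mm_monitorx arms a monitor on an address range and _mm_mwaitx sleeps until that range is written (or an optional timeout expires). A minimal sketch of the usual wait loop, assuming an AMD CPU with MONITORX/MWAITX (e.g. clang -mmwaitx):

```c
/* Minimal sketch, assuming MONITORX/MWAITX support (e.g. clang -mmwaitx).
   The re-check after arming catches a write that landed before the monitor
   was set up. */
#include <x86intrin.h>

volatile int flag;

void wait_for_flag(void) {
  while (!flag) {
    _mm_monitorx((void *)&flag, 0, 0); /* arm monitor on flag's cache line */
    if (!flag)
      _mm_mwaitx(0, 0, 0);             /* sleep until the line is written */
  }
}
```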
diff --git a/third_party/clang/lib/clang/16.0.0/include/rdseedintrin.h b/third_party/clang/lib/clang/16.0.0/include/rdseedintrin.h
deleted file mode 100644
index 405bc2451e..0000000000
--- a/third_party/clang/lib/clang/16.0.0/include/rdseedintrin.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
-#error "Never use directly; include instead."
-#endif
-
-#ifndef __RDSEEDINTRIN_H
-#define __RDSEEDINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed")))
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_rdseed16_step(unsigned short *__p)
-{
- return (int) __builtin_ia32_rdseed16_step(__p);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_rdseed32_step(unsigned int *__p)
-{
- return (int) __builtin_ia32_rdseed32_step(__p);
-}
-
-#ifdef __x86_64__
-static __inline__ int __DEFAULT_FN_ATTRS
-_rdseed64_step(unsigned long long *__p)
-{
- return (int) __builtin_ia32_rdseed64_step(__p);
-}
-#endif
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __RDSEEDINTRIN_H */
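
Unlike RDRAND's DRBG output, RDSEED can run dry, which is why each step intrinsic returns a success flag. A minimal retry loop, assuming RDSEED hardware (e.g. clang -mrdseed):

```c
/* Minimal sketch, assuming RDSEED support (e.g. clang -mrdseed):
   _rdseed32_step returns 1 on success and 0 when the entropy conditioner
   is temporarily exhausted, so callers retry. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  unsigned int seed;
  while (!_rdseed32_step(&seed))
    ; /* spin until fresh entropy is available */
  printf("seed=%#x\n", seed);
  return 0;
}
```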
diff --git a/third_party/clang/lib/clang/16.0.0/include/shaintrin.h b/third_party/clang/lib/clang/16.0.0/include/shaintrin.h
deleted file mode 100644
index 08b1fb1dc1..0000000000
--- a/third_party/clang/lib/clang/16.0.0/include/shaintrin.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*===---- shaintrin.h - SHA intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use directly; include instead."
-#endif
-
-#ifndef __SHAINTRIN_H
-#define __SHAINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha"), __min_vector_width__(128)))
-
-#define _mm_sha1rnds4_epu32(V1, V2, M) \
- __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M))
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)
-{
- return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_sha256msg2((__v4si)__X, (__v4si)__Y);
-}
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __SHAINTRIN_H */
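
A minimal sketch, assuming SHA extensions plus SSSE3 (e.g. clang -msha -mssse3), of the SHA-256 message-schedule helpers above; a full implementation chains this with _mm_sha256rnds2_epu32 across all rounds:

```c
/* Minimal sketch, assuming SHA and SSSE3 support (e.g. clang -msha -mssse3).
   One SHA-256 message-schedule step: msg1 applies the sigma0 expansion to
   w0..w1, the alignr injects the w[i-7] words, and msg2 folds in the sigma1
   terms from the most recent block. */
#include <immintrin.h>

__m128i sha256_schedule_step(__m128i w0, __m128i w1, __m128i w2, __m128i w3) {
  __m128i t = _mm_sha256msg1_epu32(w0, w1);
  t = _mm_add_epi32(t, _mm_alignr_epi8(w3, w2, 4));
  return _mm_sha256msg2_epu32(t, w3);
}
```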
diff --git a/third_party/clang/lib/clang/16.0.0/include/xsavecintrin.h b/third_party/clang/lib/clang/16.0.0/include/xsavecintrin.h
deleted file mode 100644
index 5524947fa9..0000000000
--- a/third_party/clang/lib/clang/16.0.0/include/xsavecintrin.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*===---- xsavecintrin.h - XSAVEC intrinsic --------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#error "Never use directly; include instead."
-#endif
-
-#ifndef __XSAVECINTRIN_H
-#define __XSAVECINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec")))
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_xsavec(void *__p, unsigned long long __m) {
- __builtin_ia32_xsavec(__p, __m);
-}
-
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS
-_xsavec64(void *__p, unsigned long long __m) {
- __builtin_ia32_xsavec64(__p, __m);
-}
-#endif
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif
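
_xsavec stores only the requested, in-use state components in the compacted format. A minimal sketch, assuming XSAVEC support (e.g. clang -mxsavec); the fixed area size is illustrative, since real code sizes it from CPUID leaf 0Dh:

```c
/* Minimal sketch, assuming XSAVEC support (e.g. clang -mxsavec). Saves the
   x87 and SSE state components (bits 0 and 1 of the requested-feature
   bitmap) into a 64-byte-aligned area in compacted format. */
#include <immintrin.h>
#include <stdalign.h>

static alignas(64) char xsave_area[4096]; /* illustrative size */

void save_fp_sse_state(void) {
  _xsavec(xsave_area, 0x3); /* RFBM = x87 | SSE */
}
```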
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_builtin_vars.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_builtin_vars.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_builtin_vars.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_builtin_vars.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_cmath.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_cmath.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_cmath.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_cmath.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_complex_builtins.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_complex_builtins.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_complex_builtins.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_complex_builtins.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_device_functions.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_device_functions.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_device_functions.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_device_functions.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_intrinsics.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_intrinsics.h
similarity index 76%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_intrinsics.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_intrinsics.h
index b87413e12a..3c3948863c 100644
--- a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_intrinsics.h
+++ b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_intrinsics.h
@@ -513,6 +513,197 @@ __device__ inline cuuint32_t __nvvm_get_smem_pointer(void *__ptr) {
return __nv_cvta_generic_to_shared_impl(__ptr);
}
} // extern "C"
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
+__device__ inline unsigned __reduce_add_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_add(__mask, __value);
+}
+__device__ inline unsigned __reduce_min_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_umin(__mask, __value);
+}
+__device__ inline unsigned __reduce_max_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_umax(__mask, __value);
+}
+__device__ inline int __reduce_min_sync(unsigned __mask, int __value) {
+ return __nvvm_redux_sync_min(__mask, __value);
+}
+__device__ inline int __reduce_max_sync(unsigned __mask, int __value) {
+ return __nvvm_redux_sync_max(__mask, __value);
+}
+__device__ inline unsigned __reduce_or_sync(unsigned __mask, unsigned __value) {
+ return __nvvm_redux_sync_or(__mask, __value);
+}
+__device__ inline unsigned __reduce_and_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_and(__mask, __value);
+}
+__device__ inline unsigned __reduce_xor_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_xor(__mask, __value);
+}
+
+__device__ inline void __nv_memcpy_async_shared_global_4(void *__dst,
+ const void *__src,
+ unsigned __src_size) {
+ __nvvm_cp_async_ca_shared_global_4(
+ (void __attribute__((address_space(3))) *)__dst,
+ (const void __attribute__((address_space(1))) *)__src, __src_size);
+}
+__device__ inline void __nv_memcpy_async_shared_global_8(void *__dst,
+ const void *__src,
+ unsigned __src_size) {
+ __nvvm_cp_async_ca_shared_global_8(
+ (void __attribute__((address_space(3))) *)__dst,
+ (const void __attribute__((address_space(1))) *)__src, __src_size);
+}
+__device__ inline void __nv_memcpy_async_shared_global_16(void *__dst,
+ const void *__src,
+ unsigned __src_size) {
+ __nvvm_cp_async_ca_shared_global_16(
+ (void __attribute__((address_space(3))) *)__dst,
+ (const void __attribute__((address_space(1))) *)__src, __src_size);
+}
+
+__device__ inline void *
+__nv_associate_access_property(const void *__ptr, unsigned long long __prop) {
+  // TODO: it appears to provide the compiler with some sort of hint. We do
+  // not know exactly what it is supposed to do. However, the CUDA headers
+  // suggest that just passing __ptr through should not affect correctness;
+  // they do so on pre-sm80 GPUs, where this builtin is not available.
+ return (void*)__ptr;
+}
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
+__device__ inline unsigned __isCtaShared(const void *ptr) {
+ return __isShared(ptr);
+}
+
+__device__ inline unsigned __isClusterShared(const void *__ptr) {
+ return __nvvm_isspacep_shared_cluster(__ptr);
+}
+
+__device__ inline void *__cluster_map_shared_rank(const void *__ptr,
+ unsigned __rank) {
+ return __nvvm_mapa((void *)__ptr, __rank);
+}
+
+__device__ inline unsigned __cluster_query_shared_rank(const void *__ptr) {
+ return __nvvm_getctarank((void *)__ptr);
+}
+
+__device__ inline uint2
+__cluster_map_shared_multicast(const void *__ptr,
+ unsigned int __cluster_cta_mask) {
+ return make_uint2((unsigned)__cvta_generic_to_shared(__ptr),
+ __cluster_cta_mask);
+}
+
+__device__ inline unsigned __clusterDimIsSpecified() {
+ return __nvvm_is_explicit_cluster();
+}
+
+__device__ inline dim3 __clusterDim() {
+ return dim3(__nvvm_read_ptx_sreg_cluster_nctaid_x(),
+ __nvvm_read_ptx_sreg_cluster_nctaid_y(),
+ __nvvm_read_ptx_sreg_cluster_nctaid_z());
+}
+
+__device__ inline dim3 __clusterRelativeBlockIdx() {
+ return dim3(__nvvm_read_ptx_sreg_cluster_ctaid_x(),
+ __nvvm_read_ptx_sreg_cluster_ctaid_y(),
+ __nvvm_read_ptx_sreg_cluster_ctaid_z());
+}
+
+__device__ inline dim3 __clusterGridDimInClusters() {
+ return dim3(__nvvm_read_ptx_sreg_nclusterid_x(),
+ __nvvm_read_ptx_sreg_nclusterid_y(),
+ __nvvm_read_ptx_sreg_nclusterid_z());
+}
+
+__device__ inline dim3 __clusterIdx() {
+ return dim3(__nvvm_read_ptx_sreg_clusterid_x(),
+ __nvvm_read_ptx_sreg_clusterid_y(),
+ __nvvm_read_ptx_sreg_clusterid_z());
+}
+
+__device__ inline unsigned __clusterRelativeBlockRank() {
+ return __nvvm_read_ptx_sreg_cluster_ctarank();
+}
+
+__device__ inline unsigned __clusterSizeInBlocks() {
+ return __nvvm_read_ptx_sreg_cluster_nctarank();
+}
+
+__device__ inline void __cluster_barrier_arrive() {
+ __nvvm_barrier_cluster_arrive();
+}
+
+__device__ inline void __cluster_barrier_arrive_relaxed() {
+ __nvvm_barrier_cluster_arrive_relaxed();
+}
+
+__device__ inline void __cluster_barrier_wait() {
+ __nvvm_barrier_cluster_wait();
+}
+
+__device__ inline void __threadfence_cluster() { __nvvm_fence_sc_cluster(); }
+
+__device__ inline float2 atomicAdd(float2 *__ptr, float2 __val) {
+ float2 __ret;
+ __asm__("atom.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+ : "=f"(__ret.x), "=f"(__ret.y)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+ return __ret;
+}
+
+__device__ inline float2 atomicAdd_block(float2 *__ptr, float2 __val) {
+ float2 __ret;
+ __asm__("atom.cta.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+ : "=f"(__ret.x), "=f"(__ret.y)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+ return __ret;
+}
+
+__device__ inline float2 atomicAdd_system(float2 *__ptr, float2 __val) {
+ float2 __ret;
+ __asm__("atom.sys.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+ : "=f"(__ret.x), "=f"(__ret.y)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+ return __ret;
+}
+
+__device__ inline float4 atomicAdd(float4 *__ptr, float4 __val) {
+ float4 __ret;
+ __asm__("atom.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+ : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
+ return __ret;
+}
+
+__device__ inline float4 atomicAdd_block(float4 *__ptr, float4 __val) {
+ float4 __ret;
+ __asm__(
+ "atom.cta.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+ : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
+ return __ret;
+}
+
+__device__ inline float4 atomicAdd_system(float4 *__ptr, float4 __val) {
+ float4 __ret;
+ __asm__(
+ "atom.sys.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+ : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w)
+ :);
+ return __ret;
+}
+
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
#endif // CUDA_VERSION >= 11000
#endif // defined(__CLANG_CUDA_INTRINSICS_H__)
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_libdevice_declares.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_libdevice_declares.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_libdevice_declares.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_libdevice_declares.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_math.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_math.h
similarity index 99%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_math.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_math.h
index e447590393..6166317f8f 100644
--- a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_math.h
+++ b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_math.h
@@ -36,7 +36,7 @@
// because the OpenMP overlay requires constexpr functions here but prior to
// c++14 void return functions could not be constexpr.
#pragma push_macro("__DEVICE_VOID__")
-#ifdef __OPENMP_NVPTX__ && defined(__cplusplus) && __cplusplus < 201402L
+#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L
#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))
#else
#define __DEVICE_VOID__ __DEVICE__
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_math_forward_declares.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_math_forward_declares.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_math_forward_declares.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_math_forward_declares.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_runtime_wrapper.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_runtime_wrapper.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_runtime_wrapper.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_runtime_wrapper.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_cuda_texture_intrinsics.h b/third_party/clang/lib/clang/17.0.1/include/__clang_cuda_texture_intrinsics.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_cuda_texture_intrinsics.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_cuda_texture_intrinsics.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_cmath.h b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_cmath.h
similarity index 99%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_hip_cmath.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_hip_cmath.h
index d488db0a94..b52d6b7816 100644
--- a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_cmath.h
+++ b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_cmath.h
@@ -171,7 +171,7 @@ __DEVICE__ __CONSTEXPR__ bool signbit(double __x) { return ::__signbit(__x); }
// Other functions.
__DEVICE__ __CONSTEXPR__ _Float16 fma(_Float16 __x, _Float16 __y,
_Float16 __z) {
- return __ocml_fma_f16(__x, __y, __z);
+ return __builtin_fmaf16(__x, __y, __z);
}
__DEVICE__ __CONSTEXPR__ _Float16 pow(_Float16 __base, int __iexp) {
return __ocml_pown_f16(__base, __iexp);
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_libdevice_declares.h b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_libdevice_declares.h
similarity index 95%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_hip_libdevice_declares.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_hip_libdevice_declares.h
index be25f4b4a0..f15198b3d9 100644
--- a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_libdevice_declares.h
+++ b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_libdevice_declares.h
@@ -10,6 +10,10 @@
#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
#define __CLANG_HIP_LIBDEVICE_DECLARES_H__
+#if !defined(__HIPCC_RTC__) && __has_include("hip/hip_version.h")
+#include "hip/hip_version.h"
+#endif // __has_include("hip/hip_version.h")
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -137,23 +141,6 @@ __device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float);
-
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_cos_f32(float __x) {
- return __builtin_amdgcn_cosf(__x);
-}
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_rcp_f32(float __x) {
- return __builtin_amdgcn_rcpf(__x);
-}
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_rsq_f32(float __x) {
- return __builtin_amdgcn_rsqf(__x);
-}
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_sin_f32(float __x) {
- return __builtin_amdgcn_sinf(__x);
-}
// END INTRINSICS
// END FLOAT
@@ -277,15 +264,6 @@ __device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double,
__device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double,
double);
-__device__ inline __attribute__((const)) double
-__llvm_amdgcn_rcp_f64(double __x) {
- return __builtin_amdgcn_rcp(__x);
-}
-__device__ inline __attribute__((const)) double
-__llvm_amdgcn_rsq_f64(double __x) {
- return __builtin_amdgcn_rsq(__x);
-}
-
__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16);
__device__ _Float16 __ocml_cos_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_cvtrtn_f16_f32(float);
@@ -305,7 +283,6 @@ __device__ __attribute__((const)) int __ocml_isnan_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16);
-__device__ __attribute__((const)) _Float16 __llvm_amdgcn_rcp_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16);
__device__ _Float16 __ocml_sin_f16(_Float16);
@@ -316,8 +293,15 @@ __device__ __attribute__((pure)) _Float16 __ocml_pown_f16(_Float16, int);
typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
typedef short __2i16 __attribute__((ext_vector_type(2)));
+// We need to match C99's bool and get an i1 in the IR.
+#ifdef __cplusplus
+typedef bool __ockl_bool;
+#else
+typedef _Bool __ockl_bool;
+#endif
+
__device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b,
- float c, bool s);
+ float c, __ockl_bool s);
__device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16);
__device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16);
__device__ __2f16 __ocml_cos_2f16(__2f16);
@@ -332,11 +316,29 @@ __device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);
+
+#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 560
+#define __DEPRECATED_SINCE_HIP_560(X) __attribute__((deprecated(X)))
+#else
+#define __DEPRECATED_SINCE_HIP_560(X)
+#endif
+
+// Deprecated, should be removed when rocm releases using it are no longer
+// relevant.
+__DEPRECATED_SINCE_HIP_560("use ((_Float16)1.0) / ")
+__device__ inline _Float16 __llvm_amdgcn_rcp_f16(_Float16 x) {
+ return ((_Float16)1.0f) / x;
+}
+
+__DEPRECATED_SINCE_HIP_560("use ((__2f16)1.0) / ")
__device__ inline __2f16
-__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
+__llvm_amdgcn_rcp_2f16(__2f16 __x)
{
- return (__2f16)(__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y));
+ return ((__2f16)1.0f) / __x;
}
+
+#undef __DEPRECATED_SINCE_HIP_560
+
__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
__device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
__device__ __2f16 __ocml_sin_2f16(__2f16);
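
The deprecated rcp shims above lean on clang's ext_vector_type arithmetic, where a scalar cast splats across lanes before the lane-wise divide. A minimal host-compilable sketch of the same construct (the float typedef is illustrative):

```c
/* Minimal sketch using clang's ext_vector_type extension: the scalar cast
   splats 1.0f across both lanes, so the divide computes a lane-wise
   reciprocal, exactly the shape of the deprecated shims above. */
typedef float __2f32 __attribute__((ext_vector_type(2)));

__2f32 rcp2(__2f32 __x) {
  return ((__2f32)1.0f) / __x;
}
```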
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_math.h b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_math.h
similarity index 88%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_hip_math.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_hip_math.h
index 537dd0fca8..a47dda3327 100644
--- a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_math.h
+++ b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_math.h
@@ -182,10 +182,10 @@ __DEVICE__
float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
__DEVICE__
-float ceilf(float __x) { return __ocml_ceil_f32(__x); }
+float ceilf(float __x) { return __builtin_ceilf(__x); }
__DEVICE__
-float copysignf(float __x, float __y) { return __ocml_copysign_f32(__x, __y); }
+float copysignf(float __x, float __y) { return __builtin_copysignf(__x, __y); }
__DEVICE__
float cosf(float __x) { return __ocml_cos_f32(__x); }
@@ -221,10 +221,10 @@ __DEVICE__
float exp10f(float __x) { return __ocml_exp10_f32(__x); }
__DEVICE__
-float exp2f(float __x) { return __ocml_exp2_f32(__x); }
+float exp2f(float __x) { return __builtin_exp2f(__x); }
__DEVICE__
-float expf(float __x) { return __ocml_exp_f32(__x); }
+float expf(float __x) { return __builtin_expf(__x); }
__DEVICE__
float expm1f(float __x) { return __ocml_expm1_f32(__x); }
@@ -239,33 +239,25 @@ __DEVICE__
float fdividef(float __x, float __y) { return __x / __y; }
__DEVICE__
-float floorf(float __x) { return __ocml_floor_f32(__x); }
+float floorf(float __x) { return __builtin_floorf(__x); }
__DEVICE__
float fmaf(float __x, float __y, float __z) {
- return __ocml_fma_f32(__x, __y, __z);
+ return __builtin_fmaf(__x, __y, __z);
}
__DEVICE__
-float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
+float fmaxf(float __x, float __y) { return __builtin_fmaxf(__x, __y); }
__DEVICE__
-float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
+float fminf(float __x, float __y) { return __builtin_fminf(__x, __y); }
__DEVICE__
float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
__DEVICE__
float frexpf(float __x, int *__nptr) {
- int __tmp;
-#ifdef __OPENMP_AMDGCN__
-#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
-#endif
- float __r =
- __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
- *__nptr = __tmp;
-
- return __r;
+ return __builtin_frexpf(__x, __nptr);
}
__DEVICE__
@@ -275,13 +267,13 @@ __DEVICE__
int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
__DEVICE__
-__RETURN_TYPE __finitef(float __x) { return __ocml_isfinite_f32(__x); }
+__RETURN_TYPE __finitef(float __x) { return __builtin_isfinite(__x); }
__DEVICE__
-__RETURN_TYPE __isinff(float __x) { return __ocml_isinf_f32(__x); }
+__RETURN_TYPE __isinff(float __x) { return __builtin_isinf(__x); }
__DEVICE__
-__RETURN_TYPE __isnanf(float __x) { return __ocml_isnan_f32(__x); }
+__RETURN_TYPE __isnanf(float __x) { return __builtin_isnan(__x); }
__DEVICE__
float j0f(float __x) { return __ocml_j0_f32(__x); }
@@ -311,37 +303,37 @@ float jnf(int __n, float __x) { // TODO: we could use Ahmes multiplication
}
__DEVICE__
-float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
+float ldexpf(float __x, int __e) { return __builtin_amdgcn_ldexpf(__x, __e); }
__DEVICE__
float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
__DEVICE__
-long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
+long long int llrintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
-long long int llroundf(float __x) { return __ocml_round_f32(__x); }
+long long int llroundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
-float log10f(float __x) { return __ocml_log10_f32(__x); }
+float log10f(float __x) { return __builtin_log10f(__x); }
__DEVICE__
float log1pf(float __x) { return __ocml_log1p_f32(__x); }
__DEVICE__
-float log2f(float __x) { return __ocml_log2_f32(__x); }
+float log2f(float __x) { return __builtin_log2f(__x); }
__DEVICE__
float logbf(float __x) { return __ocml_logb_f32(__x); }
__DEVICE__
-float logf(float __x) { return __ocml_log_f32(__x); }
+float logf(float __x) { return __builtin_logf(__x); }
__DEVICE__
-long int lrintf(float __x) { return __ocml_rint_f32(__x); }
+long int lrintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
-long int lroundf(float __x) { return __ocml_round_f32(__x); }
+long int lroundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
float modff(float __x, float *__iptr) {
@@ -377,7 +369,7 @@ float nanf(const char *__tagp __attribute__((nonnull))) {
}
__DEVICE__
-float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
+float nearbyintf(float __x) { return __builtin_nearbyintf(__x); }
__DEVICE__
float nextafterf(float __x, float __y) {
@@ -443,7 +435,7 @@ __DEVICE__
float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); }
__DEVICE__
-float rintf(float __x) { return __ocml_rint_f32(__x); }
+float rintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
float rnorm3df(float __x, float __y, float __z) {
@@ -468,22 +460,22 @@ float rnormf(int __dim,
}
__DEVICE__
-float roundf(float __x) { return __ocml_round_f32(__x); }
+float roundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
__DEVICE__
float scalblnf(float __x, long int __n) {
- return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
+ return (__n < INT_MAX) ? __builtin_amdgcn_ldexpf(__x, __n)
: __ocml_scalb_f32(__x, __n);
}
__DEVICE__
-float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
+float scalbnf(float __x, int __n) { return __builtin_amdgcn_ldexpf(__x, __n); }
__DEVICE__
-__RETURN_TYPE __signbitf(float __x) { return __ocml_signbit_f32(__x); }
+__RETURN_TYPE __signbitf(float __x) { return __builtin_signbitf(__x); }
__DEVICE__
void sincosf(float __x, float *__sinptr, float *__cosptr) {
@@ -529,7 +521,7 @@ __DEVICE__
float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
__DEVICE__
-float truncf(float __x) { return __ocml_trunc_f32(__x); }
+float truncf(float __x) { return __builtin_truncf(__x); }
__DEVICE__
float y0f(float __x) { return __ocml_y0_f32(__x); }
@@ -621,7 +613,7 @@ float __fmaf_rz(float __x, float __y, float __z) {
#else
__DEVICE__
float __fmaf_rn(float __x, float __y, float __z) {
- return __ocml_fma_f32(__x, __y, __z);
+ return __builtin_fmaf(__x, __y, __z);
}
#endif
@@ -654,7 +646,7 @@ float __frcp_rn(float __x) { return 1.0f / __x; }
#endif
__DEVICE__
-float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
+float __frsqrt_rn(float __x) { return __builtin_amdgcn_rsqf(__x); }
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
@@ -739,11 +731,11 @@ __DEVICE__
double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
__DEVICE__
-double ceil(double __x) { return __ocml_ceil_f64(__x); }
+double ceil(double __x) { return __builtin_ceil(__x); }
__DEVICE__
double copysign(double __x, double __y) {
- return __ocml_copysign_f64(__x, __y);
+ return __builtin_copysign(__x, __y);
}
__DEVICE__
@@ -795,32 +787,25 @@ __DEVICE__
double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
__DEVICE__
-double floor(double __x) { return __ocml_floor_f64(__x); }
+double floor(double __x) { return __builtin_floor(__x); }
__DEVICE__
double fma(double __x, double __y, double __z) {
- return __ocml_fma_f64(__x, __y, __z);
+ return __builtin_fma(__x, __y, __z);
}
__DEVICE__
-double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
+double fmax(double __x, double __y) { return __builtin_fmax(__x, __y); }
__DEVICE__
-double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
+double fmin(double __x, double __y) { return __builtin_fmin(__x, __y); }
__DEVICE__
double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
__DEVICE__
double frexp(double __x, int *__nptr) {
- int __tmp;
-#ifdef __OPENMP_AMDGCN__
-#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
-#endif
- double __r =
- __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
- *__nptr = __tmp;
- return __r;
+ return __builtin_frexp(__x, __nptr);
}
__DEVICE__
@@ -830,13 +815,13 @@ __DEVICE__
int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
__DEVICE__
-__RETURN_TYPE __finite(double __x) { return __ocml_isfinite_f64(__x); }
+__RETURN_TYPE __finite(double __x) { return __builtin_isfinite(__x); }
__DEVICE__
-__RETURN_TYPE __isinf(double __x) { return __ocml_isinf_f64(__x); }
+__RETURN_TYPE __isinf(double __x) { return __builtin_isinf(__x); }
__DEVICE__
-__RETURN_TYPE __isnan(double __x) { return __ocml_isnan_f64(__x); }
+__RETURN_TYPE __isnan(double __x) { return __builtin_isnan(__x); }
__DEVICE__
double j0(double __x) { return __ocml_j0_f64(__x); }
@@ -866,16 +851,16 @@ double jn(int __n, double __x) { // TODO: we could use Ahmes multiplication
}
__DEVICE__
-double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
+double ldexp(double __x, int __e) { return __builtin_amdgcn_ldexp(__x, __e); }
__DEVICE__
double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
__DEVICE__
-long long int llrint(double __x) { return __ocml_rint_f64(__x); }
+long long int llrint(double __x) { return __builtin_rint(__x); }
__DEVICE__
-long long int llround(double __x) { return __ocml_round_f64(__x); }
+long long int llround(double __x) { return __builtin_round(__x); }
__DEVICE__
double log(double __x) { return __ocml_log_f64(__x); }
@@ -893,10 +878,10 @@ __DEVICE__
double logb(double __x) { return __ocml_logb_f64(__x); }
__DEVICE__
-long int lrint(double __x) { return __ocml_rint_f64(__x); }
+long int lrint(double __x) { return __builtin_rint(__x); }
__DEVICE__
-long int lround(double __x) { return __ocml_round_f64(__x); }
+long int lround(double __x) { return __builtin_round(__x); }
__DEVICE__
double modf(double __x, double *__iptr) {
@@ -940,7 +925,7 @@ double nan(const char *__tagp) {
}
__DEVICE__
-double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
+double nearbyint(double __x) { return __builtin_nearbyint(__x); }
__DEVICE__
double nextafter(double __x, double __y) {
@@ -1006,7 +991,7 @@ __DEVICE__
double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); }
__DEVICE__
-double rint(double __x) { return __ocml_rint_f64(__x); }
+double rint(double __x) { return __builtin_rint(__x); }
__DEVICE__
double rnorm(int __dim,
@@ -1031,21 +1016,21 @@ double rnorm4d(double __x, double __y, double __z, double __w) {
}
__DEVICE__
-double round(double __x) { return __ocml_round_f64(__x); }
+double round(double __x) { return __builtin_round(__x); }
__DEVICE__
double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
__DEVICE__
double scalbln(double __x, long int __n) {
- return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
+ return (__n < INT_MAX) ? __builtin_amdgcn_ldexp(__x, __n)
: __ocml_scalb_f64(__x, __n);
}
__DEVICE__
-double scalbn(double __x, int __n) { return __ocml_scalbn_f64(__x, __n); }
+double scalbn(double __x, int __n) { return __builtin_amdgcn_ldexp(__x, __n); }
__DEVICE__
-__RETURN_TYPE __signbit(double __x) { return __ocml_signbit_f64(__x); }
+__RETURN_TYPE __signbit(double __x) { return __builtin_signbit(__x); }
__DEVICE__
double sin(double __x) { return __ocml_sin_f64(__x); }
@@ -1091,7 +1076,7 @@ __DEVICE__
double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
__DEVICE__
-double trunc(double __x) { return __ocml_trunc_f64(__x); }
+double trunc(double __x) { return __builtin_trunc(__x); }
__DEVICE__
double y0(double __x) { return __ocml_y0_f64(__x); }
@@ -1258,7 +1243,7 @@ double __fma_rz(double __x, double __y, double __z) {
#else
__DEVICE__
double __fma_rn(double __x, double __y, double __z) {
- return __ocml_fma_f64(__x, __y, __z);
+ return __builtin_fma(__x, __y, __z);
}
#endif
// END INTRINSICS
@@ -1290,16 +1275,16 @@ __DEVICE__ int max(int __arg1, int __arg2) {
}
__DEVICE__
-float max(float __x, float __y) { return fmaxf(__x, __y); }
+float max(float __x, float __y) { return __builtin_fmaxf(__x, __y); }
__DEVICE__
-double max(double __x, double __y) { return fmax(__x, __y); }
+double max(double __x, double __y) { return __builtin_fmax(__x, __y); }
__DEVICE__
-float min(float __x, float __y) { return fminf(__x, __y); }
+float min(float __x, float __y) { return __builtin_fminf(__x, __y); }
__DEVICE__
-double min(double __x, double __y) { return fmin(__x, __y); }
+double min(double __x, double __y) { return __builtin_fmin(__x, __y); }
#if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
__host__ inline static int min(int __arg1, int __arg2) {
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_runtime_wrapper.h b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_runtime_wrapper.h
similarity index 85%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_hip_runtime_wrapper.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_hip_runtime_wrapper.h
index 0508731de1..e8817073ef 100644
--- a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_runtime_wrapper.h
+++ b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_runtime_wrapper.h
@@ -80,12 +80,25 @@ extern "C" {
#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 405
extern "C" __device__ unsigned long long __ockl_dm_alloc(unsigned long long __size);
extern "C" __device__ void __ockl_dm_dealloc(unsigned long long __addr);
+#if __has_feature(address_sanitizer)
+extern "C" __device__ unsigned long long __asan_malloc_impl(unsigned long long __size, unsigned long long __pc);
+extern "C" __device__ void __asan_free_impl(unsigned long long __addr, unsigned long long __pc);
+__attribute__((noinline, weak)) __device__ void *malloc(__hip_size_t __size) {
+ unsigned long long __pc = (unsigned long long)__builtin_return_address(0);
+ return (void *)__asan_malloc_impl(__size, __pc);
+}
+__attribute__((noinline, weak)) __device__ void free(void *__ptr) {
+ unsigned long long __pc = (unsigned long long)__builtin_return_address(0);
+ __asan_free_impl((unsigned long long)__ptr, __pc);
+}
+#else
__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
return (void *) __ockl_dm_alloc(__size);
}
__attribute__((weak)) inline __device__ void free(void *__ptr) {
__ockl_dm_dealloc((unsigned long long)__ptr);
}
+#endif // __has_feature(address_sanitizer)
#else // HIP version check
#if __HIP_ENABLE_DEVICE_MALLOC__
__device__ void *__hip_malloc(__hip_size_t __size);
diff --git a/third_party/clang/lib/clang/16.0.0/include/__clang_hip_stdlib.h b/third_party/clang/lib/clang/17.0.1/include/__clang_hip_stdlib.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__clang_hip_stdlib.h
rename to third_party/clang/lib/clang/17.0.1/include/__clang_hip_stdlib.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/__stddef_max_align_t.h b/third_party/clang/lib/clang/17.0.1/include/__stddef_max_align_t.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__stddef_max_align_t.h
rename to third_party/clang/lib/clang/17.0.1/include/__stddef_max_align_t.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/__wmmintrin_aes.h b/third_party/clang/lib/clang/17.0.1/include/__wmmintrin_aes.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__wmmintrin_aes.h
rename to third_party/clang/lib/clang/17.0.1/include/__wmmintrin_aes.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/__wmmintrin_pclmul.h b/third_party/clang/lib/clang/17.0.1/include/__wmmintrin_pclmul.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/__wmmintrin_pclmul.h
rename to third_party/clang/lib/clang/17.0.1/include/__wmmintrin_pclmul.h
diff --git a/third_party/clang/lib/clang/17.0.1/include/adxintrin.h b/third_party/clang/lib/clang/17.0.1/include/adxintrin.h
new file mode 100644
index 0000000000..20f6211e56
--- /dev/null
+++ b/third_party/clang/lib/clang/17.0.1/include/adxintrin.h
@@ -0,0 +1,227 @@
+/*===---- adxintrin.h - ADX intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use directly; include instead."
+#endif
+
+#ifndef __ADXINTRIN_H
+#define __ADXINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+/* Use C++ inline semantics in C++, GNU inline for C mode. */
+#if defined(__cplusplus)
+#define __INLINE __inline
+#else
+#define __INLINE static __inline
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Intrinsics that are available only if __ADX__ is defined. */
+
+/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store32(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADCX instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 32-bit unsigned addend.
+/// \param __y
+/// A 32-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char
+ __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+ _addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
+ unsigned int *__p) {
+ return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
+}
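+
+/* Example (illustrative; not part of the upstream header): 64-bit addition
+ * on 32-bit limbs, chaining the carry-out of the low-limb add into the
+ * high-limb add. The variable names here are hypothetical.
+ *
+ *   unsigned int __lo, __hi;
+ *   unsigned char __c = _addcarryx_u32(0, __a_lo, __b_lo, &__lo);
+ *   (void)_addcarryx_u32(__c, __a_hi, __b_hi, &__hi);
+ */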
+
+#ifdef __x86_64__
+/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store64(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADCX instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 64-bit unsigned addend.
+/// \param __y
+/// A 64-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char
+ __attribute__((__always_inline__, __nodebug__, __target__("adx")))
+ _addcarryx_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p) {
+ return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
+}
+#endif
+
+/* Intrinsics that are also available if __ADX__ is undefined. */
+
+/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store32(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADC instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 32-bit unsigned addend.
+/// \param __y
+/// A 32-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS _addcarry_u32(unsigned char __cf,
+ unsigned int __x,
+ unsigned int __y,
+ unsigned int *__p) {
+ return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store64(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADC instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 64-bit unsigned addend.
+/// \param __y
+/// A 64-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS
+_addcarry_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p) {
+ return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
+}
+#endif
+
+/// Adds unsigned 32-bit integer \a __y to 0 or 1 as indicated by the carry
+/// flag \a __cf, and subtracts the result from unsigned 32-bit integer
+/// \a __x. Stores the unsigned 32-bit difference in the memory at \a __p,
+/// and returns the 8-bit carry-out (carry or overflow flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store32(__p, __x - (__y + temp))
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SBB instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// The 32-bit unsigned minuend.
+/// \param __y
+/// The 32-bit unsigned subtrahend.
+/// \param __p
+/// Pointer to memory for storing the difference.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS _subborrow_u32(unsigned char __cf,
+ unsigned int __x,
+ unsigned int __y,
+ unsigned int *__p) {
+ return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);
+}
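+
+/* Example (illustrative; not part of the upstream header): 64-bit
+ * subtraction on 32-bit limbs; the borrow-out of the low limb feeds the
+ * high limb. The variable names here are hypothetical.
+ *
+ *   unsigned int __lo, __hi;
+ *   unsigned char __bw = _subborrow_u32(0, __a_lo, __b_lo, &__lo);
+ *   (void)_subborrow_u32(__bw, __a_hi, __b_hi, &__hi);
+ */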
+
+#ifdef __x86_64__
+/// Adds unsigned 64-bit integer \a __y to 0 or 1 as indicated by the carry
+/// flag \a __cf, and subtracts the result from unsigned 64-bit integer
+/// \a __x. Stores the unsigned 64-bit difference in the memory at \a __p,
+/// and returns the 8-bit carry-out (carry or overflow flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store64(__p, __x - (__y + temp))
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SBB instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// The 64-bit unsigned minuend.
+/// \param __y
+/// The 64-bit unsigned subtrahend.
+/// \param __p
+/// Pointer to memory for storing the difference.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS
+_subborrow_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p) {
+ return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);
+}
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __ADXINTRIN_H */
diff --git a/third_party/clang/lib/clang/16.0.0/include/altivec.h b/third_party/clang/lib/clang/17.0.1/include/altivec.h
similarity index 98%
rename from third_party/clang/lib/clang/16.0.0/include/altivec.h
rename to third_party/clang/lib/clang/17.0.1/include/altivec.h
index f50466ec96..c036f5ebba 100644
--- a/third_party/clang/lib/clang/16.0.0/include/altivec.h
+++ b/third_party/clang/lib/clang/17.0.1/include/altivec.h
@@ -3202,71 +3202,79 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
// the XL-compatible signatures are used for those functions.
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_ctf(__a, __b) \
- _Generic( \
- (__a), vector int \
- : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
- vector unsigned int \
- : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
- (__b)), \
- vector unsigned long long \
- : (vector float)(__builtin_vsx_xvcvuxdsp( \
- (vector unsigned long long)(__a)) * \
- (vector float)(vector unsigned)((0x7f - (__b)) << 23)), \
- vector signed long long \
- : (vector float)(__builtin_vsx_xvcvsxdsp( \
- (vector signed long long)(__a)) * \
- (vector float)(vector unsigned)((0x7f - (__b)) << 23)))
+ _Generic((__a), \
+ vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
+ ((__b)&0x1F)), \
+ vector unsigned int: (vector float)__builtin_altivec_vcfux( \
+ (vector unsigned int)(__a), ((__b)&0x1F)), \
+ vector unsigned long long: ( \
+ vector float)(__builtin_vsx_xvcvuxdsp( \
+ (vector unsigned long long)(__a)) * \
+ (vector float)(vector unsigned)((0x7f - \
+ ((__b)&0x1F)) \
+ << 23)), \
+ vector signed long long: ( \
+ vector float)(__builtin_vsx_xvcvsxdsp( \
+ (vector signed long long)(__a)) * \
+ (vector float)(vector unsigned)((0x7f - \
+ ((__b)&0x1F)) \
+ << 23)))
#else // __XL_COMPAT_ALTIVEC__
-#define vec_ctf(__a, __b) \
- _Generic( \
- (__a), vector int \
- : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
- vector unsigned int \
- : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
- (__b)), \
- vector unsigned long long \
- : (vector float)(__builtin_convertvector( \
- (vector unsigned long long)(__a), vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - \
- (__b)) \
- << 52)), \
- vector signed long long \
- : (vector float)(__builtin_convertvector((vector signed long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - \
- (__b)) \
- << 52)))
+#define vec_ctf(__a, __b) \
+ _Generic( \
+ (__a), \
+ vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
+ ((__b)&0x1F)), \
+ vector unsigned int: (vector float)__builtin_altivec_vcfux( \
+ (vector unsigned int)(__a), ((__b)&0x1F)), \
+ vector unsigned long long: ( \
+ vector float)(__builtin_convertvector( \
+ (vector unsigned long long)(__a), vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector signed long long: ( \
+ vector float)(__builtin_convertvector( \
+ (vector signed long long)(__a), vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)))
#endif // __XL_COMPAT_ALTIVEC__
#else
#define vec_ctf(__a, __b) \
- _Generic((__a), vector int \
- : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
- vector unsigned int \
- : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
- (__b)))
+ _Generic((__a), \
+ vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
+ ((__b)&0x1F)), \
+ vector unsigned int: (vector float)__builtin_altivec_vcfux( \
+ (vector unsigned int)(__a), ((__b)&0x1F)))
#endif
/* vec_ctd */
#ifdef __VSX__
#define vec_ctd(__a, __b) \
- _Generic((__a), vector signed int \
- : (vec_doublee((vector signed int)(__a)) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)), \
- vector unsigned int \
- : (vec_doublee((vector unsigned int)(__a)) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)), \
- vector unsigned long long \
- : (__builtin_convertvector((vector unsigned long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)), \
- vector signed long long \
- : (__builtin_convertvector((vector signed long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)))
+ _Generic((__a), \
+ vector signed int: ( \
+ vec_doublee((vector signed int)(__a)) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector unsigned int: ( \
+ vec_doublee((vector unsigned int)(__a)) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector unsigned long long: ( \
+ __builtin_convertvector((vector unsigned long long)(__a), \
+ vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector signed long long: ( \
+ __builtin_convertvector((vector signed long long)(__a), \
+ vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)))
#endif // __VSX__
/* vec_vcfsx */
@@ -3281,27 +3289,27 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_cts(__a, __b) \
- _Generic((__a), vector float \
- : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a), \
- (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector signed int)__builtin_altivec_vctsxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
(vector signed long long)__builtin_vsx_xvcvdpsxws(__ret); \
}))
#else // __XL_COMPAT_ALTIVEC__
#define vec_cts(__a, __b) \
- _Generic((__a), vector float \
- : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a), \
- (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector signed int)__builtin_altivec_vctsxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
(vector signed long long)__builtin_convertvector( \
__ret, vector signed long long); \
@@ -3320,27 +3328,27 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_ctu(__a, __b) \
- _Generic((__a), vector float \
- : (vector unsigned int)__builtin_altivec_vctuxs( \
- (vector float)(__a), (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector unsigned int)__builtin_altivec_vctuxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
(vector unsigned long long)__builtin_vsx_xvcvdpuxws(__ret); \
}))
#else // __XL_COMPAT_ALTIVEC__
#define vec_ctu(__a, __b) \
- _Generic((__a), vector float \
- : (vector unsigned int)__builtin_altivec_vctuxs( \
- (vector float)(__a), (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector unsigned int)__builtin_altivec_vctuxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
(vector unsigned long long)__builtin_convertvector( \
__ret, vector unsigned long long); \
@@ -3355,60 +3363,62 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#define vec_ctsl(__a, __b) \
- _Generic((__a), vector float \
- : __extension__({ \
- vector float __ret = \
- (vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
- __builtin_vsx_xvcvspsxds( \
- __builtin_vsx_xxsldwi(__ret, __ret, 1)); \
- }), \
- vector double \
- : __extension__({ \
- vector double __ret = \
- (vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
- << 52); \
- __builtin_convertvector(__ret, vector signed long long); \
- }))
+ _Generic( \
+ (__a), vector float \
+ : __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
+ __builtin_vsx_xvcvspsxds(__builtin_vsx_xxsldwi(__ret, __ret, 1)); \
+ }), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
+ << 52); \
+ __builtin_convertvector(__ret, vector signed long long); \
+ }))
/* vec_ctul */
#define vec_ctul(__a, __b) \
- _Generic((__a), vector float \
- : __extension__({ \
- vector float __ret = \
- (vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
- __builtin_vsx_xvcvspuxds( \
- __builtin_vsx_xxsldwi(__ret, __ret, 1)); \
- }), \
- vector double \
- : __extension__({ \
- vector double __ret = \
- (vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
- << 52); \
- __builtin_convertvector(__ret, vector unsigned long long); \
- }))
+ _Generic( \
+ (__a), vector float \
+ : __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
+ __builtin_vsx_xvcvspuxds(__builtin_vsx_xxsldwi(__ret, __ret, 1)); \
+ }), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
+ << 52); \
+ __builtin_convertvector(__ret, vector unsigned long long); \
+ }))
#endif
#else // __LITTLE_ENDIAN__
/* vec_ctsl */
#ifdef __VSX__
#define vec_ctsl(__a, __b) \
- _Generic((__a), vector float \
- : __extension__({ \
- vector float __ret = \
- (vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
- __builtin_vsx_xvcvspsxds(__ret); \
- }), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
+ __builtin_vsx_xvcvspsxds(__ret); \
+ }), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
__builtin_convertvector(__ret, vector signed long long); \
}))
@@ -3420,14 +3430,16 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
: __extension__({ \
vector float __ret = \
(vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) \
+ << 23); \
__builtin_vsx_xvcvspuxds(__ret); \
}), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
__builtin_convertvector(__ret, vector unsigned long long); \
}))
diff --git a/third_party/clang/lib/clang/16.0.0/include/ammintrin.h b/third_party/clang/lib/clang/17.0.1/include/ammintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/ammintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/ammintrin.h
diff --git a/third_party/clang/lib/clang/17.0.1/include/amxcomplexintrin.h b/third_party/clang/lib/clang/17.0.1/include/amxcomplexintrin.h
new file mode 100644
index 0000000000..84ef972fca
--- /dev/null
+++ b/third_party/clang/lib/clang/17.0.1/include/amxcomplexintrin.h
@@ -0,0 +1,169 @@
+/*===--------- amxcomplexintrin.h - AMXCOMPLEX intrinsics -*- C++ -*---------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===------------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use directly; include instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __AMX_COMPLEXINTRIN_H
+#define __AMX_COMPLEXINTRIN_H
+#ifdef __x86_64__
+
+#define __DEFAULT_FN_ATTRS_COMPLEX \
+ __attribute__((__always_inline__, __nodebug__, __target__("amx-complex")))
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles \a a and \a b is interpreted as a complex number
+/// with FP16 real part and FP16 imaginary part.
+/// Calculates the imaginary part of the result. For each possible combination
+/// of (row of \a a, column of \a b), it performs a set of multiplication
+/// and accumulations on all corresponding complex numbers (one from \a a
+/// and one from \a b). The imaginary part of the \a a element is multiplied
+/// with the real part of the corresponding \a b element, and the real part
+/// of the \a a element is multiplied with the imaginary part of the
+/// corresponding \a b elements. The two accumulated results are added, and
+/// then accumulated into the corresponding row and column of \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// void _tile_cmmimfp16ps(__tile dst, __tile a, __tile b);
+/// \endcode
+///
+/// \code{.operation}
+/// FOR m := 0 TO dst.rows - 1
+/// tmp := dst.row[m]
+/// FOR k := 0 TO (a.colsb / 4) - 1
+/// FOR n := 0 TO (dst.colsb / 4) - 1
+/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+1])
+/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+0])
+/// ENDFOR
+/// ENDFOR
+/// write_row_and_zero(dst, m, tmp, dst.colsb)
+/// ENDFOR
+/// zero_upper_rows(dst, dst.rows)
+/// zero_tileconfig_start()
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TCMMIMFP16PS instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param a
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param b
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_cmmimfp16ps(dst, a, b) __builtin_ia32_tcmmimfp16ps(dst, a, b)
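+
+/* Illustrative scalar model (not part of the upstream header): for one
+ * complex element a = ar + ai*i from tile a and b = br + bi*i from tile b,
+ * the value accumulated into dst is the imaginary part of the product,
+ *
+ *   imag(a * b) = ar*bi + ai*br
+ *
+ * which is exactly the pair of multiply-accumulates in the pseudocode above.
+ */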
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles \a a and \a b is interpreted as a complex number
+/// with FP16 real part and FP16 imaginary part.
+/// Calculates the real part of the result. For each possible combination
+/// of (row of \a a, column of \a b), it performs a set of multiplication
+/// and accumulations on all corresponding complex numbers (one from \a a
+/// and one from \a b). The real part of the \a a element is multiplied
+/// with the real part of the corresponding \a b element, and the negated
+/// imaginary part of the \a a element is multiplied with the imaginary
+/// part of the corresponding \a b elements. The two accumulated results
+/// are added, and then accumulated into the corresponding row and column
+/// of \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// void _tile_cmmrlfp16ps(__tile dst, __tile a, __tile b);
+/// \endcode
+///
+/// \code{.operation}
+/// FOR m := 0 TO dst.rows - 1
+/// tmp := dst.row[m]
+/// FOR k := 0 TO (a.colsb / 4) - 1
+/// FOR n := 0 TO (dst.colsb / 4) - 1
+/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+0])
+/// tmp.fp32[n] += FP32(-a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+1])
+/// ENDFOR
+/// ENDFOR
+/// write_row_and_zero(dst, m, tmp, dst.colsb)
+/// ENDFOR
+/// zero_upper_rows(dst, dst.rows)
+/// zero_tileconfig_start()
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TCMMRLFP16PS instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param a
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param b
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_cmmrlfp16ps(dst, a, b) __builtin_ia32_tcmmrlfp16ps(dst, a, b)
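+
+/* Illustrative scalar model (not part of the upstream header): for the same
+ * complex elements as above, the value accumulated into dst is the real part
+ * of the product,
+ *
+ *   real(a * b) = ar*br - ai*bi
+ *
+ * hence the negated imaginary term in the pseudocode above.
+ */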
+
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX
+_tile_cmmimfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tcmmimfp16ps_internal(m, n, k, dst, src1, src2);
+}
+
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX
+_tile_cmmrlfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tcmmrlfp16ps_internal(m, n, k, dst, src1, src2);
+}
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles src0 and src1 is interpreted as a complex number with
+/// FP16 real part and FP16 imaginary part.
+/// This function calculates the imaginary part of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TCMMIMFP16PS instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_COMPLEX
+static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_cmmimfp16ps_internal(src0.row, src1.col, src0.col,
+ dst->tile, src0.tile, src1.tile);
+}
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles src0 and src1 is interpreted as a complex number with
+/// FP16 real part and FP16 imaginary part.
+/// This function calculates the real part of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TCMMRLFP16PS instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_COMPLEX
+static void __tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_cmmrlfp16ps_internal(src0.row, src1.col, src0.col,
+ dst->tile, src0.tile, src1.tile);
+}
+
+#endif // __x86_64__
+#endif // __AMX_COMPLEXINTRIN_H
diff --git a/third_party/clang/lib/clang/16.0.0/include/amxfp16intrin.h b/third_party/clang/lib/clang/17.0.1/include/amxfp16intrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/amxfp16intrin.h
rename to third_party/clang/lib/clang/17.0.1/include/amxfp16intrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/amxintrin.h b/third_party/clang/lib/clang/17.0.1/include/amxintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/amxintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/amxintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/arm64intr.h b/third_party/clang/lib/clang/17.0.1/include/arm64intr.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/arm64intr.h
rename to third_party/clang/lib/clang/17.0.1/include/arm64intr.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/arm_acle.h b/third_party/clang/lib/clang/17.0.1/include/arm_acle.h
similarity index 97%
rename from third_party/clang/lib/clang/16.0.0/include/arm_acle.h
rename to third_party/clang/lib/clang/17.0.1/include/arm_acle.h
index e086f1f02d..c208512bab 100644
--- a/third_party/clang/lib/clang/16.0.0/include/arm_acle.h
+++ b/third_party/clang/lib/clang/17.0.1/include/arm_acle.h
@@ -138,28 +138,32 @@ __rorl(unsigned long __x, uint32_t __y) {
/* CLZ */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clz(uint32_t __t) {
- return (uint32_t)__builtin_clz(__t);
+ return __builtin_arm_clz(__t);
}
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzl(unsigned long __t) {
- return (unsigned long)__builtin_clzl(__t);
+#if __SIZEOF_LONG__ == 4
+ return __builtin_arm_clz(__t);
+#else
+ return __builtin_arm_clz64(__t);
+#endif
}
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzll(uint64_t __t) {
- return (uint64_t)__builtin_clzll(__t);
+ return __builtin_arm_clz64(__t);
}
/* CLS */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__cls(uint32_t __t) {
return __builtin_arm_cls(__t);
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
return __builtin_arm_cls(__t);
@@ -168,7 +172,7 @@ __clsl(unsigned long __t) {
#endif
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsll(uint64_t __t) {
return __builtin_arm_cls64(__t);
}
diff --git a/third_party/clang/lib/clang/16.0.0/include/arm_cmse.h b/third_party/clang/lib/clang/17.0.1/include/arm_cmse.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/arm_cmse.h
rename to third_party/clang/lib/clang/17.0.1/include/arm_cmse.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/arm_neon_sve_bridge.h b/third_party/clang/lib/clang/17.0.1/include/arm_neon_sve_bridge.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/arm_neon_sve_bridge.h
rename to third_party/clang/lib/clang/17.0.1/include/arm_neon_sve_bridge.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/armintr.h b/third_party/clang/lib/clang/17.0.1/include/armintr.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/armintr.h
rename to third_party/clang/lib/clang/17.0.1/include/armintr.h
diff --git a/third_party/clang/lib/clang/17.0.1/include/avx2intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx2intrin.h
new file mode 100644
index 0000000000..8f2de05674
--- /dev/null
+++ b/third_party/clang/lib/clang/17.0.1/include/avx2intrin.h
@@ -0,0 +1,5263 @@
+/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use directly; include instead."
+#endif
+
+#ifndef __AVX2INTRIN_H
+#define __AVX2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128)))
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+/// Computes sixteen sum of absolute difference (SAD) operations on sets of
+/// four unsigned 8-bit integers from the 256-bit integer vectors \a X and
+/// \a Y.
+///
+/// Eight SAD results are computed using the lower half of the input
+/// vectors, and another eight using the upper half. These 16-bit values
+/// are returned in the lower and upper halves of the 256-bit result,
+/// respectively.
+///
+/// A single SAD operation selects four bytes from \a X and four bytes from
+/// \a Y as input. It computes the differences between each \a X byte and
+/// the corresponding \a Y byte, takes the absolute value of each
+/// difference, and sums these four values to form one 16-bit result. The
+/// intrinsic computes 16 of these results with different sets of input
+/// bytes.
+///
+/// For each set of eight results, the SAD operations use the same four
+/// bytes from \a Y; the starting bit position for these four bytes is
+/// specified by \a M[1:0] times 32. The eight operations use successive
+/// sets of four bytes from \a X; the starting bit position for the first
+/// set of four bytes is specified by \a M[2] times 32. These bit positions
+/// are all relative to the 128-bit lane for each set of eight operations.
+///
+/// \code{.operation}
+/// r := 0
+/// FOR i := 0 TO 1
+/// j := i*3
+/// Ybase := M[j+1:j]*32 + i*128
+/// Xbase := M[j+2]*32 + i*128
+/// FOR k := 0 TO 7
+/// temp0 := ABS(X[Xbase+7:Xbase] - Y[Ybase+7:Ybase])
+/// temp1 := ABS(X[Xbase+15:Xbase+8] - Y[Ybase+15:Ybase+8])
+/// temp2 := ABS(X[Xbase+23:Xbase+16] - Y[Ybase+23:Ybase+16])
+/// temp3 := ABS(X[Xbase+31:Xbase+24] - Y[Ybase+31:Ybase+24])
+/// result[r+15:r] := temp0 + temp1 + temp2 + temp3
+/// Xbase := Xbase + 8
+/// r := r + 16
+/// ENDFOR
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mpsadbw_epu8(__m256i X, __m256i Y, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VMPSADBW instruction.
+///
+/// \param X
+/// A 256-bit integer vector containing one of the inputs.
+/// \param Y
+/// A 256-bit integer vector containing one of the inputs.
+/// \param M
+/// An unsigned immediate value specifying the starting positions of the
+/// bytes to operate on.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+#define _mm256_mpsadbw_epu8(X, Y, M) \
+ ((__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
+ (__v32qi)(__m256i)(Y), (int)(M)))
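+
+/* Example (illustrative; not part of the upstream header): with M = 0, each
+ * SAD in a 128-bit lane compares a sliding four-byte window of X against
+ * bytes 0..3 of Y in that lane.
+ *
+ *   __m256i __sads = _mm256_mpsadbw_epu8(__x, __y, 0);
+ */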
+
+/// Computes the absolute value of each signed byte in the 256-bit integer
+/// vector \a __a and returns each value in the corresponding byte of
+/// the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_abs_epi8(__m256i __a)
+{
+ return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
+}
+
+/// Computes the absolute value of each signed 16-bit element in the 256-bit
+/// vector of [16 x i16] in \a __a and returns each value in the
+/// corresponding element of the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_abs_epi16(__m256i __a)
+{
+ return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
+}
+
+/// Computes the absolute value of each signed 32-bit element in the 256-bit
+/// vector of [8 x i32] in \a __a and returns each value in the
+/// corresponding element of the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_abs_epi32(__m256i __a)
+{
+ return (__m256i)__builtin_elementwise_abs((__v8si)__a);
+}
+
+/// Converts the elements of two 256-bit vectors of [16 x i16] to 8-bit
+/// integers using signed saturation, and returns the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*8
+/// result[7+k:k] := SATURATE8(__a[15+j:j])
+/// result[71+k:64+k] := SATURATE8(__b[15+j:j])
+/// result[135+k:128+k] := SATURATE8(__a[143+j:128+j])
+/// result[199+k:192+k] := SATURATE8(__b[143+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKSSWB instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] used to generate result[63:0] and
+/// result[191:128].
+/// \param __b
+/// A 256-bit vector of [16 x i16] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_packs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
+}
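+
+/* Example (illustrative; not part of the upstream header): SATURATE8 clamps
+ * each 16-bit input to [-128, 127], so a source element of 300 packs to 127
+ * and one of -300 packs to -128.
+ *
+ *   __m256i __bytes = _mm256_packs_epi16(__words_a, __words_b);
+ */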
+
+/// Converts the elements of two 256-bit vectors of [8 x i32] to 16-bit
+/// integers using signed saturation, and returns the resulting 256-bit
+/// vector of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// k := i*16
+/// result[15+k:k] := SATURATE16(__a[31+j:j])
+/// result[79+k:64+k] := SATURATE16(__b[31+j:j])
+/// result[143+k:128+k] := SATURATE16(__a[159+j:128+j])
+/// result[207+k:192+k] := SATURATE16(__b[159+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKSSDW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] used to generate result[63:0] and
+/// result[191:128].
+/// \param __b
+/// A 256-bit vector of [8 x i32] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_packs_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
+}
+
+/// Converts elements from two 256-bit vectors of [16 x i16] to 8-bit integers
+/// using unsigned saturation, and returns the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*8
+/// result[7+k:k] := SATURATE8U(__a[15+j:j])
+/// result[71+k:64+k] := SATURATE8U(__b[15+j:j])
+/// result[135+k:128+k] := SATURATE8U(__a[143+j:128+j])
+/// result[199+k:192+k] := SATURATE8U(__b[143+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKUSWB instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] used to generate result[63:0] and
+/// result[191:128].
+/// \param __b
+/// A 256-bit vector of [16 x i16] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_packus_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Converts elements from two 256-bit vectors of [8 x i32] to 16-bit integers
+/// using unsigned saturation, and returns the resulting 256-bit vector of
+/// [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// k := i*16
+/// result[15+k:k] := SATURATE16U(__V1[31+j:j])
+/// result[79+k:64+k] := SATURATE16U(__V2[31+j:j])
+/// result[143+k:128+k] := SATURATE16U(__V1[159+j:128+j])
+/// result[207+k:192+k] := SATURATE16U(__V2[159+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKUSDW instruction.
+///
+/// \param __V1
+/// A 256-bit vector of [8 x i32] used to generate result[63:0] and
+/// result[191:128].
+/// \param __V2
+/// A 256-bit vector of [8 x i32] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_packus_epi32(__m256i __V1, __m256i __V2)
+{
+ return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
+}
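+
+/* Example (illustrative; not part of the upstream header): SATURATE16U
+ * clamps each signed 32-bit input to [0, 65535], so negative elements pack
+ * to 0 and elements above 65535 pack to 65535.
+ *
+ *   __m256i __words = _mm256_packus_epi32(__dwords_a, __dwords_b);
+ */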
+
+/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors and returns the lower 8 bits of each sum in the corresponding
+/// byte of the 256-bit integer vector result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 256-bit integer vector containing one of the source operands.
+/// \returns A 256-bit integer vector containing the sums.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_add_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qu)__a + (__v32qu)__b);
+}
+
+/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
+/// [16 x i16] and returns the lower 16 bits of each sum in the
+/// corresponding element of the [16 x i16] result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_add_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hu)__a + (__v16hu)__b);
+}
+
+/// Adds 32-bit integers from corresponding elements of two 256-bit vectors of
+/// [8 x i32] and returns the lower 32 bits of each sum in the corresponding
+/// element of the [8 x i32] result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the sums.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_add_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a + (__v8su)__b);
+}
+
+/// Adds 64-bit integers from corresponding elements of two 256-bit vectors of
+/// [4 x i64] and returns the lower 64 bits of each sum in the corresponding
+/// element of the [4 x i64] result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [4 x i64] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x i64] containing the sums.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_add_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a + (__v4du)__b);
+}
+
+/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors using signed saturation, and returns each sum in the
+/// corresponding byte of the 256-bit integer vector result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 256-bit integer vector containing one of the source operands.
+/// \returns A 256-bit integer vector containing the sums.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_adds_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b);
+}
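+
+/* Example (illustrative; not part of the upstream header): signed saturation
+ * keeps each sum in [-128, 127], so bytes holding 100 and 100 add to 127
+ * rather than wrapping to -56.
+ *
+ *   __m256i __sums = _mm256_adds_epi8(__a, __b);
+ */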
+
+/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
+/// [16 x i16] using signed saturation, and returns the [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_adds_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors using unsigned saturation, and returns each sum in the
+/// corresponding byte of the 256-bit integer vector result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDUSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 256-bit integer vector containing one of the source operands.
+/// \returns A 256-bit integer vector containing the sums.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_adds_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b);
+}
+
+/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
+/// [16 x i16] using unsigned saturation, and returns the [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDUSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_adds_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b);
+}
+
+/// Uses the lower half of the 256-bit vector \a a as the upper half of a
+/// temporary 256-bit value, and the lower half of the 256-bit vector \a b
+/// as the lower half of the temporary value. Right-shifts the temporary
+/// value by \a n bytes, and uses the lower 16 bytes of the shifted value
+/// as the lower 16 bytes of the result. Uses the upper halves of \a a and
+/// \a b to make another temporary value, right shifts by \a n, and uses
+/// the lower 16 bytes of the shifted value as the upper 16 bytes of the
+/// result.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_alignr_epi8(__m256i a, __m256i b, const int n);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPALIGNR instruction.
+///
+/// \param a
+/// A 256-bit integer vector containing source values.
+/// \param b
+/// A 256-bit integer vector containing source values.
+/// \param n
+/// An immediate value specifying the number of bytes to shift.
+/// \returns A 256-bit integer vector containing the result.
+#define _mm256_alignr_epi8(a, b, n) \
+ ((__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (n)))
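+
+/* Example (illustrative; not part of the upstream header): with n = 4, each
+ * 128-bit lane of the result holds bytes 4..15 of the corresponding lane of
+ * b, followed by the low four bytes of the lane of a.
+ *
+ *   __m256i __r = _mm256_alignr_epi8(__a, __b, 4);
+ */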
+
+/// Computes the bitwise AND of the 256-bit integer vectors in \a __a and
+/// \a __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPAND instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_and_si256(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a & (__v4du)__b);
+}
+
+/// Computes the bitwise AND of the 256-bit integer vector in \a __b with
+/// the bitwise NOT of the 256-bit integer vector in \a __a.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPANDN instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_andnot_si256(__m256i __a, __m256i __b)
+{
+ return (__m256i)(~(__v4du)__a & (__v4du)__b);
+}
+
+/// Computes the averages of the corresponding unsigned bytes in the two
+/// 256-bit integer vectors in \a __a and \a __b and returns each
+/// average in the corresponding byte of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := (__a[j+7:j] + __b[j+7:j] + 1) >> 1
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPAVGB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_avg_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);
+}
+
+/// Computes the averages of the corresponding unsigned 16-bit integers in
+/// the two 256-bit vectors of [16 x i16] in \a __a and \a __b and returns
+/// each average in the corresponding element of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := (__a[j+15:j] + __b[j+15:j] + 1) >> 1
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPAVGW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_avg_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Merges 8-bit integer values from either of the two 256-bit vectors
+/// \a __V1 or \a __V2, as specified by the 256-bit mask \a __M and returns
+/// the resulting 256-bit integer vector.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// IF __M[7+j] == 0
+/// result[7+j:j] := __V1[7+j:j]
+/// ELSE
+/// result[7+j:j] := __V2[7+j:j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBLENDVB instruction.
+///
+/// \param __V1
+/// A 256-bit integer vector containing source values.
+/// \param __V2
+/// A 256-bit integer vector containing source values.
+/// \param __M
+/// A 256-bit integer vector, with bit [7] of each byte specifying the
+/// source for each corresponding byte of the result. When the mask bit
+/// is 0, the byte is copied from \a __V1; otherwise, it is copied from
+/// \a __V2.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
+ (__v32qi)__M);
+}
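+
+/* Illustrative sketch (not part of the upstream header): a comparison
+   result (all-ones or all-zeros per byte) can drive a branchless per-byte
+   select, since only bit 7 of each mask byte is examined. Names are
+   hypothetical; _mm256_cmpeq_epi8 is defined later in this header.
+
+     __m256i m = _mm256_cmpeq_epi8(x, needle);           // 0xFF where equal
+     __m256i r = _mm256_blendv_epi8(x, replacement, m);  // swap those bytes
+*/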
+
+/// Merges 16-bit integer values from either of the two 256-bit vectors
+/// \a V1 or \a V2, as specified by the immediate integer operand \a M,
+/// and returns the resulting 256-bit vector of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// IF M[i] == 0
+/// result[15+j:j] := V1[15+j:j]
+/// result[143+j:128+j] := V1[143+j:128+j]
+/// ELSE
+/// result[15+j:j] := V2[15+j:j]
+/// result[143+j:128+j] := V2[143+j:128+j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_blend_epi16(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPBLENDW instruction.
+///
+/// \param V1
+/// A 256-bit vector of [16 x i16] containing source values.
+/// \param V2
+/// A 256-bit vector of [16 x i16] containing source values.
+/// \param M
+/// An immediate 8-bit integer operand, with bits [7:0] specifying the
+/// source for each element of the result. The position of the mask bit
+/// corresponds to the index of a copied value. When a mask bit is 0, the
+/// element is copied from \a V1; otherwise, it is copied from \a V2.
+/// \a M[0] determines the source for elements 0 and 8, \a M[1] for
+/// elements 1 and 9, and so forth.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+#define _mm256_blend_epi16(V1, V2, M) \
+ ((__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \
+ (__v16hi)(__m256i)(V2), (int)(M)))
+
+/// Compares corresponding bytes in the 256-bit integer vectors in \a __a and
+/// \a __b for equality and returns the outcomes in the corresponding
+/// bytes of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := (__a[j+7:j] == __b[j+7:j]) ? 0xFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the inputs.
+/// \param __b
+/// A 256-bit integer vector containing one of the inputs.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qi)__a == (__v32qi)__b);
+}
+
+/// Compares corresponding elements in the 256-bit vectors of [16 x i16] in
+/// \a __a and \a __b for equality and returns the outcomes in the
+/// corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := (__a[j+15:j] == __b[j+15:j]) ? 0xFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a == (__v16hi)__b);
+}
+
+/// Compares corresponding elements in the 256-bit vectors of [8 x i32] in
+/// \a __a and \a __b for equality and returns the outcomes in the
+/// corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// result[j+31:j] := (__a[j+31:j] == __b[j+31:j]) ? 0xFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a == (__v8si)__b);
+}
+
+/// Compares corresponding elements in the 256-bit vectors of [4 x i64] in
+/// \a __a and \a __b for equality and returns the outcomes in the
+/// corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// result[j+63:j] := (__a[j+63:j] == __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4di)__a == (__v4di)__b);
+}
+
+/// Compares corresponding signed bytes in the 256-bit integer vectors in
+/// \a __a and \a __b for greater-than and returns the outcomes in the
+/// corresponding bytes of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := (__a[j+7:j] > __b[j+7:j]) ? 0xFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the inputs.
+/// \param __b
+/// A 256-bit integer vector containing one of the inputs.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
+{
+ /* This function always performs a signed comparison, but __v32qi is a char
+ which may be signed or unsigned, so use __v32qs. */
+ return (__m256i)((__v32qs)__a > (__v32qs)__b);
+}
+
+/// Compares corresponding signed elements in the 256-bit vectors of
+/// [16 x i16] in \a __a and \a __b for greater-than and returns the
+/// outcomes in the corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := (__a[j+15:j] > __b[j+15:j]) ? 0xFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hi)__a > (__v16hi)__b);
+}
+
+/// Compares corresponding signed elements in the 256-bit vectors of
+/// [8 x i32] in \a __a and \a __b for greater-than and returns the
+/// outcomes in the corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// result[j+31:j] := (__a[j+31:j] > __b[j+31:j]) ? 0xFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8si)__a > (__v8si)__b);
+}
+
+/// Compares corresponding signed elements in the 256-bit vectors of
+/// [4 x i64] in \a __a and \a __b for greater-than and returns the
+/// outcomes in the corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// result[j+63:j] := (__a[j+63:j] > __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4di)__a > (__v4di)__b);
+}
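+
+/* Illustrative sketch (not part of the upstream header): AVX2 has no
+   direct 64-bit min/max intrinsic, but VPCMPGTQ plus VPBLENDVB gives a
+   branchless signed 64-bit maximum. `a` and `b` are hypothetical operands.
+
+     __m256i gt  = _mm256_cmpgt_epi64(a, b);      // all-ones where a > b
+     __m256i max = _mm256_blendv_epi8(b, a, gt);  // take a where mask set
+*/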
+
+/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] and returns the lower 16 bits of each sum in an
+/// element of the [16 x i16] result (overflow is ignored). Sums from
+/// \a __a are returned in the lower 64 bits of each 128-bit half of the
+/// result; sums from \a __b are returned in the upper 64 bits of each
+/// 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := __a[j+15:j] + __a[j+31:j+16]
+/// result[j+31:j+16] := __a[j+47:j+32] + __a[j+63:j+48]
+/// result[j+47:j+32] := __a[j+79:j+64] + __a[j+95:j+80]
+/// result[j+63:j+48] := __a[j+111:j+96] + __a[j+127:j+112]
+/// result[j+79:j+64] := __b[j+15:j] + __b[j+31:j+16]
+/// result[j+95:j+80] := __b[j+47:j+32] + __b[j+63:j+48]
+/// result[j+111:j+96] := __b[j+79:j+64] + __b[j+95:j+80]
+/// result[j+127:j+112] := __b[j+111:j+96] + __b[j+127:j+112]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hadd_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Horizontally adds the adjacent pairs of 32-bit integers from two 256-bit
+/// vectors of [8 x i32] and returns the lower 32 bits of each sum in an
+/// element of the [8 x i32] result (overflow is ignored). Sums from \a __a
+/// are returned in the lower 64 bits of each 128-bit half of the result;
+/// sums from \a __b are returned in the upper 64 bits of each 128-bit half
+/// of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+31:j] := __a[j+31:j] + __a[j+63:j+32]
+/// result[j+63:j+32] := __a[j+95:j+64] + __a[j+127:j+96]
+/// result[j+95:j+64] := __b[j+31:j] + __b[j+63:j+32]
+/// result[j+127:j+96] := __b[j+95:j+64] + __b[j+127:j+96]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the sums.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hadd_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
+}
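+
+/* Illustrative sketch (not part of the upstream header): because VPHADDD
+   adds within each 128-bit half, reducing a [8 x i32] vector to a scalar
+   sum takes two hadd steps plus a cross-lane combine. `v` is hypothetical.
+
+     __m256i s = _mm256_hadd_epi32(v, v);  // pairwise sums in each lane
+     s = _mm256_hadd_epi32(s, s);          // each lane holds its 4-way sum
+     int total = _mm256_extract_epi32(s, 0) + _mm256_extract_epi32(s, 4);
+*/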
+
+/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] using signed saturation and returns each sum in
+/// an element of the [16 x i16] result. Sums from \a __a are returned in
+/// the lower 64 bits of each 128-bit half of the result; sums from \a __b
+/// are returned in the upper 64 bits of each 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := SATURATE16(__a[j+15:j] + __a[j+31:j+16])
+/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] + __a[j+63:j+48])
+/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] + __a[j+95:j+80])
+/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] + __a[j+127:j+112])
+/// result[j+79:j+64] := SATURATE16(__b[j+15:j] + __b[j+31:j+16])
+/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] + __b[j+63:j+48])
+/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] + __b[j+95:j+80])
+/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] + __b[j+127:j+112])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hadds_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] and returns the lower 16 bits of each difference
+/// in an element of the [16 x i16] result (overflow is ignored).
+/// Differences from \a __a are returned in the lower 64 bits of each
+/// 128-bit half of the result; differences from \a __b are returned in the
+/// upper 64 bits of each 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := __a[j+15:j] - __a[j+31:j+16]
+/// result[j+31:j+16] := __a[j+47:j+32] - __a[j+63:j+48]
+/// result[j+47:j+32] := __a[j+79:j+64] - __a[j+95:j+80]
+/// result[j+63:j+48] := __a[j+111:j+96] - __a[j+127:j+112]
+/// result[j+79:j+64] := __b[j+15:j] - __b[j+31:j+16]
+/// result[j+95:j+80] := __b[j+47:j+32] - __b[j+63:j+48]
+/// result[j+111:j+96] := __b[j+79:j+64] - __b[j+95:j+80]
+/// result[j+127:j+112] := __b[j+111:j+96] - __b[j+127:j+112]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hsub_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Horizontally subtracts adjacent pairs of 32-bit integers from two 256-bit
+/// vectors of [8 x i32] and returns the lower 32 bits of each difference in
+/// an element of the [8 x i32] result (overflow is ignored). Differences
+/// from \a __a are returned in the lower 64 bits of each 128-bit half of
+/// the result; differences from \a __b are returned in the upper 64 bits
+/// of each 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+31:j] := __a[j+31:j] - __a[j+63:j+32]
+/// result[j+63:j+32] := __a[j+95:j+64] - __a[j+127:j+96]
+/// result[j+95:j+64] := __b[j+31:j] - __b[j+63:j+32]
+/// result[j+127:j+96] := __b[j+95:j+64] - __b[j+127:j+96]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the differences.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hsub_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
+}
+
+/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] using signed saturation and returns each difference
+/// in an element of the [16 x i16] result. Differences from \a __a are
+/// returned in the lower 64 bits of each 128-bit half of the result;
+/// differences from \a __b are returned in the upper 64 bits of each
+/// 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := SATURATE16(__a[j+15:j] - __a[j+31:j+16])
+/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] - __a[j+63:j+48])
+/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] - __a[j+95:j+80])
+/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] - __a[j+127:j+112])
+/// result[j+79:j+64] := SATURATE16(__b[j+15:j] - __b[j+31:j+16])
+/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] - __b[j+63:j+48])
+/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] - __b[j+95:j+80])
+/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] - __b[j+127:j+112])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_hsubs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Multiplies each unsigned byte from the 256-bit integer vector in \a __a
+/// with the corresponding signed byte from the 256-bit integer vector in
+/// \a __b, forming signed 16-bit intermediate products. Adds adjacent
+/// pairs of those products using signed saturation to form 16-bit sums
+/// returned as elements of the [16 x i16] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// temp1 := __a[j+7:j] * __b[j+7:j]
+/// temp2 := __a[j+15:j+8] * __b[j+15:j+8]
+/// result[j+15:j] := SATURATE16(temp1 + temp2)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMADDUBSW instruction.
+///
+/// \param __a
+/// A 256-bit vector containing one of the source operands.
+/// \param __b
+/// A 256-bit vector containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maddubs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);
+}
+
+/// Multiplies corresponding 16-bit elements of two 256-bit vectors of
+/// [16 x i16], forming 32-bit intermediate products, and adds pairs of
+/// those products to form 32-bit sums returned as elements of the
+/// [8 x i32] result.
+///
+/// There is only one wraparound case: when all four of the 16-bit sources
+/// are \c 0x8000, the result will be \c 0x80000000.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// temp1 := __a[j+15:j] * __b[j+15:j]
+/// temp2 := __a[j+31:j+16] * __b[j+31:j+16]
+/// result[j+31:j] := temp1 + temp2
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMADDWD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_madd_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);
+}
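+
+/* Illustrative sketch (not part of the upstream header): VPMADDWD is the
+   usual building block for 16-bit dot products. With hypothetical `x` and
+   `y` pointing at `n` 16-bit elements (n a multiple of 16):
+
+     __m256i acc = _mm256_setzero_si256();
+     for (size_t i = 0; i < n; i += 16) {
+       __m256i xa = _mm256_loadu_si256((const __m256i *)(x + i));
+       __m256i yb = _mm256_loadu_si256((const __m256i *)(y + i));
+       acc = _mm256_add_epi32(acc, _mm256_madd_epi16(xa, yb));
+     }
+     // acc holds eight partial 32-bit sums; reduce as shown for VPHADDD.
+*/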
+
+/// Compares the corresponding signed bytes in the two 256-bit integer vectors
+/// in \a __a and \a __b and returns the larger of each pair in the
+/// corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b);
+}
+
+/// Compares the corresponding signed 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Compares the corresponding signed 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXSD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b);
+}
+
+/// Compares the corresponding unsigned bytes in the two 256-bit integer
+/// vectors in \a __a and \a __b and returns the larger of each pair in
+/// the corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXUB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b);
+}
+
+/// Compares the corresponding unsigned 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXUW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b);
+}
+
+/// Compares the corresponding unsigned 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXUD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_max_epu32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b);
+}
+
+/// Compares the corresponding signed bytes in the two 256-bit integer vectors
+/// in \a __a and \a __b and returns the smaller of each pair in the
+/// corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b);
+}
+
+/// Compares the corresponding signed 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Compares the corresponding signed 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINSD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b);
+}
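+
+/* Illustrative sketch (not part of the upstream header): composing the
+   elementwise max and min intrinsics gives a branchless clamp. `v`, `lo`
+   and `hi` are hypothetical [8 x i32] vectors with lo <= hi elementwise.
+
+     __m256i clamped = _mm256_min_epi32(_mm256_max_epi32(v, lo), hi);
+*/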
+
+/// Compares the corresponding unsigned bytes in the two 256-bit integer
+/// vectors in \a __a and \a __b and returns the smaller of each pair in
+/// the corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINUB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b);
+}
+
+/// Compares the corresponding unsigned 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINUW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b);
+}
+
+/// Compares the corresponding unsigned 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINUD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_min_epu32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b);
+}
+
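+/// Creates a 32-bit integer mask from the most significant bit of each byte
+/// in the 256-bit integer vector in \a __a and returns the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[i] := __a[j+7]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVMSKB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing the source bytes.
+/// \returns A 32-bit integer containing the mask.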
+static __inline__ int __DEFAULT_FN_ATTRS256
+_mm256_movemask_epi8(__m256i __a)
+{
+ return __builtin_ia32_pmovmskb256((__v32qi)__a);
+}
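+
+/* Illustrative sketch (not part of the upstream header): VPCMPEQB plus
+   VPMOVMSKB is the standard memchr-style search step. __builtin_ctz is a
+   GCC/Clang builtin; `p` and `c` are hypothetical.
+
+     __m256i eq = _mm256_cmpeq_epi8(_mm256_loadu_si256((const __m256i *)p),
+                                    _mm256_set1_epi8(c));
+     int mask = _mm256_movemask_epi8(eq);
+     if (mask != 0)
+       return p + __builtin_ctz(mask);  // first matching byte in this block
+*/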
+
+/// Sign-extends bytes from the 128-bit integer vector in \a __V and returns
+/// the 16-bit values in the corresponding elements of a 256-bit vector
+/// of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*8
+/// k := i*16
+/// result[k+15:k] := SignExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBW instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [16 x i16] containing the sign-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi8_epi16(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
+}
+
+/// Sign-extends bytes from the lower half of the 128-bit integer vector in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*8
+/// k := i*32
+/// result[k+31:k] := SignExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBD instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [8 x i32] containing the sign-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi8_epi32(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
+}
+
+/// Sign-extends the first four bytes from the 128-bit integer vector in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := SignExtend(__V[7:0])
+/// result[127:64] := SignExtend(__V[15:8])
+/// result[191:128] := SignExtend(__V[23:16])
+/// result[255:192] := SignExtend(__V[31:24])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBQ instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi8_epi64(__m128i __V)
+{
+ /* This function always performs a signed extension, but __v16qi is a char
+ which may be signed or unsigned, so use __v16qs. */
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
+}
+
+/// Sign-extends 16-bit elements from the 128-bit vector of [8 x i16] in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*32
+/// result[k+31:k] := SignExtend(__V[j+15:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXWD instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [8 x i32] containing the sign-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
+}
+
+/// Sign-extends 16-bit elements from the lower half of the 128-bit vector of
+/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding
+/// elements of a 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := SignExtend(__V[15:0])
+/// result[127:64] := SignExtend(__V[31:16])
+/// result[191:128] := SignExtend(__V[47:32])
+/// result[255:192] := SignExtend(__V[63:48])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXWQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
+}
+
+/// Sign-extends 32-bit elements from the 128-bit vector of [4 x i32] in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := SignExtend(__V[31:0])
+/// result[127:64] := SignExtend(__V[63:32])
+/// result[191:128] := SignExtend(__V[95:64])
+/// result[255:192] := SignExtend(__V[127:96])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXDQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [4 x i32] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepi32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
+}
+
+/// Zero-extends bytes from the 128-bit integer vector in \a __V and returns
+/// the 16-bit values in the corresponding elements of a 256-bit vector
+/// of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*8
+/// k := i*16
+/// result[k+15:k] := ZeroExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXBW instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [16 x i16] containing the zero-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
+}
+
+/// Zero-extends bytes from the lower half of the 128-bit integer vector in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*8
+/// k := i*32
+/// result[k+31:k] := ZeroExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXBD instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [8 x i32] containing the zero-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
+}
+
+/// Zero-extends the first four bytes from the 128-bit integer vector in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := ZeroExtend(__V[7:0])
+/// result[127:64] := ZeroExtend(__V[15:8])
+/// result[191:128] := ZeroExtend(__V[23:16])
+/// result[255:192] := ZeroExtend(__V[31:24])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXBQ instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
+}
+
+/// Zero-extends 16-bit elements from the 128-bit vector of [8 x i16] in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*32
+/// result[k+31:k] := ZeroExtend(__V[j+15:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXWD instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [8 x i32] containing the zero-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
+}
+
+/// Zero-extends 16-bit elements from the lower half of the 128-bit vector of
+/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding
+/// elements of a 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := ZeroExtend(__V[15:0])
+/// result[127:64] := ZeroExtend(__V[31:16])
+/// result[191:128] := ZeroExtend(__V[47:32])
+/// result[255:192] := ZeroExtend(__V[63:48])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXWQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
+}
+
+/// Zero-extends 32-bit elements from the 128-bit vector of [4 x i32] in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := ZeroExtend(__V[31:0])
+/// result[127:64] := ZeroExtend(__V[63:32])
+/// result[191:128] := ZeroExtend(__V[95:64])
+/// result[255:192] := ZeroExtend(__V[127:96])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXDQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [4 x i32] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
+/// values.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtepu32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
+}
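+
+/* Illustrative sketch (not part of the upstream header): the VPMOVZX
+   family widens narrow data ahead of arithmetic that would otherwise
+   overflow, e.g. promoting 16 bytes to [16 x i16] before scaling. `src`
+   is a hypothetical byte pointer.
+
+     __m256i w = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)src));
+     w = _mm256_mullo_epi16(w, _mm256_set1_epi16(3));  // no 8-bit overflow
+*/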
+
+/// Multiplies signed 32-bit integers from even-numbered elements of two
+/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the
+/// [4 x i64] result.
+///
+/// \code{.operation}
+/// result[63:0] := __a[31:0] * __b[31:0]
+/// result[127:64] := __a[95:64] * __b[95:64]
+/// result[191:128] := __a[159:128] * __b[159:128]
+/// result[255:192] := __a[223:192] * __b[223:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x i64] containing the products.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mul_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
+}
+
+/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], truncates the 32-bit results to the most significant 18
+/// bits, rounds by adding 1, and returns bits [16:1] of each rounded
+/// product in the [16 x i16] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// temp := ((__a[j+15:j] * __b[j+15:j]) >> 14) + 1
+/// result[j+15:j] := temp[16:1]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULHRSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the rounded products.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Multiplies unsigned 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the
+/// [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULHUW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the products.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mulhi_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the
+/// [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULHW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the products.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mulhi_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], and returns the lower 16 bits of each 32-bit product in the
+/// [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULLW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the products.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mullo_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hu)__a * (__v16hu)__b);
+}
+
+/// Multiplies signed 32-bit integer elements of two 256-bit vectors of
+/// [8 x i32], and returns the lower 32 bits of each 64-bit product in the
+/// [8 x i32] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULLD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the products.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mullo_epi32 (__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a * (__v8su)__b);
+}
+
+/// Multiplies unsigned 32-bit integers from even-numbered elements of two
+/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the
+/// [4 x i64] result.
+///
+/// \code{.operation}
+/// result[63:0] := __a[31:0] * __b[31:0]
+/// result[127:64] := __a[95:64] * __b[95:64]
+/// result[191:128] := __a[159:128] * __b[159:128]
+/// result[255:192] := __a[223:192] * __b[223:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULUDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x i64] containing the products.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mul_epu32(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
+}
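+
+/* Illustrative sketch (not part of the upstream header): VPMULUDQ reads
+   only the even-numbered 32-bit elements, so full 64-bit products for all
+   eight elements take a second multiply with the odd elements shifted
+   down. `v` and `w` are hypothetical.
+
+     __m256i even = _mm256_mul_epu32(v, w);
+     __m256i odd  = _mm256_mul_epu32(_mm256_srli_epi64(v, 32),
+                                     _mm256_srli_epi64(w, 32));
+*/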
+
+/// Computes the bitwise OR of the 256-bit integer vectors in \a __a and
+/// \a __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPOR instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_or_si256(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a | (__v4du)__b);
+}
+
+/// Computes four sum of absolute difference (SAD) operations on sets of eight
+/// unsigned 8-bit integers from the 256-bit integer vectors \a __a and
+/// \a __b.
+///
+/// One SAD result is computed for each set of eight bytes from \a __a and
+/// eight bytes from \a __b. The zero-extended SAD value is returned in the
+/// corresponding 64-bit element of the result.
+///
+/// A single SAD operation takes the differences between the corresponding
+/// bytes of \a __a and \a __b, takes the absolute value of each difference,
+/// and sums these eight values to form one 16-bit result. This operation
+/// is repeated four times with successive sets of eight bytes.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// temp0 := ABS(__a[j+7:j] - __b[j+7:j])
+/// temp1 := ABS(__a[j+15:j+8] - __b[j+15:j+8])
+/// temp2 := ABS(__a[j+23:j+16] - __b[j+23:j+16])
+/// temp3 := ABS(__a[j+31:j+24] - __b[j+31:j+24])
+/// temp4 := ABS(__a[j+39:j+32] - __b[j+39:j+32])
+/// temp5 := ABS(__a[j+47:j+40] - __b[j+47:j+40])
+/// temp6 := ABS(__a[j+55:j+48] - __b[j+55:j+48])
+/// temp7 := ABS(__a[j+63:j+56] - __b[j+63:j+56])
+/// result[j+15:j] := temp0 + temp1 + temp2 + temp3 +
+/// temp4 + temp5 + temp6 + temp7
+/// result[j+63:j+16] := 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSADBW instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sad_epu8(__m256i __a, __m256i __b)
+{
+ return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
+}
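+
+/* Illustrative sketch (not part of the upstream header): VPSADBW against a
+   zero vector is a cheap way to sum 32 unsigned bytes, leaving four
+   partial sums in the 64-bit elements of the result. `v` is hypothetical.
+
+     __m256i sums = _mm256_sad_epu8(v, _mm256_setzero_si256());
+     // total = sums[63:0] + sums[127:64] + sums[191:128] + sums[255:192]
+*/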
+
+/// Shuffles 8-bit integers in the 256-bit integer vector \a __a according
+/// to control information in the 256-bit integer vector \a __b, and
+/// returns the 256-bit result. In effect there are two separate 128-bit
+/// shuffles in the lower and upper halves.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// IF __b[j+7] == 1
+/// result[j+7:j] := 0
+/// ELSE
+/// k := __b[j+3:j] * 8
+/// IF i > 15
+/// k := k + 128
+/// FI
+/// result[j+7:j] := __a[k+7:k]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSHUFB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing source values.
+/// \param __b
+/// A 256-bit integer vector containing control information to determine
+/// what goes into the corresponding byte of the result. If bit 7 of the
+/// control byte is 1, the result byte is 0; otherwise, bits 3:0 of the
+/// control byte specify the index (within the same 128-bit half) of \a __a
+/// to copy to the result byte.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_shuffle_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
+}
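+
+/* Illustrative sketch (not part of the upstream header): because VPSHUFB
+   indexes within each 128-bit half, reversing byte order per half repeats
+   the same 16-entry index pattern in both halves. `v` is hypothetical.
+
+     const __m256i rev = _mm256_setr_epi8(
+         15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+         15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+     __m256i swapped = _mm256_shuffle_epi8(v, rev);
+*/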
+
+/// Shuffles 32-bit integers from the 256-bit vector of [8 x i32] in \a a
+/// according to control information in the integer literal \a imm, and
+/// returns the 256-bit result. In effect there are two parallel 128-bit
+/// shuffles in the lower and upper halves.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// k := (imm >> i*2)[1:0] * 32
+/// result[j+31:j] := a[k+31:k]
+/// result[128+j+31:128+j] := a[128+k+31:128+k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_shuffle_epi32(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSHUFD instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param imm
+/// An immediate 8-bit value specifying which elements to copy from \a a.
+/// \a imm[1:0] specifies the index in \a a for elements 0 and 4 of the
+/// result, \a imm[3:2] specifies the index for elements 1 and 5, and so
+/// forth.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+#define _mm256_shuffle_epi32(a, imm) \
+ ((__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm)))
+
+/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] in \a a
+/// according to control information in the integer literal \a imm, and
+/// returns the 256-bit result. The upper 64 bits of each 128-bit half
+/// are shuffled in parallel; the lower 64 bits of each 128-bit half are
+/// copied from \a a unchanged.
+///
+/// \code{.operation}
+/// result[63:0] := a[63:0]
+/// result[191:128] := a[191:128]
+/// FOR i := 0 TO 3
+/// j := i * 16 + 64
+/// k := (imm >> i*2)[1:0] * 16 + 64
+/// result[j+15:j] := a[k+15:k]
+/// result[128+j+15:128+j] := a[128+k+15:128+k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_shufflehi_epi16(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSHUFHW instruction.
+///
+/// \param a
+/// A 256-bit vector of [16 x i16] containing source values.
+/// \param imm
+/// An immediate 8-bit value specifying which elements to copy from \a a.
+/// \a imm[1:0] specifies the index in \a a for elements 4 and 12 of the
+/// result, \a imm[3:2] specifies the index for elements 5 and 13, and so
+/// forth. Indexes are offset by 4 (so 0 means index 4, and so forth).
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+#define _mm256_shufflehi_epi16(a, imm) \
+ ((__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm)))
+
+/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] \a a
+/// according to control information in the integer literal \a imm, and
+/// returns the 256-bit [16 x i16] result. The lower 64 bits of each
+/// 128-bit half are shuffled; the upper 64 bits of each 128-bit half are
+/// copied from \a a unchanged.
+///
+/// \code{.operation}
+/// result[127:64] := a[127:64]
+/// result[255:192] := a[255:192]
+/// FOR i := 0 TO 3
+/// j := i * 16
+/// k := (imm >> i*2)[1:0] * 16
+/// result[j+15:j] := a[k+15:k]
+/// result[128+j+15:128+j] := a[128+k+15:128+k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_shufflelo_epi16(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSHUFLW instruction.
+///
+/// \param a
+/// A 256-bit vector of [16 x i16] to use as a source of data for the
+/// result.
+/// \param imm
+/// An immediate 8-bit value specifying which elements to copy from \a a.
+/// \a imm[1:0] specifies the index in \a a for elements 0 and 8 of the
+/// result, \a imm[3:2] specifies the index for elements 1 and 9, and so
+/// forth.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+#define _mm256_shufflelo_epi16(a, imm) \
+ ((__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm)))
+
+/// Sets each byte of the result to the corresponding byte of the 256-bit
+/// integer vector in \a __a, the negative of that byte, or zero, depending
+/// on whether the corresponding byte of the 256-bit integer vector in
+/// \a __b is greater than zero, less than zero, or equal to zero,
+/// respectively.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGNB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sign_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
+}
+
+/// Sets each element of the result to the corresponding element of the
+/// 256-bit vector of [16 x i16] in \a __a, the negative of that element,
+/// or zero, depending on whether the corresponding element of the 256-bit
+/// vector of [16 x i16] in \a __b is greater than zero, less than zero, or
+/// equal to zero, respectively.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGNW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sign_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Sets each element of the result to the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __a, the negative of that element, or
+/// zero, depending on whether the corresponding element of the 256-bit
+/// vector of [8 x i32] in \a __b is greater than zero, less than zero, or
+/// equal to zero, respectively.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGND instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sign_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
+}
+
+/// Shifts each 128-bit half of the 256-bit integer vector \a a left by
+/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm
+/// is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_slli_si256(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSLLDQ instruction.
+///
+/// \param a
+/// A 256-bit integer vector to be shifted.
+/// \param imm
+/// An unsigned immediate value specifying the shift count (in bytes).
+/// \returns A 256-bit integer vector containing the result.
+#define _mm256_slli_si256(a, imm) \
+ ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
+
+/// Shifts each 128-bit half of the 256-bit integer vector \a a left by
+/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm
+/// is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_bslli_epi128(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSLLDQ instruction.
+///
+/// \param a
+/// A 256-bit integer vector to be shifted.
+/// \param imm
+/// An unsigned immediate value specifying the shift count (in bytes).
+/// \returns A 256-bit integer vector containing the result.
+#define _mm256_bslli_epi128(a, imm) \
+ ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
+
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// left by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_slli_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
+}
+
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// left by the number of bits specified by the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sll_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
+}
+
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// left by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 31, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_slli_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
+}
+
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// left by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 31, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sll_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
+}
+
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
+/// left by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 63, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_slli_epi64(__m256i __a, int __count)
+{
+ return __builtin_ia32_psllqi256((__v4di)__a, __count);
+}
+
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
+/// left by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 63, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sll_epi64(__m256i __a, __m128i __count)
+{
+ return __builtin_ia32_psllq256((__v4di)__a, __count);
+}
+
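+/* A minimal sketch (assuming -mavx2) of the two count forms: the *_slli_*
+ * intrinsics take the count as an integer, while the *_sll_* forms read
+ * it from the low 64 bits of an __m128i; both shift every element by the
+ * same amount.
+ *
+ *   __m256i v = _mm256_set1_epi64x(1);
+ *   __m256i a = _mm256_slli_epi64(v, 3);                   // each lane: 8
+ *   __m256i b = _mm256_sll_epi64(v, _mm_cvtsi32_si128(3)); // same result
+ */
+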
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// right by \a __count bits, shifting in sign bits, and returns the result.
+/// If \a __count is greater than 15, each element of the result is either
+/// 0 or -1 according to the corresponding input sign bit.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srai_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
+}
+
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// right by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in sign bits, and returns the result. If \a __count is greater
+/// than 15, each element of the result is either 0 or -1 according to the
+/// corresponding input sign bit.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sra_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
+}
+
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// right by \a __count bits, shifting in sign bits, and returns the result.
+/// If \a __count is greater than 31, each element of the result is either
+/// 0 or -1 according to the corresponding input sign bit.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srai_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
+}
+
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// right by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in sign bits, and returns the result. If \a __count is greater
+/// than 31, each element of the result is either 0 or -1 according to the
+/// corresponding input sign bit.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sra_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
+}
+
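+/* A minimal sketch (assuming -mavx2): the arithmetic forms replicate the
+ * sign bit, so negative lanes stay negative.
+ *
+ *   __m256i v = _mm256_set1_epi32(-8);
+ *   __m256i r = _mm256_srai_epi32(v, 1); // each lane: -4
+ */
+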
+/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by
+/// \a imm bytes, shifting in zero bytes, and returns the result. If
+/// \a imm is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_srli_si256(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSRLDQ instruction.
+///
+/// \param a
+/// A 256-bit integer vector to be shifted.
+/// \param imm
+/// An unsigned immediate value specifying the shift count (in bytes).
+/// \returns A 256-bit integer vector containing the result.
+#define _mm256_srli_si256(a, imm) \
+  ((__m256i)__builtin_ia32_psrldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
+
+/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by
+/// \a imm bytes, shifting in zero bytes, and returns the result. If
+/// \a imm is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_bsrli_epi128(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSRLDQ instruction.
+///
+/// \param a
+/// A 256-bit integer vector to be shifted.
+/// \param imm
+/// An unsigned immediate value specifying the shift count (in bytes).
+/// \returns A 256-bit integer vector containing the result.
+#define _mm256_bsrli_epi128(a, imm) \
+  ((__m256i)__builtin_ia32_psrldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
+
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// right by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srli_epi16(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
+}
+
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// right by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srl_epi16(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
+}
+
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// right by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 31, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srli_epi32(__m256i __a, int __count)
+{
+ return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
+}
+
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// right by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 31, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srl_epi32(__m256i __a, __m128i __count)
+{
+ return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
+}
+
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
+/// right by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 63, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srli_epi64(__m256i __a, int __count)
+{
+ return __builtin_ia32_psrlqi256((__v4di)__a, __count);
+}
+
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
+/// right by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 63, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srl_epi64(__m256i __a, __m128i __count)
+{
+ return __builtin_ia32_psrlq256((__v4di)__a, __count);
+}
+
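+/* Continuing the sketch above (assuming -mavx2): the logical forms shift
+ * zeroes in, so a negative input turns into a large positive value.
+ *
+ *   __m256i v = _mm256_set1_epi32(-8);
+ *   __m256i r = _mm256_srli_epi32(v, 1); // each lane: 0x7FFFFFFC
+ */
+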
+/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors. Returns the lower 8 bits of each difference in the
+/// corresponding byte of the 256-bit integer vector result (overflow is
+/// ignored).
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := __a[j+7:j] - __b[j+7:j]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing the minuends.
+/// \param __b
+/// A 256-bit integer vector containing the subtrahends.
+/// \returns A 256-bit integer vector containing the differences.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sub_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v32qu)__a - (__v32qu)__b);
+}
+
+/// Subtracts 16-bit integers from corresponding elements of two 256-bit
+/// vectors of [16 x i16]. Returns the lower 16 bits of each difference in
+/// the corresponding element of the [16 x i16] result (overflow is
+/// ignored).
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := __a[j+15:j] - __b[j+15:j]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing the minuends.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing the subtrahends.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sub_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v16hu)__a - (__v16hu)__b);
+}
+
+/// Subtracts 32-bit integers from corresponding elements of two 256-bit
+/// vectors of [8 x i32]. Returns the lower 32 bits of each difference in
+/// the corresponding element of the [8 x i32] result (overflow is ignored).
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// result[j+31:j] := __a[j+31:j] - __b[j+31:j]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing the minuends.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing the subtrahends.
+/// \returns A 256-bit vector of [8 x i32] containing the differences.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sub_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a - (__v8su)__b);
+}
+
+/// Subtracts 64-bit integers from corresponding elements of two 256-bit
+/// vectors of [4 x i64]. Returns the lower 64 bits of each difference in
+/// the corresponding element of the [4 x i64] result (overflow is ignored).
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// result[j+63:j] := __a[j+63:j] - __b[j+63:j]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] containing the minuends.
+/// \param __b
+/// A 256-bit vector of [4 x i64] containing the subtrahends.
+/// \returns A 256-bit vector of [4 x i64] containing the differences.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sub_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a - (__v4du)__b);
+}
+
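+/* A minimal sketch (assuming -mavx2 and <stdint.h>): these subtractions
+ * wrap modulo 2^n per element, like scalar unsigned arithmetic of the
+ * same width.
+ *
+ *   __m256i a = _mm256_set1_epi32(INT32_MIN);
+ *   __m256i b = _mm256_set1_epi32(1);
+ *   __m256i d = _mm256_sub_epi32(a, b); // each lane wraps to INT32_MAX
+ */
+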
+/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors using signed saturation, and returns each difference in the
+/// corresponding byte of the 256-bit integer vector result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := SATURATE8(__a[j+7:j] - __b[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing the minuends.
+/// \param __b
+/// A 256-bit integer vector containing the subtrahends.
+/// \returns A 256-bit integer vector containing the differences.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_subs_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b);
+}
+
+/// Subtracts 16-bit integers from corresponding elements of two 256-bit
+/// vectors of [16 x i16] using signed saturation, and returns each
+/// difference in the corresponding element of the [16 x i16] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := SATURATE16(__a[j+15:j] - __b[j+15:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing the minuends.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing the subtrahends.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_subs_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors using unsigned saturation, and returns each difference in the
+/// corresponding byte of the 256-bit integer vector result. For each byte,
+/// computes <c> result = __a - __b </c>.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := SATURATE8U(__a[j+7:j] - __b[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBUSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing the minuends.
+/// \param __b
+/// A 256-bit integer vector containing the subtrahends.
+/// \returns A 256-bit integer vector containing the differences.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_subs_epu8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b);
+}
+
+/// Subtracts 16-bit integers from corresponding elements of two 256-bit
+/// vectors of [16 x i16] using unsigned saturation, and returns each
+/// difference in the corresponding element of the [16 x i16] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := SATURATE16U(__a[j+15:j] - __b[j+15:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBUSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing the minuends.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing the subtrahends.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_subs_epu16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b);
+}
+
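+/* A minimal sketch (assuming -mavx2) contrasting saturating and wrapping
+ * subtraction on unsigned bytes:
+ *
+ *   __m256i a = _mm256_set1_epi8(100);
+ *   __m256i b = _mm256_set1_epi8((char)200);
+ *   __m256i s = _mm256_subs_epu8(a, b); // each byte: 0 (clamped)
+ *   __m256i w = _mm256_sub_epi8(a, b);  // each byte: 156 (wrapped)
+ */
+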
+/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer
+/// vectors in \a __a and \a __b to form the 256-bit result. Specifically,
+/// uses the upper 64 bits of each 128-bit half of \a __a and \a __b as
+/// input; other bits in these parameters are ignored.
+///
+/// \code{.operation}
+/// result[7:0] := __a[71:64]
+/// result[15:8] := __b[71:64]
+/// result[23:16] := __a[79:72]
+/// result[31:24] := __b[79:72]
+/// . . .
+/// result[127:120] := __b[127:120]
+/// result[135:128] := __a[199:192]
+/// . . .
+/// result[255:248] := __b[255:248]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKHBW instruction.
+///
+/// \param __a
+/// A 256-bit integer vector used as the source for the even-numbered bytes
+/// of the result.
+/// \param __b
+/// A 256-bit integer vector used as the source for the odd-numbered bytes
+/// of the result.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
+}
+
+/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors
+/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit
+/// vector of [16 x i16]. Specifically, uses the upper 64 bits of each
+/// 128-bit half of \a __a and \a __b as input; other bits in these
+/// parameters are ignored.
+///
+/// \code{.operation}
+/// result[15:0] := __a[79:64]
+/// result[31:16] := __b[79:64]
+/// result[47:32] := __a[95:80]
+/// result[63:48] := __b[95:80]
+/// . . .
+/// result[127:112] := __b[127:112]
+/// result[143:128] := __a[207:192]
+/// . . .
+/// result[255:240] := __b[255:240]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKHWD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors
+/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector
+/// of [8 x i32]. Specifically, uses the upper 64 bits of each 128-bit half
+/// of \a __a and \a __b as input; other bits in these parameters are
+/// ignored.
+///
+/// \code{.operation}
+/// result[31:0] := __a[95:64]
+/// result[63:32] := __b[95:64]
+/// result[95:64] := __a[127:96]
+/// result[127:96] := __b[127:96]
+/// result[159:128] := __a[223:192]
+/// result[191:160] := __b[223:192]
+/// result[223:192] := __a[255:224]
+/// result[255:224] := __b[255:224]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKHDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
+}
+
+/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors
+/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector
+/// of [4 x i64]. Specifically, uses the upper 64 bits of each 128-bit half
+/// of \a __a and \a __b as input; other bits in these parameters are
+/// ignored.
+///
+/// \code{.operation}
+/// result[63:0] := __a[127:64]
+/// result[127:64] := __b[127:64]
+/// result[191:128] := __a[255:192]
+/// result[255:192] := __b[255:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKHQDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
+}
+
+/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer
+/// vectors in \a __a and \a __b to form the 256-bit result. Specifically,
+/// uses the lower 64 bits of each 128-bit half of \a __a and \a __b as
+/// input; other bits in these parameters are ignored.
+///
+/// \code{.operation}
+/// result[7:0] := __a[7:0]
+/// result[15:8] := __b[7:0]
+/// result[23:16] := __a[15:8]
+/// result[31:24] := __b[15:8]
+/// . . .
+/// result[127:120] := __b[63:56]
+/// result[135:128] := __a[135:128]
+/// . . .
+/// result[255:248] := __b[191:184]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKLBW instruction.
+///
+/// \param __a
+/// A 256-bit integer vector used as the source for the even-numbered bytes
+/// of the result.
+/// \param __b
+/// A 256-bit integer vector used as the source for the odd-numbered bytes
+/// of the result.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
+}
+
+/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors
+/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit
+/// vector of [16 x i16]. Specifically, uses the lower 64 bits of each
+/// 128-bit half of \a __a and \a __b as input; other bits in these
+/// parameters are ignored.
+///
+/// \code{.operation}
+/// result[15:0] := __a[15:0]
+/// result[31:16] := __b[15:0]
+/// result[47:32] := __a[31:16]
+/// result[63:48] := __b[31:16]
+/// . . .
+/// result[127:112] := __b[63:48]
+/// result[143:128] := __a[143:128]
+/// . . .
+/// result[255:240] := __b[191:176]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKLWD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
+}
+
+/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors
+/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector
+/// of [8 x i32]. Specifically, uses the lower 64 bits of each 128-bit half
+/// of \a __a and \a __b as input; other bits in these parameters are
+/// ignored.
+///
+/// \code{.operation}
+/// result[31:0] := __a[31:0]
+/// result[63:32] := __b[31:0]
+/// result[95:64] := __a[63:32]
+/// result[127:96] := __b[63:32]
+/// result[159:128] := __a[159:128]
+/// result[191:160] := __b[159:128]
+/// result[223:192] := __a[191:160]
+/// result[255:224] := __b[191:160]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKLDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
+}
+
+/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors
+/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector
+/// of [4 x i64]. Specifically, uses the lower 64 bits of each 128-bit half
+/// of \a __a and \a __b as input; other bits in these parameters are
+/// ignored.
+///
+/// \code{.operation}
+/// result[63:0] := __a[63:0]
+/// result[127:64] := __b[63:0]
+/// result[191:128] := __a[191:128]
+/// result[255:192] := __b[191:128]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKLQDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
+}
+
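+/* A minimal sketch (assuming -mavx2): unlike the 128-bit SSE unpacks,
+ * these interleave within each 128-bit lane, so the output is not a
+ * straight interleave of all elements.
+ *
+ *   __m256i a  = _mm256_set_epi64x(3, 2, 1, 0); // elements {0,1,2,3}
+ *   __m256i b  = _mm256_set_epi64x(7, 6, 5, 4); // elements {4,5,6,7}
+ *   __m256i lo = _mm256_unpacklo_epi64(a, b);   // {0,4,2,6}
+ *   __m256i hi = _mm256_unpackhi_epi64(a, b);   // {1,5,3,7}
+ */
+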
+/// Computes the bitwise XOR of the 256-bit integer vectors in \a __a and
+/// \a __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPXOR instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_xor_si256(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a ^ (__v4du)__b);
+}
+
+/// Loads the 256-bit integer vector from memory \a __V using a non-temporal
+/// memory hint and returns the vector. \a __V must be aligned on a 32-byte
+/// boundary.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVNTDQA instruction.
+///
+/// \param __V
+/// A pointer to the 32-byte aligned memory containing the vector to load.
+/// \returns A 256-bit integer vector loaded from memory.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_stream_load_si256(__m256i const *__V)
+{
+ typedef __v4di __v4di_aligned __attribute__((aligned(32)));
+ return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V);
+}
+
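+/* A minimal sketch (assuming -mavx2 and C11's aligned_alloc from
+ * <stdlib.h>): the source pointer must be 32-byte aligned, typically a
+ * buffer previously filled with non-temporal stores.
+ *
+ *   __m256i *buf = aligned_alloc(32, 64 * sizeof(__m256i));
+ *   // ... fill buf ...
+ *   __m256i v = _mm256_stream_load_si256(&buf[0]);
+ */
+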
+/// Broadcasts the 32-bit floating-point value from the low element of the
+/// 128-bit vector of [4 x float] in \a __X to all elements of the result's
+/// 128-bit vector of [4 x float].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VBROADCASTSS instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x float] whose low element will be broadcast.
+/// \returns A 128-bit vector of [4 x float] containing the result.
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_broadcastss_ps(__m128 __X)
+{
+ return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
+}
+
+/// Broadcasts the 64-bit floating-point value from the low element of the
+/// 128-bit vector of [2 x double] in \a __a to both elements of the
+/// result's 128-bit vector of [2 x double].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c MOVDDUP instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] whose low element will be broadcast.
+/// \returns A 128-bit vector of [2 x double] containing the result.
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_broadcastsd_pd(__m128d __a)
+{
+ return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
+}
+
+/// Broadcasts the 32-bit floating-point value from the low element of the
+/// 128-bit vector of [4 x float] in \a __X to all elements of the
+/// result's 256-bit vector of [8 x float].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VBROADCASTSS instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x float] whose low element will be broadcast.
+/// \returns A 256-bit vector of [8 x float] containing the result.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_broadcastss_ps(__m128 __X)
+{
+ return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/// Broadcasts the 64-bit floating-point value from the low element of the
+/// 128-bit vector of [2 x double] in \a __X to all elements of the
+/// result's 256-bit vector of [4 x double].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VBROADCASTSD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x double] whose low element will be broadcast.
+/// \returns A 256-bit vector of [4 x double] containing the result.
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_broadcastsd_pd(__m128d __X)
+{
+ return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
+}
+
+/// Broadcasts the 128-bit integer data from \a __X to both the lower and
+/// upper halves of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VBROADCASTI128 instruction.
+///
+/// \param __X
+/// A 128-bit integer vector to be broadcast.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastsi128_si256(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
+}
+
+#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
+
+/// Merges 32-bit integer elements from either of the two 128-bit vectors of
+/// [4 x i32] in \a V1 or \a V2 to the result's 128-bit vector of [4 x i32],
+/// as specified by the immediate integer operand \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// IF M[i] == 0
+/// result[31+j:j] := V1[31+j:j]
+/// ELSE
+/// result[31+j:j] := V2[31+j:j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_blend_epi32(__m128i V1, __m128i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPBLENDD instruction.
+///
+/// \param V1
+/// A 128-bit vector of [4 x i32] containing source values.
+/// \param V2
+/// A 128-bit vector of [4 x i32] containing source values.
+/// \param M
+/// An immediate 8-bit integer operand, with bits [3:0] specifying the
+/// source for each element of the result. The position of the mask bit
+/// corresponds to the index of a copied value. When a mask bit is 0, the
+/// element is copied from \a V1; otherwise, it is copied from \a V2.
+/// \returns A 128-bit vector of [4 x i32] containing the result.
+#define _mm_blend_epi32(V1, V2, M) \
+ ((__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
+ (__v4si)(__m128i)(V2), (int)(M)))
+
+/// Merges 32-bit integer elements from either of the two 256-bit vectors of
+/// [8 x i32] in \a V1 or \a V2 to return a 256-bit vector of [8 x i32],
+/// as specified by the immediate integer operand \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// IF M[i] == 0
+/// result[31+j:j] := V1[31+j:j]
+/// ELSE
+/// result[31+j:j] := V2[31+j:j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_blend_epi32(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPBLENDD instruction.
+///
+/// \param V1
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param V2
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param M
+/// An immediate 8-bit integer operand, with bits [7:0] specifying the
+/// source for each element of the result. The position of the mask bit
+/// corresponds to the index of a copied value. When a mask bit is 0, the
+/// element is copied from \a V1; otherwise, it is copied from \a V2.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+#define _mm256_blend_epi32(V1, V2, M) \
+ ((__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), (int)(M)))
+
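+/* A minimal sketch (assuming -mavx2): bit i of the immediate selects the
+ * source of result element i (0 picks V1, 1 picks V2).
+ *
+ *   __m256i x = _mm256_set1_epi32(0);
+ *   __m256i y = _mm256_set1_epi32(-1);
+ *   __m256i r = _mm256_blend_epi32(x, y, 0xF0); // low 4 lanes from x,
+ *                                               // high 4 lanes from y
+ */
+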
+/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all
+/// bytes of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTB instruction.
+///
+/// \param __X
+/// A 128-bit integer vector whose low byte will be broadcast.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastb_epi8(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/// Broadcasts the low element from the 128-bit vector of [8 x i16] in \a __X
+/// to all elements of the result's 256-bit vector of [16 x i16].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTW instruction.
+///
+/// \param __X
+/// A 128-bit vector of [8 x i16] whose low element will be broadcast.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastw_epi16(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X
+/// to all elements of the result's 256-bit vector of [8 x i32].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] whose low element will be broadcast.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastd_epi32(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X
+/// to all elements of the result's 256-bit vector of [4 x i64].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] whose low element will be broadcast.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastq_epi64(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
+}
+
+/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all
+/// bytes of the 128-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTB instruction.
+///
+/// \param __X
+/// A 128-bit integer vector whose low byte will be broadcast.
+/// \returns A 128-bit integer vector containing the result.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_broadcastb_epi8(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/// Broadcasts the low element from the 128-bit vector of [8 x i16] in
+/// \a __X to all elements of the result's 128-bit vector of [8 x i16].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTW instruction.
+///
+/// \param __X
+/// A 128-bit vector of [8 x i16] whose low element will be broadcast.
+/// \returns A 128-bit vector of [8 x i16] containing the result.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_broadcastw_epi16(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X
+/// to all elements of the result's vector of [4 x i32].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] whose low element will be broadcast.
+/// \returns A 128-bit vector of [4 x i32] containing the result.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_broadcastd_epi32(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
+}
+
+/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X
+/// to both elements of the result's 128-bit vector of [2 x i64].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] whose low element will be broadcast.
+/// \returns A 128-bit vector of [2 x i64] containing the result.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_broadcastq_epi64(__m128i __X)
+{
+ return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
+}
+
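+/* A minimal sketch (assuming -mavx2): broadcasting a scalar by first
+ * moving it into the low lane of a 128-bit vector.
+ *
+ *   __m128i s = _mm_cvtsi32_si128(42);
+ *   __m256i v = _mm256_broadcastd_epi32(s); // all 8 lanes: 42
+ */
+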
+/// Sets the result's 256-bit vector of [8 x i32] to copies of elements of the
+/// 256-bit vector of [8 x i32] in \a __a as specified by indexes in the
+/// elements of the 256-bit vector of [8 x i32] in \a __b.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// k := __b[j+2:j] * 32
+/// result[j+31:j] := __a[k+31:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing the source values.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing indexes of values to use from
+/// \a __a.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
+}
+
+/// Sets the result's 256-bit vector of [4 x double] to copies of elements of
+/// the 256-bit vector of [4 x double] in \a V as specified by the
+/// immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// k := (M >> i*2)[1:0] * 64
+/// result[j+63:j] := V[k+63:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_permute4x64_pd(__m256d V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMPD instruction.
+///
+/// \param V
+/// A 256-bit vector of [4 x double] containing the source values.
+/// \param M
+/// An immediate 8-bit value specifying which elements to copy from \a V.
+/// \a M[1:0] specifies the index in \a V for element 0 of the result,
+/// \a M[3:2] specifies the index for element 1, and so forth.
+/// \returns A 256-bit vector of [4 x double] containing the result.
+#define _mm256_permute4x64_pd(V, M) \
+ ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M)))
+
+/// Sets the result's 256-bit vector of [8 x float] to copies of elements of
+/// the 256-bit vector of [8 x float] in \a __a as specified by indexes in
+/// the elements of the 256-bit vector of [8 x i32] in \a __b.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// k := __b[j+2:j] * 32
+/// result[j+31:j] := __a[k+31:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the source values.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing indexes of values to use from
+/// \a __a.
+/// \returns A 256-bit vector of [8 x float] containing the result.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
+{
+ return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
+}
+
+/// Sets the result's 256-bit vector of [4 x i64] to copies of elements
+/// of the 256-bit vector of [4 x i64] in \a V as specified by the
+/// immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// k := (M >> i*2)[1:0] * 64
+/// result[j+63:j] := V[k+63:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_permute4x64_epi64(__m256i V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMQ instruction.
+///
+/// \param V
+/// A 256-bit vector of [4 x i64] containing the source values.
+/// \param M
+/// An immediate 8-bit value specifying which elements to copy from \a V.
+/// \a M[1:0] specifies the index in \a V for element 0 of the result,
+/// \a M[3:2] specifies the index for element 1, and so forth.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+#define _mm256_permute4x64_epi64(V, M) \
+ ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M)))
+
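+/* A minimal sketch (assuming -mavx2): M = 0x1B (0b00011011) reverses the
+ * four 64-bit elements, since fields [1:0]..[7:6] select indices 3,2,1,0
+ * for result elements 0..3.
+ *
+ *   __m256i v = _mm256_set_epi64x(3, 2, 1, 0);     // {0,1,2,3}
+ *   __m256i r = _mm256_permute4x64_epi64(v, 0x1B); // {3,2,1,0}
+ */
+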
+/// Sets each half of the 256-bit result either to zero or to one of the
+/// four possible 128-bit halves of the 256-bit vectors \a V1 and \a V2,
+/// as specified by the immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// k := M >> (i*4)
+/// IF k[3] == 0
+/// CASE (k[1:0]) OF
+/// 0: result[127+j:j] := V1[127:0]
+/// 1: result[127+j:j] := V1[255:128]
+/// 2: result[127+j:j] := V2[127:0]
+/// 3: result[127+j:j] := V2[255:128]
+/// ESAC
+/// ELSE
+/// result[127+j:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_permute2x128_si256(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERM2I128 instruction.
+///
+/// \param V1
+/// A 256-bit integer vector containing source values.
+/// \param V2
+/// A 256-bit integer vector containing source values.
+/// \param M
+/// An immediate value specifying how to form the result. Bits [3:0]
+/// control the lower half of the result and bits [7:4] control the upper
+/// half. Within each 4-bit control value, if bit 3 is 1, that half of the
+/// result is zero; otherwise, bits [1:0] select the source as follows. \n
+/// 0: the lower half of \a V1 \n
+/// 1: the upper half of \a V1 \n
+/// 2: the lower half of \a V2 \n
+/// 3: the upper half of \a V2
+/// \returns A 256-bit integer vector containing the result.
+#define _mm256_permute2x128_si256(V1, V2, M) \
+ ((__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M)))
+
+/// Extracts half of the 256-bit vector \a V to the 128-bit result. If bit 0
+/// of the immediate \a M is zero, extracts the lower half of \a V;
+/// otherwise, extracts the upper half.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm256_extracti128_si256(__m256i V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VEXTRACTI128 instruction.
+///
+/// \param V
+/// A 256-bit integer vector containing the source values.
+/// \param M
+/// An immediate value specifying which half of \a V to extract.
+/// \returns A 128-bit integer vector containing the result.
+#define _mm256_extracti128_si256(V, M) \
+ ((__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M)))
+
+/// Copies the 256-bit vector \a V1 to the result, then overwrites half of the
+/// result with the 128-bit vector \a V2. If bit 0 of the immediate \a M
+/// is zero, overwrites the lower half of the result; otherwise,
+/// overwrites the upper half.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_inserti128_si256(__m256i V1, __m128i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VINSERTI128 instruction.
+///
+/// \param V1
+/// A 256-bit integer vector containing a source value.
+/// \param V2
+/// A 128-bit integer vector containing a source value.
+/// \param M
+/// An immediate value specifying where to put \a V2 in the result.
+/// \returns A 256-bit integer vector containing the result.
+#define _mm256_inserti128_si256(V1, V2, M) \
+ ((__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \
+ (__v2di)(__m128i)(V2), (int)(M)))
+
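+/* A minimal sketch (assuming -mavx2) that swaps the two 128-bit halves
+ * with extract and insert (a single _mm256_permute2x128_si256 with
+ * M = 0x01 would do the same):
+ *
+ *   __m256i v  = _mm256_set_epi64x(3, 2, 1, 0);  // {0,1,2,3}
+ *   __m128i lo = _mm256_extracti128_si256(v, 0); // {0,1}
+ *   __m128i hi = _mm256_extracti128_si256(v, 1); // {2,3}
+ *   __m256i r  = _mm256_inserti128_si256(        // {2,3,0,1}
+ *                  _mm256_inserti128_si256(v, hi, 0), lo, 1);
+ */
+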
+/// Conditionally loads eight 32-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 256-bit [8 x i32] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// IF __M[j+31] == 1
+/// result[j+31:j] := Load32(__X+(i*4))
+/// ELSE
+/// result[j+31:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 256-bit vector of [8 x i32] containing the mask bits.
+/// \returns A 256-bit vector of [8 x i32] containing the loaded or zeroed
+/// elements.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskload_epi32(int const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
+}
+
+/// Conditionally loads four 64-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 256-bit [4 x i64] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// IF __M[j+63] == 1
+/// result[j+63:j] := Load64(__X+(i*8))
+/// ELSE
+/// result[j+63:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 256-bit vector of [4 x i64] containing the mask bits.
+/// \returns A 256-bit vector of [4 x i64] containing the loaded or zeroed
+/// elements.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskload_epi64(long long const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);
+}
+
+/// Conditionally loads four 32-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 128-bit [4 x i32] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// IF __M[j+31] == 1
+/// result[j+31:j] := Load32(__X+(i*4))
+/// ELSE
+/// result[j+31:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 128-bit vector of [4 x i32] containing the mask bits.
+/// \returns A 128-bit vector of [4 x i32] containing the loaded or zeroed
+/// elements.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskload_epi32(int const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
+}
+
+/// Conditionally loads two 64-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 128-bit [2 x i64] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*64
+/// IF __M[j+63] == 1
+/// result[j+63:j] := Load64(__X+(i*8))
+/// ELSE
+/// result[j+63:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 128-bit vector of [2 x i64] containing the mask bits.
+/// \returns A 128-bit vector of [2 x i64] containing the loaded or zeroed
+/// elements.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskload_epi64(long long const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
+}
+
+/// Conditionally stores eight 32-bit integer elements from the 256-bit vector
+/// of [8 x i32] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// IF __M[j+31] == 1
+/// Store32(__X+(i*4), __Y[j+31:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 256-bit vector of [8 x i32] containing the mask bits.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the values to store.
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
+}
+
+/// Conditionally stores four 64-bit integer elements from the 256-bit vector
+/// of [4 x i64] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// IF __M[j+63] == 1
+/// Store64(__X+(i*8), __Y[j+63:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 256-bit vector of [4 x i64] containing the mask bits.
+/// \param __Y
+/// A 256-bit vector of [4 x i64] containing the values to store.
+static __inline__ void __DEFAULT_FN_ATTRS256
+_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);
+}
+
+/// Conditionally stores four 32-bit integer elements from the 128-bit vector
+/// of [4 x i32] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// IF __M[j+31] == 1
+/// Store32(__X+(i*4), __Y[j+31:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 128-bit vector of [4 x i32] containing the mask bits.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the values to store.
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
+}
+
+/// Conditionally stores two 64-bit integer elements from the 128-bit vector
+/// of [2 x i64] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*64
+/// IF __M[j+63] == 1
+/// Store64(__X+(i*8), __Y[j+63:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 128-bit vector of [2 x i64] containing the mask bits.
+/// \param __Y
+/// A 128-bit vector of [2 x i64] containing the values to store.
+static __inline__ void __DEFAULT_FN_ATTRS128
+_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
+{
+  __builtin_ia32_maskstoreq((__v2di *)__X, (__v2di)__M, (__v2di)__Y);
+}
+
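+/* A minimal sketch (assuming -mavx2): mask moves are a common way to
+ * process an array tail shorter than a full vector without touching
+ * memory past the end; only lanes whose mask MSB is set are accessed.
+ *
+ *   int tail[3] = { 1, 2, 3 };
+ *   __m128i m = _mm_set_epi32(0, -1, -1, -1); // low 3 lanes active
+ *   __m128i v = _mm_maskload_epi32(tail, m);  // {1,2,3,0}
+ *   _mm_maskstore_epi32(tail, m, v);          // writes 3 ints only
+ */
+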
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVD instruction.
+///
+/// \param __X
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sllv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
+}
+
+/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [4 x i32] containing the result.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_sllv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
+}
+
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 256-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVQ instruction.
+///
+/// \param __X
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sllv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
+}
+
+/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [2 x i64] containing the result.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_sllv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
+}
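+
+/* Usage sketch (illustrative, assuming an AVX2 target): per-lane variable
+   shifts replace a scalar loop, and any count above the lane width yields
+   zero:
+
+     __m256i one = _mm256_set1_epi32(1);
+     __m256i cnt = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 31, 32);
+     __m256i r = _mm256_sllv_epi32(one, cnt);
+     // r = { 1, 2, 4, 8, 16, 32, 1 << 31, 0 }
+*/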
+
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __Y, shifting in sign bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is 0 or -1 according to the sign bit
+/// for that element.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAVD instruction.
+///
+/// \param __X
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srav_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
+}
+
+/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 128-bit vector of [4 x i32] in \a __Y, shifting in sign bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is 0 or -1 according to the sign bit
+/// for that element.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAVD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [4 x i32] containing the result.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_srav_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
+}
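+
+/* Usage sketch (illustrative, assuming an AVX2 target): arithmetic shifts
+   replicate the sign bit, and an oversized count saturates to all sign
+   bits (0 or -1):
+
+     __m128i x = _mm_setr_epi32(-16, 16, -1, 65536);
+     __m128i n = _mm_setr_epi32(2, 2, 40, 16);
+     __m128i r = _mm_srav_epi32(x, n);
+     // r = { -4, 4, -1, 1 }; the count of 40 leaves -1 unchanged
+*/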
+
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVD instruction.
+///
+/// \param __X
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srlv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
+}
+
+/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [4 x i32] containing the result.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_srlv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
+}
+
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 256-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVQ instruction.
+///
+/// \param __X
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_srlv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
+}
+
+/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [2 x i64] containing the result.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_srlv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
+}
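+
+/* Usage sketch (illustrative, assuming an AVX2 target): logical right
+   shifts fill with zeros, and counts above 63 zero the whole lane:
+
+     __m128i x = _mm_set1_epi64x(-1);      // all bits set in both lanes
+     __m128i n = _mm_set_epi64x(64, 4);    // counts: lane 1 = 64, lane 0 = 4
+     __m128i r = _mm_srlv_epi64(x, n);
+     // r = { 0x0FFFFFFFFFFFFFFF, 0 }
+*/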
+
+/// Conditionally gathers two 64-bit floating-point values, either from the
+/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [2 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_mask_i32gather_pd(__m128d a, const double *m, __m128i i,
+/// __m128d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param mask
+/// A 128-bit vector of [2 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
+#define _mm_mask_i32gather_pd(a, m, i, mask, s) \
+  ((__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128d)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)))
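+
+/* Usage sketch (illustrative, assuming an AVX2 target): gather table[3]
+   into element 0 while element 1 falls back to the source vector; the
+   scale of 8 is sizeof(double):
+
+     double table[4] = { 10.0, 11.0, 12.0, 13.0 };
+     __m128i idx  = _mm_setr_epi32(3, 1, 0, 0);   // only first two used
+     __m128d src  = _mm_set1_pd(-1.0);
+     __m128d mask = _mm_castsi128_pd(_mm_set_epi64x(0, -1));
+     __m128d r = _mm_mask_i32gather_pd(src, table, idx, mask, 8);
+     // r = { 13.0, -1.0 }
+*/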
+
+/// Conditionally gathers four 64-bit floating-point values, either from the
+/// 256-bit vector of [4 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector
+/// of [4 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_mask_i32gather_pd(__m256d a, const double *m, __m128i i,
+/// __m256d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
+#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \
+ ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)(__m256d)(mask), (s)))
+
+/// Conditionally gathers two 64-bit floating-point values, either from the
+/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [2 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_mask_i64gather_pd(__m128d a, const double *m, __m128i i,
+/// __m128d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [2 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
+#define _mm_mask_i64gather_pd(a, m, i, mask, s) \
+ ((__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)))
+
+/// Conditionally gathers four 64-bit floating-point values, either from the
+/// 256-bit vector of [4 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector
+/// of [4 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_mask_i64gather_pd(__m256d a, const double *m, __m256i i,
+/// __m256d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
+#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \
+ ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)(__m256d)(mask), (s)))
+
+/// Conditionally gathers four 32-bit floating-point values, either from the
+/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [4 x float] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_mask_i32gather_ps(__m128 a, const float *m, __m128i i,
+/// __m128 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
+#define _mm_mask_i32gather_ps(a, m, i, mask, s) \
+ ((__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)))
+
+/// Conditionally gathers eight 32-bit floating-point values, either from the
+/// 256-bit vector of [8 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector
+/// of [8 x float] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256 _mm256_mask_i32gather_ps(__m256 a, const float *m, __m256i i,
+/// __m256 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x float] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [8 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x float] containing the gathered values.
+#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \
+ ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)(__m256)(mask), (s)))
+
+/// Conditionally gathers two 32-bit floating-point values, either from the
+/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [4 x float] in \a mask determines the source for the lower two
+/// elements. The upper two elements of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_mask_i64gather_ps(__m128 a, const float *m, __m128i i,
+/// __m128 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float] used as the source when a mask bit is
+/// zero. Only the first two elements are used.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory. Only the first
+/// two elements are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
+#define _mm_mask_i64gather_ps(a, m, i, mask, s) \
+ ((__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)))
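+
+/* Usage sketch (illustrative, assuming an AVX2 target): only the lower
+   two float lanes participate; the upper two lanes of the result are
+   zeroed:
+
+     float table[4] = { 0.5f, 1.5f, 2.5f, 3.5f };
+     __m128i idx  = _mm_set_epi64x(1, 2);        // lane 0 = 2, lane 1 = 1
+     __m128  src  = _mm_set1_ps(9.0f);
+     __m128  mask = _mm_castsi128_ps(_mm_set_epi32(0, 0, 0, -1));
+     __m128  r = _mm_mask_i64gather_ps(src, table, idx, mask, 4);
+     // r = { 2.5f, 9.0f, 0.0f, 0.0f }
+*/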
+
+/// Conditionally gathers four 32-bit floating-point values, either from the
+/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector
+/// of [4 x float] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm256_mask_i64gather_ps(__m128 a, const float *m, __m256i i,
+/// __m128 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
+#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \
+ ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)(__m128)(mask), (s)))
+
+/// Conditionally gathers four 32-bit integer values, either from the
+/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [4 x i32] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i32gather_epi32(__m128i a, const int *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
+#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \
+ ((__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)))
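+
+/* Usage sketch (illustrative, assuming an AVX2 target): a masked table
+   lookup where lanes with a zero mask keep the source value:
+
+     int table[8] = { 0, 10, 20, 30, 40, 50, 60, 70 };
+     __m128i idx  = _mm_setr_epi32(7, 5, 3, 1);
+     __m128i src  = _mm_set1_epi32(-1);
+     __m128i mask = _mm_setr_epi32(-1, -1, 0, 0);
+     __m128i r = _mm_mask_i32gather_epi32(src, table, idx, mask, 4);
+     // r = { 70, 50, -1, -1 }
+*/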
+
+/// Conditionally gathers eight 32-bit integer values, either from the
+/// 256-bit vector of [8 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector
+/// of [8 x i32] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mask_i32gather_epi32(__m256i a, const int *m, __m256i i,
+/// __m256i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x i32] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [8 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x i32] containing the gathered values.
+#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \
+ ((__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
+ (int const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8si)(__m256i)(mask), (s)))
+
+/// Conditionally gathers two 32-bit integer values, either from the
+/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [4 x i32] in \a mask determines the source for the lower two
+/// elements. The upper two elements of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i64gather_epi32(__m128i a, const int *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
+/// zero. Only the first two elements are used.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+///    A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory. Only the first two elements
+/// are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
+#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \
+ ((__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)))
+
+/// Conditionally gathers four 32-bit integer values, either from the
+/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector
+/// of [4 x i32] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm256_mask_i64gather_epi32(__m128i a, const int *m, __m256i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
+#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \
+ ((__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4si)(__m128i)(mask), (s)))
+
+/// Conditionally gathers two 64-bit integer values, either from the
+/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [2 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i32gather_epi64(__m128i a, const long long *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param mask
+/// A 128-bit vector of [2 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
+#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \
+ ((__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)))
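+
+/* Usage sketch (illustrative, assuming an AVX2 target): 32-bit indexes
+   select 64-bit elements; the scale of 8 is sizeof(long long):
+
+     long long table[4] = { 100, 200, 300, 400 };
+     __m128i idx  = _mm_setr_epi32(2, 0, 0, 0);   // only first two used
+     __m128i src  = _mm_set1_epi64x(-5);
+     __m128i mask = _mm_set_epi64x(-1, 0);        // element 1 gathers
+     __m128i r = _mm_mask_i32gather_epi64(src, table, idx, mask, 8);
+     // r = { -5, 100 }  (element 0 from src, element 1 = table[0])
+*/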
+
+/// Conditionally gathers four 64-bit integer values, either from the
+/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector
+/// of [4 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mask_i32gather_epi64(__m256i a, const long long *m,
+/// __m128i i, __m256i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
+#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \
+ ((__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)(__m256i)(mask), (s)))
+
+/// Conditionally gathers two 64-bit integer values, either from the
+/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [2 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i64gather_epi64(__m128i a, const long long *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [2 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
+#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \
+ ((__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)))
+
+/// Conditionally gathers four 64-bit integer values, either from the
+/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector
+/// of [4 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mask_i64gather_epi64(__m256i a, const long long *m,
+/// __m256i i, __m256i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
+#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \
+ ((__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)(__m256i)(mask), (s)))
+
+/// Gathers two 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_i32gather_pd(const double *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
+#define _mm_i32gather_pd(m, i, s) \
+ ((__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)))
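+
+/* Usage sketch (illustrative, assuming an AVX2 target): the unmasked form
+   gathers every element; as the expansion above shows, it simply passes
+   an all-ones mask to the masked builtin:
+
+     double table[8] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 };
+     __m128i idx = _mm_setr_epi32(6, 2, 0, 0);    // only first two used
+     __m128d r = _mm_i32gather_pd(table, idx, 8);
+     // r = { 6.0, 2.0 }
+*/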
+
+/// Gathers four 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_i32gather_pd(const double *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
+#define _mm256_i32gather_pd(m, i, s) \
+ ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)))
+
+/// Gathers two 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_i64gather_pd(const double *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
+#define _mm_i64gather_pd(m, i, s) \
+ ((__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)))
+
+/// Gathers four 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_i64gather_pd(const double *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
+#define _mm256_i64gather_pd(m, i, s) \
+ ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)))
+
+/// Gathers four 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_i32gather_ps(const float *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
+#define _mm_i32gather_ps(m, i, s) \
+ ((__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)))
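+
+/* Usage sketch (illustrative, assuming an AVX2 target): with a scale of 4
+   (sizeof(float)), each index selects one array element:
+
+     float table[16];
+     for (int k = 0; k < 16; ++k) table[k] = (float)k;
+     __m128i idx = _mm_setr_epi32(12, 8, 4, 0);
+     __m128 r = _mm_i32gather_ps(table, idx, 4);
+     // r = { 12.0f, 8.0f, 4.0f, 0.0f }
+*/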
+
+/// Gathers eight 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256 _mm256_i32gather_ps(const float *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x float] containing the gathered values.
+#define _mm256_i32gather_ps(m, i, s) \
+ ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \
+ _mm256_setzero_ps(), \
+ _CMP_EQ_OQ), \
+ (s)))
+
+/// Gathers two 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The upper two
+/// elements of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_i64gather_ps(const float *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
+#define _mm_i64gather_ps(m, i, s) \
+ ((__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)))
+
+/// Gathers four 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+///   result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm256_i64gather_ps(const float *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
+#define _mm256_i64gather_ps(m, i, s) \
+ ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)))
+
+/// Gathers four 32-bit integer values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i32gather_epi32(const int *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
+#define _mm_i32gather_epi32(m, i, s) \
+ ((__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4si)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)))
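+
+/* Usage sketch (illustrative, assuming an AVX2 target): a vectorized table
+   lookup; lut is an application-defined array filled elsewhere:
+
+     int lut[256];
+     __m128i idx = _mm_setr_epi32(3, 1, 4, 1);
+     __m128i r = _mm_i32gather_epi32(lut, idx, 4);
+     // r = { lut[3], lut[1], lut[4], lut[1] }
+*/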
+
+/// Gathers eight 32-bit integer values from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_i32gather_epi32(const int *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x i32] containing the gathered values.
+#define _mm256_i32gather_epi32(m, i, s) \
+ ((__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
+ (int const *)(m), (__v8si)(__m256i)(i), \
+ (__v8si)_mm256_set1_epi32(-1), (s)))
+
+/// Gathers two 32-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [2 x i64] in \a i. The upper two elements
+/// of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i64gather_epi32(const int *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
+#define _mm_i64gather_epi32(m, i, s) \
+ ((__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v2di)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)))
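+
+/* Usage sketch (illustrative, assuming an AVX2 target): two 64-bit indexes
+   produce two 32-bit results; the upper half of the vector is zeroed:
+
+     int table[4] = { 11, 22, 33, 44 };
+     __m128i idx = _mm_set_epi64x(3, 1);          // lane 0 = 1, lane 1 = 3
+     __m128i r = _mm_i64gather_epi32(table, idx, 4);
+     // r = { 22, 44, 0, 0 }
+*/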
+
+/// Gathers four 32-bit integer values from memory \a m using scaled indexes
+/// from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm256_i64gather_epi32(const int *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
+#define _mm256_i64gather_epi32(m, i, s) \
+ ((__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4di)(__m256i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)))
+
+/// Gathers two 64-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i32gather_epi64(const long long *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
+#define _mm_i32gather_epi64(m, i, s) \
+ ((__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)))
+
+/// Gathers four 64-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_i32gather_epi64(const long long *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
+#define _mm256_i32gather_epi64(m, i, s) \
+ ((__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)))
+
+/// Gathers two 64-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [2 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i64gather_epi64(const long long *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
+#define _mm_i64gather_epi64(m, i, s) \
+ ((__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)))
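+
+/* Usage sketch (illustrative, assuming an AVX2 target): 64-bit indexes
+   with a scale of 8 select 64-bit elements:
+
+     long long table[4] = { 5, 6, 7, 8 };
+     __m128i idx = _mm_set_epi64x(0, 3);          // lane 0 = 3, lane 1 = 0
+     __m128i r = _mm_i64gather_epi64(table, idx, 8);
+     // r = { 8, 5 }
+*/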
+
+/// Gathers four 64-bit integer values from memory \a m using scaled indexes
+/// from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_i64gather_epi64(const long long *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
+#define _mm256_i64gather_epi64(m, i, s) \
+ ((__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)))
+
+#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS128
+
+#endif /* __AVX2INTRIN_H */
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512bf16intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512bf16intrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512bf16intrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512bf16intrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512bitalgintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512bitalgintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512bitalgintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512bitalgintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512bwintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512bwintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512bwintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512bwintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512cdintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512cdintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512cdintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512cdintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512dqintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512dqintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512dqintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512dqintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512erintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512erintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512erintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512erintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512fintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512fintrin.h
similarity index 99%
rename from third_party/clang/lib/clang/16.0.0/include/avx512fintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512fintrin.h
index b19d2fb90f..88a8cebbee 100644
--- a/third_party/clang/lib/clang/16.0.0/include/avx512fintrin.h
+++ b/third_party/clang/lib/clang/17.0.1/include/avx512fintrin.h
@@ -397,14 +397,15 @@ _mm512_broadcastsd_pd(__m128d __A)
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd256_pd512(__m256d __a)
{
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
+ 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_castps256_ps512(__m256 __a)
{
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
- -1, -1, -1, -1, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline __m128d __DEFAULT_FN_ATTRS512
@@ -446,7 +447,10 @@ _mm512_castpd_si512 (__m512d __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd128_pd512 (__m128d __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+ __m256d __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512d __DEFAULT_FN_ATTRS512
@@ -464,19 +468,25 @@ _mm512_castps_si512 (__m512 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_castps128_ps512 (__m128 __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __m256 __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi128_si512 (__m128i __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+ __m256i __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi256_si512 (__m256i __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
+ return __builtin_shufflevector( __A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512 __DEFAULT_FN_ATTRS512
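These widening casts leave the upper lanes unspecified on purpose; replacing the -1 (undef) shuffle indices with __builtin_nondeterministic_value gives the lanes a frozen, arbitrary-but-consistent value instead of undef. A caller-side sketch of the distinction, using the zero-extending companions defined in this same header:

#include <immintrin.h>

/* Upper 256 bits are unspecified after a plain cast: */
static __m512i widen_fast(__m256i lo) {
  return _mm512_castsi256_si512(lo);
}

/* Zero-extend instead when the upper lanes must be well defined: */
static __m512i widen_zeroed(__m256i lo) {
  return _mm512_zextsi256_si512(lo);
}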
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512fp16intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512fp16intrin.h
similarity index 99%
rename from third_party/clang/lib/clang/16.0.0/include/avx512fp16intrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512fp16intrin.h
index 5cdc37fde6..d326586578 100644
--- a/third_party/clang/lib/clang/16.0.0/include/avx512fp16intrin.h
+++ b/third_party/clang/lib/clang/17.0.1/include/avx512fp16intrin.h
@@ -192,22 +192,26 @@ _mm512_castph512_ph256(__m512h __a) {
static __inline__ __m256h __DEFAULT_FN_ATTRS256
_mm256_castph128_ph256(__m128h __a) {
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1,
- -1, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a),
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512
_mm512_castph128_ph512(__m128h __a) {
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __m256h __b = __builtin_nondeterministic_value(__b);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a),
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
+ __b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
}
static __inline__ __m512h __DEFAULT_FN_ATTRS512
_mm512_castph256_ph512(__m256h __a) {
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31);
}
/// Constructs a 256-bit floating-point vector of [16 x half] from a
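The FP16 casts follow the same pattern; the self-referencing `__b = __builtin_nondeterministic_value(__b)` is deliberate, since the builtin only consults its operand's type to produce a fresh unspecified value. A sketch, assuming the zero-extending variant this header also provides:

#include <immintrin.h>

/* Bits [255:128] unspecified vs. explicitly zeroed (needs AVX512-FP16): */
static __m256h widen_ph(__m128h lo) {
  return _mm256_castph128_ph256(lo);
}
static __m256h widen_ph_zeroed(__m128h lo) {
  return _mm256_zextph128_ph256(lo);
}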
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512ifmaintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512ifmaintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512ifmaintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512ifmaintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512ifmavlintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512ifmavlintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512ifmavlintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512ifmavlintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512pfintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512pfintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512pfintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512pfintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vbmi2intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vbmi2intrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vbmi2intrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vbmi2intrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vbmiintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vbmiintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vbmiintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vbmiintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vbmivlintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vbmivlintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vbmivlintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vbmivlintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlbf16intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlbf16intrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vlbf16intrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vlbf16intrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlbitalgintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlbitalgintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vlbitalgintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vlbitalgintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlbwintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlbwintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vlbwintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vlbwintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlcdintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlcdintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vlcdintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vlcdintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vldqintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vldqintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vldqintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vldqintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlfp16intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlfp16intrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vlfp16intrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vlfp16intrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vlintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vlintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlvbmi2intrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlvbmi2intrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vlvbmi2intrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vlvbmi2intrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlvnniintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlvnniintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vlvnniintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vlvnniintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vlvp2intersectintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vlvp2intersectintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vlvp2intersectintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vlvp2intersectintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vnniintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vnniintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vnniintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vnniintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vp2intersectintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vp2intersectintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vp2intersectintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vp2intersectintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vpopcntdqintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vpopcntdqintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vpopcntdqintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vpopcntdqintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avx512vpopcntdqvlintrin.h b/third_party/clang/lib/clang/17.0.1/include/avx512vpopcntdqvlintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avx512vpopcntdqvlintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avx512vpopcntdqvlintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avxifmaintrin.h b/third_party/clang/lib/clang/17.0.1/include/avxifmaintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avxifmaintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avxifmaintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avxintrin.h b/third_party/clang/lib/clang/17.0.1/include/avxintrin.h
similarity index 99%
rename from third_party/clang/lib/clang/16.0.0/include/avxintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avxintrin.h
index ee31569c16..94fac5e6c9 100644
--- a/third_party/clang/lib/clang/16.0.0/include/avxintrin.h
+++ b/third_party/clang/lib/clang/17.0.1/include/avxintrin.h
@@ -3017,8 +3017,11 @@ _mm256_zeroupper(void)
static __inline __m128 __DEFAULT_FN_ATTRS128
_mm_broadcast_ss(float const *__a)
{
- float __f = *__a;
- return __extension__ (__m128)(__v4sf){ __f, __f, __f, __f };
+ struct __mm_broadcast_ss_struct {
+ float __f;
+ } __attribute__((__packed__, __may_alias__));
+ float __f = ((const struct __mm_broadcast_ss_struct*)__a)->__f;
+ return __extension__ (__m128){ __f, __f, __f, __f };
}
/// Loads a scalar double-precision floating point value from the
@@ -3036,7 +3039,10 @@ _mm_broadcast_ss(float const *__a)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_sd(double const *__a)
{
- double __d = *__a;
+ struct __mm256_broadcast_sd_struct {
+ double __d;
+ } __attribute__((__packed__, __may_alias__));
+ double __d = ((const struct __mm256_broadcast_sd_struct*)__a)->__d;
return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d };
}
@@ -3055,7 +3061,10 @@ _mm256_broadcast_sd(double const *__a)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ss(float const *__a)
{
- float __f = *__a;
+ struct __mm256_broadcast_ss_struct {
+ float __f;
+ } __attribute__((__packed__, __may_alias__));
+ float __f = ((const struct __mm256_broadcast_ss_struct*)__a)->__f;
return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
}
@@ -4499,7 +4508,8 @@ _mm256_castsi256_si128(__m256i __a)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castpd128_pd256(__m128d __a)
{
- return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);
+ return __builtin_shufflevector(
+ (__v2df)__a, (__v2df)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);
}
/// Constructs a 256-bit floating-point vector of [8 x float] from a
@@ -4520,7 +4530,9 @@ _mm256_castpd128_pd256(__m128d __a)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castps128_ps256(__m128 __a)
{
- return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1);
+ return __builtin_shufflevector((__v4sf)__a,
+ (__v4sf)__builtin_nondeterministic_value(__a),
+ 0, 1, 2, 3, 4, 5, 6, 7);
}
/// Constructs a 256-bit integer vector from a 128-bit integer vector.
@@ -4539,7 +4551,8 @@ _mm256_castps128_ps256(__m128 __a)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castsi128_si256(__m128i __a)
{
- return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1);
+ return __builtin_shufflevector(
+ (__v2di)__a, (__v2di)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);
}
/// Constructs a 256-bit floating-point vector of [4 x double] from a
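Loading the scalar through a packed, may_alias single-member struct tells the compiler the pointer carries no alignment guarantee and may alias other types, so the broadcast intrinsics can safely be fed from raw byte buffers. A usage sketch:

#include <immintrin.h>
#include <stddef.h>

/* Broadcast a float stored at an arbitrary byte offset; the intrinsic's
 * packed-struct load makes the unaligned access well defined. */
static __m256 splat_at(const unsigned char *buf, size_t off) {
  return _mm256_broadcast_ss((const float *)(buf + off));
}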
diff --git a/third_party/clang/lib/clang/16.0.0/include/avxneconvertintrin.h b/third_party/clang/lib/clang/17.0.1/include/avxneconvertintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avxneconvertintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avxneconvertintrin.h
diff --git a/third_party/clang/lib/clang/17.0.1/include/avxvnniint16intrin.h b/third_party/clang/lib/clang/17.0.1/include/avxvnniint16intrin.h
new file mode 100644
index 0000000000..e4d342a8b4
--- /dev/null
+++ b/third_party/clang/lib/clang/17.0.1/include/avxvnniint16intrin.h
@@ -0,0 +1,473 @@
+/*===----------- avxvnniint16intrin.h - AVXVNNIINT16 intrinsics-------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error \
+ "Never use directly; include instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __AVXVNNIINT16INTRIN_H
+#define __AVXVNNIINT16INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \
+ __min_vector_width__(256)))
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwsud_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUD instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsud_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwsud128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
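A usage sketch for the intrinsic above, accumulating signed-by-unsigned 16-bit products into four 32-bit lanes (assumes a target compiled with avxvnniint16 support):

#include <immintrin.h>

static __m128i dot_step(__m128i acc, __m128i s16, __m128i u16) {
  /* acc[j] += s16[2j] * u16[2j] + s16[2j+1] * u16[2j+1], j = 0..3 */
  return _mm_dpwsud_epi32(acc, s16, u16);
}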
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUD instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwsud256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwsuds_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsuds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwsuds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwsuds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwusd_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUSD instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusd_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwusd128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUSD instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwusd256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwusds_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUSDS instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwusds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwusds_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUSDS instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwusds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwusds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// unsigned 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwuud_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUUD instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x unsigned int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuud_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwuud128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// unsigned 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUUD instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x unsigned int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwuud256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// unsigned 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with unsigned saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwuuds_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUUDS instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x unsigned int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuuds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwuuds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// unsigned 32-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with unsigned saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUUDS instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x unsigned int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwuuds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif // __AVXVNNIINT16INTRIN_H
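For testing these intrinsics, a scalar reference of one 32-bit lane of the non-saturating signed-by-unsigned form, following the pseudocode above (a sketch; the saturating forms clamp instead of wrapping):

#include <stdint.h>

static int32_t dpwsud_lane(int32_t w, const int16_t s[2], const uint16_t u[2]) {
  /* Widen before multiplying, then wrap to 32 bits like VPDPWSUD. */
  int64_t t = (int64_t)s[0] * u[0] + (int64_t)s[1] * u[1] + w;
  return (int32_t)t;
}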
diff --git a/third_party/clang/lib/clang/16.0.0/include/avxvnniint8intrin.h b/third_party/clang/lib/clang/17.0.1/include/avxvnniint8intrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avxvnniint8intrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avxvnniint8intrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/avxvnniintrin.h b/third_party/clang/lib/clang/17.0.1/include/avxvnniintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/avxvnniintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/avxvnniintrin.h
diff --git a/third_party/clang/lib/clang/17.0.1/include/bmi2intrin.h b/third_party/clang/lib/clang/17.0.1/include/bmi2intrin.h
new file mode 100644
index 0000000000..f0a3343bef
--- /dev/null
+++ b/third_party/clang/lib/clang/17.0.1/include/bmi2intrin.h
@@ -0,0 +1,255 @@
+/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use directly; include instead."
+#endif
+
+#ifndef __BMI2INTRIN_H
+#define __BMI2INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2")))
+
+/// Copies the unsigned 32-bit integer \a __X and zeroes the upper bits
+/// starting at bit number \a __Y.
+///
+/// \code{.operation}
+/// i := __Y[7:0]
+/// result := __X
+/// IF i < 32
+/// result[31:i] := 0
+/// FI
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c BZHI instruction.
+///
+/// \param __X
+/// The 32-bit source value to copy.
+/// \param __Y
+/// The lower 8 bits specify the bit number of the lowest bit to zero.
+/// \returns The partially zeroed 32-bit value.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_bzhi_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bzhi_si(__X, __Y);
+}
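A typical _bzhi_u32 use: keeping the low n bits of a value without building the mask by hand (sketch):

#include <immintrin.h>

static unsigned int low_bits(unsigned int x, unsigned int n) {
  return _bzhi_u32(x, n);   /* clears bits [31:n]; n >= 32 leaves x intact */
}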
+
+/// Deposit (scatter) low-order bits from the unsigned 32-bit integer \a __X
+/// into the 32-bit result, according to the mask in the unsigned 32-bit
+/// integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 31
+/// IF __Y[m] == 1
+/// result[m] := __X[i]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PDEP instruction.
+///
+/// \param __X
+/// The 32-bit source value to copy.
+/// \param __Y
+/// The 32-bit mask specifying where to deposit source bits.
+/// \returns The 32-bit result.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_pdep_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pdep_si(__X, __Y);
+}
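A classic _pdep_u32 application: interleaving two 16-bit coordinates into a Morton (Z-order) code by depositing them into the even and odd bit positions (sketch):

#include <immintrin.h>

static unsigned int morton_encode(unsigned int x, unsigned int y) {
  return _pdep_u32(x, 0x55555555u)    /* x -> even bits */
       | _pdep_u32(y, 0xAAAAAAAAu);   /* y -> odd bits  */
}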
+
+/// Extract (gather) bits from the unsigned 32-bit integer \a __X into the
+/// low-order bits of the 32-bit result, according to the mask in the
+/// unsigned 32-bit integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 31
+/// IF __Y[m] == 1
+/// result[i] := __X[m]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PEXT instruction.
+///
+/// \param __X
+/// The 32-bit source value to copy.
+/// \param __Y
+/// The 32-bit mask specifying which source bits to extract.
+/// \returns The 32-bit result.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_pext_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pext_si(__X, __Y);
+}
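_pext_u32 is the gather that inverts the deposit above, so decoding the Morton code is symmetric (sketch):

#include <immintrin.h>

static void morton_decode(unsigned int code, unsigned int *x, unsigned int *y) {
  *x = _pext_u32(code, 0x55555555u);
  *y = _pext_u32(code, 0xAAAAAAAAu);
}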
+
+/// Multiplies the unsigned 32-bit integers \a __X and \a __Y to form a
+/// 64-bit product. Stores the upper 32 bits of the product in the
+/// memory at \a __P and returns the lower 32 bits.
+///
+/// \code{.operation}
+/// Store32(__P, (__X * __Y)[63:32])
+/// result := (__X * __Y)[31:0]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c MULX instruction.
+///
+/// \param __X
+/// An unsigned 32-bit multiplicand.
+/// \param __Y
+/// An unsigned 32-bit multiplicand.
+/// \param __P
+/// A pointer to memory for storing the upper half of the product.
+/// \returns The lower half of the product.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mulx_u32(unsigned int __X, unsigned int __Y, unsigned int *__P)
+{
+ unsigned long long __res = (unsigned long long) __X * __Y;
+ *__P = (unsigned int)(__res >> 32);
+ return (unsigned int)__res;
+}
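Reassembling the two halves yields the full 64-bit product without touching the flags, which is the point of MULX (sketch):

#include <immintrin.h>

static unsigned long long full_product32(unsigned int a, unsigned int b) {
  unsigned int hi;
  unsigned int lo = _mulx_u32(a, b, &hi);
  return ((unsigned long long)hi << 32) | lo;
}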
+
+#ifdef __x86_64__
+
+/// Copies the unsigned 64-bit integer \a __X and zeroes the upper bits
+/// starting at bit number \a __Y.
+///
+/// \code{.operation}
+/// i := __Y[7:0]
+/// result := __X
+/// IF i < 64
+/// result[63:i] := 0
+/// FI
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c BZHI instruction.
+///
+/// \param __X
+/// The 64-bit source value to copy.
+/// \param __Y
+/// The lower 8 bits specify the bit number of the lowest bit to zero.
+/// \returns The partially zeroed 64-bit value.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_bzhi_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bzhi_di(__X, __Y);
+}
+
+/// Deposit (scatter) low-order bits from the unsigned 64-bit integer \a __X
+/// into the 64-bit result, according to the mask in the unsigned 64-bit
+/// integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 63
+/// IF __Y[m] == 1
+/// result[m] := __X[i]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PDEP instruction.
+///
+/// \param __X
+/// The 64-bit source value to copy.
+/// \param __Y
+/// The 64-bit mask specifying where to deposit source bits.
+/// \returns The 64-bit result.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_pdep_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pdep_di(__X, __Y);
+}
+
+/// Extract (gather) bits from the unsigned 64-bit integer \a __X into the
+/// low-order bits of the 64-bit result, according to the mask in the
+/// unsigned 64-bit integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 63
+/// IF __Y[m] == 1
+/// result[i] := __X[m]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PEXT instruction.
+///
+/// \param __X
+/// The 64-bit source value to copy.
+/// \param __Y
+/// The 64-bit mask specifying which source bits to extract.
+/// \returns The 64-bit result.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_pext_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pext_di(__X, __Y);
+}
+
+/// Multiplies the unsigned 64-bit integers \a __X and \a __Y to form a
+/// 128-bit product. Stores the upper 64 bits of the product to the
+/// memory addressed by \a __P and returns the lower 64 bits.
+///
+/// \code{.operation}
+/// Store64(__P, (__X * __Y)[127:64])
+/// result := (__X * __Y)[63:0]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c MULX instruction.
+///
+/// \param __X
+/// An unsigned 64-bit multiplicand.
+/// \param __Y
+/// An unsigned 64-bit multiplicand.
+/// \param __P
+/// A pointer to memory for storing the upper half of the product.
+/// \returns The lower half of the product.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mulx_u64 (unsigned long long __X, unsigned long long __Y,
+ unsigned long long *__P)
+{
+ unsigned __int128 __res = (unsigned __int128) __X * __Y;
+ *__P = (unsigned long long) (__res >> 64);
+ return (unsigned long long) __res;
+}
+
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __BMI2INTRIN_H */
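On x86-64 the same idea scales to 64-bit operands; for instance, the high half of a 128-bit product is a single _mulx_u64 call (sketch):

#include <immintrin.h>

static unsigned long long mulhi_u64(unsigned long long a,
                                    unsigned long long b) {
  unsigned long long hi;
  (void)_mulx_u64(a, b, &hi);   /* keep only the upper 64 bits */
  return hi;
}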
diff --git a/third_party/clang/lib/clang/16.0.0/include/bmiintrin.h b/third_party/clang/lib/clang/17.0.1/include/bmiintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/bmiintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/bmiintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/builtins.h b/third_party/clang/lib/clang/17.0.1/include/builtins.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/builtins.h
rename to third_party/clang/lib/clang/17.0.1/include/builtins.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/cet.h b/third_party/clang/lib/clang/17.0.1/include/cet.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/cet.h
rename to third_party/clang/lib/clang/17.0.1/include/cet.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/cetintrin.h b/third_party/clang/lib/clang/17.0.1/include/cetintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/cetintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/cetintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/cldemoteintrin.h b/third_party/clang/lib/clang/17.0.1/include/cldemoteintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/cldemoteintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/cldemoteintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/clflushoptintrin.h b/third_party/clang/lib/clang/17.0.1/include/clflushoptintrin.h
similarity index 72%
rename from third_party/clang/lib/clang/16.0.0/include/clflushoptintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/clflushoptintrin.h
index 060eb36f30..ae0a0244c4 100644
--- a/third_party/clang/lib/clang/16.0.0/include/clflushoptintrin.h
+++ b/third_party/clang/lib/clang/17.0.1/include/clflushoptintrin.h
@@ -17,6 +17,15 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clflushopt")))
+/// Invalidates all levels of the cache hierarchy and flushes modified data to
+/// memory for the cache line specified by the address \a __m.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c CLFLUSHOPT instruction.
+///
+/// \param __m
+/// An address within the cache line to flush and invalidate.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_clflushopt(void const * __m) {
__builtin_ia32_clflushopt(__m);
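A usage sketch: flushing every cache line covering a buffer and then fencing, since CLFLUSHOPT is weakly ordered (the 64-byte line size is an assumption, not part of the API):

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

static void flush_buffer(const void *p, size_t n) {
  const size_t line = 64;   /* assumed cache-line size */
  uintptr_t a;
  if (n == 0)
    return;
  a = (uintptr_t)p & ~(uintptr_t)(line - 1);
  for (; a < (uintptr_t)p + n; a += line)
    _mm_clflushopt((const void *)a);
  _mm_sfence();             /* order the flushes against later stores */
}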
diff --git a/third_party/clang/lib/clang/16.0.0/include/clwbintrin.h b/third_party/clang/lib/clang/17.0.1/include/clwbintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/clwbintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/clwbintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/clzerointrin.h b/third_party/clang/lib/clang/17.0.1/include/clzerointrin.h
similarity index 72%
rename from third_party/clang/lib/clang/16.0.0/include/clzerointrin.h
rename to third_party/clang/lib/clang/17.0.1/include/clzerointrin.h
index a180984a3f..acccfe94ff 100644
--- a/third_party/clang/lib/clang/16.0.0/include/clzerointrin.h
+++ b/third_party/clang/lib/clang/17.0.1/include/clzerointrin.h
@@ -6,7 +6,7 @@
*
*===-----------------------------------------------------------------------===
*/
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#ifndef __X86INTRIN_H
#error "Never use directly; include instead."
#endif
@@ -17,14 +17,16 @@
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("clzero")))
-/// Loads the cache line address and zero's out the cacheline
+/// Zeroes out the cache line for the address \a __line. This uses a
+/// non-temporal store. Calling \c _mm_sfence() afterward might be needed
+/// to enforce ordering.
///
-/// \headerfile <clzerointrin.h>
+/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the CLZERO instruction.
+/// This intrinsic corresponds to the \c CLZERO instruction.
///
/// \param __line
-/// A pointer to a cacheline which needs to be zeroed out.
+/// An address within the cache line to zero out.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_clzero (void * __line)
{
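A sketch pairing the intrinsic with the fence the note above recommends; the pointer is assumed cache-line aligned:

#include <x86intrin.h>

static void zero_cache_line(void *line_aligned_ptr) {
  _mm_clzero(line_aligned_ptr);
  _mm_sfence();   /* the zeroing store is non-temporal */
}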
diff --git a/third_party/clang/lib/clang/16.0.0/include/cmpccxaddintrin.h b/third_party/clang/lib/clang/17.0.1/include/cmpccxaddintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/cmpccxaddintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/cmpccxaddintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/cpuid.h b/third_party/clang/lib/clang/17.0.1/include/cpuid.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/cpuid.h
rename to third_party/clang/lib/clang/17.0.1/include/cpuid.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/crc32intrin.h b/third_party/clang/lib/clang/17.0.1/include/crc32intrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/crc32intrin.h
rename to third_party/clang/lib/clang/17.0.1/include/crc32intrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/algorithm b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/algorithm
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/algorithm
rename to third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/algorithm
diff --git a/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/bits/shared_ptr_base.h b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/bits/shared_ptr_base.h
new file mode 100644
index 0000000000..10028dd7bd
--- /dev/null
+++ b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/bits/shared_ptr_base.h
@@ -0,0 +1,9 @@
+// CUDA headers define __noinline__, which interferes with libstdc++'s use of
+// `__attribute((__noinline__))`. To avoid a compilation error, temporarily
+// unset __noinline__ while including the affected libstdc++ header.
+
+#pragma push_macro("__noinline__")
+#undef __noinline__
+#include_next "bits/shared_ptr_base.h"
+
+#pragma pop_macro("__noinline__")
diff --git a/third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/cmath b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/cmath
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/cmath
rename to third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/cmath
diff --git a/third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/complex b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/complex
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/complex
rename to third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/complex
diff --git a/third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/new b/third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/new
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/cuda_wrappers/new
rename to third_party/clang/lib/clang/17.0.1/include/cuda_wrappers/new
diff --git a/third_party/clang/lib/clang/16.0.0/include/emmintrin.h b/third_party/clang/lib/clang/17.0.1/include/emmintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/emmintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/emmintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/enqcmdintrin.h b/third_party/clang/lib/clang/17.0.1/include/enqcmdintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/enqcmdintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/enqcmdintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/f16cintrin.h b/third_party/clang/lib/clang/17.0.1/include/f16cintrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/f16cintrin.h
rename to third_party/clang/lib/clang/17.0.1/include/f16cintrin.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/float.h b/third_party/clang/lib/clang/17.0.1/include/float.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/float.h
rename to third_party/clang/lib/clang/17.0.1/include/float.h
diff --git a/third_party/clang/lib/clang/16.0.0/include/fma4intrin.h b/third_party/clang/lib/clang/17.0.1/include/fma4intrin.h
similarity index 100%
rename from third_party/clang/lib/clang/16.0.0/include/fma4intrin.h
rename to third_party/clang/lib/clang/17.0.1/include/fma4intrin.h
diff --git a/third_party/clang/lib/clang/17.0.1/include/fmaintrin.h b/third_party/clang/lib/clang/17.0.1/include/fmaintrin.h
new file mode 100644
index 0000000000..ea832fac4f
--- /dev/null
+++ b/third_party/clang/lib/clang/17.0.1/include/fmaintrin.h
@@ -0,0 +1,780 @@
+/*===---- fmaintrin.h - FMA intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use directly; include instead."
+#endif
+
+#ifndef __FMAINTRIN_H
+#define __FMAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(256)))
+
+/// Computes a multiply-add of 128-bit vectors of [4 x float].
+/// For each element, computes (__A * __B) + __C.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend.
+/// \returns A 128-bit vector of [4 x float] containing the result.
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+}
+
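A usage sketch: one fused step of an accumulation, which rounds once rather than twice (assumes an FMA-enabled target):

#include <immintrin.h>

static __m128 fma_step(__m128 a, __m128 b, __m128 acc) {
  return _mm_fmadd_ps(a, b, acc);   /* (a * b) + acc with a single rounding */
}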
+/// Computes a multiply-add of 128-bit vectors of [2 x double].
+/// For each element, computes