From 1374f7bd9f07115a7c69908bf50ba22e77b0e149 Mon Sep 17 00:00:00 2001 From: Sam McCall Date: Tue, 3 Dec 2019 22:13:45 +0100 Subject: [PATCH 01/12] [clangd] Fix comparator const after c9c714c7054d555398c767cb39d7d97600b3d9d1 --- clang-tools-extra/clangd/Selection.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clang-tools-extra/clangd/Selection.cpp b/clang-tools-extra/clangd/Selection.cpp index 6ff5eabe566ca..ffa48f3a57d96 100644 --- a/clang-tools-extra/clangd/Selection.cpp +++ b/clang-tools-extra/clangd/Selection.cpp @@ -112,7 +112,7 @@ class IntervalSet { private: using TokenRange = llvm::ArrayRef<T>; struct RangeLess { - bool operator()(llvm::ArrayRef<T> L, llvm::ArrayRef<T> R) { + bool operator()(llvm::ArrayRef<T> L, llvm::ArrayRef<T> R) const { return L.begin() < R.begin(); } }; From 195eb9034af3d5352f8f5aa4b2156eb8579e8514 Mon Sep 17 00:00:00 2001 From: Fangrui Song Date: Tue, 26 Nov 2019 10:40:52 -0800 Subject: [PATCH 02/12] [UpdateTestChecks] Change shebang from python to python3 'python' means Python 2 on some platforms while Python 3 on others. 'python3' is Python 3 only. Python 2.7 End of Life is set to January 1, 2020. Getting rid of Python 2 support reduces maintenance burden. Reviewed By: lebedev.ri Differential Revision: https://reviews.llvm.org/D70730 --- llvm/utils/update_analyze_test_checks.py | 2 +- llvm/utils/update_llc_test_checks.py | 4 ++-- llvm/utils/update_mca_test_checks.py | 2 +- llvm/utils/update_mir_test_checks.py | 2 +- llvm/utils/update_test_checks.py | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/llvm/utils/update_analyze_test_checks.py b/llvm/utils/update_analyze_test_checks.py index f3572e762549e..e3b6dfdf620cf 100755 --- a/llvm/utils/update_analyze_test_checks.py +++ b/llvm/utils/update_analyze_test_checks.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """A script to generate FileCheck statements for 'opt' analysis tests. diff --git a/llvm/utils/update_llc_test_checks.py b/llvm/utils/update_llc_test_checks.py index 750650f8640c9..3e7da8aa06f8f 100755 --- a/llvm/utils/update_llc_test_checks.py +++ b/llvm/utils/update_llc_test_checks.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """A test case update script. @@ -45,7 +45,7 @@ def main(): for test in test_paths: with open(test) as f: input_lines = [l.rstrip() for l in f] - + first_line = input_lines[0] if input_lines else "" if 'autogenerated' in first_line and script_name not in first_line: common.warn("Skipping test which wasn't autogenerated by " + script_name, test) diff --git a/llvm/utils/update_mca_test_checks.py b/llvm/utils/update_mca_test_checks.py index 0522c80be4d5f..c5798a3a8848e 100755 --- a/llvm/utils/update_mca_test_checks.py +++ b/llvm/utils/update_mca_test_checks.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """A test case update script. diff --git a/llvm/utils/update_mir_test_checks.py b/llvm/utils/update_mir_test_checks.py index 46f497007fc90..c1590c55637ef 100755 --- a/llvm/utils/update_mir_test_checks.py +++ b/llvm/utils/update_mir_test_checks.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Updates FileCheck checks in MIR tests. diff --git a/llvm/utils/update_test_checks.py b/llvm/utils/update_test_checks.py index 3fd8dd7dd7e93..8ee226549802e 100755 --- a/llvm/utils/update_test_checks.py +++ b/llvm/utils/update_test_checks.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """A script to generate FileCheck statements for 'opt' regression tests. 
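A standalone sketch of why the const qualifier in PATCH 01 is required (simplified hypothetical types, not the clangd code): standard associative containers invoke their comparator from const member functions, so a call operator that is not const-qualified fails to compile in that context.

    #include <cassert>
    #include <set>
    #include <utility>

    // Simplified stand-in for Selection.cpp's RangeLess: orders half-open
    // ranges by their start pointer, as the real comparator orders
    // ArrayRefs by begin().
    struct RangeLess {
      bool operator()(std::pair<const char *, const char *> L,
                      std::pair<const char *, const char *> R) const {
        return L.first < R.first;
      }
    };

    int main() {
      const char Buf[] = "tokens";
      // std::set calls its comparator from const members such as find() and
      // count(); without the const qualifier above, this does not compile.
      std::set<std::pair<const char *, const char *>, RangeLess> Ranges;
      Ranges.insert({Buf, Buf + 3});
      assert(Ranges.count({Buf, Buf + 3}) == 1);
      return 0;
    }
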
From b3fdf33ba6aa7ef80621696f74aaf2f6f8e1d1de Mon Sep 17 00:00:00 2001 From: Sergej Jaskiewicz Date: Tue, 5 Nov 2019 14:47:24 +0300 Subject: [PATCH 03/12] Enable `-funwind-tables` flag when building libunwind Summary: Or, rather, don't accidentally forget to pass it. This is aimed at solving the problem discussed in [this thread](http://lists.llvm.org/pipermail/llvm-dev/2019-November/136890.html), and at fixing [a year-old bug](https://bugs.llvm.org/show_bug.cgi?id=38468). TL;DR: when building libunwind for ARM Linux, we **need** libunwind to be built with the `-funwind-tables` flag, because, well, ARM EHABI needs the unwind info produced by this flag. Without the flag, all the procedures in libunwind are marked `.cantunwind`, which causes all sorts of bad things, from `_Unwind_Backtrace` not working to C++ exceptions not being caught (which is what the aforementioned bug is about). Previously, this flag was not added because the CMake check `add_compile_flags_if_supported(-funwind-tables)` produced a false negative. Why? With this flag, the compiler generates calls to the `__aeabi_unwind_cpp_pr0` symbol, which is defined in libunwind itself and obviously is not available at configure time, before libunwind is built. This led to a failure at link time during the CMake check. We handle this by disabling the linker during CMake checks in libunwind. Also, this patch introduces a lit feature `libunwind-arm-ehabi`, which is used to mark the `signal_frame.pass.cpp` test as unsupported (as advised by @miyuki in D70397). Reviewers: peter.smith, phosek, EricWF, compnerd, jroelofs, saugustine, miyuki, jfb Subscribers: mgorny, kristof.beyls, christof, libcxx-commits, miyuki Tags: #libc Differential Revision: https://reviews.llvm.org/D70815 --- libunwind/CMakeLists.txt | 21 +++++++++++++++++++++ libunwind/cmake/config-ix.cmake | 11 +++++++++++ libunwind/test/CMakeLists.txt | 1 + libunwind/test/libunwind/test/config.py | 10 ++++++++++ libunwind/test/lit.site.cfg.in | 1 + libunwind/test/signal_frame.pass.cpp | 4 ++-- 6 files changed, 46 insertions(+), 2 deletions(-) diff --git a/libunwind/CMakeLists.txt b/libunwind/CMakeLists.txt index 08095d1333a56..7aa1c782ac74c 100644 --- a/libunwind/CMakeLists.txt +++ b/libunwind/CMakeLists.txt @@ -220,6 +220,21 @@ include(HandleLibunwindFlags) # Setup Compiler Flags #=============================================================================== +# Don't run the linker in CMake checks. +# +# The reason why this was added is that when building libunwind for +# ARM Linux, we need to pass the -funwind-tables flag in order for it to +# work properly with ARM EHABI. +# +# However, when performing CMake checks, adding this flag causes the check +# to produce a false negative, because the compiler generates calls +# to __aeabi_unwind_cpp_pr0, which is defined in libunwind itself, +# which isn't built yet, so the linker complains about undefined symbols. +# +# This leads to libunwind not being built with this flag, which makes +# libunwind quite useless in this setup. +set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) + # Get required flags. 
add_target_flags_if(LIBUNWIND_BUILD_32_BITS "-m32") @@ -292,6 +307,12 @@ add_cxx_compile_flags_if_supported(-fstrict-aliasing) add_cxx_compile_flags_if_supported(-EHsc) add_compile_flags_if_supported(-funwind-tables) + +if (LIBUNWIND_USES_ARM_EHABI AND NOT LIBUNWIND_SUPPORTS_FUNWIND_TABLES_FLAG) + message(SEND_ERROR "The -funwind-tables flag must be supported " + "because this target uses ARM Exception Handling ABI") +endif() + add_cxx_compile_flags_if_supported(-fno-exceptions) add_cxx_compile_flags_if_supported(-fno-rtti) diff --git a/libunwind/cmake/config-ix.cmake b/libunwind/cmake/config-ix.cmake index 02d2f13f2e28c..0d833e996ca16 100644 --- a/libunwind/cmake/config-ix.cmake +++ b/libunwind/cmake/config-ix.cmake @@ -2,6 +2,7 @@ include(CMakePushCheckState) include(CheckCCompilerFlag) include(CheckCXXCompilerFlag) include(CheckLibraryExists) +include(CheckSymbolExists) include(CheckCSourceCompiles) check_library_exists(c fopen "" LIBUNWIND_HAS_C_LIB) @@ -73,3 +74,13 @@ check_cxx_compiler_flag(-nostdinc++ LIBUNWIND_HAS_NOSTDINCXX_FLAG) # Check libraries check_library_exists(dl dladdr "" LIBUNWIND_HAS_DL_LIB) check_library_exists(pthread pthread_once "" LIBUNWIND_HAS_PTHREAD_LIB) + +# Check symbols +check_symbol_exists(__arm__ "" LIBUNWIND_TARGET_ARM) +check_symbol_exists(__USING_SJLJ_EXCEPTIONS__ "" LIBUNWIND_USES_SJLJ_EXCEPTIONS) +check_symbol_exists(__ARM_DWARF_EH__ "" LIBUNWIND_USES_DWARF_EH) + +if(LIBUNWIND_TARGET_ARM AND NOT LIBUNWIND_USES_SJLJ_EXCEPTIONS AND NOT LIBUNWIND_USES_DWARF_EH) + # This condition is copied from __libunwind_config.h + set(LIBUNWIND_USES_ARM_EHABI ON) +endif() diff --git a/libunwind/test/CMakeLists.txt b/libunwind/test/CMakeLists.txt index d902e3e829410..40d4acd4e8c2a 100644 --- a/libunwind/test/CMakeLists.txt +++ b/libunwind/test/CMakeLists.txt @@ -16,6 +16,7 @@ pythonize_bool(LIBCXX_ENABLE_SHARED) pythonize_bool(LIBUNWIND_ENABLE_SHARED) pythonize_bool(LIBUNWIND_ENABLE_THREADS) pythonize_bool(LIBUNWIND_ENABLE_EXCEPTIONS) +pythonize_bool(LIBUNWIND_USES_ARM_EHABI) pythonize_bool(LIBUNWIND_USE_COMPILER_RT) pythonize_bool(LIBUNWIND_BUILD_EXTERNAL_THREAD_LIBRARY) set(LIBUNWIND_TARGET_INFO "libcxx.test.target_info.LocalTI" CACHE STRING diff --git a/libunwind/test/libunwind/test/config.py b/libunwind/test/libunwind/test/config.py index 05e3f3cc21f31..41ca3f9b4a447 100644 --- a/libunwind/test/libunwind/test/config.py +++ b/libunwind/test/libunwind/test/config.py @@ -37,6 +37,8 @@ def configure_features(self): super(Configuration, self).configure_features() if not self.get_lit_bool('enable_exceptions', True): self.config.available_features.add('libcxxabi-no-exceptions') + if self.get_lit_bool('arm_ehabi', False): + self.config.available_features.add('libunwind-arm-ehabi') def configure_compile_flags(self): self.cxx.compile_flags += ['-DLIBUNWIND_NO_TIMER'] @@ -66,3 +68,11 @@ def configure_compile_flags_exceptions(self): def configure_compile_flags_rtti(self): pass + + def configure_link_flags_cxx_library(self): + # libunwind tests should not link with libc++ + pass + + def configure_link_flags_abi_library(self): + # libunwind tests should not link with libc++abi + pass diff --git a/libunwind/test/lit.site.cfg.in b/libunwind/test/lit.site.cfg.in index 34da72ac10684..37f90a90efdb4 100644 --- a/libunwind/test/lit.site.cfg.in +++ b/libunwind/test/lit.site.cfg.in @@ -19,6 +19,7 @@ config.executor = "@LIBUNWIND_EXECUTOR@" config.libunwind_shared = @LIBUNWIND_ENABLE_SHARED@ config.enable_shared = @LIBCXX_ENABLE_SHARED@ config.enable_exceptions = 
@LIBUNWIND_ENABLE_EXCEPTIONS@ +config.arm_ehabi = @LIBUNWIND_USES_ARM_EHABI@ config.host_triple = "@LLVM_HOST_TRIPLE@" config.target_triple = "@TARGET_TRIPLE@" config.use_target = bool("@LIBUNWIND_TARGET_TRIPLE@") diff --git a/libunwind/test/signal_frame.pass.cpp b/libunwind/test/signal_frame.pass.cpp index a6f3f483bea5f..a899461fafb4d 100644 --- a/libunwind/test/signal_frame.pass.cpp +++ b/libunwind/test/signal_frame.pass.cpp @@ -9,6 +9,8 @@ // Ensure that functions marked as signal frames are reported as such. +// UNSUPPORTED: libunwind-arm-ehabi + #include <assert.h> #include <stdlib.h> #include <libunwind.h> @@ -20,9 +22,7 @@ void test() { unw_getcontext(&uc); unw_init_local(&cursor, &uc); assert(unw_step(&cursor) > 0); -#if !defined(_LIBUNWIND_ARM_EHABI) assert(unw_is_signal_frame(&cursor)); -#endif } int main() { From 15a172bebbc5b95d05733ef842fcdbd14e9d441d Mon Sep 17 00:00:00 2001 From: Davide Italiano Date: Tue, 3 Dec 2019 13:36:50 -0800 Subject: [PATCH 04/12] [TypeCategory] Nothing passes down a list of languages. Summary: This should allow further simplifications, but it's a first step. Reviewers: teemperor, jingham, friss Subscribers: lldb-commits Tags: #lldb Differential Revision: https://reviews.llvm.org/D70983 --- lldb/include/lldb/DataFormatters/TypeCategory.h | 3 +-- lldb/source/DataFormatters/TypeCategory.cpp | 10 +++------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/lldb/include/lldb/DataFormatters/TypeCategory.h b/lldb/include/lldb/DataFormatters/TypeCategory.h index a5438226bbbb8..90c8a3da60051 100644 --- a/lldb/include/lldb/DataFormatters/TypeCategory.h +++ b/lldb/include/lldb/DataFormatters/TypeCategory.h @@ -214,8 +214,7 @@ class TypeCategoryImpl { ValidatorContainer::RegexMatchForEachCallback m_validator_regex; }; - TypeCategoryImpl(IFormatChangeListener *clist, ConstString name, - std::initializer_list<lldb::LanguageType> langs = {}); + TypeCategoryImpl(IFormatChangeListener *clist, ConstString name); template <typename T> void ForEach(const ForEachCallbacks<T> &foreach) { GetTypeFormatsContainer()->ForEach(foreach.GetFormatExactCallback()); diff --git a/lldb/source/DataFormatters/TypeCategory.cpp b/lldb/source/DataFormatters/TypeCategory.cpp index fed2dfb3c7c5b..9159de169f04e 100644 --- a/lldb/source/DataFormatters/TypeCategory.cpp +++ b/lldb/source/DataFormatters/TypeCategory.cpp @@ -13,18 +13,14 @@ using namespace lldb; using namespace lldb_private; -TypeCategoryImpl::TypeCategoryImpl( - IFormatChangeListener *clist, ConstString name, - std::initializer_list<lldb::LanguageType> langs) +TypeCategoryImpl::TypeCategoryImpl(IFormatChangeListener *clist, + ConstString name) : m_format_cont("format", "regex-format", clist), m_summary_cont("summary", "regex-summary", clist), m_filter_cont("filter", "regex-filter", clist), m_synth_cont("synth", "regex-synth", clist), m_validator_cont("validator", "regex-validator", clist), m_enabled(false), - m_change_listener(clist), m_mutex(), m_name(name), m_languages() { - for (const lldb::LanguageType lang : langs) - AddLanguage(lang); -} + m_change_listener(clist), m_mutex(), m_name(name), m_languages() {} static bool IsApplicable(lldb::LanguageType category_lang, lldb::LanguageType valobj_lang) { From 5ebbabc1af360756f402203ba7704bb480f279a7 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Tue, 3 Dec 2019 13:48:39 -0800 Subject: [PATCH 05/12] [InstCombine] Revert aafde063aaf09285c701c80cd4b543c2beb523e8 and 6749dc3446671df05235d0a218c426a314ac33cd related to bitcast handling of x86_mmx This reverts these two commits: [InstCombine] Turn (extractelement <1 x i64/double> (bitcast (x86_mmx))) into a
single bitcast from x86_mmx to i64/double. [InstCombine] Don't transform bitcasts between x86_mmx and v1i64 into insertelement/extractelement. We're seeing at least one internal test failure where a bitcast that previously appeared before an inline assembly block containing emms is now placed after it. This leads to the mmx state ending up not empty after the emms. IR has no way to make any specific guarantees about this. Reverting these patches gets us back to the previous behavior, which at least worked for this test. --- llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp | 5 ++--- llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp | 7 ------- llvm/test/Transforms/InstCombine/bitcast-vec-canon.ll | 8 +++++--- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp index 078a80de2df4a..2171c819fd9e2 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -2394,8 +2394,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) { } if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) { - if (DestVTy->getNumElements() == 1 && - VectorType::isValidElementType(SrcTy)) { + if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) { Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType()); return InsertElementInst::Create(UndefValue::get(DestTy), Elem, Constant::getNullValue(Type::getInt32Ty(CI.getContext()))); @@ -2427,7 +2426,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) { if (SrcVTy->getNumElements() == 1) { // If our destination is not a vector, then make this a straight // scalar-scalar cast. - if (VectorType::isValidElementType(DestTy)) { + if (!DestTy->isVectorTy()) { Value *Elem = Builder.CreateExtractElement(Src, Constant::getNullValue(Type::getInt32Ty(CI.getContext()))); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp index d31cbc0882ee5..9fabe9def1104 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp @@ -435,13 +435,6 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) { Worklist.AddValue(EE); return CastInst::Create(CI->getOpcode(), EE, EI.getType()); } - - // If the input is a bitcast from x86_mmx, turn into a single bitcast from - // the mmx type to the scalar type. 
- if (CI->getOpcode() == Instruction::BitCast && - EI.getVectorOperandType()->getNumElements() == 1 && - CI->getOperand(0)->getType()->isX86_MMXTy()) - return new BitCastInst(CI->getOperand(0), EI.getType()); } } return nullptr; diff --git a/llvm/test/Transforms/InstCombine/bitcast-vec-canon.ll b/llvm/test/Transforms/InstCombine/bitcast-vec-canon.ll index 21c8e78bbb165..50cbd763987d3 100644 --- a/llvm/test/Transforms/InstCombine/bitcast-vec-canon.ll +++ b/llvm/test/Transforms/InstCombine/bitcast-vec-canon.ll @@ -40,7 +40,8 @@ define <1 x i64> @d(i64 %y) { define x86_mmx @e(<1 x i64> %y) { ; CHECK-LABEL: @e( -; CHECK-NEXT: [[C:%.*]] = bitcast <1 x i64> %y to x86_mmx +; CHECK-NEXT: [[TMP1:%.*]] = extractelement <1 x i64> %y, i32 0 +; CHECK-NEXT: [[C:%.*]] = bitcast i64 [[TMP1]] to x86_mmx ; CHECK-NEXT: ret x86_mmx [[C]] ; %c = bitcast <1 x i64> %y to x86_mmx @@ -49,7 +50,8 @@ define x86_mmx @e(<1 x i64> %y) { define <1 x i64> @f(x86_mmx %y) { ; CHECK-LABEL: @f( -; CHECK-NEXT: [[C:%.*]] = bitcast x86_mmx [[Y:%.*]] to <1 x i64> +; CHECK-NEXT: [[TMP1:%.*]] = bitcast x86_mmx %y to i64 +; CHECK-NEXT: [[C:%.*]] = insertelement <1 x i64> undef, i64 [[TMP1]], i32 0 ; CHECK-NEXT: ret <1 x i64> [[C]] ; %c = bitcast x86_mmx %y to <1 x i64> @@ -59,7 +61,7 @@ define <1 x i64> @f(x86_mmx %y) { define double @g(x86_mmx %x) { ; CHECK-LABEL: @g( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[TMP0:%.*]] = bitcast x86_mmx [[X:%.*]] to double +; CHECK-NEXT: [[TMP0:%.*]] = bitcast x86_mmx %x to double ; CHECK-NEXT: ret double [[TMP0]] ; entry: From da7b129b1b52bdc52b02b546b75f90fad07f6d3a Mon Sep 17 00:00:00 2001 From: James Clarke Date: Tue, 3 Dec 2019 22:04:24 +0000 Subject: [PATCH 06/12] [RISCV] Don't force Local Exec TLS for non-PIC Summary: Forcing Local Exec TLS requires the use of copy relocations. Copy relocations need special handling in the runtime linker when being used against TLS symbols, which is present in glibc, but not in FreeBSD nor musl, and so cannot be relied upon. Moreover, copy relocations are a hack that embed the size of an object in the ABI when it otherwise wouldn't be, and break protected symbols (which are expected to be DSO local), whilst also wasting space, thus they should be avoided whenever possible. As discussed in D70398, RISC-V should move away from forcing Local Exec, and instead use Initial Exec like other targets, with possible linker relaxation to follow. The RISC-V GCC maintainers also intend to adopt this more-conventional behaviour (see https://github.com/riscv/riscv-elf-psabi-doc/issues/122). 
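To make the model distinction concrete, here is a minimal sketch (hypothetical variable names, not part of the patch). Initial Exec loads the thread-pointer offset from the GOT at run time, so it works even when the definition lives in a shared object; Local Exec bakes the offset in at link time and is only safe when the definition is known to be in the executable.

    // ie_var may be defined in a shared object, so a non-PIC executable
    // should use Initial Exec (a GOT load of the TP offset) rather than
    // forcing Local Exec, which would need a copy relocation against TLS.
    extern thread_local int ie_var;

    // le_var is defined in this translation unit, so Local Exec (a
    // link-time-constant TP offset) remains a valid choice for it.
    thread_local int le_var = 42;

    int *addr_ie() { return &ie_var; }
    int *addr_le() { return &le_var; }

With this change, a non-PIC RISC-V build uses the Initial Exec sequence shown in the updated tls-models.ll checks below instead of assuming a Local Exec offset.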
Reviewers: asb, MaskRay Reviewed By: MaskRay Subscribers: emaste, krytarowski, hiraditya, rbar, johnrusso, simoncook, sabuasal, niosHD, kito-cheng, shiva0217, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, PkmX, jocewei, psnobl, benna, Jim, lenary, s.egerton, pzheng, sameer.abuasal, apazos, llvm-commits, bsdjhb Tags: #llvm Differential Revision: https://reviews.llvm.org/D70649 --- llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 5 +- llvm/test/CodeGen/RISCV/tls-models.ll | 103 ++++++++++++++------ 2 files changed, 73 insertions(+), 35 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index b9aa5cf32b590..1e562f3f54b59 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -581,10 +581,7 @@ SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op, int64_t Offset = N->getOffset(); MVT XLenVT = Subtarget.getXLenVT(); - // Non-PIC TLS lowering should always use the LocalExec model. - TLSModel::Model Model = isPositionIndependent() - ? getTargetMachine().getTLSModel(N->getGlobal()) - : TLSModel::LocalExec; + TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal()); SDValue Addr; switch (Model) { diff --git a/llvm/test/CodeGen/RISCV/tls-models.ll b/llvm/test/CodeGen/RISCV/tls-models.ll index a2015b086f95f..25a2f71beb317 100644 --- a/llvm/test/CodeGen/RISCV/tls-models.ll +++ b/llvm/test/CodeGen/RISCV/tls-models.ll @@ -3,16 +3,17 @@ ; RUN: | FileCheck -check-prefix=RV32-PIC %s ; RUN: llc -mtriple=riscv64 -relocation-model=pic < %s \ ; RUN: | FileCheck -check-prefix=RV64-PIC %s -; RUN: llc -mtriple=riscv32 < %s | FileCheck -check-prefix=NOPIC %s -; RUN: llc -mtriple=riscv64 < %s | FileCheck -check-prefix=NOPIC %s +; RUN: llc -mtriple=riscv32 < %s | FileCheck -check-prefix=RV32-NOPIC %s +; RUN: llc -mtriple=riscv64 < %s | FileCheck -check-prefix=RV64-NOPIC %s ; Check that TLS symbols are lowered correctly based on the specified -; model. +; model. Make sure they're external to avoid them all being optimised to Local +; Exec for the executable. 
-@unspecified = thread_local global i32 42 -@ld = thread_local(localdynamic) global i32 42 -@ie = thread_local(initialexec) global i32 42 -@le = thread_local(localexec) global i32 42 +@unspecified = external thread_local global i32 +@ld = external thread_local(localdynamic) global i32 +@ie = external thread_local(initialexec) global i32 +@le = external thread_local(localexec) global i32 ; No model specified @@ -44,12 +45,23 @@ define i32* @f1() nounwind { ; RV64-PIC-NEXT: addi sp, sp, 16 ; RV64-PIC-NEXT: ret ; -; NOPIC-LABEL: f1: -; NOPIC: # %bb.0: # %entry -; NOPIC-NEXT: lui a0, %tprel_hi(unspecified) -; NOPIC-NEXT: add a0, a0, tp, %tprel_add(unspecified) -; NOPIC-NEXT: addi a0, a0, %tprel_lo(unspecified) -; NOPIC-NEXT: ret +; RV32-NOPIC-LABEL: f1: +; RV32-NOPIC: # %bb.0: # %entry +; RV32-NOPIC-NEXT: .LBB0_1: # %entry +; RV32-NOPIC-NEXT: # Label of block must be emitted +; RV32-NOPIC-NEXT: auipc a0, %tls_ie_pcrel_hi(unspecified) +; RV32-NOPIC-NEXT: lw a0, %pcrel_lo(.LBB0_1)(a0) +; RV32-NOPIC-NEXT: add a0, a0, tp +; RV32-NOPIC-NEXT: ret +; +; RV64-NOPIC-LABEL: f1: +; RV64-NOPIC: # %bb.0: # %entry +; RV64-NOPIC-NEXT: .LBB0_1: # %entry +; RV64-NOPIC-NEXT: # Label of block must be emitted +; RV64-NOPIC-NEXT: auipc a0, %tls_ie_pcrel_hi(unspecified) +; RV64-NOPIC-NEXT: ld a0, %pcrel_lo(.LBB0_1)(a0) +; RV64-NOPIC-NEXT: add a0, a0, tp +; RV64-NOPIC-NEXT: ret entry: ret i32* @unspecified } @@ -84,12 +96,23 @@ define i32* @f2() nounwind { ; RV64-PIC-NEXT: addi sp, sp, 16 ; RV64-PIC-NEXT: ret ; -; NOPIC-LABEL: f2: -; NOPIC: # %bb.0: # %entry -; NOPIC-NEXT: lui a0, %tprel_hi(ld) -; NOPIC-NEXT: add a0, a0, tp, %tprel_add(ld) -; NOPIC-NEXT: addi a0, a0, %tprel_lo(ld) -; NOPIC-NEXT: ret +; RV32-NOPIC-LABEL: f2: +; RV32-NOPIC: # %bb.0: # %entry +; RV32-NOPIC-NEXT: .LBB1_1: # %entry +; RV32-NOPIC-NEXT: # Label of block must be emitted +; RV32-NOPIC-NEXT: auipc a0, %tls_ie_pcrel_hi(ld) +; RV32-NOPIC-NEXT: lw a0, %pcrel_lo(.LBB1_1)(a0) +; RV32-NOPIC-NEXT: add a0, a0, tp +; RV32-NOPIC-NEXT: ret +; +; RV64-NOPIC-LABEL: f2: +; RV64-NOPIC: # %bb.0: # %entry +; RV64-NOPIC-NEXT: .LBB1_1: # %entry +; RV64-NOPIC-NEXT: # Label of block must be emitted +; RV64-NOPIC-NEXT: auipc a0, %tls_ie_pcrel_hi(ld) +; RV64-NOPIC-NEXT: ld a0, %pcrel_lo(.LBB1_1)(a0) +; RV64-NOPIC-NEXT: add a0, a0, tp +; RV64-NOPIC-NEXT: ret entry: ret i32* @ld } @@ -116,12 +139,23 @@ define i32* @f3() nounwind { ; RV64-PIC-NEXT: add a0, a0, tp ; RV64-PIC-NEXT: ret ; -; NOPIC-LABEL: f3: -; NOPIC: # %bb.0: # %entry -; NOPIC-NEXT: lui a0, %tprel_hi(ie) -; NOPIC-NEXT: add a0, a0, tp, %tprel_add(ie) -; NOPIC-NEXT: addi a0, a0, %tprel_lo(ie) -; NOPIC-NEXT: ret +; RV32-NOPIC-LABEL: f3: +; RV32-NOPIC: # %bb.0: # %entry +; RV32-NOPIC-NEXT: .LBB2_1: # %entry +; RV32-NOPIC-NEXT: # Label of block must be emitted +; RV32-NOPIC-NEXT: auipc a0, %tls_ie_pcrel_hi(ie) +; RV32-NOPIC-NEXT: lw a0, %pcrel_lo(.LBB2_1)(a0) +; RV32-NOPIC-NEXT: add a0, a0, tp +; RV32-NOPIC-NEXT: ret +; +; RV64-NOPIC-LABEL: f3: +; RV64-NOPIC: # %bb.0: # %entry +; RV64-NOPIC-NEXT: .LBB2_1: # %entry +; RV64-NOPIC-NEXT: # Label of block must be emitted +; RV64-NOPIC-NEXT: auipc a0, %tls_ie_pcrel_hi(ie) +; RV64-NOPIC-NEXT: ld a0, %pcrel_lo(.LBB2_1)(a0) +; RV64-NOPIC-NEXT: add a0, a0, tp +; RV64-NOPIC-NEXT: ret entry: ret i32* @ie } @@ -144,12 +178,19 @@ define i32* @f4() nounwind { ; RV64-PIC-NEXT: addi a0, a0, %tprel_lo(le) ; RV64-PIC-NEXT: ret ; -; NOPIC-LABEL: f4: -; NOPIC: # %bb.0: # %entry -; NOPIC-NEXT: lui a0, %tprel_hi(le) -; NOPIC-NEXT: add a0, a0, tp, %tprel_add(le) -; NOPIC-NEXT: addi 
a0, a0, %tprel_lo(le) -; NOPIC-NEXT: ret +; RV32-NOPIC-LABEL: f4: +; RV32-NOPIC: # %bb.0: # %entry +; RV32-NOPIC-NEXT: lui a0, %tprel_hi(le) +; RV32-NOPIC-NEXT: add a0, a0, tp, %tprel_add(le) +; RV32-NOPIC-NEXT: addi a0, a0, %tprel_lo(le) +; RV32-NOPIC-NEXT: ret +; +; RV64-NOPIC-LABEL: f4: +; RV64-NOPIC: # %bb.0: # %entry +; RV64-NOPIC-NEXT: lui a0, %tprel_hi(le) +; RV64-NOPIC-NEXT: add a0, a0, tp, %tprel_add(le) +; RV64-NOPIC-NEXT: addi a0, a0, %tprel_lo(le) +; RV64-NOPIC-NEXT: ret entry: ret i32* @le } From f586fd44e4ba898ad7fed1a3c4f865fe8a3a8338 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Tue, 3 Dec 2019 14:05:04 -0800 Subject: [PATCH 07/12] [FPEnv] [PowerPC] Lowering ppc_fp128 StrictFP Nodes to libcalls This is an alternative to D64662 that shares more code between strict and non-strict nodes. It's modeled after the implementation that I did for softening. Differential Revision: https://reviews.llvm.org/D70867 --- .../SelectionDAG/LegalizeFloatTypes.cpp | 426 +++-- .../CodeGen/SelectionDAG/LegalizeTypes.cpp | 26 - llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 5 +- .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 36 +- .../ppcf128-constrained-fp-intrinsics.ll | 1569 +++++++++++++++++ 5 files changed, 1852 insertions(+), 210 deletions(-) create mode 100644 llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp index a8f038227bfbc..a94efe74c9abe 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp @@ -1108,36 +1108,61 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) { case ISD::ConstantFP: ExpandFloatRes_ConstantFP(N, Lo, Hi); break; case ISD::FABS: ExpandFloatRes_FABS(N, Lo, Hi); break; + case ISD::STRICT_FMINNUM: case ISD::FMINNUM: ExpandFloatRes_FMINNUM(N, Lo, Hi); break; + case ISD::STRICT_FMAXNUM: case ISD::FMAXNUM: ExpandFloatRes_FMAXNUM(N, Lo, Hi); break; + case ISD::STRICT_FADD: case ISD::FADD: ExpandFloatRes_FADD(N, Lo, Hi); break; case ISD::FCBRT: ExpandFloatRes_FCBRT(N, Lo, Hi); break; + case ISD::STRICT_FCEIL: case ISD::FCEIL: ExpandFloatRes_FCEIL(N, Lo, Hi); break; case ISD::FCOPYSIGN: ExpandFloatRes_FCOPYSIGN(N, Lo, Hi); break; + case ISD::STRICT_FCOS: case ISD::FCOS: ExpandFloatRes_FCOS(N, Lo, Hi); break; + case ISD::STRICT_FDIV: case ISD::FDIV: ExpandFloatRes_FDIV(N, Lo, Hi); break; + case ISD::STRICT_FEXP: case ISD::FEXP: ExpandFloatRes_FEXP(N, Lo, Hi); break; + case ISD::STRICT_FEXP2: case ISD::FEXP2: ExpandFloatRes_FEXP2(N, Lo, Hi); break; + case ISD::STRICT_FFLOOR: case ISD::FFLOOR: ExpandFloatRes_FFLOOR(N, Lo, Hi); break; + case ISD::STRICT_FLOG: case ISD::FLOG: ExpandFloatRes_FLOG(N, Lo, Hi); break; + case ISD::STRICT_FLOG2: case ISD::FLOG2: ExpandFloatRes_FLOG2(N, Lo, Hi); break; + case ISD::STRICT_FLOG10: case ISD::FLOG10: ExpandFloatRes_FLOG10(N, Lo, Hi); break; + case ISD::STRICT_FMA: case ISD::FMA: ExpandFloatRes_FMA(N, Lo, Hi); break; + case ISD::STRICT_FMUL: case ISD::FMUL: ExpandFloatRes_FMUL(N, Lo, Hi); break; + case ISD::STRICT_FNEARBYINT: case ISD::FNEARBYINT: ExpandFloatRes_FNEARBYINT(N, Lo, Hi); break; case ISD::FNEG: ExpandFloatRes_FNEG(N, Lo, Hi); break; + case ISD::STRICT_FP_EXTEND: case ISD::FP_EXTEND: ExpandFloatRes_FP_EXTEND(N, Lo, Hi); break; + case ISD::STRICT_FPOW: case ISD::FPOW: ExpandFloatRes_FPOW(N, Lo, Hi); break; + case ISD::STRICT_FPOWI: case ISD::FPOWI: ExpandFloatRes_FPOWI(N, Lo, Hi); break; + case 
ISD::STRICT_FRINT: case ISD::FRINT: ExpandFloatRes_FRINT(N, Lo, Hi); break; + case ISD::STRICT_FROUND: case ISD::FROUND: ExpandFloatRes_FROUND(N, Lo, Hi); break; + case ISD::STRICT_FSIN: case ISD::FSIN: ExpandFloatRes_FSIN(N, Lo, Hi); break; + case ISD::STRICT_FSQRT: case ISD::FSQRT: ExpandFloatRes_FSQRT(N, Lo, Hi); break; + case ISD::STRICT_FSUB: case ISD::FSUB: ExpandFloatRes_FSUB(N, Lo, Hi); break; + case ISD::STRICT_FTRUNC: case ISD::FTRUNC: ExpandFloatRes_FTRUNC(N, Lo, Hi); break; case ISD::LOAD: ExpandFloatRes_LOAD(N, Lo, Hi); break; case ISD::SINT_TO_FP: case ISD::UINT_TO_FP: ExpandFloatRes_XINT_TO_FP(N, Lo, Hi); break; + case ISD::STRICT_FREM: case ISD::FREM: ExpandFloatRes_FREM(N, Lo, Hi); break; } @@ -1161,6 +1186,36 @@ void DAGTypeLegalizer::ExpandFloatRes_ConstantFP(SDNode *N, SDValue &Lo, dl, NVT); } +void DAGTypeLegalizer::ExpandFloatRes_Unary(SDNode *N, RTLIB::Libcall LC, + SDValue &Lo, SDValue &Hi) { + bool IsStrict = N->isStrictFPOpcode(); + unsigned Offset = IsStrict ? 1 : 0; + SDValue Op = N->getOperand(0 + Offset); + SDValue Chain = IsStrict ? N->getOperand(0) : SDValue(); + TargetLowering::MakeLibCallOptions CallOptions; + std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, N->getValueType(0), + Op, CallOptions, SDLoc(N), + Chain); + if (IsStrict) + ReplaceValueWith(SDValue(N, 1), Tmp.second); + GetPairElements(Tmp.first, Lo, Hi); +} + +void DAGTypeLegalizer::ExpandFloatRes_Binary(SDNode *N, RTLIB::Libcall LC, + SDValue &Lo, SDValue &Hi) { + bool IsStrict = N->isStrictFPOpcode(); + unsigned Offset = IsStrict ? 1 : 0; + SDValue Ops[] = { N->getOperand(0 + Offset), N->getOperand(1 + Offset) }; + SDValue Chain = IsStrict ? N->getOperand(0) : SDValue(); + TargetLowering::MakeLibCallOptions CallOptions; + std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, N->getValueType(0), + Ops, CallOptions, SDLoc(N), + Chain); + if (IsStrict) + ReplaceValueWith(SDValue(N, 1), Tmp.second); + GetPairElements(Tmp.first, Lo, Hi); +} + void DAGTypeLegalizer::ExpandFloatRes_FABS(SDNode *N, SDValue &Lo, SDValue &Hi) { assert(N->getValueType(0) == MVT::ppcf128 && @@ -1177,190 +1232,159 @@ void DAGTypeLegalizer::ExpandFloatRes_FABS(SDNode *N, SDValue &Lo, void DAGTypeLegalizer::ExpandFloatRes_FMINNUM(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::FMIN_F32, RTLIB::FMIN_F64, - RTLIB::FMIN_F80, RTLIB::FMIN_F128, - RTLIB::FMIN_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0), + RTLIB::FMIN_F32, RTLIB::FMIN_F64, + RTLIB::FMIN_F80, RTLIB::FMIN_F128, + RTLIB::FMIN_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FMAXNUM(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::FMAX_F32, RTLIB::FMAX_F64, - RTLIB::FMAX_F80, RTLIB::FMAX_F128, - RTLIB::FMAX_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0), + RTLIB::FMAX_F32, RTLIB::FMAX_F64, + RTLIB::FMAX_F80, RTLIB::FMAX_F128, + RTLIB::FMAX_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FADD(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::ADD_F32, RTLIB::ADD_F64, - RTLIB::ADD_F80, RTLIB::ADD_F128, - RTLIB::ADD_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0), + RTLIB::ADD_F32, RTLIB::ADD_F64, + RTLIB::ADD_F80, RTLIB::ADD_F128, + RTLIB::ADD_PPCF128), Lo, Hi); } void 
DAGTypeLegalizer::ExpandFloatRes_FCBRT(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), RTLIB::CBRT_F32, - RTLIB::CBRT_F64, RTLIB::CBRT_F80, - RTLIB::CBRT_F128, RTLIB::CBRT_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), RTLIB::CBRT_F32, + RTLIB::CBRT_F64, RTLIB::CBRT_F80, + RTLIB::CBRT_F128, + RTLIB::CBRT_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FCEIL(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::CEIL_F32, RTLIB::CEIL_F64, - RTLIB::CEIL_F80, RTLIB::CEIL_F128, - RTLIB::CEIL_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::CEIL_F32, RTLIB::CEIL_F64, + RTLIB::CEIL_F80, RTLIB::CEIL_F128, + RTLIB::CEIL_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FCOPYSIGN(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::COPYSIGN_F32, - RTLIB::COPYSIGN_F64, - RTLIB::COPYSIGN_F80, - RTLIB::COPYSIGN_F128, - RTLIB::COPYSIGN_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0), + RTLIB::COPYSIGN_F32, + RTLIB::COPYSIGN_F64, + RTLIB::COPYSIGN_F80, + RTLIB::COPYSIGN_F128, + RTLIB::COPYSIGN_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FCOS(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::COS_F32, RTLIB::COS_F64, - RTLIB::COS_F80, RTLIB::COS_F128, - RTLIB::COS_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::COS_F32, RTLIB::COS_F64, + RTLIB::COS_F80, RTLIB::COS_F128, + RTLIB::COS_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FDIV(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; - TargetLowering::MakeLibCallOptions CallOptions; - SDValue Call = TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0), - RTLIB::DIV_F32, - RTLIB::DIV_F64, - RTLIB::DIV_F80, - RTLIB::DIV_F128, - RTLIB::DIV_PPCF128), - N->getValueType(0), Ops, CallOptions, - SDLoc(N)).first; - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0), + RTLIB::DIV_F32, + RTLIB::DIV_F64, + RTLIB::DIV_F80, + RTLIB::DIV_F128, + RTLIB::DIV_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FEXP(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::EXP_F32, RTLIB::EXP_F64, - RTLIB::EXP_F80, RTLIB::EXP_F128, - RTLIB::EXP_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::EXP_F32, RTLIB::EXP_F64, + RTLIB::EXP_F80, RTLIB::EXP_F128, + RTLIB::EXP_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FEXP2(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::EXP2_F32, RTLIB::EXP2_F64, - RTLIB::EXP2_F80, RTLIB::EXP2_F128, - RTLIB::EXP2_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::EXP2_F32, RTLIB::EXP2_F64, + RTLIB::EXP2_F80, RTLIB::EXP2_F128, + RTLIB::EXP2_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FFLOOR(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::FLOOR_F32, RTLIB::FLOOR_F64, - 
RTLIB::FLOOR_F80, RTLIB::FLOOR_F128, - RTLIB::FLOOR_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::FLOOR_F32, RTLIB::FLOOR_F64, + RTLIB::FLOOR_F80, RTLIB::FLOOR_F128, + RTLIB::FLOOR_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FLOG(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::LOG_F32, RTLIB::LOG_F64, - RTLIB::LOG_F80, RTLIB::LOG_F128, - RTLIB::LOG_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::LOG_F32, RTLIB::LOG_F64, + RTLIB::LOG_F80, RTLIB::LOG_F128, + RTLIB::LOG_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FLOG2(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::LOG2_F32, RTLIB::LOG2_F64, - RTLIB::LOG2_F80, RTLIB::LOG2_F128, - RTLIB::LOG2_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::LOG2_F32, RTLIB::LOG2_F64, + RTLIB::LOG2_F80, RTLIB::LOG2_F128, + RTLIB::LOG2_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FLOG10(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::LOG10_F32, RTLIB::LOG10_F64, - RTLIB::LOG10_F80, RTLIB::LOG10_F128, - RTLIB::LOG10_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::LOG10_F32, RTLIB::LOG10_F64, + RTLIB::LOG10_F80, RTLIB::LOG10_F128, + RTLIB::LOG10_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FMA(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Ops[3] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) }; + bool IsStrict = N->isStrictFPOpcode(); + unsigned Offset = IsStrict ? 1 : 0; + SDValue Ops[3] = { N->getOperand(0 + Offset), N->getOperand(1 + Offset), + N->getOperand(2 + Offset) }; + SDValue Chain = IsStrict ? 
N->getOperand(0) : SDValue(); TargetLowering::MakeLibCallOptions CallOptions; - SDValue Call = TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0), + std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0), RTLIB::FMA_F32, RTLIB::FMA_F64, RTLIB::FMA_F80, RTLIB::FMA_F128, RTLIB::FMA_PPCF128), N->getValueType(0), Ops, CallOptions, - SDLoc(N)).first; - GetPairElements(Call, Lo, Hi); + SDLoc(N), Chain); + if (IsStrict) + ReplaceValueWith(SDValue(N, 1), Tmp.second); + GetPairElements(Tmp.first, Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FMUL(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; - TargetLowering::MakeLibCallOptions CallOptions; - SDValue Call = TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0), + ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0), RTLIB::MUL_F32, RTLIB::MUL_F64, RTLIB::MUL_F80, RTLIB::MUL_F128, - RTLIB::MUL_PPCF128), - N->getValueType(0), Ops, CallOptions, - SDLoc(N)).first; - GetPairElements(Call, Lo, Hi); + RTLIB::MUL_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FNEARBYINT(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::NEARBYINT_F32, - RTLIB::NEARBYINT_F64, - RTLIB::NEARBYINT_F80, - RTLIB::NEARBYINT_F128, - RTLIB::NEARBYINT_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::NEARBYINT_F32, + RTLIB::NEARBYINT_F64, + RTLIB::NEARBYINT_F80, + RTLIB::NEARBYINT_F128, + RTLIB::NEARBYINT_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FNEG(SDNode *N, SDValue &Lo, @@ -1375,106 +1399,105 @@ void DAGTypeLegalizer::ExpandFloatRes_FP_EXTEND(SDNode *N, SDValue &Lo, SDValue &Hi) { EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); SDLoc dl(N); - Hi = DAG.getNode(ISD::FP_EXTEND, dl, NVT, N->getOperand(0)); + bool IsStrict = N->isStrictFPOpcode(); + + SDValue Chain; + if (IsStrict) { + // If the expanded type is the same as the input type, just bypass the node. + if (NVT == N->getOperand(1).getValueType()) { + Hi = N->getOperand(1); + Chain = N->getOperand(0); + } else { + // Otherwise we need to extend. 
+ Hi = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, { NVT, MVT::Other }, + { N->getOperand(0), N->getOperand(1) }); + Chain = Hi.getValue(1); + } + } else { + Hi = DAG.getNode(ISD::FP_EXTEND, dl, NVT, N->getOperand(0)); + } + Lo = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT), APInt(NVT.getSizeInBits(), 0)), dl, NVT); + + if (IsStrict) + ReplaceValueWith(SDValue(N, 1), Chain); } void DAGTypeLegalizer::ExpandFloatRes_FPOW(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::POW_F32, RTLIB::POW_F64, - RTLIB::POW_F80, RTLIB::POW_F128, - RTLIB::POW_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0), + RTLIB::POW_F32, RTLIB::POW_F64, + RTLIB::POW_F80, RTLIB::POW_F128, + RTLIB::POW_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FPOWI(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::POWI_F32, RTLIB::POWI_F64, - RTLIB::POWI_F80, RTLIB::POWI_F128, - RTLIB::POWI_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0), + RTLIB::POWI_F32, RTLIB::POWI_F64, + RTLIB::POWI_F80, RTLIB::POWI_F128, + RTLIB::POWI_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FREM(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::REM_F32, RTLIB::REM_F64, - RTLIB::REM_F80, RTLIB::REM_F128, - RTLIB::REM_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0), + RTLIB::REM_F32, RTLIB::REM_F64, + RTLIB::REM_F80, RTLIB::REM_F128, + RTLIB::REM_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FRINT(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::RINT_F32, RTLIB::RINT_F64, - RTLIB::RINT_F80, RTLIB::RINT_F128, - RTLIB::RINT_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::RINT_F32, RTLIB::RINT_F64, + RTLIB::RINT_F80, RTLIB::RINT_F128, + RTLIB::RINT_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FROUND(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::ROUND_F32, - RTLIB::ROUND_F64, - RTLIB::ROUND_F80, - RTLIB::ROUND_F128, - RTLIB::ROUND_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::ROUND_F32, + RTLIB::ROUND_F64, + RTLIB::ROUND_F80, + RTLIB::ROUND_F128, + RTLIB::ROUND_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FSIN(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::SIN_F32, RTLIB::SIN_F64, - RTLIB::SIN_F80, RTLIB::SIN_F128, - RTLIB::SIN_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::SIN_F32, RTLIB::SIN_F64, + RTLIB::SIN_F80, RTLIB::SIN_F128, + RTLIB::SIN_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FSQRT(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::SQRT_F32, RTLIB::SQRT_F64, - RTLIB::SQRT_F80, RTLIB::SQRT_F128, - RTLIB::SQRT_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::SQRT_F32, RTLIB::SQRT_F64, + RTLIB::SQRT_F80, RTLIB::SQRT_F128, + RTLIB::SQRT_PPCF128), Lo, Hi); } 
void DAGTypeLegalizer::ExpandFloatRes_FSUB(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; - TargetLowering::MakeLibCallOptions CallOptions; - SDValue Call = TLI.makeLibCall(DAG, GetFPLibCall(N->getValueType(0), - RTLIB::SUB_F32, - RTLIB::SUB_F64, - RTLIB::SUB_F80, - RTLIB::SUB_F128, - RTLIB::SUB_PPCF128), - N->getValueType(0), Ops, CallOptions, - SDLoc(N)).first; - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Binary(N, GetFPLibCall(N->getValueType(0), + RTLIB::SUB_F32, + RTLIB::SUB_F64, + RTLIB::SUB_F80, + RTLIB::SUB_F128, + RTLIB::SUB_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_FTRUNC(SDNode *N, SDValue &Lo, SDValue &Hi) { - SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0), - RTLIB::TRUNC_F32, RTLIB::TRUNC_F64, - RTLIB::TRUNC_F80, RTLIB::TRUNC_F128, - RTLIB::TRUNC_PPCF128), - N, false); - GetPairElements(Call, Lo, Hi); + ExpandFloatRes_Unary(N, GetFPLibCall(N->getValueType(0), + RTLIB::TRUNC_F32, RTLIB::TRUNC_F64, + RTLIB::TRUNC_F80, RTLIB::TRUNC_F128, + RTLIB::TRUNC_PPCF128), Lo, Hi); } void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDValue &Lo, @@ -1615,8 +1638,11 @@ bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) { case ISD::BR_CC: Res = ExpandFloatOp_BR_CC(N); break; case ISD::FCOPYSIGN: Res = ExpandFloatOp_FCOPYSIGN(N); break; + case ISD::STRICT_FP_ROUND: case ISD::FP_ROUND: Res = ExpandFloatOp_FP_ROUND(N); break; + case ISD::STRICT_FP_TO_SINT: case ISD::FP_TO_SINT: Res = ExpandFloatOp_FP_TO_SINT(N); break; + case ISD::STRICT_FP_TO_UINT: case ISD::FP_TO_UINT: Res = ExpandFloatOp_FP_TO_UINT(N); break; case ISD::LROUND: Res = ExpandFloatOp_LROUND(N); break; case ISD::LLROUND: Res = ExpandFloatOp_LLROUND(N); break; @@ -1705,34 +1731,72 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_FCOPYSIGN(SDNode *N) { } SDValue DAGTypeLegalizer::ExpandFloatOp_FP_ROUND(SDNode *N) { - assert(N->getOperand(0).getValueType() == MVT::ppcf128 && + bool IsStrict = N->isStrictFPOpcode(); + assert(N->getOperand(IsStrict ? 1 : 0).getValueType() == MVT::ppcf128 && "Logic only correct for ppcf128!"); SDValue Lo, Hi; - GetExpandedFloat(N->getOperand(0), Lo, Hi); - // Round it the rest of the way (e.g. to f32) if needed. - return DAG.getNode(ISD::FP_ROUND, SDLoc(N), - N->getValueType(0), Hi, N->getOperand(1)); + GetExpandedFloat(N->getOperand(IsStrict ? 1 : 0), Lo, Hi); + + if (!IsStrict) + // Round it the rest of the way (e.g. to f32) if needed. + return DAG.getNode(ISD::FP_ROUND, SDLoc(N), + N->getValueType(0), Hi, N->getOperand(1)); + + // Eliminate the node if the input float type is the same as the output float + // type. + if (Hi.getValueType() == N->getValueType(0)) { + // Connect the output chain to the input chain, unlinking the node. + ReplaceValueWith(SDValue(N, 1), N->getOperand(0)); + ReplaceValueWith(SDValue(N, 0), Hi); + return SDValue(); + } + + SDValue Expansion = DAG.getNode(ISD::STRICT_FP_ROUND, SDLoc(N), + {N->getValueType(0), MVT::Other}, + {N->getOperand(0), Hi, N->getOperand(2)}); + ReplaceValueWith(SDValue(N, 1), Expansion.getValue(1)); + ReplaceValueWith(SDValue(N, 0), Expansion); + return SDValue(); } SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_SINT(SDNode *N) { EVT RVT = N->getValueType(0); SDLoc dl(N); - RTLIB::Libcall LC = RTLIB::getFPTOSINT(N->getOperand(0).getValueType(), RVT); + bool IsStrict = N->isStrictFPOpcode(); + SDValue Op = N->getOperand(IsStrict ? 1 : 0); + SDValue Chain = IsStrict ? 
N->getOperand(0) : SDValue(); + RTLIB::Libcall LC = RTLIB::getFPTOSINT(Op.getValueType(), RVT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_SINT!"); TargetLowering::MakeLibCallOptions CallOptions; - return TLI.makeLibCall(DAG, LC, RVT, N->getOperand(0), CallOptions, dl).first; + std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, RVT, Op, + CallOptions, dl, Chain); + if (!IsStrict) + return Tmp.first; + + ReplaceValueWith(SDValue(N, 1), Tmp.second); + ReplaceValueWith(SDValue(N, 0), Tmp.first); + return SDValue(); } SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_UINT(SDNode *N) { EVT RVT = N->getValueType(0); SDLoc dl(N); - RTLIB::Libcall LC = RTLIB::getFPTOUINT(N->getOperand(0).getValueType(), RVT); + bool IsStrict = N->isStrictFPOpcode(); + SDValue Op = N->getOperand(IsStrict ? 1 : 0); + SDValue Chain = IsStrict ? N->getOperand(0) : SDValue(); + RTLIB::Libcall LC = RTLIB::getFPTOUINT(Op.getValueType(), RVT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_UINT!"); TargetLowering::MakeLibCallOptions CallOptions; - return TLI.makeLibCall(DAG, LC, N->getValueType(0), N->getOperand(0), - CallOptions, dl).first; + std::pair<SDValue, SDValue> Tmp = TLI.makeLibCall(DAG, LC, RVT, Op, + CallOptions, dl, Chain); + if (!IsStrict) + return Tmp.first; + + ReplaceValueWith(SDValue(N, 1), Tmp.second); + ReplaceValueWith(SDValue(N, 0), Tmp.first); + return SDValue(); } SDValue DAGTypeLegalizer::ExpandFloatOp_SELECT_CC(SDNode *N) { diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp index 9ddcbc9065251..7a97d980f9e4f 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp @@ -974,32 +974,6 @@ SDValue DAGTypeLegalizer::JoinIntegers(SDValue Lo, SDValue Hi) { return DAG.getNode(ISD::OR, dlHi, NVT, Lo, Hi); } -/// Convert the node into a libcall with the same prototype. -SDValue DAGTypeLegalizer::LibCallify(RTLIB::Libcall LC, SDNode *N, - bool isSigned) { - TargetLowering::MakeLibCallOptions CallOptions; - CallOptions.setSExt(isSigned); - unsigned NumOps = N->getNumOperands(); - SDLoc dl(N); - if (NumOps == 0) { - return TLI.makeLibCall(DAG, LC, N->getValueType(0), None, CallOptions, - dl).first; - } else if (NumOps == 1) { - SDValue Op = N->getOperand(0); - return TLI.makeLibCall(DAG, LC, N->getValueType(0), Op, CallOptions, - dl).first; - } else if (NumOps == 2) { - SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; - return TLI.makeLibCall(DAG, LC, N->getValueType(0), Ops, CallOptions, - dl).first; - } - SmallVector<SDValue, 8> Ops(NumOps); - for (unsigned i = 0; i < NumOps; ++i) - Ops[i] = N->getOperand(i); - - return TLI.makeLibCall(DAG, LC, N->getValueType(0), Ops, CallOptions, dl).first; -} - /// Promote the given target boolean to a target boolean of the given type. /// A target boolean is an integer value, not necessarily of type i1, the bits /// of which conform to getBooleanContents. 
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h index 2fccf7b9cab6a..42597fcd12ecb 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -215,7 +215,6 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer { SDValue DisintegrateMERGE_VALUES(SDNode *N, unsigned ResNo); SDValue JoinIntegers(SDValue Lo, SDValue Hi); - SDValue LibCallify(RTLIB::Libcall LC, SDNode *N, bool isSigned); std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node); @@ -560,6 +559,10 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer { // Float Result Expansion. void ExpandFloatResult(SDNode *N, unsigned ResNo); void ExpandFloatRes_ConstantFP(SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_Unary(SDNode *N, RTLIB::Libcall LC, + SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_Binary(SDNode *N, RTLIB::Libcall LC, + SDValue &Lo, SDValue &Hi); void ExpandFloatRes_FABS (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandFloatRes_FMINNUM (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandFloatRes_FMAXNUM (SDNode *N, SDValue &Lo, SDValue &Hi); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 5dbdde5d8ea9b..c1c599c5a5d83 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -7322,8 +7322,40 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, if (VTList.NumVTs == 1) return getNode(Opcode, DL, VTList.VTs[0], Ops); -#if 0 switch (Opcode) { + case ISD::STRICT_FP_EXTEND: + assert(VTList.NumVTs == 2 && Ops.size() == 2 && + "Invalid STRICT_FP_EXTEND!"); + assert(VTList.VTs[0].isFloatingPoint() && + Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!"); + assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && + "STRICT_FP_EXTEND result type should be vector iff the operand " + "type is vector!"); + assert((!VTList.VTs[0].isVector() || + VTList.VTs[0].getVectorNumElements() == + Ops[1].getValueType().getVectorNumElements()) && + "Vector element count mismatch!"); + assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) && + "Invalid fpext node, dst <= src!"); + break; + case ISD::STRICT_FP_ROUND: + assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!"); + assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && + "STRICT_FP_ROUND result type should be vector iff the operand " + "type is vector!"); + assert((!VTList.VTs[0].isVector() || + VTList.VTs[0].getVectorNumElements() == + Ops[1].getValueType().getVectorNumElements()) && + "Vector element count mismatch!"); + assert(VTList.VTs[0].isFloatingPoint() && + Ops[1].getValueType().isFloatingPoint() && + VTList.VTs[0].bitsLT(Ops[1].getValueType()) && + isa<ConstantSDNode>(Ops[2]) && + (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 || + cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) && + "Invalid STRICT_FP_ROUND!"); + break; +#if 0 // FIXME: figure out how to safely handle things like // int foo(int x) { return 1 << (x & 255); } // int bar() { return foo(256); } @@ -7342,8 +7374,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); } break; - } #endif + } // Memoize the node unless it returns a flag. 
SDNode *N; diff --git a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll new file mode 100644 index 0000000000000..179ddc1980a94 --- /dev/null +++ b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll @@ -0,0 +1,1569 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -O3 -mtriple=powerpc64le-linux-gnu < %s | FileCheck --check-prefix=PC64LE %s +; RUN: llc -O3 -mtriple=powerpc64le-linux-gnu -mcpu=pwr9 < %s | FileCheck --check-prefix=PC64LE9 %s +; RUN: llc -O3 -mtriple=powerpc64-linux-gnu < %s | FileCheck --check-prefix=PC64 %s + +define ppc_fp128 @test_fadd_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind { +; PC64LE-LABEL: test_fadd_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl __gcc_qadd +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fadd_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl __gcc_qadd +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fadd_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl __gcc_qadd +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %add = call ppc_fp128 @llvm.experimental.constrained.fadd.ppcf128( + ppc_fp128 %first, + ppc_fp128 %second, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %add +} + +define ppc_fp128 @test_fsub_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind { +; PC64LE-LABEL: test_fsub_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl __gcc_qsub +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fsub_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl __gcc_qsub +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fsub_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl __gcc_qsub +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %sub = call ppc_fp128 @llvm.experimental.constrained.fsub.ppcf128( + ppc_fp128 %first, + ppc_fp128 %second, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %sub +} + +define ppc_fp128 @test_fmul_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind { +; PC64LE-LABEL: test_fmul_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl __gcc_qmul +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fmul_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 
0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl __gcc_qmul +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fmul_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl __gcc_qmul +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %mul = call ppc_fp128 @llvm.experimental.constrained.fmul.ppcf128( + ppc_fp128 %first, + ppc_fp128 %second, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %mul +} + +define ppc_fp128 @test_fdiv_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind { +; PC64LE-LABEL: test_fdiv_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl __gcc_qdiv +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fdiv_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl __gcc_qdiv +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fdiv_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl __gcc_qdiv +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %div = call ppc_fp128 @llvm.experimental.constrained.fdiv.ppcf128( + ppc_fp128 %first, + ppc_fp128 %second, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %div +} + +define ppc_fp128 @test_frem_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind { +; PC64LE-LABEL: test_frem_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl fmodl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_frem_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl fmodl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_frem_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl fmodl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %rem = call ppc_fp128 @llvm.experimental.constrained.frem.ppcf128( + ppc_fp128 %first, + ppc_fp128 %second, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %rem +} + +define ppc_fp128 @test_fma_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second, ppc_fp128 %third) nounwind { +; PC64LE-LABEL: test_fma_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl fmal +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fma_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; 
PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl fmal +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fma_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl fmal +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %add = call ppc_fp128 @llvm.experimental.constrained.fma.ppcf128( + ppc_fp128 %first, + ppc_fp128 %second, + ppc_fp128 %third, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %add +} + +define ppc_fp128 @test_sqrt_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_sqrt_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl sqrtl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_sqrt_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl sqrtl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_sqrt_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl sqrtl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %sqrt = call ppc_fp128 @llvm.experimental.constrained.sqrt.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %sqrt +} + +define ppc_fp128 @test_pow_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind { +; PC64LE-LABEL: test_pow_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl powl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_pow_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl powl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_pow_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl powl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %pow = call ppc_fp128 @llvm.experimental.constrained.pow.ppcf128( + ppc_fp128 %first, + ppc_fp128 %second, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %pow +} + +define ppc_fp128 @test_powi_ppc_fp128(ppc_fp128 %first, i32 %second) nounwind { +; PC64LE-LABEL: test_powi_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: clrldi 5, 5, 32 +; PC64LE-NEXT: bl __powitf2 +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_powi_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) 
+; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: clrldi 5, 5, 32 +; PC64LE9-NEXT: bl __powitf2 +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_powi_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: clrldi 5, 5, 32 +; PC64-NEXT: bl __powitf2 +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +; PC64LE9 : clrldi 5, 5, 32 +entry: + %powi = call ppc_fp128 @llvm.experimental.constrained.powi.ppcf128( + ppc_fp128 %first, + i32 %second, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %powi +} + +define ppc_fp128 @test_sin_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_sin_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl sinl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_sin_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl sinl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_sin_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl sinl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %sin = call ppc_fp128 @llvm.experimental.constrained.sin.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %sin +} + +define ppc_fp128 @test_cos_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_cos_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl cosl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_cos_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl cosl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_cos_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl cosl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %cos = call ppc_fp128 @llvm.experimental.constrained.cos.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %cos +} + +define ppc_fp128 @test_exp_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_exp_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl expl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_exp_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: 
bl expl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_exp_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl expl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %exp = call ppc_fp128 @llvm.experimental.constrained.exp.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %exp +} + +define ppc_fp128 @test_exp2_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_exp2_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl exp2l +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_exp2_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl exp2l +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_exp2_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl exp2l +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %exp2 = call ppc_fp128 @llvm.experimental.constrained.exp2.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %exp2 +} + +define ppc_fp128 @test_log_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_log_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl logl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_log_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl logl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_log_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl logl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %log = call ppc_fp128 @llvm.experimental.constrained.log.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %log +} + +define ppc_fp128 @test_log2_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_log2_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl log2l +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_log2_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl log2l +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: 
test_log2_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl log2l +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %log2 = call ppc_fp128 @llvm.experimental.constrained.log2.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %log2 +} + +define ppc_fp128 @test_log10_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_log10_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl log10l +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_log10_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl log10l +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_log10_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl log10l +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %log10 = call ppc_fp128 @llvm.experimental.constrained.log10.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %log10 +} + +define ppc_fp128 @test_rint_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_rint_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl rintl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_rint_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl rintl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_rint_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl rintl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %rint = call ppc_fp128 @llvm.experimental.constrained.rint.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %rint +} + +define ppc_fp128 @test_nearbyint_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_nearbyint_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl nearbyintl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_nearbyint_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl nearbyintl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_nearbyint_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; 
PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl nearbyintl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %nearbyint = call ppc_fp128 @llvm.experimental.constrained.nearbyint.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %nearbyint +} + +define ppc_fp128 @test_maxnum_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind { +; PC64LE-LABEL: test_maxnum_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl fmaxl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_maxnum_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl fmaxl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_maxnum_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl fmaxl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %maxnum = call ppc_fp128 @llvm.experimental.constrained.maxnum.ppcf128( + ppc_fp128 %first, + ppc_fp128 %second, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %maxnum +} + +define ppc_fp128 @test_minnum_ppc_fp128(ppc_fp128 %first, ppc_fp128 %second) nounwind { +; PC64LE-LABEL: test_minnum_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl fminl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_minnum_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl fminl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_minnum_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl fminl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %minnum = call ppc_fp128 @llvm.experimental.constrained.minnum.ppcf128( + ppc_fp128 %first, + ppc_fp128 %second, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %minnum +} + +define ppc_fp128 @test_ceil_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_ceil_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl ceill +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_ceil_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl ceill +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_ceil_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: 
stdu 1, -112(1) +; PC64-NEXT: bl ceill +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %ceil = call ppc_fp128 @llvm.experimental.constrained.ceil.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %ceil +} + +define ppc_fp128 @test_floor_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_floor_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl floorl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_floor_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl floorl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_floor_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl floorl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %floor = call ppc_fp128 @llvm.experimental.constrained.floor.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %floor +} + +define ppc_fp128 @test_round_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_round_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl roundl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_round_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl roundl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_round_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl roundl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %round = call ppc_fp128 @llvm.experimental.constrained.round.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %round +} + +define ppc_fp128 @test_trunc_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_trunc_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl truncl +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_trunc_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl truncl +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_trunc_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl truncl +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) 
+; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %trunc = call ppc_fp128 @llvm.experimental.constrained.trunc.ppcf128( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret ppc_fp128 %trunc +} + +define float @test_fptrunc_ppc_fp128_f32(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_fptrunc_ppc_fp128_f32: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: frsp 1, 1 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fptrunc_ppc_fp128_f32: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: frsp 1, 1 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fptrunc_ppc_fp128_f32: +; PC64: # %bb.0: # %entry +; PC64-NEXT: frsp 1, 1 +; PC64-NEXT: blr +entry: + %fptrunc = call float @llvm.experimental.constrained.fptrunc.ppcf128.f32( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret float %fptrunc +} + +define double @test_fptrunc_ppc_fp128_f64(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_fptrunc_ppc_fp128_f64: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fptrunc_ppc_fp128_f64: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fptrunc_ppc_fp128_f64: +; PC64: # %bb.0: # %entry +; PC64-NEXT: blr +entry: + %fptrunc = call double @llvm.experimental.constrained.fptrunc.ppcf128.f64( + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + ret double %fptrunc +} + +define ppc_fp128 @test_fpext_ppc_fp128_f32(float %first) nounwind { +; PC64LE-LABEL: test_fpext_ppc_fp128_f32: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: xxlxor 2, 2, 2 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fpext_ppc_fp128_f32: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: xxlxor 2, 2, 2 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fpext_ppc_fp128_f32: +; PC64: # %bb.0: # %entry +; PC64-NEXT: addis 3, 2, .LCPI26_0@toc@ha +; PC64-NEXT: lfs 2, .LCPI26_0@toc@l(3) +; PC64-NEXT: blr +entry: + %fpext = call ppc_fp128 @llvm.experimental.constrained.fpext.f32.ppcf128( + float %first, + metadata !"fpexcept.strict") + ret ppc_fp128 %fpext +} + +define ppc_fp128 @test_fpext_ppc_fp128_f64(double %first) nounwind { +; PC64LE-LABEL: test_fpext_ppc_fp128_f64: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: xxlxor 2, 2, 2 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fpext_ppc_fp128_f64: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: xxlxor 2, 2, 2 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fpext_ppc_fp128_f64: +; PC64: # %bb.0: # %entry +; PC64-NEXT: addis 3, 2, .LCPI27_0@toc@ha +; PC64-NEXT: lfs 2, .LCPI27_0@toc@l(3) +; PC64-NEXT: blr +entry: + %fpext = call ppc_fp128 @llvm.experimental.constrained.fpext.f64.ppcf128( + double %first, + metadata !"fpexcept.strict") + ret ppc_fp128 %fpext +} + +define i64 @test_fptosi_ppc_i64_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_fptosi_ppc_i64_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl __fixtfdi +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fptosi_ppc_i64_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl __fixtfdi +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fptosi_ppc_i64_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: 
stdu 1, -112(1) +; PC64-NEXT: bl __fixtfdi +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %fpext = call i64 @llvm.experimental.constrained.fptosi.i64.ppcf128( + ppc_fp128 %first, + metadata !"fpexcept.strict") + ret i64 %fpext +} + +define i32 @test_fptosi_ppc_i32_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_fptosi_ppc_i32_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl __gcc_qtou +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fptosi_ppc_i32_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl __gcc_qtou +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fptosi_ppc_i32_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl __gcc_qtou +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %fpext = call i32 @llvm.experimental.constrained.fptosi.i32.ppcf128( + ppc_fp128 %first, + metadata !"fpexcept.strict") + ret i32 %fpext +} + +define i64 @test_fptoui_ppc_i64_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_fptoui_ppc_i64_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl __fixunstfdi +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fptoui_ppc_i64_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl __fixunstfdi +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fptoui_ppc_i64_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl __fixunstfdi +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %fpext = call i64 @llvm.experimental.constrained.fptoui.i64.ppcf128( + ppc_fp128 %first, + metadata !"fpexcept.strict") + ret i64 %fpext +} + +define i32 @test_fptoui_ppc_i32_ppc_fp128(ppc_fp128 %first) nounwind { +; PC64LE-LABEL: test_fptoui_ppc_i32_ppc_fp128: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl __fixunstfsi +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_fptoui_ppc_i32_ppc_fp128: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl __fixunstfsi +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_fptoui_ppc_i32_ppc_fp128: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl __fixunstfsi +; PC64-NEXT: nop +; PC64-NEXT: addi 
1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %fpext = call i32 @llvm.experimental.constrained.fptoui.i32.ppcf128( + ppc_fp128 %first, + metadata !"fpexcept.strict") + ret i32 %fpext +} + +; Test that resultant libcalls retain order even when their non-strict FLOP form could be +; trivially optimized into differing sequences. +define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %result) nounwind { +; PC64LE-LABEL: test_constrained_libcall_multichain: +; PC64LE: # %bb.0: +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 29, -48(1) # 8-byte Folded Spill +; PC64LE-NEXT: std 30, -40(1) # 8-byte Folded Spill +; PC64LE-NEXT: stfd 29, -24(1) # 8-byte Folded Spill +; PC64LE-NEXT: stfd 30, -16(1) # 8-byte Folded Spill +; PC64LE-NEXT: stfd 31, -8(1) # 8-byte Folded Spill +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -80(1) +; PC64LE-NEXT: mr 29, 3 +; PC64LE-NEXT: xxlxor 2, 2, 2 +; PC64LE-NEXT: li 3, 0 +; PC64LE-NEXT: mr 30, 4 +; PC64LE-NEXT: lfsx 31, 0, 29 +; PC64LE-NEXT: xxlxor 4, 4, 4 +; PC64LE-NEXT: std 3, 8(4) +; PC64LE-NEXT: fmr 1, 31 +; PC64LE-NEXT: fmr 3, 31 +; PC64LE-NEXT: stfdx 31, 0, 4 +; PC64LE-NEXT: bl __gcc_qadd +; PC64LE-NEXT: nop +; PC64LE-NEXT: fmr 3, 1 +; PC64LE-NEXT: fmr 4, 2 +; PC64LE-NEXT: fmr 30, 1 +; PC64LE-NEXT: fmr 29, 2 +; PC64LE-NEXT: stfd 1, 16(30) +; PC64LE-NEXT: stfd 2, 24(30) +; PC64LE-NEXT: bl __gcc_qmul +; PC64LE-NEXT: nop +; PC64LE-NEXT: fmr 1, 31 +; PC64LE-NEXT: xxlxor 2, 2, 2 +; PC64LE-NEXT: li 5, 2 +; PC64LE-NEXT: stfd 30, 32(30) +; PC64LE-NEXT: stfd 29, 40(30) +; PC64LE-NEXT: bl __powitf2 +; PC64LE-NEXT: nop +; PC64LE-NEXT: frsp 0, 1 +; PC64LE-NEXT: stfsx 0, 0, 29 +; PC64LE-NEXT: stfd 2, -8(30) +; PC64LE-NEXT: stfd 1, -16(30) +; PC64LE-NEXT: addi 1, 1, 80 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: lfd 31, -8(1) # 8-byte Folded Reload +; PC64LE-NEXT: lfd 30, -16(1) # 8-byte Folded Reload +; PC64LE-NEXT: ld 30, -40(1) # 8-byte Folded Reload +; PC64LE-NEXT: ld 29, -48(1) # 8-byte Folded Reload +; PC64LE-NEXT: lfd 29, -24(1) # 8-byte Folded Reload +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: test_constrained_libcall_multichain: +; PC64LE9: # %bb.0: +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 29, -48(1) # 8-byte Folded Spill +; PC64LE9-NEXT: std 30, -40(1) # 8-byte Folded Spill +; PC64LE9-NEXT: stfd 29, -24(1) # 8-byte Folded Spill +; PC64LE9-NEXT: stfd 30, -16(1) # 8-byte Folded Spill +; PC64LE9-NEXT: stfd 31, -8(1) # 8-byte Folded Spill +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -80(1) +; PC64LE9-NEXT: lfs 31, 0(3) +; PC64LE9-NEXT: mr 29, 3 +; PC64LE9-NEXT: li 3, 0 +; PC64LE9-NEXT: xxlxor 2, 2, 2 +; PC64LE9-NEXT: xxlxor 4, 4, 4 +; PC64LE9-NEXT: std 3, 8(4) +; PC64LE9-NEXT: fmr 1, 31 +; PC64LE9-NEXT: fmr 3, 31 +; PC64LE9-NEXT: mr 30, 4 +; PC64LE9-NEXT: stfd 31, 0(4) +; PC64LE9-NEXT: bl __gcc_qadd +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: fmr 3, 1 +; PC64LE9-NEXT: fmr 4, 2 +; PC64LE9-NEXT: fmr 30, 2 +; PC64LE9-NEXT: fmr 29, 1 +; PC64LE9-NEXT: stfd 1, 16(30) +; PC64LE9-NEXT: stfd 2, 24(30) +; PC64LE9-NEXT: bl __gcc_qmul +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: fmr 1, 31 +; PC64LE9-NEXT: xxlxor 2, 2, 2 +; PC64LE9-NEXT: li 5, 2 +; PC64LE9-NEXT: stfd 29, 32(30) +; PC64LE9-NEXT: stfd 30, 40(30) +; PC64LE9-NEXT: bl __powitf2 +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: frsp 0, 1 +; PC64LE9-NEXT: stfs 0, 0(29) +; PC64LE9-NEXT: stfd 2, -8(30) +; PC64LE9-NEXT: stfd 1, -16(30) +; PC64LE9-NEXT: addi 1, 1, 80 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: lfd 31, -8(1) # 
8-byte Folded Reload +; PC64LE9-NEXT: lfd 30, -16(1) # 8-byte Folded Reload +; PC64LE9-NEXT: lfd 29, -24(1) # 8-byte Folded Reload +; PC64LE9-NEXT: ld 30, -40(1) # 8-byte Folded Reload +; PC64LE9-NEXT: ld 29, -48(1) # 8-byte Folded Reload +; PC64LE9-NEXT: blr +; +; PC64-LABEL: test_constrained_libcall_multichain: +; PC64: # %bb.0: +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -176(1) +; PC64-NEXT: std 29, 120(1) # 8-byte Folded Spill +; PC64-NEXT: mr 29, 3 +; PC64-NEXT: li 3, 0 +; PC64-NEXT: stfd 31, 168(1) # 8-byte Folded Spill +; PC64-NEXT: stfd 30, 160(1) # 8-byte Folded Spill +; PC64-NEXT: std 30, 128(1) # 8-byte Folded Spill +; PC64-NEXT: stfd 28, 144(1) # 8-byte Folded Spill +; PC64-NEXT: stfd 29, 152(1) # 8-byte Folded Spill +; PC64-NEXT: mr 30, 4 +; PC64-NEXT: lfs 31, 0(29) +; PC64-NEXT: std 3, 8(4) +; PC64-NEXT: addis 3, 2, .LCPI32_0@toc@ha +; PC64-NEXT: lfs 30, .LCPI32_0@toc@l(3) +; PC64-NEXT: fmr 1, 31 +; PC64-NEXT: fmr 3, 31 +; PC64-NEXT: fmr 2, 30 +; PC64-NEXT: fmr 4, 30 +; PC64-NEXT: stfd 31, 0(4) +; PC64-NEXT: bl __gcc_qadd +; PC64-NEXT: nop +; PC64-NEXT: fmr 3, 1 +; PC64-NEXT: fmr 4, 2 +; PC64-NEXT: fmr 29, 1 +; PC64-NEXT: fmr 28, 2 +; PC64-NEXT: stfd 1, 16(30) +; PC64-NEXT: stfd 2, 24(30) +; PC64-NEXT: bl __gcc_qmul +; PC64-NEXT: nop +; PC64-NEXT: fmr 1, 31 +; PC64-NEXT: fmr 2, 30 +; PC64-NEXT: li 5, 2 +; PC64-NEXT: stfd 29, 32(30) +; PC64-NEXT: stfd 28, 40(30) +; PC64-NEXT: bl __powitf2 +; PC64-NEXT: nop +; PC64-NEXT: frsp 0, 1 +; PC64-NEXT: stfs 0, 0(29) +; PC64-NEXT: lfd 31, 168(1) # 8-byte Folded Reload +; PC64-NEXT: lfd 30, 160(1) # 8-byte Folded Reload +; PC64-NEXT: lfd 29, 152(1) # 8-byte Folded Reload +; PC64-NEXT: lfd 28, 144(1) # 8-byte Folded Reload +; PC64-NEXT: ld 29, 120(1) # 8-byte Folded Reload +; PC64-NEXT: stfd 2, -8(30) +; PC64-NEXT: stfd 1, -16(30) +; PC64-NEXT: ld 30, 128(1) # 8-byte Folded Reload +; PC64-NEXT: addi 1, 1, 176 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr + %load = load float, float* %firstptr + %first = call ppc_fp128 @llvm.experimental.constrained.fpext.f32.ppcf128( + float %load, + metadata !"fpexcept.strict") + store ppc_fp128 %first, ppc_fp128* %result + + ; For unconstrained FLOPs, these next two FP instructions would necessarily + ; be executed in series with one another. + %fadd = call ppc_fp128 @llvm.experimental.constrained.fadd.ppcf128( + ppc_fp128 %first, + ppc_fp128 %first, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + %stridx1 = getelementptr ppc_fp128, ppc_fp128* %result, i32 1 + store ppc_fp128 %fadd, ppc_fp128* %stridx1 + %fmul = call ppc_fp128 @llvm.experimental.constrained.fmul.ppcf128( + ppc_fp128 %fadd, + ppc_fp128 %fadd, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + %stridx2 = getelementptr ppc_fp128, ppc_fp128* %stridx1, i32 1 + store ppc_fp128 %fadd, ppc_fp128* %stridx2 + + ; For unconstrained FLOPs, these next two FP instructions could be reordered + ; or even executed in parallel with respect to the previous two instructions. + ; However, strict floating point rules would not allow this. 
+ %powi = call ppc_fp128 @llvm.experimental.constrained.powi.ppcf128( + ppc_fp128 %first, + i32 2, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + %tinypow = call float @llvm.experimental.constrained.fptrunc.ppcf128.f32( + ppc_fp128 %powi, + metadata !"round.dynamic", + metadata !"fpexcept.strict") + store float %tinypow, float* %firstptr + %stridxn1 = getelementptr ppc_fp128, ppc_fp128* %result, i32 -1 + store ppc_fp128 %powi, ppc_fp128* %stridxn1 + ret void +} + +declare ppc_fp128 @llvm.experimental.constrained.fadd.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.ceil.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.cos.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.fdiv.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.exp.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.exp2.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.floor.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.fma.ppcf128(ppc_fp128, ppc_fp128, ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.fpext.f32.ppcf128(float, metadata) +declare ppc_fp128 @llvm.experimental.constrained.fpext.f64.ppcf128(double, metadata) +declare float @llvm.experimental.constrained.fptrunc.ppcf128.f32(ppc_fp128, metadata, metadata) +declare double @llvm.experimental.constrained.fptrunc.ppcf128.f64(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.log.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.log10.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.log2.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.maxnum.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.minnum.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.fmul.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.nearbyint.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.pow.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.powi.ppcf128(ppc_fp128, i32, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.frem.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.rint.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.round.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.sin.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.sqrt.ppcf128(ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.fsub.ppcf128(ppc_fp128, ppc_fp128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.trunc.ppcf128(ppc_fp128, metadata, metadata) +declare i64 @llvm.experimental.constrained.fptosi.i64.ppcf128(ppc_fp128, metadata) +declare i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128, metadata) +declare i64 @llvm.experimental.constrained.fptoui.i64.ppcf128(ppc_fp128, metadata) +declare i32 @llvm.experimental.constrained.fptoui.i32.ppcf128(ppc_fp128, 
metadata) From ad871e42958ed94f0a27b0ba173cff4e00b5ee61 Mon Sep 17 00:00:00 2001 From: Alex Lorenz Date: Tue, 3 Dec 2019 14:34:02 -0800 Subject: [PATCH 08/12] [compiler-rt] Disable fuzzer large.test when LLVM_ENABLE_EXPENSIVE_CHECKS=ON This test is timing out on Green Dragon http://green.lab.llvm.org/green/job/clang-stage1-cmake-RA-expensive/ and it looks like it's not executed on other bots with expensive checks enabled http://lab.llvm.org:8011/builders/llvm-clang-x86_64-expensive-checks-ubuntu http://lab.llvm.org:8011/builders/llvm-clang-x86_64-expensive-checks-win The test times out because the C++ source file takes too long to build (2+ hours on my machine), as clang spends a lot of time in IR/MIR verifiers. Differential Revision: https://reviews.llvm.org/D70024 --- compiler-rt/test/CMakeLists.txt | 1 + compiler-rt/test/fuzzer/large.test | 2 ++ compiler-rt/test/lit.common.cfg.py | 3 +++ compiler-rt/test/lit.common.configured.in | 1 + llvm/cmake/modules/LLVMConfig.cmake.in | 2 ++ 5 files changed, 9 insertions(+) diff --git a/compiler-rt/test/CMakeLists.txt b/compiler-rt/test/CMakeLists.txt index 02ce6aabd6995..f0330bcfe3041 100644 --- a/compiler-rt/test/CMakeLists.txt +++ b/compiler-rt/test/CMakeLists.txt @@ -1,6 +1,7 @@ # Needed for lit support in standalone builds. include(AddLLVM) +pythonize_bool(LLVM_ENABLE_EXPENSIVE_CHECKS) configure_compiler_rt_lit_site_cfg( ${CMAKE_CURRENT_SOURCE_DIR}/lit.common.configured.in ${CMAKE_CURRENT_BINARY_DIR}/lit.common.configured) diff --git a/compiler-rt/test/fuzzer/large.test b/compiler-rt/test/fuzzer/large.test index b03b60fdb6503..9aa7c46dc42c8 100644 --- a/compiler-rt/test/fuzzer/large.test +++ b/compiler-rt/test/fuzzer/large.test @@ -1,3 +1,5 @@ +UNSUPPORTED: expensive_checks + RUN: %cpp_compiler %S/LargeTest.cpp -o %t-LargeTest RUN: %run %t-LargeTest -runs=10000 diff --git a/compiler-rt/test/lit.common.cfg.py b/compiler-rt/test/lit.common.cfg.py index 2a8d93166f695..00f0a1e93abe1 100644 --- a/compiler-rt/test/lit.common.cfg.py +++ b/compiler-rt/test/lit.common.cfg.py @@ -477,6 +477,9 @@ def is_windows_lto_supported(): else: config.available_features.add("shadow-scale-3") +if config.expensive_checks: + config.available_features.add("expensive_checks") + # Propagate the LLD/LTO into the clang config option, so nothing else is needed.
run_wrapper = [] target_cflags = [getattr(config, 'target_cflags', None)] diff --git a/compiler-rt/test/lit.common.configured.in b/compiler-rt/test/lit.common.configured.in index 5ca95efd530ce..b4862f74cdd02 100644 --- a/compiler-rt/test/lit.common.configured.in +++ b/compiler-rt/test/lit.common.configured.in @@ -42,6 +42,7 @@ set_default("android_serial", "@ANDROID_SERIAL_FOR_TESTING@") set_default("android_files_to_push", []) set_default("have_rpc_xdr_h", @HAVE_RPC_XDR_H@) set_default("gwp_asan", @COMPILER_RT_HAS_GWP_ASAN_PYBOOL@) +set_default("expensive_checks", @LLVM_ENABLE_EXPENSIVE_CHECKS_PYBOOL@) config.available_features.add('target-is-%s' % config.target_arch) if config.enable_per_target_runtime_dir: diff --git a/llvm/cmake/modules/LLVMConfig.cmake.in b/llvm/cmake/modules/LLVMConfig.cmake.in index 7fdca536c1fdb..082393212b674 100644 --- a/llvm/cmake/modules/LLVMConfig.cmake.in +++ b/llvm/cmake/modules/LLVMConfig.cmake.in @@ -35,6 +35,8 @@ set(TARGET_TRIPLE "@TARGET_TRIPLE@") set(LLVM_ABI_BREAKING_CHECKS @LLVM_ABI_BREAKING_CHECKS@) +set(LLVM_ENABLE_EXPENSIVE_CHECKS @LLVM_ENABLE_EXPENSIVE_CHECKS@) + set(LLVM_ENABLE_ASSERTIONS @LLVM_ENABLE_ASSERTIONS@) set(LLVM_ENABLE_EH @LLVM_ENABLE_EH@) From 705a6aef350246c790ff8e73864dd27a640c59c8 Mon Sep 17 00:00:00 2001 From: Reid Kleckner Date: Mon, 2 Dec 2019 15:22:44 -0800 Subject: [PATCH 09/12] [MS] Emit exported complete/vbase destructors Summary: Fixes PR44205 I checked, and deleting destructors are not affected. Reviewers: hans Subscribers: cfe-commits Tags: #clang Differential Revision: https://reviews.llvm.org/D70931 --- clang/lib/CodeGen/MicrosoftCXXABI.cpp | 7 +++++++ clang/test/CodeGenCXX/dllexport-dtor-thunks.cpp | 7 +++++++ clang/test/CodeGenCXX/dllimport-dtor-thunks.cpp | 6 +++--- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp index 8196df614cee8..800d02d5d0394 100644 --- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp +++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp @@ -1343,6 +1343,13 @@ void MicrosoftCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) { // The TU defining a dtor is only guaranteed to emit a base destructor. All // other destructor variants are delegating thunks. CGM.EmitGlobal(GlobalDecl(D, Dtor_Base)); + + // If the class is dllexported, emit the complete (vbase) destructor wherever + // the base dtor is emitted. + // FIXME: To match MSVC, this should only be done when the class is exported + // with -fdllexport-inlines enabled. 
+ if (D->getParent()->getNumVBases() > 0 && D->hasAttr<DLLExportAttr>()) + CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete)); } CharUnits diff --git a/clang/test/CodeGenCXX/dllexport-dtor-thunks.cpp b/clang/test/CodeGenCXX/dllexport-dtor-thunks.cpp index bda126eba855d..d2aa195e8cc3a 100644 --- a/clang/test/CodeGenCXX/dllexport-dtor-thunks.cpp +++ b/clang/test/CodeGenCXX/dllexport-dtor-thunks.cpp @@ -1,5 +1,12 @@ // RUN: %clang_cc1 -mconstructor-aliases -fms-extensions %s -emit-llvm -o - -triple x86_64-windows-msvc | FileCheck %s +namespace test1 { +struct A { ~A(); }; +struct __declspec(dllexport) B : virtual A { }; +// CHECK: define weak_odr dso_local dllexport void @"??1B@test1@@QEAA@XZ" +// CHECK: define weak_odr dso_local dllexport void @"??_DB@test1@@QEAAXXZ" +} + struct __declspec(dllexport) A { virtual ~A(); }; struct __declspec(dllexport) B { virtual ~B(); }; struct __declspec(dllexport) C : A, B { virtual ~C(); }; diff --git a/clang/test/CodeGenCXX/dllimport-dtor-thunks.cpp b/clang/test/CodeGenCXX/dllimport-dtor-thunks.cpp index da3227a49a4b5..53aa2cdbf3eef 100644 --- a/clang/test/CodeGenCXX/dllimport-dtor-thunks.cpp +++ b/clang/test/CodeGenCXX/dllimport-dtor-thunks.cpp @@ -19,9 +19,9 @@ struct __declspec(dllimport) ImportOverrideVDtor : public BaseClass { virtual ~ImportOverrideVDtor() {} }; -// Virtually inherits from a non-dllimport base class. This time we need to call -// the complete destructor and emit it inline. It's not exported from the DLL, -// and it must be emitted. +// Virtually inherits from a non-dllimport base class. In this case, we can +// expect the DLL to provide a definition of the complete dtor. See +// dllexport-dtor-thunks.cpp. struct __declspec(dllimport) ImportVBaseOverrideVDtor : public virtual BaseClass { virtual ~ImportVBaseOverrideVDtor() {} From 89618a7ce1c13dcb540d925626638c93cc85a553 Mon Sep 17 00:00:00 2001 From: Davide Italiano Date: Tue, 3 Dec 2019 14:54:59 -0800 Subject: [PATCH 10/12] [DataVisualization] Simplify. NFCI. --- lldb/source/DataFormatters/DataVisualization.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lldb/source/DataFormatters/DataVisualization.cpp b/lldb/source/DataFormatters/DataVisualization.cpp index 08b3b34447bba..e73d44f60f03f 100644 --- a/lldb/source/DataFormatters/DataVisualization.cpp +++ b/lldb/source/DataFormatters/DataVisualization.cpp @@ -122,8 +122,7 @@ void DataVisualization::Categories::Enable(ConstString category, TypeCategoryMap::Position pos) { if (GetFormatManager().GetCategory(category)->IsEnabled()) GetFormatManager().DisableCategory(category); - GetFormatManager().EnableCategory( - category, pos, std::initializer_list<lldb::LanguageType>()); + GetFormatManager().EnableCategory(category, pos, {}); } void DataVisualization::Categories::Enable(lldb::LanguageType lang_type) { From 0cfb4a6b3d9556c8fc55766bce47cbb433ff19fe Mon Sep 17 00:00:00 2001 From: Davide Italiano Date: Tue, 3 Dec 2019 15:02:54 -0800 Subject: [PATCH 11/12] [FormatManager] Provide only one variant of EnableCategory. All the callers pass a single language anyway.
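The shape of this simplification, sketched standalone below: when every call site of an std::initializer_list overload supplies at most one element, the list-taking variant can be dropped and the single-value overload becomes the only real implementation, with the no-argument wrapper forwarding a default. This is an illustrative, hypothetical reconstruction of the pattern, not LLDB's actual API: Manager, Category, and Language are stand-ins for FormatManager, TypeCategoryImpl, and lldb::LanguageType.

// Minimal sketch of the overload collapse in this patch (hypothetical types).
#include <string>
#include <vector>

enum class Language { Unknown, CPlusPlus, ObjC };

struct Category {
  std::string name;
  std::vector<Language> languages;
  void AddLanguage(Language lang) { languages.push_back(lang); }
};

class Manager {
  std::vector<Category> m_categories;

public:
  void AddCategory(std::string name) {
    m_categories.push_back(Category{std::move(name), {}});
  }

  // Wrapper for callers that don't care about a language; it forwards a
  // default value, mirroring the EnableCategory(category, pos, {}) call above.
  void EnableCategory(const std::string &name) {
    EnableCategory(name, Language::Unknown);
  }

  // The one real overload: since every caller passes at most one language,
  // no std::initializer_list variant is needed.
  void EnableCategory(const std::string &name, Language lang) {
    for (Category &cat : m_categories)
      if (cat.name == name)
        cat.AddLanguage(lang);
  }
};

int main() {
  Manager m;
  m.AddCategory("default");
  m.EnableCategory("default", Language::CPlusPlus);
  return 0;
}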
--- lldb/include/lldb/DataFormatters/FormatManager.h | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/lldb/include/lldb/DataFormatters/FormatManager.h b/lldb/include/lldb/DataFormatters/FormatManager.h index afaafda47e761..66df8397dfee4 100644 --- a/lldb/include/lldb/DataFormatters/FormatManager.h +++ b/lldb/include/lldb/DataFormatters/FormatManager.h @@ -52,24 +52,15 @@ class FormatManager : public IFormatChangeListener { void EnableCategory(ConstString category_name, TypeCategoryMap::Position pos = TypeCategoryMap::Default) { - EnableCategory(category_name, pos, - std::initializer_list<lldb::LanguageType>()); + EnableCategory(category_name, pos, {}); } void EnableCategory(ConstString category_name, TypeCategoryMap::Position pos, lldb::LanguageType lang) { - std::initializer_list<lldb::LanguageType> langs = {lang}; - EnableCategory(category_name, pos, langs); - } - - void EnableCategory(ConstString category_name, - TypeCategoryMap::Position pos = TypeCategoryMap::Default, - std::initializer_list<lldb::LanguageType> langs = {}) { TypeCategoryMap::ValueSP category_sp; if (m_categories_map.Get(category_name, category_sp) && category_sp) { m_categories_map.Enable(category_sp, pos); - for (const lldb::LanguageType lang : langs) - category_sp->AddLanguage(lang); + category_sp->AddLanguage(lang); } } From f139ae3d9379746164e8056c45817041417dfd4c Mon Sep 17 00:00:00 2001 From: Akira Hatanaka Date: Tue, 3 Dec 2019 15:17:01 -0800 Subject: [PATCH 12/12] [NFC] Pass a reference to CodeGenFunction to methods of LValue and AggValueSlot This reapplies 8a5b7c35709d9ce1f44a99f0c5b084bf2696ea17 after fixing a null dereference bug in CGOpenMPRuntime::emitUserDefinedMapper. Original commit message: This is needed for the pointer authentication work we plan to do in the near future. https://github.com/apple/llvm-project/blob/a63a81bd9911f87a0b5dcd5bdd7ccdda7124af87/clang/docs/PointerAuthentication.rst --- clang/lib/CodeGen/CGAtomic.cpp | 26 ++-- clang/lib/CodeGen/CGBlocks.cpp | 2 +- clang/lib/CodeGen/CGBuiltin.cpp | 14 +- clang/lib/CodeGen/CGCall.cpp | 34 ++--- clang/lib/CodeGen/CGClass.cpp | 31 ++--- clang/lib/CodeGen/CGDecl.cpp | 36 ++--- clang/lib/CodeGen/CGDeclCXX.cpp | 9 +- clang/lib/CodeGen/CGExpr.cpp | 108 ++++++++------- clang/lib/CodeGen/CGExprAgg.cpp | 59 ++++---- clang/lib/CodeGen/CGExprCXX.cpp | 34 ++--- clang/lib/CodeGen/CGExprComplex.cpp | 8 +- clang/lib/CodeGen/CGExprScalar.cpp | 25 ++-- clang/lib/CodeGen/CGNonTrivialStruct.cpp | 19 +-- clang/lib/CodeGen/CGObjC.cpp | 42 +++--- clang/lib/CodeGen/CGOpenMPRuntime.cpp | 153 +++++++++++---------- clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp | 31 +++-- clang/lib/CodeGen/CGStmt.cpp | 18 +-- clang/lib/CodeGen/CGStmtOpenMP.cpp | 97 ++++++------- clang/lib/CodeGen/CGValue.h | 25 ++-- clang/lib/CodeGen/CodeGenFunction.cpp | 6 +- clang/lib/CodeGen/TargetInfo.cpp | 2 +- 21 files changed, 397 insertions(+), 382 deletions(-) diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp index 039fe6da84201..d07aaf58681c2 100644 --- a/clang/lib/CodeGen/CGAtomic.cpp +++ b/clang/lib/CodeGen/CGAtomic.cpp @@ -139,7 +139,7 @@ namespace { const LValue &getAtomicLValue() const { return LVal; } llvm::Value *getAtomicPointer() const { if (LVal.isSimple()) - return LVal.getPointer(); + return LVal.getPointer(CGF); else if (LVal.isBitField()) return LVal.getBitFieldPointer(); else if (LVal.isVectorElt()) @@ -343,7 +343,7 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const { bool AtomicInfo::emitMemSetZeroIfNecessary() const { assert(LVal.isSimple()); - llvm::Value *addr =
LVal.getPointer(); + llvm::Value *addr = LVal.getPointer(CGF); if (!requiresMemSetZero(addr->getType()->getPointerElementType())) return false; @@ -1628,7 +1628,7 @@ Address AtomicInfo::materializeRValue(RValue rvalue) const { LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType()); AtomicInfo Atomics(CGF, TempLV); Atomics.emitCopyIntoMemory(rvalue); - return TempLV.getAddress(); + return TempLV.getAddress(CGF); } llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const { @@ -1975,8 +1975,8 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, // If this is an aggregate r-value, it should agree in type except // maybe for address-space qualification. assert(!rvalue.isAggregate() || - rvalue.getAggregateAddress().getElementType() - == dest.getAddress().getElementType()); + rvalue.getAggregateAddress().getElementType() == + dest.getAddress(*this).getElementType()); AtomicInfo atomics(*this, dest); LValue LVal = atomics.getAtomicLValue(); @@ -2043,10 +2043,10 @@ std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange( // maybe for address-space qualification. assert(!Expected.isAggregate() || Expected.getAggregateAddress().getElementType() == - Obj.getAddress().getElementType()); + Obj.getAddress(*this).getElementType()); assert(!Desired.isAggregate() || Desired.getAggregateAddress().getElementType() == - Obj.getAddress().getElementType()); + Obj.getAddress(*this).getElementType()); AtomicInfo Atomics(*this, Obj); return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure, @@ -2086,13 +2086,11 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) { } // Evaluate the expression directly into the destination. - AggValueSlot slot = AggValueSlot::forLValue(dest, - AggValueSlot::IsNotDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, - AggValueSlot::DoesNotOverlap, - Zeroed ? AggValueSlot::IsZeroed : - AggValueSlot::IsNotZeroed); + AggValueSlot slot = AggValueSlot::forLValue( + dest, *this, AggValueSlot::IsNotDestructed, + AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, + AggValueSlot::DoesNotOverlap, + Zeroed ?
AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed); EmitAggExpr(init, slot); return; diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp index f90d9439af257..6a1a73955319c 100644 --- a/clang/lib/CodeGen/CGBlocks.cpp +++ b/clang/lib/CodeGen/CGBlocks.cpp @@ -1076,7 +1076,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { /*RefersToEnclosingVariableOrCapture*/ CI.isNested(), type.getNonReferenceType(), VK_LValue, SourceLocation()); - src = EmitDeclRefLValue(&declRef).getAddress(); + src = EmitDeclRefLValue(&declRef).getAddress(*this); }; // For byrefs, we just write the pointer to the byref struct into diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 68706d78cd111..b5b0c3e61d479 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -3367,7 +3367,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(Carry); } case Builtin::BI__builtin_addressof: - return RValue::get(EmitLValue(E->getArg(0)).getPointer()); + return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this)); case Builtin::BI__builtin_operator_new: return EmitBuiltinNewDeleteCall( E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false); @@ -3750,8 +3750,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm::Value *Queue = EmitScalarExpr(E->getArg(0)); llvm::Value *Flags = EmitScalarExpr(E->getArg(1)); LValue NDRangeL = EmitAggExprToLValue(E->getArg(2)); - llvm::Value *Range = NDRangeL.getAddress().getPointer(); - llvm::Type *RangeTy = NDRangeL.getAddress().getType(); + llvm::Value *Range = NDRangeL.getAddress(*this).getPointer(); + llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType(); if (NumArgs == 4) { // The most basic form of the call with parameters: @@ -3770,7 +3770,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); AttrBuilder B; - B.addByValAttr(NDRangeL.getAddress().getElementType()); + B.addByValAttr(NDRangeL.getAddress(*this).getElementType()); llvm::AttributeList ByValAttrSet = llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B); @@ -3955,7 +3955,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( getContext().getTargetAddressSpace(LangAS::opencl_generic)); LValue NDRangeL = EmitAggExprToLValue(E->getArg(0)); - llvm::Value *NDRange = NDRangeL.getAddress().getPointer(); + llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer(); auto Info = CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1)); Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); @@ -9470,14 +9470,14 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID, if (!getDebugInfo()) { CGM.Error(E->getExprLoc(), "using builtin_preserve_field_info() without -g"); return IsBitField ? EmitLValue(Arg).getBitFieldPointer() - : EmitLValue(Arg).getPointer(); + : EmitLValue(Arg).getPointer(*this); } // Enable underlying preserve_*_access_index() generation. bool OldIsInPreservedAIRegion = IsInPreservedAIRegion; IsInPreservedAIRegion = true; Value *FieldAddr = IsBitField ?
EmitLValue(Arg).getBitFieldPointer() - : EmitLValue(Arg).getPointer(); + : EmitLValue(Arg).getPointer(*this); IsInPreservedAIRegion = OldIsInPreservedAIRegion; ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index dc4c5ca2159f0..f992f904fe753 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -1020,13 +1020,13 @@ void CodeGenFunction::ExpandTypeFromArgs( auto Exp = getTypeExpansion(Ty, getContext()); if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { - forConstantArrayExpansion(*this, CAExp, LV.getAddress(), - [&](Address EltAddr) { - LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); - ExpandTypeFromArgs(CAExp->EltTy, LV, AI); - }); + forConstantArrayExpansion( + *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) { + LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); + ExpandTypeFromArgs(CAExp->EltTy, LV, AI); + }); } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { - Address This = LV.getAddress(); + Address This = LV.getAddress(*this); for (const CXXBaseSpecifier *BS : RExp->Bases) { // Perform a single step derived-to-base conversion. Address Base = @@ -1057,7 +1057,7 @@ void CodeGenFunction::ExpandTypeToArgs( SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) { auto Exp = getTypeExpansion(Ty, getContext()); if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { - Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress() + Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) : Arg.getKnownRValue().getAggregateAddress(); forConstantArrayExpansion( *this, CAExp, Addr, [&](Address EltAddr) { @@ -1068,7 +1068,7 @@ void CodeGenFunction::ExpandTypeToArgs( IRCallArgPos); }); } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { - Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress() + Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) : Arg.getKnownRValue().getAggregateAddress(); for (const CXXBaseSpecifier *BS : RExp->Bases) { // Perform a single step derived-to-base conversion. @@ -3138,7 +3138,7 @@ static bool isProvablyNull(llvm::Value *addr) { static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback) { const LValue &srcLV = writeback.Source; - Address srcAddr = srcLV.getAddress(); + Address srcAddr = srcLV.getAddress(CGF); assert(!isProvablyNull(srcAddr.getPointer()) && "shouldn't have writeback for provably null argument"); @@ -3246,7 +3246,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); } - Address srcAddr = srcLV.getAddress(); + Address srcAddr = srcLV.getAddress(CGF); // The dest and src types don't necessarily match in LLVM terms // because of the crazy ObjC compatibility rules. @@ -3560,7 +3560,7 @@ RValue CallArg::getRValue(CodeGenFunction &CGF) const { CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, LV.isVolatile()); IsUsed = true; - return RValue::getAggregate(Copy.getAddress()); + return RValue::getAggregate(Copy.getAddress(CGF)); } void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { @@ -3570,7 +3570,7 @@ void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { else if (!HasLV && RV.isComplex()) CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); else { - auto Addr = HasLV ?
LV.getAddress(CGF) : RV.getAggregateAddress(); LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); // We assume that call args are never copied into subobjects. CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, @@ -3933,7 +3933,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, if (I->isAggregate()) { // Replace the placeholder with the appropriate argument slot GEP. Address Addr = I->hasLValue() - ? I->getKnownLValue().getAddress() + ? I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress(); llvm::Instruction *Placeholder = cast(Addr.getPointer()); @@ -3978,7 +3978,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, // 3. If the argument is byval, but RV is not located in default // or alloca address space. Address Addr = I->hasLValue() - ? I->getKnownLValue().getAddress() + ? I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress(); llvm::Value *V = Addr.getPointer(); CharUnits Align = ArgInfo.getIndirectAlign(); @@ -4065,7 +4065,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, V = I->getKnownRValue().getScalarVal(); else V = Builder.CreateLoad( - I->hasLValue() ? I->getKnownLValue().getAddress() + I->hasLValue() ? I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress()); // Implement swifterror by copying into a new swifterror argument. @@ -4108,7 +4108,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, Src = CreateMemTemp(I->Ty, "coerce"); I->copyInto(*this, Src); } else { - Src = I->hasLValue() ? I->getKnownLValue().getAddress() + Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress(); } @@ -4163,7 +4163,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, Address addr = Address::invalid(); Address AllocaAddr = Address::invalid(); if (I->isAggregate()) { - addr = I->hasLValue() ? I->getKnownLValue().getAddress() + addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this) : I->getKnownRValue().getAggregateAddress(); } else { diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp index d07b1c665cc43..64c4d3e423fdd 100644 --- a/clang/lib/CodeGen/CGClass.cpp +++ b/clang/lib/CodeGen/CGClass.cpp @@ -657,7 +657,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF, // the constructor. QualType::DestructionKind dtorKind = FieldType.isDestructedType(); if (CGF.needsEHCleanup(dtorKind)) - CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType); + CGF.pushEHDestroy(dtorKind, LHS.getAddress(CGF), FieldType); return; } } @@ -681,16 +681,12 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS, EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true); break; case TEK_Aggregate: { - AggValueSlot Slot = - AggValueSlot::forLValue( - LHS, - AggValueSlot::IsDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, - getOverlapForFieldInit(Field), - AggValueSlot::IsNotZeroed, - // Checks are made by the code that calls constructor. - AggValueSlot::IsSanitizerChecked); + AggValueSlot Slot = AggValueSlot::forLValue( + LHS, *this, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, + getOverlapForFieldInit(Field), AggValueSlot::IsNotZeroed, + // Checks are made by the code that calls constructor. 
+ AggValueSlot::IsSanitizerChecked); EmitAggExpr(Init, Slot); break; } @@ -700,7 +696,7 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS, // later in the constructor. QualType::DestructionKind dtorKind = FieldType.isDestructedType(); if (needsEHCleanup(dtorKind)) - pushEHDestroy(dtorKind, LHS.getAddress(), FieldType); + pushEHDestroy(dtorKind, LHS.getAddress(*this), FieldType); } /// Checks whether the given constructor is a valid subject for the @@ -963,9 +959,10 @@ namespace { LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy); LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField); - emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(), - Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(), - MemcpySize); + emitMemcpyIR( + Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(CGF), + Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(CGF), + MemcpySize); reset(); } @@ -1119,7 +1116,7 @@ namespace { continue; LValue FieldLHS = LHS; EmitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS); - CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType); + CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(CGF), FieldType); } } @@ -1629,7 +1626,7 @@ namespace { LValue LV = CGF.EmitLValueForField(ThisLV, field); assert(LV.isSimple()); - CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer, + CGF.emitDestroy(LV.getAddress(CGF), field->getType(), destroyer, flags.isForNormalCleanup() && useEHCleanupForArray); } }; diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp index f60628b9b622d..2ddc1a66e1804 100644 --- a/clang/lib/CodeGen/CGDecl.cpp +++ b/clang/lib/CodeGen/CGDecl.cpp @@ -570,7 +570,7 @@ namespace { Var.getType(), VK_LValue, SourceLocation()); // Compute the address of the local variable, in case it's a byref // or something. - llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(); + llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF); // In some cases, the type of the function argument will be different from // the type of the pointer. An example of this is @@ -685,18 +685,18 @@ static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF, LValue srcLV = CGF.EmitLValue(srcExpr); // Handle a formal type change to avoid asserting. - auto srcAddr = srcLV.getAddress(); + auto srcAddr = srcLV.getAddress(CGF); if (needsCast) { - srcAddr = CGF.Builder.CreateElementBitCast(srcAddr, - destLV.getAddress().getElementType()); + srcAddr = CGF.Builder.CreateElementBitCast( + srcAddr, destLV.getAddress(CGF).getElementType()); } // If it was an l-value, use objc_copyWeak. 
if (srcExpr->getValueKind() == VK_LValue) { - CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr); + CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr); } else { assert(srcExpr->getValueKind() == VK_XValue); - CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr); + CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr); } return true; } @@ -714,7 +714,7 @@ static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF, static void drillIntoBlockVariable(CodeGenFunction &CGF, LValue &lvalue, const VarDecl *var) { - lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var)); + lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var)); } void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, @@ -774,17 +774,18 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D, if (capturedByInit) { // We can use a simple GEP for this because it can't have been // moved yet. - tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(), + tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this), cast<VarDecl>(D), /*follow*/ false)); } - auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType()); + auto ty = + cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType()); llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType()); // If __weak, we want to use a barrier under certain conditions. if (lifetime == Qualifiers::OCL_Weak) - EmitARCInitWeak(tempLV.getAddress(), zero); + EmitARCInitWeak(tempLV.getAddress(*this), zero); // Otherwise just do a simple store. else @@ -827,9 +828,9 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D, if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D)); if (accessedByInit) - EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true); + EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true); else - EmitARCInitWeak(lvalue.getAddress(), value); + EmitARCInitWeak(lvalue.getAddress(*this), value); return; } @@ -1897,11 +1898,10 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D, else if (auto *FD = dyn_cast<FieldDecl>(D)) Overlap = getOverlapForFieldInit(FD); // TODO: how can we delay here if D is captured by its initializer? - EmitAggExpr(init, AggValueSlot::forLValue(lvalue, - AggValueSlot::IsDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, - Overlap)); + EmitAggExpr(init, AggValueSlot::forLValue( + lvalue, *this, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, Overlap)); } return; } @@ -2457,7 +2457,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg, // objc_storeStrong attempts to release its old value.
llvm::Value *Null = CGM.EmitNullConstant(D.getType()); EmitStoreOfScalar(Null, lv, /* isInitialization */ true); - EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true); + EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true); DoStore = false; } else diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp index 5b172a3480be1..b19a728d0faaa 100644 --- a/clang/lib/CodeGen/CGDeclCXX.cpp +++ b/clang/lib/CodeGen/CGDeclCXX.cpp @@ -54,10 +54,11 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D, CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true); return; case TEK_Aggregate: - CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, - AggValueSlot::DoesNotOverlap)); + CGF.EmitAggExpr(Init, + AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, + AggValueSlot::DoesNotOverlap)); return; } llvm_unreachable("bad evaluation kind"); diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index 04c6504910b8d..6e3a26e2c78aa 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -573,7 +573,7 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) { LV = EmitLValueForField(LV, Adjustment.Field); assert(LV.isSimple() && "materialized temporary field is not a simple lvalue"); - Object = LV.getAddress(); + Object = LV.getAddress(*this); break; } @@ -594,7 +594,7 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) { // Emit the expression as an lvalue. LValue LV = EmitLValue(E); assert(LV.isSimple()); - llvm::Value *Value = LV.getPointer(); + llvm::Value *Value = LV.getPointer(*this); if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) { // C++11 [dcl.ref]p5 (as amended by core issue 453): @@ -1129,7 +1129,7 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E, LValue LV = EmitLValue(UO->getSubExpr()); if (BaseInfo) *BaseInfo = LV.getBaseInfo(); if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo(); - return LV.getAddress(); + return LV.getAddress(*this); } } @@ -1219,8 +1219,8 @@ LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) { if (IsBaseCXXThis || isa(ME->getBase())) SkippedChecks.set(SanitizerKind::Null, true); } - EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(), - E->getType(), LV.getAlignment(), SkippedChecks); + EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(), + LV.getAlignment(), SkippedChecks); } return LV; } @@ -1307,7 +1307,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) { if (LV.isSimple()) { // Defend against branches out of gnu statement expressions surrounded by // cleanups. 
- llvm::Value *V = LV.getPointer(); + llvm::Value *V = LV.getPointer(*this); Scope.ForceCleanup({&V}); return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(), getContext(), LV.getBaseInfo(), LV.getTBAAInfo()); @@ -1523,7 +1523,7 @@ llvm::Value *CodeGenFunction::emitScalarConstant( llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue, SourceLocation Loc) { - return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), + return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(), lvalue.getType(), Loc, lvalue.getBaseInfo(), lvalue.getTBAAInfo(), lvalue.isNontemporal()); } @@ -1763,7 +1763,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr, void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit) { - EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), + EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(), lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); } @@ -1774,18 +1774,18 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue, RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) { if (LV.isObjCWeak()) { // load of a __weak object. - Address AddrWeakObj = LV.getAddress(); + Address AddrWeakObj = LV.getAddress(*this); return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this, AddrWeakObj)); } if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { // In MRC mode, we do a load+autorelease. if (!getLangOpts().ObjCAutoRefCount) { - return RValue::get(EmitARCLoadWeak(LV.getAddress())); + return RValue::get(EmitARCLoadWeak(LV.getAddress(*this))); } // In ARC mode, we load retained and then consume the value. - llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress()); + llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this)); Object = EmitObjCConsumeObject(LV.getType(), Object); return RValue::get(Object); } @@ -1971,9 +1971,10 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, case Qualifiers::OCL_Weak: if (isInit) // Initialize and then skip the primitive store. - EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal()); + EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal()); else - EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true); + EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(), + /*ignore*/ true); return; case Qualifiers::OCL_Autoreleasing: @@ -1986,7 +1987,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, if (Dst.isObjCWeak() && !Dst.isNonGC()) { // load of a __weak object. - Address LvalueDst = Dst.getAddress(); + Address LvalueDst = Dst.getAddress(*this); llvm::Value *src = Src.getScalarVal(); CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst); return; @@ -1994,7 +1995,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, if (Dst.isObjCStrong() && !Dst.isNonGC()) { // load of a __strong object. 
- Address LvalueDst = Dst.getAddress(); + Address LvalueDst = Dst.getAddress(*this); llvm::Value *src = Src.getScalarVal(); if (Dst.isObjCIvar()) { assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL"); @@ -2320,8 +2321,8 @@ Address CodeGenFunction::EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo, TBAAAccessInfo *PointeeTBAAInfo) { - llvm::LoadInst *Load = Builder.CreateLoad(RefLVal.getAddress(), - RefLVal.isVolatile()); + llvm::LoadInst *Load = + Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile()); CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo()); CharUnits Align = getNaturalTypeAlignment(RefLVal.getType()->getPointeeType(), @@ -2577,7 +2578,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD), CapturedStmtInfo->getContextValue()); return MakeAddrLValue( - Address(CapLVal.getPointer(), getContext().getDeclAlign(VD)), + Address(CapLVal.getPointer(*this), getContext().getDeclAlign(VD)), CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl), CapLVal.getTBAAInfo()); } @@ -2712,7 +2713,7 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { // __real is valid on scalars. This is a faster way of testing that. // __imag can only produce an rvalue on scalars. if (E->getOpcode() == UO_Real && - !LV.getAddress().getElementType()->isStructTy()) { + !LV.getAddress(*this).getElementType()->isStructTy()) { assert(E->getSubExpr()->getType()->isArithmeticType()); return LV; } @@ -2720,9 +2721,9 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { QualType T = ExprTy->castAs()->getElementType(); Address Component = - (E->getOpcode() == UO_Real - ? emitAddrOfRealComponent(LV.getAddress(), LV.getType()) - : emitAddrOfImagComponent(LV.getAddress(), LV.getType())); + (E->getOpcode() == UO_Real + ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType()) + : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType())); LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(), CGM.getTBAAInfoForSubobject(LV, T)); ElemLV.getQuals().addQualifiers(LV.getQuals()); @@ -3322,7 +3323,7 @@ Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E, // Expressions of array type can't be bitfields or vector elements. LValue LV = EmitLValue(E); - Address Addr = LV.getAddress(); + Address Addr = LV.getAddress(*this); // If the array type was an incomplete type, we need to make sure // the decay ends up being the right type. @@ -3525,8 +3526,9 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, LValue LHS = EmitLValue(E->getBase()); auto *Idx = EmitIdxAfterBase(/*Promote*/false); assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); - return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(), - LHS.getBaseInfo(), TBAAAccessInfo()); + return LValue::MakeVectorElt(LHS.getAddress(*this), Idx, + E->getBase()->getType(), LHS.getBaseInfo(), + TBAAAccessInfo()); } // All the other cases basically behave like simple offsetting. @@ -3621,7 +3623,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, // Propagate the alignment from the array itself to the result. 
QualType arrayType = Array->getType(); Addr = emitArraySubscriptGEP( - *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx}, + *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx}, E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, E->getExprLoc(), &arrayType, E->getBase()); EltBaseInfo = ArrayLV.getBaseInfo(); @@ -3656,7 +3658,7 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, if (auto *ASE = dyn_cast(Base->IgnoreParenImpCasts())) { BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound); if (BaseTy->isArrayType()) { - Address Addr = BaseLVal.getAddress(); + Address Addr = BaseLVal.getAddress(CGF); BaseInfo = BaseLVal.getBaseInfo(); // If the array type was an incomplete type, we need to make sure @@ -3681,7 +3683,7 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, &TypeTBAAInfo); BaseInfo.mergeForCast(TypeBaseInfo); TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo); - return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()), Align); + return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), Align); } return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo); } @@ -3822,7 +3824,7 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E, // Propagate the alignment from the array itself to the result. EltPtr = emitArraySubscriptGEP( - *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx}, + *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx}, ResultExprTy, !getLangOpts().isSignedOverflowDefined(), /*signedIndices=*/false, E->getExprLoc()); BaseInfo = ArrayLV.getBaseInfo(); @@ -3882,7 +3884,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { if (Base.isSimple()) { llvm::Constant *CV = llvm::ConstantDataVector::get(getLLVMContext(), Indices); - return LValue::MakeExtVectorElt(Base.getAddress(), CV, type, + return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type, Base.getBaseInfo(), TBAAAccessInfo()); } assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); @@ -4033,7 +4035,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base, const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(field->getParent()); const CGBitFieldInfo &Info = RL.getBitFieldInfo(field); - Address Addr = base.getAddress(); + Address Addr = base.getAddress(*this); unsigned Idx = RL.getLLVMFieldNo(field); const RecordDecl *rec = field->getParent(); if (!IsInPreservedAIRegion && @@ -4101,7 +4103,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base, getContext().getTypeSizeInChars(FieldType).getQuantity(); } - Address addr = base.getAddress(); + Address addr = base.getAddress(*this); if (auto *ClassDef = dyn_cast(rec)) { if (CGM.getCodeGenOpts().StrictVTablePointers && ClassDef->isDynamicClass()) { @@ -4189,7 +4191,7 @@ CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, if (!FieldType->isReferenceType()) return EmitLValueForField(Base, Field); - Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field); + Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field); // Make sure that the address is pointing to the right type. 
llvm::Type *llvmType = ConvertTypeForMem(FieldType); @@ -4307,10 +4309,10 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { EmitBlock(contBlock); if (lhs && rhs) { - llvm::PHINode *phi = Builder.CreatePHI(lhs->getPointer()->getType(), - 2, "cond-lvalue"); - phi->addIncoming(lhs->getPointer(), lhsBlock); - phi->addIncoming(rhs->getPointer(), rhsBlock); + llvm::PHINode *phi = + Builder.CreatePHI(lhs->getPointer(*this)->getType(), 2, "cond-lvalue"); + phi->addIncoming(lhs->getPointer(*this), lhsBlock); + phi->addIncoming(rhs->getPointer(*this), rhsBlock); Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment())); AlignmentSource alignSource = std::max(lhs->getBaseInfo().getAlignmentSource(), @@ -4393,7 +4395,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { case CK_Dynamic: { LValue LV = EmitLValue(E->getSubExpr()); - Address V = LV.getAddress(); + Address V = LV.getAddress(*this); const auto *DCE = cast<CXXDynamicCastExpr>(E); return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType()); } @@ -4413,7 +4415,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); LValue LV = EmitLValue(E->getSubExpr()); - Address This = LV.getAddress(); + Address This = LV.getAddress(*this); // Perform the derived-to-base conversion Address Base = GetAddressOfBaseClass( @@ -4435,10 +4437,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { LValue LV = EmitLValue(E->getSubExpr()); // Perform the base-to-derived conversion - Address Derived = - GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl, - E->path_begin(), E->path_end(), - /*NullCheckValue=*/false); + Address Derived = GetAddressOfDerivedClass( + LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(), + /*NullCheckValue=*/false); // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is // performed and the object is not of the derived type.
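Every hunk in this patch follows the same mechanical rule: LValue::getAddress() and LValue::getPointer() now take the enclosing CodeGenFunction, passed as *this inside CodeGenFunction member functions and as CGF inside static helpers that already receive one. A minimal standalone sketch of that shape, using toy stand-in types rather than clang's real CodeGen classes (the PerFunctionState field and adjust() hook are invented placeholders for whatever context-dependent work the accessor might later do):

#include <cassert>
#include <cstdint>

// Toy stand-ins; illustrative only, not clang's API.
struct Address { std::uintptr_t Ptr = 0; };

struct CodeGenFunction; // the context the accessor now requires

struct LValue {
  Address Addr;
  // Before: Address getAddress() const;
  // After: the accessor receives the function context, so it can
  // consult per-function state when materializing the address.
  Address getAddress(CodeGenFunction &CGF) const;
};

struct CodeGenFunction {
  bool PerFunctionState = false;                // hypothetical state
  Address adjust(Address A) const { return A; } // hypothetical hook
};

Address LValue::getAddress(CodeGenFunction &CGF) const {
  // A context-dependent step can now live here instead of at call sites.
  return CGF.PerFunctionState ? CGF.adjust(Addr) : Addr;
}

// Members pass *this; free helpers forward the CGF they were given.
static Address helperUsingLValue(CodeGenFunction &CGF, const LValue &LV) {
  return LV.getAddress(CGF);
}

int main() {
  CodeGenFunction CGF;
  LValue LV{Address{0x10}};
  assert(helperUsingLValue(CGF, LV).Ptr == 0x10);
  return 0;
}

Threading the context through the accessor means a later change can alter how addresses are produced per function without touching every call site again.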
@@ -4460,7 +4461,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { CGM.EmitExplicitCastExprType(CE, this); LValue LV = EmitLValue(E->getSubExpr()); - Address V = Builder.CreateBitCast(LV.getAddress(), + Address V = Builder.CreateBitCast(LV.getAddress(*this), ConvertType(CE->getTypeAsWritten())); if (SanOpts.has(SanitizerKind::CFIUnrelatedCast)) @@ -4475,14 +4476,15 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { LValue LV = EmitLValue(E->getSubExpr()); QualType DestTy = getContext().getPointerType(E->getType()); llvm::Value *V = getTargetHooks().performAddrSpaceCast( - *this, LV.getPointer(), E->getSubExpr()->getType().getAddressSpace(), + *this, LV.getPointer(*this), + E->getSubExpr()->getType().getAddressSpace(), E->getType().getAddressSpace(), ConvertType(DestTy)); - return MakeAddrLValue(Address(V, LV.getAddress().getAlignment()), + return MakeAddrLValue(Address(V, LV.getAddress(*this).getAlignment()), E->getType(), LV.getBaseInfo(), LV.getTBAAInfo()); } case CK_ObjCObjectLValueCast: { LValue LV = EmitLValue(E->getSubExpr()); - Address V = Builder.CreateElementBitCast(LV.getAddress(), + Address V = Builder.CreateElementBitCast(LV.getAddress(*this), ConvertType(E->getType())); return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(), CGM.getTBAAInfoForSubobject(LV, E->getType())); @@ -4536,12 +4538,12 @@ RValue CodeGenFunction::EmitRValueForField(LValue LV, case TEK_Complex: return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc)); case TEK_Aggregate: - return FieldLV.asAggregateRValue(); + return FieldLV.asAggregateRValue(*this); case TEK_Scalar: // This routine is used to load fields one-by-one to perform a copy, so // don't load reference fields. if (FD->getType()->isReferenceType()) - return RValue::get(FieldLV.getPointer()); + return RValue::get(FieldLV.getPointer(*this)); return EmitLoadOfLValue(FieldLV, Loc); } llvm_unreachable("bad evaluation kind"); @@ -4636,7 +4638,7 @@ CGCallee CodeGenFunction::EmitCallee(const Expr *E) { functionType = ptrType->getPointeeType(); } else { functionType = E->getType(); - calleePtr = EmitLValue(E).getPointer(); + calleePtr = EmitLValue(E).getPointer(*this); } assert(functionType->isFunctionType()); @@ -4796,7 +4798,7 @@ LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { BaseQuals = ObjectTy.getQualifiers(); } else { LValue BaseLV = EmitLValue(BaseExpr); - BaseValue = BaseLV.getPointer(); + BaseValue = BaseLV.getPointer(*this); ObjectTy = BaseExpr->getType(); BaseQuals = ObjectTy.getQualifiers(); } @@ -5006,7 +5008,7 @@ EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { if (E->getOpcode() == BO_PtrMemI) { BaseAddr = EmitPointerWithAlignment(E->getLHS()); } else { - BaseAddr = EmitLValue(E->getLHS()).getAddress(); + BaseAddr = EmitLValue(E->getLHS()).getAddress(*this); } llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); @@ -5033,7 +5035,7 @@ RValue CodeGenFunction::convertTempToRValue(Address addr, case TEK_Complex: return RValue::getComplex(EmitLoadOfComplex(lvalue, loc)); case TEK_Aggregate: - return lvalue.asAggregateRValue(); + return lvalue.asAggregateRValue(*this); case TEK_Scalar: return RValue::get(EmitLoadOfScalar(lvalue, loc)); } diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp index ecb5253c07ec3..41a9329386559 100644 --- a/clang/lib/CodeGen/CGExprAgg.cpp +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -345,10 +345,9 @@ void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src, } } - AggValueSlot srcAgg = - AggValueSlot::forLValue(src, 
AggValueSlot::IsDestructed, - needsGC(type), AggValueSlot::IsAliased, - AggValueSlot::MayOverlap); + AggValueSlot srcAgg = AggValueSlot::forLValue( + src, CGF, AggValueSlot::IsDestructed, needsGC(type), + AggValueSlot::IsAliased, AggValueSlot::MayOverlap); EmitCopy(type, Dest, srcAgg); } @@ -386,7 +385,7 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { ASTContext &Ctx = CGF.getContext(); LValue Array = CGF.EmitLValue(E->getSubExpr()); assert(Array.isSimple() && "initializer_list array not a simple lvalue"); - Address ArrayPtr = Array.getAddress(); + Address ArrayPtr = Array.getAddress(CGF); const ConstantArrayType *ArrayType = Ctx.getAsConstantArrayType(E->getSubExpr()->getType()); @@ -688,7 +687,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { CodeGenFunction::TCK_Load); // FIXME: Do we also need to handle property references here? if (LV.isSimple()) - CGF.EmitDynamicCast(LV.getAddress(), cast(E)); + CGF.EmitDynamicCast(LV.getAddress(CGF), cast(E)); else CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast"); @@ -723,7 +722,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { LValue SourceLV = CGF.EmitLValue(E->getSubExpr()); Address SourceAddress = - Builder.CreateElementBitCast(SourceLV.getAddress(), CGF.Int8Ty); + Builder.CreateElementBitCast(SourceLV.getAddress(CGF), CGF.Int8Ty); Address DestAddress = Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty); llvm::Value *SizeVal = llvm::ConstantInt::get( @@ -1163,7 +1162,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) { } EmitCopy(E->getLHS()->getType(), - AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed, + AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()), AggValueSlot::IsAliased, AggValueSlot::MayOverlap), @@ -1184,11 +1183,9 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) { } // Codegen the RHS so that it stores directly into the LHS. - AggValueSlot LHSSlot = - AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed, - needsGC(E->getLHS()->getType()), - AggValueSlot::IsAliased, - AggValueSlot::MayOverlap); + AggValueSlot LHSSlot = AggValueSlot::forLValue( + LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()), + AggValueSlot::IsAliased, AggValueSlot::MayOverlap); // A non-volatile aggregate destination might have volatile member. 
if (!LHSSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType())) @@ -1320,7 +1317,7 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { llvm::Constant::getNullValue(CGF.Int8PtrTy), CharUnits::One()); // placeholder - CGF.pushDestroy(EHCleanup, LV.getAddress(), CurField->getType(), + CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(), CGF.getDestroyer(DtorKind), false); Cleanups.push_back(CGF.EHStack.stable_begin()); } @@ -1408,12 +1405,11 @@ AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) { CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true); return; case TEK_Aggregate: - CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV, - AggValueSlot::IsDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, - AggValueSlot::MayOverlap, - Dest.isZeroed())); + CGF.EmitAggExpr( + E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, + AggValueSlot::MayOverlap, Dest.isZeroed())); return; case TEK_Scalar: if (LV.isSimple()) { @@ -1449,7 +1445,7 @@ void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) { // There's a potential optimization opportunity in combining // memsets; that would be easy for arrays, but relatively // difficult for structures with the current code. - CGF.EmitNullInitialization(lv.getAddress(), lv.getType()); + CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType()); } } @@ -1606,7 +1602,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { = field->getType().isDestructedType()) { assert(LV.isSimple()); if (CGF.needsEHCleanup(dtorKind)) { - CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(), + CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(), CGF.getDestroyer(dtorKind), false); addCleanup(CGF.EHStack.stable_begin()); pushedCleanup = true; @@ -1617,7 +1613,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { // else, clean it up for -O0 builds and general tidiness. if (!pushedCleanup && LV.isSimple()) if (llvm::GetElementPtrInst *GEP = - dyn_cast(LV.getPointer())) + dyn_cast(LV.getPointer(CGF))) if (GEP->use_empty()) GEP->eraseFromParent(); } @@ -1699,9 +1695,8 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, if (InnerLoop) { // If the subexpression is an ArrayInitLoopExpr, share its cleanup. 
auto elementSlot = AggValueSlot::forLValue( - elementLV, AggValueSlot::IsDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, + elementLV, CGF, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap); AggExprEmitter(CGF, elementSlot, false) .VisitArrayInitLoopExpr(InnerLoop, outerBegin); @@ -1864,10 +1859,10 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) { assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!"); Address Temp = CreateMemTemp(E->getType()); LValue LV = MakeAddrLValue(Temp, E->getType()); - EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, - AggValueSlot::DoesNotOverlap)); + EmitAggExpr(E, AggValueSlot::forLValue( + LV, *this, AggValueSlot::IsNotDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap)); return LV; } @@ -1916,8 +1911,8 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty, bool isVolatile) { assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex"); - Address DestPtr = Dest.getAddress(); - Address SrcPtr = Src.getAddress(); + Address DestPtr = Dest.getAddress(*this); + Address SrcPtr = Src.getAddress(*this); if (getLangOpts().CPlusPlus) { if (const RecordType *RT = Ty->getAs()) { diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp index 114d806d454bb..269b80b434032 100644 --- a/clang/lib/CodeGen/CGExprCXX.cpp +++ b/clang/lib/CodeGen/CGExprCXX.cpp @@ -133,7 +133,7 @@ RValue CodeGenFunction::EmitCXXPseudoDestructorExpr( BaseQuals = PTy->getPointeeType().getQualifiers(); } else { LValue BaseLV = EmitLValue(BaseExpr); - BaseValue = BaseLV.getAddress(); + BaseValue = BaseLV.getAddress(*this); QualType BaseTy = BaseExpr->getType(); BaseQuals = BaseTy.getQualifiers(); } @@ -271,11 +271,11 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( assert(ReturnValue.isNull() && "Constructor shouldn't have return value"); CallArgList Args; commonEmitCXXMemberOrOperatorCall( - *this, Ctor, This.getPointer(), /*ImplicitParam=*/nullptr, + *this, Ctor, This.getPointer(*this), /*ImplicitParam=*/nullptr, /*ImplicitParamTy=*/QualType(), CE, Args, nullptr); EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false, - /*Delegating=*/false, This.getAddress(), Args, + /*Delegating=*/false, This.getAddress(*this), Args, AggValueSlot::DoesNotOverlap, CE->getExprLoc(), /*NewPointerIsChecked=*/false); return RValue::get(nullptr); @@ -293,7 +293,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( (*(CE->arg_begin() + 1))->getType()) : EmitLValue(*CE->arg_begin()); EmitAggregateAssign(This, RHS, CE->getType()); - return RValue::get(This.getPointer()); + return RValue::get(This.getPointer(*this)); } llvm_unreachable("unknown trivial member function"); } @@ -328,7 +328,8 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( if (IsImplicitObjectCXXThis || isa(IOA)) SkippedChecks.set(SanitizerKind::Null, true); } - EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc, This.getPointer(), + EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc, + This.getPointer(*this), C.getRecordType(CalleeDecl->getParent()), /*Alignment=*/CharUnits::Zero(), SkippedChecks); @@ -345,9 +346,9 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( "Destructor shouldn't have explicit parameters"); 
assert(ReturnValue.isNull() && "Destructor shouldn't have return value"); if (UseVirtualCall) { - CGM.getCXXABI().EmitVirtualDestructorCall( - *this, Dtor, Dtor_Complete, This.getAddress(), - cast(CE)); + CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete, + This.getAddress(*this), + cast(CE)); } else { GlobalDecl GD(Dtor, Dtor_Complete); CGCallee Callee; @@ -362,7 +363,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( QualType ThisTy = IsArrow ? Base->getType()->getPointeeType() : Base->getType(); - EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, + EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy, /*ImplicitParam=*/nullptr, /*ImplicitParamTy=*/QualType(), nullptr); } @@ -374,15 +375,14 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( CGCallee Callee; if (UseVirtualCall) { - Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty); + Callee = CGCallee::forVirtual(CE, MD, This.getAddress(*this), Ty); } else { if (SanOpts.has(SanitizerKind::CFINVCall) && MD->getParent()->isDynamicClass()) { llvm::Value *VTable; const CXXRecordDecl *RD; - std::tie(VTable, RD) = - CGM.getCXXABI().LoadVTablePtr(*this, This.getAddress(), - CalleeDecl->getParent()); + std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr( + *this, This.getAddress(*this), CalleeDecl->getParent()); EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc()); } @@ -401,12 +401,12 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( if (MD->isVirtual()) { Address NewThisAddr = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall( - *this, CalleeDecl, This.getAddress(), UseVirtualCall); + *this, CalleeDecl, This.getAddress(*this), UseVirtualCall); This.setAddress(NewThisAddr); } return EmitCXXMemberOrOperatorCall( - CalleeDecl, Callee, ReturnValue, This.getPointer(), + CalleeDecl, Callee, ReturnValue, This.getPointer(*this), /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs); } @@ -428,7 +428,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, if (BO->getOpcode() == BO_PtrMemI) This = EmitPointerWithAlignment(BaseExpr); else - This = EmitLValue(BaseExpr).getAddress(); + This = EmitLValue(BaseExpr).getAddress(*this); EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(), QualType(MPT->getClass(), 0)); @@ -2103,7 +2103,7 @@ static bool isGLValueFromPointerDeref(const Expr *E) { static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E, llvm::Type *StdTypeInfoPtrTy) { // Get the vtable pointer. 
- Address ThisPtr = CGF.EmitLValue(E).getAddress(); + Address ThisPtr = CGF.EmitLValue(E).getAddress(CGF); QualType SrcRecordTy = E->getType(); diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp index 385f87f12a9b3..6b11969771567 100644 --- a/clang/lib/CodeGen/CGExprComplex.cpp +++ b/clang/lib/CodeGen/CGExprComplex.cpp @@ -348,7 +348,7 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue, if (lvalue.getType()->isAtomicType()) return CGF.EmitAtomicLoad(lvalue, loc).getComplexVal(); - Address SrcPtr = lvalue.getAddress(); + Address SrcPtr = lvalue.getAddress(CGF); bool isVolatile = lvalue.isVolatileQualified(); llvm::Value *Real = nullptr, *Imag = nullptr; @@ -374,7 +374,7 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue, (!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue))) return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit); - Address Ptr = lvalue.getAddress(); + Address Ptr = lvalue.getAddress(CGF); Address RealPtr = CGF.emitAddrOfRealComponent(Ptr, lvalue.getType()); Address ImagPtr = CGF.emitAddrOfImagComponent(Ptr, lvalue.getType()); @@ -463,14 +463,14 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op, case CK_LValueBitCast: { LValue origLV = CGF.EmitLValue(Op); - Address V = origLV.getAddress(); + Address V = origLV.getAddress(CGF); V = Builder.CreateElementBitCast(V, CGF.ConvertType(DestTy)); return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc()); } case CK_LValueToRValueBitCast: { LValue SourceLVal = CGF.EmitLValue(Op); - Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(), + Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF), CGF.ConvertTypeForMem(DestTy)); LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy); DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo()); diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp index 750b5503c08f8..226025eac9f6f 100644 --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -615,7 +615,7 @@ class ScalarExprEmitter if (isa(E->getType())) // never sugared return CGF.CGM.getMemberPointerConstant(E); - return EmitLValue(E->getSubExpr()).getPointer(); + return EmitLValue(E->getSubExpr()).getPointer(CGF); } Value *VisitUnaryDeref(const UnaryOperator *E) { if (E->getType()->isVoidType()) @@ -1979,7 +1979,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_LValueBitCast: case CK_ObjCObjectLValueCast: { - Address Addr = EmitLValue(E).getAddress(); + Address Addr = EmitLValue(E).getAddress(CGF); Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy)); LValue LV = CGF.MakeAddrLValue(Addr, DestTy); return EmitLoadOfLValue(LV, CE->getExprLoc()); @@ -1987,7 +1987,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_LValueToRValueBitCast: { LValue SourceLVal = CGF.EmitLValue(E); - Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(), + Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF), CGF.ConvertTypeForMem(DestTy)); LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy); DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo()); @@ -2105,7 +2105,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { case CK_ArrayToPointerDecay: return CGF.EmitArrayToPointerDecay(E).getPointer(); case CK_FunctionToPointerDecay: - return EmitLValue(E).getPointer(); + return EmitLValue(E).getPointer(CGF); case CK_NullToPointer: if (MustVisitNullValue(E)) @@ -2370,14 
+2370,14 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, if (isInc && type->isBooleanType()) { llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type); if (isPre) { - Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified()) - ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent); + Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified()) + ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent); return Builder.getTrue(); } // For atomic bool increment, we just store true and return it for // preincrement, do an atomic swap with true for postincrement return Builder.CreateAtomicRMW( - llvm::AtomicRMWInst::Xchg, LV.getPointer(), True, + llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True, llvm::AtomicOrdering::SequentiallyConsistent); } // Special case for atomic increment / decrement on integers, emit @@ -2394,8 +2394,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, llvm::Instruction::Sub; llvm::Value *amt = CGF.EmitToMemory( llvm::ConstantInt::get(ConvertType(type), 1, true), type); - llvm::Value *old = Builder.CreateAtomicRMW(aop, - LV.getPointer(), amt, llvm::AtomicOrdering::SequentiallyConsistent); + llvm::Value *old = + Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt, + llvm::AtomicOrdering::SequentiallyConsistent); return isPre ? Builder.CreateBinOp(op, old, amt) : old; } value = EmitLoadOfLValue(LV, E->getExprLoc()); @@ -2936,7 +2937,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( E->getExprLoc()), LHSTy); Value *OldVal = Builder.CreateAtomicRMW( - AtomicOp, LHSLV.getPointer(), Amt, + AtomicOp, LHSLV.getPointer(CGF), Amt, llvm::AtomicOrdering::SequentiallyConsistent); // Since operation is atomic, the result type is guaranteed to be the @@ -3982,7 +3983,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { case Qualifiers::OCL_Weak: RHS = Visit(E->getRHS()); LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); - RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore); + RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore); break; case Qualifiers::OCL_None: @@ -4543,7 +4544,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) { if (BaseExpr->isRValue()) { Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign()); } else { - Addr = EmitLValue(BaseExpr).getAddress(); + Addr = EmitLValue(BaseExpr).getAddress(*this); } // Cast the address to Class*. 
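The same threading applies to AggValueSlot::forLValue, as the CGExprAgg.cpp and CGDecl.cpp hunks above and the CGObjC.cpp hunk further below show: the factory gains a CodeGenFunction parameter immediately after the LValue, presumably because it must now call getAddress(CGF) itself. A toy sketch of that signature change, again with invented stand-in types rather than the real CodeGen classes:

#include <cassert>

// Illustrative stand-ins only; not clang's actual definitions.
struct CodeGenFunction {};

struct Address { int Id = 0; };

struct LValue {
  Address Addr;
  Address getAddress(CodeGenFunction &) const { return Addr; }
};

struct AggValueSlot {
  Address Addr;
  bool IsDestructed = false;
  // Before: forLValue(LV, IsDestructed, ...).
  // After: forLValue(LV, CGF, IsDestructed, ...), since extracting the
  // address from the lvalue now needs the function context.
  static AggValueSlot forLValue(const LValue &LV, CodeGenFunction &CGF,
                                bool Destructed) {
    return {LV.getAddress(CGF), Destructed};
  }
};

int main() {
  CodeGenFunction CGF;
  LValue LV{Address{7}};
  AggValueSlot Slot = AggValueSlot::forLValue(LV, CGF, /*Destructed=*/true);
  assert(Slot.Addr.Id == 7 && Slot.IsDestructed);
  return 0;
}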
diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp index 332e51e57ded0..d5f378c522322 100644 --- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp +++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp @@ -707,7 +707,7 @@ struct GenMoveConstructor : GenBinaryFunc { LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT); llvm::Value *SrcVal = CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal(); - CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV); + CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress(*CGF)), SrcLV); CGF->EmitStoreOfScalar(SrcVal, CGF->MakeAddrLValue(Addrs[DstIdx], QT), /* isInitialization */ true); } @@ -770,7 +770,7 @@ struct GenMoveAssignment : GenBinaryFunc { LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT); llvm::Value *SrcVal = CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal(); - CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV); + CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress(*CGF)), SrcLV); LValue DstLV = CGF->MakeAddrLValue(Addrs[DstIdx], QT); llvm::Value *DstVal = CGF->EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal(); @@ -806,7 +806,8 @@ void CodeGenFunction::destroyNonTrivialCStruct(CodeGenFunction &CGF, // such structure. void CodeGenFunction::defaultInitNonTrivialCStructVar(LValue Dst) { GenDefaultInitialize Gen(getContext()); - Address DstPtr = Builder.CreateBitCast(Dst.getAddress(), CGM.Int8PtrPtrTy); + Address DstPtr = + Builder.CreateBitCast(Dst.getAddress(*this), CGM.Int8PtrPtrTy); Gen.setCGF(this); QualType QT = Dst.getType(); QT = Dst.isVolatile() ? QT.withVolatile() : QT; @@ -850,7 +851,7 @@ getSpecialFunction(G &&Gen, StringRef FuncName, QualType QT, bool IsVolatile, // Functions to emit calls to the special functions of a non-trivial C struct. 
void CodeGenFunction::callCStructDefaultConstructor(LValue Dst) { bool IsVolatile = Dst.isVolatile(); - Address DstPtr = Dst.getAddress(); + Address DstPtr = Dst.getAddress(*this); QualType QT = Dst.getType(); GenDefaultInitializeFuncName GenName(DstPtr.getAlignment(), getContext()); std::string FuncName = GenName.getName(QT, IsVolatile); @@ -874,7 +875,7 @@ std::string CodeGenFunction::getNonTrivialDestructorStr(QualType QT, void CodeGenFunction::callCStructDestructor(LValue Dst) { bool IsVolatile = Dst.isVolatile(); - Address DstPtr = Dst.getAddress(); + Address DstPtr = Dst.getAddress(*this); QualType QT = Dst.getType(); GenDestructorFuncName GenName("__destructor_", DstPtr.getAlignment(), getContext()); @@ -885,7 +886,7 @@ void CodeGenFunction::callCStructDestructor(LValue Dst) { void CodeGenFunction::callCStructCopyConstructor(LValue Dst, LValue Src) { bool IsVolatile = Dst.isVolatile() || Src.isVolatile(); - Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress(); + Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this); QualType QT = Dst.getType(); GenBinaryFuncName GenName("__copy_constructor_", DstPtr.getAlignment(), SrcPtr.getAlignment(), getContext()); @@ -899,7 +900,7 @@ void CodeGenFunction::callCStructCopyAssignmentOperator(LValue Dst, LValue Src ) { bool IsVolatile = Dst.isVolatile() || Src.isVolatile(); - Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress(); + Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this); QualType QT = Dst.getType(); GenBinaryFuncName GenName("__copy_assignment_", DstPtr.getAlignment(), SrcPtr.getAlignment(), getContext()); @@ -910,7 +911,7 @@ void CodeGenFunction::callCStructCopyAssignmentOperator(LValue Dst, LValue Src void CodeGenFunction::callCStructMoveConstructor(LValue Dst, LValue Src) { bool IsVolatile = Dst.isVolatile() || Src.isVolatile(); - Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress(); + Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this); QualType QT = Dst.getType(); GenBinaryFuncName GenName("__move_constructor_", DstPtr.getAlignment(), SrcPtr.getAlignment(), getContext()); @@ -924,7 +925,7 @@ void CodeGenFunction::callCStructMoveAssignmentOperator(LValue Dst, LValue Src ) { bool IsVolatile = Dst.isVolatile() || Src.isVolatile(); - Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress(); + Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this); QualType QT = Dst.getType(); GenBinaryFuncName GenName("__move_assignment_", DstPtr.getAlignment(), SrcPtr.getAlignment(), getContext()); diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp index 984fa599a99f3..14391f3b129a1 100644 --- a/clang/lib/CodeGen/CGObjC.cpp +++ b/clang/lib/CodeGen/CGObjC.cpp @@ -511,7 +511,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E, method->getMethodFamily() == OMF_retain) { if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) { LValue lvalue = EmitLValue(lvalueExpr); - llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress()); + llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress(*this)); return AdjustObjCObjectType(*this, E->getType(), RValue::get(result)); } } @@ -749,8 +749,8 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar, ASTContext &Context = CGF.getContext(); Address src = - CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) - .getAddress(); + CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) + 
.getAddress(CGF); // objc_copyStruct (ReturnValue, &structIvar, // sizeof (Type of Ivar), isAtomic, false); @@ -1022,8 +1022,8 @@ static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF, // The 2nd argument is the address of the ivar. llvm::Value *ivarAddr = - CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), - CGF.LoadObjCSelf(), ivar, 0).getPointer(); + CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) + .getPointer(CGF); ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy); args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy); @@ -1082,7 +1082,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl, bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay // Perform an atomic load. This does not impose ordering constraints. - Address ivarAddr = LV.getAddress(); + Address ivarAddr = LV.getAddress(*this); ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType); llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load"); load->setAtomic(llvm::AtomicOrdering::Unordered); @@ -1183,14 +1183,14 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl, case TEK_Scalar: { llvm::Value *value; if (propType->isReferenceType()) { - value = LV.getAddress().getPointer(); + value = LV.getAddress(*this).getPointer(); } else { // We want to load and autoreleaseReturnValue ARC __weak ivars. if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { if (getLangOpts().ObjCAutoRefCount) { value = emitARCRetainLoadOfScalar(*this, LV, ivarType); } else { - value = EmitARCLoadWeak(LV.getAddress()); + value = EmitARCLoadWeak(LV.getAddress(*this)); } // Otherwise we want to do a simple load, suppressing the @@ -1224,9 +1224,9 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD, CallArgList args; // The first argument is the address of the ivar. - llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), - CGF.LoadObjCSelf(), ivar, 0) - .getPointer(); + llvm::Value *ivarAddr = + CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) + .getPointer(CGF); ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy); args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy); @@ -1235,7 +1235,7 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD, DeclRefExpr argRef(CGF.getContext(), argVar, false, argVar->getType().getNonReferenceType(), VK_LValue, SourceLocation()); - llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(); + llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF); argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy); args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy); @@ -1271,8 +1271,8 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF, // The first argument is the address of the ivar. 
llvm::Value *ivarAddr = - CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), - CGF.LoadObjCSelf(), ivar, 0).getPointer(); + CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0) + .getPointer(CGF); ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy); args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy); @@ -1281,7 +1281,7 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF, DeclRefExpr argRef(CGF.getContext(), argVar, false, argVar->getType().getNonReferenceType(), VK_LValue, SourceLocation()); - llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(); + llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF); argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy); args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy); @@ -1358,7 +1358,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl, LValue ivarLValue = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0); - Address ivarAddr = ivarLValue.getAddress(); + Address ivarAddr = ivarLValue.getAddress(*this); // Currently, all atomic accesses have to be through integer // types, so there's no point in trying to pick a prettier type. @@ -1535,7 +1535,7 @@ namespace { void Emit(CodeGenFunction &CGF, Flags flags) override { LValue lvalue = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0); - CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer, + CGF.emitDestroy(lvalue.getAddress(CGF), ivar->getType(), destroyer, flags.isForNormalCleanup() && useEHCleanupForArray); } }; @@ -1602,7 +1602,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP, LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0); EmitAggExpr(IvarInit->getInit(), - AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed, + AggValueSlot::forLValue(LV, *this, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap)); @@ -2327,7 +2327,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst, !isBlock && (dst.getAlignment().isZero() || dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) { - return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored); + return EmitARCStoreStrongCall(dst.getAddress(*this), newValue, ignored); } // Otherwise, split it out. @@ -2726,7 +2726,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal(); } else { assert(type.getObjCLifetime() == Qualifiers::OCL_Weak); - result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress()); + result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress(CGF)); } return TryEmitResult(result, !shouldRetain); } @@ -2750,7 +2750,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, SourceLocation()).getScalarVal(); // Set the source pointer to NULL. 
- CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv); + CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress(CGF)), lv); return TryEmitResult(result, true); } diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp index f80249483c7a4..7f6f498a3392c 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -357,7 +357,7 @@ class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo { VD->getType().getNonReferenceType(), VK_LValue, C.getLocation()); PrivScope.addPrivate( - VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(); }); + VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(CGF); }); } (void)PrivScope.Privatize(); } @@ -842,7 +842,7 @@ static void emitInitWithReductionInitializer(CodeGenFunction &CGF, RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation())); break; case TEK_Aggregate: - InitRVal = RValue::getAggregate(LV.getAddress()); + InitRVal = RValue::getAggregate(LV.getAddress(CGF)); break; } OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue); @@ -966,7 +966,7 @@ void ReductionCodeGen::emitAggregateInitialization( EmitDeclareReductionInit, EmitDeclareReductionInit ? ClausesData[N].ReductionOp : PrivateVD->getInit(), - DRD, SharedLVal.getAddress()); + DRD, SharedLVal.getAddress(CGF)); } ReductionCodeGen::ReductionCodeGen(ArrayRef Shareds, @@ -1007,13 +1007,13 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) { } llvm::Value *Size; llvm::Value *SizeInChars; - auto *ElemType = - cast(SharedAddresses[N].first.getPointer()->getType()) - ->getElementType(); + auto *ElemType = cast( + SharedAddresses[N].first.getPointer(CGF)->getType()) + ->getElementType(); auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType); if (AsArraySection) { - Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(), - SharedAddresses[N].first.getPointer()); + Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(CGF), + SharedAddresses[N].first.getPointer(CGF)); Size = CGF.Builder.CreateNUWAdd( Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1)); SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf); @@ -1063,7 +1063,7 @@ void ReductionCodeGen::emitInitialization( PrivateAddr, CGF.ConvertTypeForMem(PrivateType)); QualType SharedType = SharedAddresses[N].first.getType(); SharedLVal = CGF.MakeAddrLValue( - CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(), + CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(CGF), CGF.ConvertTypeForMem(SharedType)), SharedType, SharedAddresses[N].first.getBaseInfo(), CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType)); @@ -1071,7 +1071,7 @@ void ReductionCodeGen::emitInitialization( emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD); } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) { emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp, - PrivateAddr, SharedLVal.getAddress(), + PrivateAddr, SharedLVal.getAddress(CGF), SharedLVal.getType()); } else if (!DefaultInit(CGF) && PrivateVD->hasInit() && !CGF.isTrivialInitializer(PrivateVD->getInit())) { @@ -1108,15 +1108,15 @@ static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy, while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) && !CGF.getContext().hasSameType(BaseTy, ElTy)) { if (const auto *PtrTy = BaseTy->getAs()) { - BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy); + BaseLV = 
     } else {
-      LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
+      LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(CGF), BaseTy);
       BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
     }
     BaseTy = BaseTy->getPointeeType();
   }
   return CGF.MakeAddrLValue(
-      CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
+      CGF.Builder.CreateElementBitCast(BaseLV.getAddress(CGF),
                                        CGF.ConvertTypeForMem(ElTy)),
       BaseLV.getType(), BaseLV.getBaseInfo(),
       CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
@@ -1180,15 +1180,15 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
         loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
                     OriginalBaseLValue);
     llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
-        BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
+        BaseLValue.getPointer(CGF), SharedAddresses[N].first.getPointer(CGF));
     llvm::Value *PrivatePointer =
         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
             PrivateAddr.getPointer(),
-            SharedAddresses[N].first.getAddress().getType());
+            SharedAddresses[N].first.getAddress(CGF).getType());
     llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
     return castToBase(CGF, OrigVD->getType(),
                       SharedAddresses[N].first.getType(),
-                      OriginalBaseLValue.getAddress().getType(),
+                      OriginalBaseLValue.getAddress(CGF).getType(),
                       OriginalBaseLValue.getAlignment(), Ptr);
   }
   BaseDecls.emplace_back(
@@ -1381,12 +1381,12 @@ emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
   Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
   Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
     return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
-        .getAddress();
+        .getAddress(CGF);
   });
   Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
   Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
     return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
-        .getAddress();
+        .getAddress(CGF);
   });
   (void)Scope.Privatize();
   if (!IsCombiner && Out->hasInit() &&
@@ -1496,7 +1496,7 @@ llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
         UpLoc, ThreadID,
         CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
                                     TaskTVar->getType()->castAs<PointerType>())
-            .getPointer()};
+            .getPointer(CGF)};
     CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
   };
   CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
@@ -1707,9 +1707,10 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
       if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
           !CGF.getLangOpts().CXXExceptions ||
           CGF.Builder.GetInsertBlock() == TopBlock ||
-          !isa<llvm::Instruction>(LVal.getPointer()) ||
-          cast<llvm::Instruction>(LVal.getPointer())->getParent() == TopBlock ||
-          cast<llvm::Instruction>(LVal.getPointer())->getParent() ==
+          !isa<llvm::Instruction>(LVal.getPointer(CGF)) ||
+          cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
+              TopBlock ||
+          cast<llvm::Instruction>(LVal.getPointer(CGF))->getParent() ==
               CGF.Builder.GetInsertBlock()) {
         ThreadID = CGF.EmitLoadOfScalar(LVal, Loc);
         // If value loaded in entry block, cache it and use it everywhere in
@@ -3119,7 +3120,7 @@ Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
   if (auto *OMPRegionInfo =
           dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
     if (OMPRegionInfo->getThreadIDVariable())
-      return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
+      return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(CGF);
   llvm::Value *ThreadID = getThreadID(CGF, Loc);
   QualType Int32Ty =
@@ -3395,7 +3396,8 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
       Address Elem = CGF.Builder.CreateConstArrayGEP(CopyprivateList, I);
       CGF.Builder.CreateStore(
           CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
-              CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy),
+              CGF.EmitLValue(CopyprivateVars[I]).getPointer(CGF),
+              CGF.VoidPtrTy),
           Elem);
     }
     // Build function that copies private values from single region to all other
@@ -4540,7 +4542,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
   const auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
   auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
   LValue PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
-  llvm::Value *PartidParam = PartIdLVal.getPointer();
+  llvm::Value *PartidParam = PartIdLVal.getPointer(CGF);
   auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
   LValue SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
@@ -4553,7 +4555,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
   if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
     LValue PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
     PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
-        PrivatesLVal.getPointer(), CGF.VoidPtrTy);
+        PrivatesLVal.getPointer(CGF), CGF.VoidPtrTy);
   } else {
     PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
   }
@@ -4562,7 +4564,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
                                TaskPrivatesMap,
                                CGF.Builder
                                    .CreatePointerBitCastOrAddrSpaceCast(
-                                       TDBase.getAddress(), CGF.VoidPtrTy)
+                                       TDBase.getAddress(CGF), CGF.VoidPtrTy)
                                    .getPointer()};
   SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
                                           std::end(CommonArgs));
@@ -4640,7 +4642,7 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
     if (QualType::DestructionKind DtorKind =
             Field->getType().isDestructedType()) {
       LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
-      CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
+      CGF.pushDestroy(DtorKind, FieldLValue.getAddress(CGF), Field->getType());
     }
   }
   CGF.FinishFunction();
@@ -4738,8 +4740,8 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
     LValue RefLVal =
         CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
     LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
-        RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
-    CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal);
+        RefLVal.getAddress(CGF), RefLVal.getType()->castAs<PointerType>());
+    CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
     ++Counter;
   }
   CGF.FinishFunction();
@@ -4804,7 +4806,8 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
         } else {
           SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
           SharedRefLValue = CGF.MakeAddrLValue(
-              Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
+              Address(SharedRefLValue.getPointer(CGF),
+                      C.getDeclAlign(OriginalVD)),
              SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
              SharedRefLValue.getTBAAInfo());
         }
@@ -4817,7 +4820,8 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
             // Initialize firstprivate array using element-by-element
             // initialization.
             CGF.EmitOMPAggregateAssign(
-                PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
+                PrivateLValue.getAddress(CGF), SharedRefLValue.getAddress(CGF),
+                Type,
                 [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
                                                   Address SrcElement) {
                   // Clean up any temporaries needed by the initialization.
@@ -4835,8 +4839,8 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
           }
         } else {
           CodeGenFunction::OMPPrivateScope InitScope(CGF);
-          InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
-            return SharedRefLValue.getAddress();
+          InitScope.addPrivate(Elem, [SharedRefLValue, &CGF]() -> Address {
+            return SharedRefLValue.getAddress(CGF);
           });
           (void)InitScope.Privatize();
           CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
@@ -5236,10 +5240,10 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
             dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
       LValue UpAddrLVal =
           CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
-      llvm::Value *UpAddr =
-          CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
+      llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
+          UpAddrLVal.getPointer(CGF), /*Idx0=*/1);
       llvm::Value *LowIntPtr =
-          CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
+          CGF.Builder.CreatePtrToInt(Addr.getPointer(CGF), CGM.SizeTy);
       llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
       Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
     } else {
@@ -5252,7 +5256,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
     LValue BaseAddrLVal = CGF.EmitLValueForField(
         Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
     CGF.EmitStoreOfScalar(
-        CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
+        CGF.Builder.CreatePtrToInt(Addr.getPointer(CGF), CGF.IntPtrTy),
         BaseAddrLVal);
     // deps[i].len = sizeof(<Dependences[i].second>);
     LValue LenLVal = CGF.EmitLValueForField(
@@ -5406,21 +5410,24 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
       *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
   const auto *LBVar =
       cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
-  CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
+  CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(CGF),
+                       LBLVal.getQuals(),
                        /*IsInitializer=*/true);
   LValue UBLVal = CGF.EmitLValueForField(
       Result.TDBase,
       *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
   const auto *UBVar =
       cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
-  CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
+  CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(CGF),
+                       UBLVal.getQuals(),
                        /*IsInitializer=*/true);
   LValue StLVal = CGF.EmitLValueForField(
       Result.TDBase,
       *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
   const auto *StVar =
       cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
-  CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
+  CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(CGF),
+                       StLVal.getQuals(),
                        /*IsInitializer=*/true);
   // Store reductions address.
   LValue RedLVal = CGF.EmitLValueForField(
@@ -5429,7 +5436,7 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
   if (Data.Reductions) {
     CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
   } else {
-    CGF.EmitNullInitialization(RedLVal.getAddress(),
+    CGF.EmitNullInitialization(RedLVal.getAddress(CGF),
                                CGF.getContext().VoidPtrTy);
   }
   enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
@@ -5438,11 +5445,11 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
       ThreadID,
       Result.NewTask,
       IfVal,
-      LBLVal.getPointer(),
-      UBLVal.getPointer(),
+      LBLVal.getPointer(CGF),
+      UBLVal.getPointer(CGF),
       CGF.EmitLoadOfScalar(StLVal, Loc),
       llvm::ConstantInt::getSigned(
-          CGF.IntTy, 1), // Always 1 because taskgroup emitted by the compiler
+          CGF.IntTy, 1), // Always 1 because taskgroup emitted by the compiler
       llvm::ConstantInt::getSigned(
           CGF.IntTy, Data.Schedule.getPointer()
                          ? Data.Schedule.getInt() ? NumTasks : Grainsize
@@ -5754,7 +5761,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
     CGF.Builder.CreateStore(
         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
-            CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
+            CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
         Elem);
     if ((*IPriv)->getType()->isVariablyModifiedType()) {
       // Store array size.
@@ -6234,7 +6241,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
     LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
     RCG.emitSharedLValue(CGF, Cnt);
     llvm::Value *CastedShared =
-        CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer());
+        CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer(CGF));
     CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
     RCG.emitAggregateType(CGF, Cnt);
     llvm::Value *SizeValInChars;
@@ -6277,7 +6284,8 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
           llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*isSigned=*/true),
           FlagsLVal);
     } else
-      CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
+      CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF),
+                                 FlagsLVal.getType());
   }
   // Build call void *__kmpc_task_reduction_init(int gtid, int num_data, void
   // *data);
@@ -6313,7 +6321,7 @@ void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
         generateUniqueName(CGM, "reduction", RCG.getRefExpr(N)));
     CGF.Builder.CreateStore(
         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
-            RCG.getSharedLValue(N).getPointer(), CGM.VoidPtrTy),
+            RCG.getSharedLValue(N).getPointer(CGF), CGM.VoidPtrTy),
         SharedAddr, /*IsVolatile=*/false);
   }
 }
@@ -6324,12 +6332,12 @@ Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
                                               LValue SharedLVal) {
   // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
   // *d);
-  llvm::Value *Args[] = {
-      CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
-                                /*isSigned=*/true),
-      ReductionsPtr,
-      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(SharedLVal.getPointer(),
-                                                      CGM.VoidPtrTy)};
+  llvm::Value *Args[] = {CGF.Builder.CreateIntCast(getThreadID(CGF, Loc),
+                                                   CGM.IntTy,
+                                                   /*isSigned=*/true),
+                         ReductionsPtr,
+                         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+                             SharedLVal.getPointer(CGF), CGM.VoidPtrTy)};
   return Address(
       CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_task_reduction_get_th_data), Args),
@@ -7514,11 +7522,11 @@ class MappableExprsHandler {
       } else if ((AE && isa<CXXThisExpr>(AE->getBase()->IgnoreParenImpCasts())) ||
                  (OASE &&
                   isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
-        BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress();
+        BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
       } else {
         // The base is the reference to the variable.
         // BP = &Var.
-        BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress();
+        BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
         if (const auto *VD =
                 dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
           if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
@@ -7612,8 +7620,8 @@ class MappableExprsHandler {
                 isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
               "Unexpected expression");
-        Address LB =
-            CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
+        Address LB = CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
+                         .getAddress(CGF);
        // If this component is a pointer inside the base struct then we don't
        // need to create any entry for it - it will be combined with the object
@@ -7660,7 +7668,7 @@ class MappableExprsHandler {
           if (MC.getAssociatedDeclaration()) {
             ComponentLB =
                 CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
-                    .getAddress();
+                    .getAddress(CGF);
             Size = CGF.Builder.CreatePtrDiff(
                 CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
                 CGF.EmitCastToVoidPtr(LB.getPointer()));
@@ -8064,7 +8072,7 @@ class MappableExprsHandler {
       auto CI = DeferredInfo.find(M.first);
       if (CI != DeferredInfo.end()) {
         for (const DeferredDevicePtrEntryTy &L : CI->second) {
-          llvm::Value *BasePtr = this->CGF.EmitLValue(L.IE).getPointer();
+          llvm::Value *BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
          llvm::Value *Ptr = this->CGF.EmitLoadOfScalar(
              this->CGF.EmitLValue(L.IE), L.IE->getExprLoc());
          CurBasePointers.emplace_back(BasePtr, L.VD);
@@ -8186,9 +8194,10 @@ class MappableExprsHandler {
       LValue ThisLVal =
           CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
       LValue ThisLValVal = CGF.EmitLValueForField(VDLVal, ThisCapture);
-      LambdaPointers.try_emplace(ThisLVal.getPointer(), VDLVal.getPointer());
-      BasePointers.push_back(ThisLVal.getPointer());
-      Pointers.push_back(ThisLValVal.getPointer());
+      LambdaPointers.try_emplace(ThisLVal.getPointer(CGF),
+                                 VDLVal.getPointer(CGF));
+      BasePointers.push_back(ThisLVal.getPointer(CGF));
+      Pointers.push_back(ThisLValVal.getPointer(CGF));
       Sizes.push_back(
           CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
                                     CGF.Int64Ty, /*isSigned=*/true));
@@ -8206,17 +8215,19 @@ class MappableExprsHandler {
       LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
       if (LC.getCaptureKind() == LCK_ByRef) {
         LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
-        LambdaPointers.try_emplace(VarLVal.getPointer(), VDLVal.getPointer());
-        BasePointers.push_back(VarLVal.getPointer());
-        Pointers.push_back(VarLValVal.getPointer());
+        LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
+                                   VDLVal.getPointer(CGF));
+        BasePointers.push_back(VarLVal.getPointer(CGF));
+        Pointers.push_back(VarLValVal.getPointer(CGF));
         Sizes.push_back(CGF.Builder.CreateIntCast(
             CGF.getTypeSize(
                 VD->getType().getCanonicalType().getNonReferenceType()),
            CGF.Int64Ty, /*isSigned=*/true));
       } else {
         RValue VarRVal = CGF.EmitLoadOfLValue(VarLVal, RD->getLocation());
-        LambdaPointers.try_emplace(VarLVal.getPointer(), VDLVal.getPointer());
-        BasePointers.push_back(VarLVal.getPointer());
+        LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
+                                   VDLVal.getPointer(CGF));
+        BasePointers.push_back(VarLVal.getPointer(CGF));
         Pointers.push_back(VarRVal.getScalarVal());
         Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
       }
@@ -8522,7 +8533,7 @@ class MappableExprsHandler {
       CGF.CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(CGF, VD);
       // Copy the value of the original variable to the new global copy.
       CGF.Builder.CreateMemCpy(
-          CGF.MakeNaturalAlignAddrLValue(Addr, ElementType).getAddress(),
+          CGF.MakeNaturalAlignAddrLValue(Addr, ElementType).getAddress(CGF),
           Address(CV, CGF.getContext().getTypeAlignInChars(ElementType)),
           CurSizes.back(), /*IsVolatile=*/false);
       // Use new global variable as the base pointers.
@@ -8932,7 +8943,7 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
   Scope.addPrivate(MapperVarDecl, [&MapperCGF, PtrCurrent, PtrTy]() {
     return MapperCGF
         .EmitLoadOfPointerLValue(PtrCurrent, PtrTy->castAs<PointerType>())
-        .getAddress();
+        .getAddress(MapperCGF);
   });
   (void)Scope.Privatize();
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
index abfba39e6be17..e5ec3deac2c94 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -2318,7 +2318,7 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
           VarTy = Rec.second.FD->getType();
         } else {
           llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
-              VarAddr.getAddress().getPointer(),
+              VarAddr.getAddress(CGF).getPointer(),
              {Bld.getInt32(0), getNVPTXLaneID(CGF)});
          VarTy =
              Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
@@ -2326,7 +2326,7 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
               Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
               AlignmentSource::Decl);
         }
-        Rec.second.PrivateAddr = VarAddr.getAddress();
+        Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
         if (!IsInTTDRegion &&
             (WithSPMDCheck ||
              getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
@@ -2337,10 +2337,10 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
                  "Secondary glob data must be one per team.");
           LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
           VarAddr.setAddress(
-              Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(),
-                                       VarAddr.getPointer()),
+              Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
+                                       VarAddr.getPointer(CGF)),
                       VarAddr.getAlignment()));
-          Rec.second.PrivateAddr = VarAddr.getAddress();
+          Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
         }
         Address GlobalPtr = Rec.second.PrivateAddr;
         Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
@@ -2352,7 +2352,8 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
         if (EscapedParam) {
           const auto *VD = cast<VarDecl>(Rec.first);
           CGF.EmitStoreOfScalar(ParValue, VarAddr);
-          I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress());
+          I->getSecond().MappedParams->setVarAddr(CGF, VD,
+                                                  VarAddr.getAddress(CGF));
         }
         if (IsTTD)
           ++SecIt;
@@ -2386,7 +2387,7 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
                                        CGM.getContext().getDeclAlign(VD),
                                        AlignmentSource::Decl);
       I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
-                                              Base.getAddress());
+                                              Base.getAddress(CGF));
       I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
     }
     I->getSecond().MappedParams->apply(CGF);
@@ -3690,7 +3691,8 @@ static llvm::Value *emitListToGlobalCopyFunction(
     const FieldDecl *FD = VarFieldMap.lookup(VD);
     LValue GlobLVal = CGF.EmitLValueForField(
         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
-    llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
+    llvm::Value *BufferPtr =
+        Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
     GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
     switch (CGF.getEvaluationKind(Private->getType())) {
     case TEK_Scalar: {
@@ -3787,7 +3789,8 @@ static llvm::Value *emitListToGlobalReduceFunction(
     const FieldDecl *FD = VarFieldMap.lookup(VD);
     LValue GlobLVal = CGF.EmitLValueForField(
         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
-    llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
+    llvm::Value *BufferPtr =
+        Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
     if ((*IPriv)->getType()->isVariablyModifiedType()) {
@@ -3891,7 +3894,8 @@ static llvm::Value *emitGlobalToListCopyFunction(
     const FieldDecl *FD = VarFieldMap.lookup(VD);
     LValue GlobLVal = CGF.EmitLValueForField(
         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
-    llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
+    llvm::Value *BufferPtr =
+        Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
     GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
     switch (CGF.getEvaluationKind(Private->getType())) {
     case TEK_Scalar: {
@@ -3987,7 +3991,8 @@ static llvm::Value *emitGlobalToListReduceFunction(
     const FieldDecl *FD = VarFieldMap.lookup(VD);
     LValue GlobLVal = CGF.EmitLValueForField(
         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
-    llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
+    llvm::Value *BufferPtr =
+        Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
     if ((*IPriv)->getType()->isVariablyModifiedType()) {
@@ -4310,7 +4315,7 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
     CGF.Builder.CreateStore(
         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
-            CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
+            CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
         Elem);
     if ((*IPriv)->getType()->isVariablyModifiedType()) {
       // Store array size.
@@ -4892,7 +4897,7 @@ void CGOpenMPRuntimeNVPTX::adjustTargetSpecificDataForLambdas(
       if (VD->getType().getCanonicalType()->isReferenceType())
         VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
                                                VD->getType().getCanonicalType())
-                     .getAddress();
+                     .getAddress(CGF);
       CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
     }
   }
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 46fa29fa69bd8..4d7864471bb9f 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -1834,15 +1834,15 @@ CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
         Ty = llvm::IntegerType::get(getLLVMContext(), Size);
         Ty = llvm::PointerType::getUnqual(Ty);
-        Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
-                                                       Ty));
+        Arg = Builder.CreateLoad(
+            Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
       } else {
-        Arg = InputValue.getPointer();
+        Arg = InputValue.getPointer(*this);
         ConstraintStr += '*';
       }
     }
   } else {
-    Arg = InputValue.getPointer();
+    Arg = InputValue.getPointer(*this);
     ConstraintStr += '*';
   }
@@ -2091,8 +2091,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
         LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
                                       VT->getPrimitiveSizeInBits().getFixedSize());
     } else {
-      ArgTypes.push_back(Dest.getAddress().getType());
-      Args.push_back(Dest.getPointer());
+      ArgTypes.push_back(Dest.getAddress(*this).getType());
+      Args.push_back(Dest.getPointer(*this));
       Constraints += "=*";
       Constraints += OutputConstraint;
       ReadOnly = ReadNone = false;
@@ -2334,7 +2334,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
     // ResultTypeRequiresCast.size() elements of RegResults.
     if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
       unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
-      Address A = Builder.CreateBitCast(Dest.getAddress(),
+      Address A = Builder.CreateBitCast(Dest.getAddress(*this),
                                         ResultRegTypes[i]->getPointerTo());
       QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
       if (Ty.isNull()) {
@@ -2387,14 +2387,14 @@ CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
   delete CGF.CapturedStmtInfo;
   // Emit call to the helper function.
-  EmitCallOrInvoke(F, CapStruct.getPointer());
+  EmitCallOrInvoke(F, CapStruct.getPointer(*this));
   return F;
 }
 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
   LValue CapStruct = InitCapturedStruct(S);
-  return CapStruct.getAddress();
+  return CapStruct.getAddress(*this);
 }
 /// Creates the outlined function for a CapturedStmt.
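Every hunk in this patch applies the same mechanical rule: LValue::getPointer() and LValue::getAddress() now take the CodeGenFunction that is emitting IR, so CodeGenFunction member functions pass *this while static helpers and the runtime classes pass the CGF (or MapperCGF) reference already in scope. A minimal sketch of the two call shapes, assuming a hypothetical emitExampleAddr helper that is not part of this patch:

    // Inside a CodeGenFunction member: the active function is *this.
    Address CodeGenFunction::emitExampleAddr(const Expr *E) {
      return EmitLValue(E).getAddress(*this);
    }

    // In a static helper handed a CodeGenFunction: thread CGF through.
    static Address emitExampleAddr(CodeGenFunction &CGF, const Expr *E) {
      return CGF.EmitLValue(E).getAddress(CGF);
    }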
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index e2c055f549e02..2195c4443eb83 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -77,7 +77,7 @@ class OMPLexicalScope : public CodeGenFunction::LexicalScope {
                         InlinedShareds.isGlobalVarCaptured(VD)),
                     VD->getType().getNonReferenceType(), VK_LValue,
                     C.getLocation());
         InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
-          return CGF.EmitLValue(&DRE).getAddress();
+          return CGF.EmitLValue(&DRE).getAddress(CGF);
         });
       }
     }
@@ -232,7 +232,7 @@ class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
                     VD->getType().getNonReferenceType(), VK_LValue,
                     C.getLocation());
         InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
-          return CGF.EmitLValue(&DRE).getAddress();
+          return CGF.EmitLValue(&DRE).getAddress(CGF);
         });
       }
     }
@@ -325,7 +325,7 @@ void CodeGenFunction::GenerateOpenMPCapturedVars(
       CapturedVars.push_back(CV);
     } else {
       assert(CurCap->capturesVariable() && "Expected capture by reference.");
-      CapturedVars.push_back(EmitLValue(*I).getAddress().getPointer());
+      CapturedVars.push_back(EmitLValue(*I).getAddress(*this).getPointer());
     }
   }
 }
@@ -336,11 +336,11 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
   ASTContext &Ctx = CGF.getContext();
   llvm::Value *CastedPtr = CGF.EmitScalarConversion(
-      AddrLV.getAddress().getPointer(), Ctx.getUIntPtrType(),
+      AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
       Ctx.getPointerType(DstType), Loc);
   Address TmpAddr =
       CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
-          .getAddress();
+          .getAddress(CGF);
   return TmpAddr;
 }
@@ -519,7 +519,7 @@ static llvm::Function *emitOutlinedFunctionPrologue(
     } else if (I->capturesVariable()) {
       const VarDecl *Var = I->getCapturedVar();
       QualType VarTy = Var->getType();
-      Address ArgAddr = ArgLVal.getAddress();
+      Address ArgAddr = ArgLVal.getAddress(CGF);
       if (ArgLVal.getType()->isLValueReferenceType()) {
         ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
       } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
@@ -541,12 +541,12 @@ static llvm::Function *emitOutlinedFunctionPrologue(
                    ? castValueFromUintptr(
                          CGF, I->getLocation(), FD->getType(),
                          Args[Cnt]->getName(), ArgLVal)
-                   : ArgLVal.getAddress()}});
+                   : ArgLVal.getAddress(CGF)}});
     } else {
       // If 'this' is captured, load it into CXXThisValue.
       assert(I->capturesThis());
       CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
-      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress()}});
+      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
     }
     ++Cnt;
     ++I;
@@ -830,8 +830,8 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
               EmitAggregateAssign(Dest, OriginalLVal, Type);
             } else {
               EmitOMPAggregateAssign(
-                  Emission.getAllocatedAddress(), OriginalLVal.getAddress(),
-                  Type,
+                  Emission.getAllocatedAddress(),
+                  OriginalLVal.getAddress(*this), Type,
                  [this, VDInit, Init](Address DestElement,
                                       Address SrcElement) {
                    // Clean up any temporaries needed by the
@@ -849,7 +849,7 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
             return Emission.getAllocatedAddress();
           });
         } else {
-          Address OriginalAddr = OriginalLVal.getAddress();
+          Address OriginalAddr = OriginalLVal.getAddress(*this);
           IsRegistered = PrivateScope.addPrivate(
               OrigVD, [this, VDInit, OriginalAddr, VD]() {
                 // Emit private VarDecl with copy init.
@@ -926,7 +926,7 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
                "Copyin threadprivates should have been captured!");
         DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                         (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
-        MasterAddr = EmitLValue(&DRE).getAddress();
+        MasterAddr = EmitLValue(&DRE).getAddress(*this);
         LocalDeclMap.erase(VD);
       } else {
         MasterAddr =
@@ -935,7 +935,7 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
                       getContext().getDeclAlign(VD));
       }
       // Get the address of the threadprivate variable.
-      Address PrivateAddr = EmitLValue(*IRef).getAddress();
+      Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
       if (CopiedVars.size() == 1) {
         // At first check if current thread is a master thread. If it is, no
         // need to copy data.
@@ -1003,7 +1003,7 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
                           /*RefersToEnclosingVariableOrCapture=*/
                               CapturedStmtInfo->lookup(OrigVD) != nullptr,
                           (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
-          return EmitLValue(&DRE).getAddress();
+          return EmitLValue(&DRE).getAddress(*this);
         });
         // Check if the variable is also a firstprivate: in this case IInit is
         // not generated. Initialization of this variable will happen in codegen
@@ -1160,8 +1160,8 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
       if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
         // Store the address of the original variable associated with the LHS
         // implicit variable.
-        PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() {
-          return RedCG.getSharedLValue(Count).getAddress();
+        PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
+          return RedCG.getSharedLValue(Count).getAddress(*this);
         });
         PrivateScope.addPrivate(
             RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
@@ -1169,8 +1169,8 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
                  isa<ArraySubscriptExpr>(IRef)) {
         // Store the address of the original variable associated with the LHS
         // implicit variable.
-        PrivateScope.addPrivate(LHSVD, [&RedCG, Count]() {
-          return RedCG.getSharedLValue(Count).getAddress();
+        PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
+          return RedCG.getSharedLValue(Count).getAddress(*this);
         });
         PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
           return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
@@ -1180,7 +1180,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
       } else {
         QualType Type = PrivateVD->getType();
         bool IsArray = getContext().getAsArrayType(Type) != nullptr;
-        Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress();
+        Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
         // Store the address of the original variable associated with the LHS
         // implicit variable.
         if (IsArray) {
@@ -1529,7 +1529,7 @@ void CodeGenFunction::EmitOMPLinearClauseFinal(
       DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                       CapturedStmtInfo->lookup(OrigVD) != nullptr,
                       (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
-      Address OrigAddr = EmitLValue(&DRE).getAddress();
+      Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
       CodeGenFunction::OMPPrivateScope VarScope(*this);
       VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
       (void)VarScope.Privatize();
@@ -1599,7 +1599,7 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
         DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
                         LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
                         E->getType(), VK_LValue, E->getExprLoc());
-        return EmitLValue(&DRE).getAddress();
+        return EmitLValue(&DRE).getAddress(*this);
       });
     } else {
       (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() {
@@ -1762,12 +1762,13 @@ void CodeGenFunction::EmitOMPSimdFinal(
       }
       Address OrigAddr = Address::invalid();
       if (CED) {
-        OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
+        OrigAddr =
+            EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
       } else {
         DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
                         /*RefersToEnclosingVariableOrCapture=*/false,
                         (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
-        OrigAddr = EmitLValue(&DRE).getAddress();
+        OrigAddr = EmitLValue(&DRE).getAddress(*this);
       }
       OMPPrivateScope VarScope(*this);
       VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
@@ -2277,14 +2278,16 @@ static void emitDistributeParallelForDistributeInnerBoundParams(
   const auto &Dir = cast<OMPLoopDirective>(S);
   LValue LB =
       CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
-  llvm::Value *LBCast = CGF.Builder.CreateIntCast(
-      CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
+  llvm::Value *LBCast =
+      CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
+                                CGF.SizeTy, /*isSigned=*/false);
   CapturedVars.push_back(LBCast);
   LValue UB =
       CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
-  llvm::Value *UBCast = CGF.Builder.CreateIntCast(
-      CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
+  llvm::Value *UBCast =
+      CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
+                                CGF.SizeTy, /*isSigned=*/false);
   CapturedVars.push_back(UBCast);
 }
@@ -2521,8 +2524,8 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
         // one chunk is distributed to each thread. Note that the size of
         // the chunks is unspecified in this case.
         CGOpenMPRuntime::StaticRTInput StaticInit(
-            IVSize, IVSigned, Ordered, IL.getAddress(), LB.getAddress(),
-            UB.getAddress(), ST.getAddress(),
+            IVSize, IVSigned, Ordered, IL.getAddress(CGF),
+            LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
             StaticChunkedOne ? Chunk : nullptr);
         CGF.CGM.getOpenMPRuntime().emitForStaticInit(
             CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind,
@@ -2571,9 +2574,9 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
             ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
         // Emit the outer loop, which requests its work chunk [LB..UB] from
         // runtime and runs the inner loop to process it.
-        const OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(),
-                                             ST.getAddress(), IL.getAddress(),
-                                             Chunk, EUB);
+        const OMPLoopArguments LoopArguments(
+            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
+            IL.getAddress(*this), Chunk, EUB);
         EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
                             LoopArguments, CGDispatchBounds);
       }
@@ -2777,8 +2780,8 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
       OpenMPScheduleTy ScheduleKind;
       ScheduleKind.Schedule = OMPC_SCHEDULE_static;
       CGOpenMPRuntime::StaticRTInput StaticInit(
-          /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
-          LB.getAddress(), UB.getAddress(), ST.getAddress());
+          /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF),
+          LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF));
       CGF.CGM.getOpenMPRuntime().emitForStaticInit(
           CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
       // UB = min(UB, GlobalUB);
@@ -3112,7 +3115,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
                         Pair.second->getType(), VK_LValue,
                         Pair.second->getExprLoc());
         Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
-          return CGF.EmitLValue(&DRE).getAddress();
+          return CGF.EmitLValue(&DRE).getAddress(CGF);
         });
       }
       for (const auto &Pair : PrivatePtrs) {
@@ -3570,8 +3573,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
         if (isOpenMPSimdDirective(S.getDirectiveKind()))
           EmitOMPSimdInit(S, /*IsMonotonic=*/true);
         CGOpenMPRuntime::StaticRTInput StaticInit(
-            IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(),
-            LB.getAddress(), UB.getAddress(), ST.getAddress(),
+            IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this),
+            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
             StaticChunked ? Chunk : nullptr);
         RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
                                     StaticInit);
@@ -3637,8 +3640,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
         // Emit the outer loop, which requests its work chunk [LB..UB] from
         // runtime and runs the inner loop to process it.
         const OMPLoopArguments LoopArguments = {
-            LB.getAddress(), UB.getAddress(), ST.getAddress(), IL.getAddress(),
-            Chunk};
+            LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
+            IL.getAddress(*this), Chunk};
         EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
                                    CodeGenLoop);
       }
@@ -3838,11 +3841,11 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
   // expression is simple and atomic is allowed for the given type for the
   // target platform.
   if (BO == BO_Comma || !Update.isScalar() ||
-      !Update.getScalarVal()->getType()->isIntegerTy() ||
-      !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
-                        (Update.getScalarVal()->getType() !=
-                         X.getAddress().getElementType())) ||
-      !X.getAddress().getElementType()->isIntegerTy() ||
+      !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() ||
+      (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
+       (Update.getScalarVal()->getType() !=
+        X.getAddress(CGF).getElementType())) ||
+      !X.getAddress(CGF).getElementType()->isIntegerTy() ||
       !Context.getTargetInfo().hasBuiltinAtomic(
           Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
     return std::make_pair(false, RValue::get(nullptr));
@@ -3914,11 +3917,11 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
   llvm::Value *UpdateVal = Update.getScalarVal();
   if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
     UpdateVal = CGF.Builder.CreateIntCast(
-        IC, X.getAddress().getElementType(),
+        IC, X.getAddress(CGF).getElementType(),
         X.getType()->hasSignedIntegerRepresentation());
   }
   llvm::Value *Res =
-      CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(), UpdateVal, AO);
+      CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO);
   return std::make_pair(true, RValue::get(Res));
 }
@@ -5292,7 +5295,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
           if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
             LValue GlobLVal = CGF.EmitLValue(E);
             LoopGlobals.addPrivate(
-                VD, [&GlobLVal]() { return GlobLVal.getAddress(); });
+                VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
           }
           if (isa<OMPCapturedExprDecl>(VD)) {
             // Emit only those that were not explicitly referenced in clauses.
diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h
index 71f95abe488a9..9fd07bdb187d4 100644
--- a/clang/lib/CodeGen/CGValue.h
+++ b/clang/lib/CodeGen/CGValue.h
@@ -29,6 +29,7 @@ namespace llvm {
 namespace clang {
 namespace CodeGen {
   class AggValueSlot;
+  class CodeGenFunction;
   struct CGBitFieldInfo;
 /// RValue - This trivial value class is used to represent the result of an
@@ -319,11 +320,13 @@ class LValue {
   void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; }
   // simple lvalue
-  llvm::Value *getPointer() const {
+  llvm::Value *getPointer(CodeGenFunction &CGF) const {
     assert(isSimple());
     return V;
   }
-  Address getAddress() const { return Address(getPointer(), getAlignment()); }
+  Address getAddress(CodeGenFunction &CGF) const {
+    return Address(getPointer(CGF), getAlignment());
+  }
   void setAddress(Address address) {
     assert(isSimple());
     V = address.getPointer();
@@ -427,8 +430,8 @@ class LValue {
     return R;
   }
-  RValue asAggregateRValue() const {
-    return RValue::getAggregate(getAddress(), isVolatileQualified());
+  RValue asAggregateRValue(CodeGenFunction &CGF) const {
+    return RValue::getAggregate(getAddress(CGF), isVolatileQualified());
   }
 };
@@ -536,14 +539,12 @@ class AggValueSlot {
     return AV;
   }
-  static AggValueSlot forLValue(const LValue &LV,
-                                IsDestructed_t isDestructed,
-                                NeedsGCBarriers_t needsGC,
-                                IsAliased_t isAliased,
-                                Overlap_t mayOverlap,
-                                IsZeroed_t isZeroed = IsNotZeroed,
-                                IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) {
-    return forAddr(LV.getAddress(), LV.getQuals(), isDestructed, needsGC,
+  static AggValueSlot
+  forLValue(const LValue &LV, CodeGenFunction &CGF, IsDestructed_t isDestructed,
+            NeedsGCBarriers_t needsGC, IsAliased_t isAliased,
+            Overlap_t mayOverlap, IsZeroed_t isZeroed = IsNotZeroed,
+            IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) {
+    return forAddr(LV.getAddress(CGF), LV.getQuals(), isDestructed, needsGC,
                    isAliased, mayOverlap, isZeroed, isChecked);
   }
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index 68b599e88bc37..7f3be896a7b92 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -976,7 +976,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
       LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
       if (!LambdaThisCaptureField->getType()->isPointerType()) {
         // If the enclosing object was captured by value, just use its address.
-        CXXThisValue = ThisFieldLValue.getAddress().getPointer();
+        CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
       } else {
         // Load the lvalue pointed to by the field, since '*this' was captured
         // by reference.
@@ -2013,11 +2013,11 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
   if (getContext().getBuiltinVaListType()->isArrayType())
     return EmitPointerWithAlignment(E);
-  return EmitLValue(E).getAddress();
+  return EmitLValue(E).getAddress(*this);
 }
 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
-  return EmitLValue(E).getAddress();
+  return EmitLValue(E).getAddress(*this);
 }
 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index ec848a312ae01..97bea0150e7f7 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -1225,7 +1225,7 @@ void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
     ResultTruncRegTypes.push_back(CoerceTy);
     // Coerce the integer by bitcasting the return slot pointer.
-    ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
+    ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF),
                                                     CoerceTy->getPointerTo()));
     ResultRegDests.push_back(ReturnSlot);
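The CGValue.h hunk above is the heart of the patch: LValue's accessors now name the active CodeGenFunction even though the parameter is still unused (getPointer(CGF) simply returns V), so this is plumbing that lets a follow-up compute the pointer per-function. Reduced to its interface, the change has roughly this shape (a sketch of the before/after, not verbatim source):

    // Before: an LValue handed out its pointer in isolation.
    llvm::Value *getPointer() const;
    Address getAddress() const;

    // After: every query names the function whose IR builder is active;
    // the argument is ignored for now, so the patch is behavior-preserving.
    llvm::Value *getPointer(CodeGenFunction &CGF) const;
    Address getAddress(CodeGenFunction &CGF) const;

Caller migration is mechanical: LV.getAddress() becomes LV.getAddress(*this) inside CodeGenFunction members and LV.getAddress(CGF) elsewhere, and lambdas that previously captured only the LValue now also capture the CodeGenFunction (see the InitScope.addPrivate and LoopGlobals.addPrivate hunks above).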