diff --git a/src/coreclr/ToolBox/superpmi/mcs/removedup.cpp b/src/coreclr/ToolBox/superpmi/mcs/removedup.cpp
index fb00c860860ad2..98cd16fc027774 100644
--- a/src/coreclr/ToolBox/superpmi/mcs/removedup.cpp
+++ b/src/coreclr/ToolBox/superpmi/mcs/removedup.cpp
@@ -61,7 +61,8 @@ bool RemoveDup::unique(MethodContext* mc)
     CORINFO_METHOD_INFO newInfo;
     unsigned            newFlags = 0;
-    mc->repCompileMethod(&newInfo, &newFlags);
+    CORINFO_OS newOs = CORINFO_WINNT;
+    mc->repCompileMethod(&newInfo, &newFlags, &newOs);
 
     // Assume that there are lots of duplicates, so don't allocate a new buffer for the MD5 hash data
     // until we know we're going to add it to the map.
@@ -95,7 +96,8 @@ bool RemoveDup::uniqueLegacy(MethodContext* mc)
     CORINFO_METHOD_INFO newInfo;
     unsigned            newFlags = 0;
-    mc->repCompileMethod(&newInfo, &newFlags);
+    CORINFO_OS newOs = CORINFO_WINNT;
+    mc->repCompileMethod(&newInfo, &newFlags, &newOs);
 
     if (m_inFileLegacy->GetIndex(newInfo.ILCodeSize) == -1)
         m_inFileLegacy->Add(newInfo.ILCodeSize, new DenseLightWeightMap());
diff --git a/src/coreclr/ToolBox/superpmi/mcs/verbdumpmap.cpp b/src/coreclr/ToolBox/superpmi/mcs/verbdumpmap.cpp
index a86b85be14ff43..c612b13f80de6e 100644
--- a/src/coreclr/ToolBox/superpmi/mcs/verbdumpmap.cpp
+++ b/src/coreclr/ToolBox/superpmi/mcs/verbdumpmap.cpp
@@ -17,15 +17,17 @@ void DumpMapHeader()
     // printf("process name,");
     printf("method name,");
     printf("full signature,");
-    printf("jit flags\n");
+    printf("jit flags,");
+    printf("os\n");
 }
 
 void DumpMap(int index, MethodContext* mc)
 {
     CORINFO_METHOD_INFO cmi;
     unsigned int        flags = 0;
+    CORINFO_OS          os;
 
-    mc->repCompileMethod(&cmi, &flags);
+    mc->repCompileMethod(&cmi, &flags, &os);
 
     const char* moduleName = nullptr;
     const char* methodName = mc->repGetMethodName(cmi.ftn, &moduleName);
@@ -121,7 +123,7 @@ void DumpMap(int index, MethodContext* mc)
         }
     }
 
-    printf(", %s\n", SpmiDumpHelper::DumpJitFlags(rawFlags).c_str());
+    printf(", %s, %d\n", SpmiDumpHelper::DumpJitFlags(rawFlags).c_str(), (int)os);
 }
 
 int verbDumpMap::DoWork(const char* nameOfInput)
diff --git a/src/coreclr/ToolBox/superpmi/mcs/verbildump.cpp b/src/coreclr/ToolBox/superpmi/mcs/verbildump.cpp
index fef9292d99dcf1..506d14e587f992 100644
--- a/src/coreclr/ToolBox/superpmi/mcs/verbildump.cpp
+++ b/src/coreclr/ToolBox/superpmi/mcs/verbildump.cpp
@@ -927,8 +927,9 @@ void DumpIL(MethodContext* mc)
 {
     CORINFO_METHOD_INFO cmi;
     unsigned int        flags = 0;
+    CORINFO_OS          os    = CORINFO_WINNT;
 
-    mc->repCompileMethod(&cmi, &flags);
+    mc->repCompileMethod(&cmi, &flags, &os);
 
     const char* moduleName = nullptr;
     const char* methodName = mc->repGetMethodName(cmi.ftn, &moduleName);
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shared/agnostic.h b/src/coreclr/ToolBox/superpmi/superpmi-shared/agnostic.h
index e51eb3ef6d049b..fd2a35093c7dec 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shared/agnostic.h
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shared/agnostic.h
@@ -48,6 +48,7 @@ struct Agnostic_CompileMethod
 {
     Agnostic_CORINFO_METHOD_INFO info;
     DWORD                        flags;
+    DWORD                        os;
 };
 
 struct Agnostic_InitClass
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shared/asmdumper.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shared/asmdumper.cpp
index 1a88589e7e55ff..0e22c494559101 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shared/asmdumper.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shared/asmdumper.cpp
@@ -8,7 +8,8 @@ void ASMDumper::DumpToFile(HANDLE hFile, MethodContext* mc, CompileResult* cr)
 {
     CORINFO_METHOD_INFO info;
     unsigned            flags = 0;
-    mc->repCompileMethod(&info, &flags);
+    CORINFO_OS os = CORINFO_WINNT;
+    mc->repCompileMethod(&info, &flags, &os);
 
 #define bufflen 4096
     DWORD bytesWritten;
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shared/icorjitcompilerimpl.h b/src/coreclr/ToolBox/superpmi/superpmi-shared/icorjitcompilerimpl.h
index 6b7ed4fd8e73d3..49cfb3b7a1482f 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shared/icorjitcompilerimpl.h
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shared/icorjitcompilerimpl.h
@@ -24,7 +24,7 @@
 // * In the 32 bit jit this is implemented by code:CILJit.compileMethod
 // * For the 64 bit jit this is implemented by code:PreJit.compileMethod
 //
-// Note: Obfuscators that are hacking the JIT depend on this method having __stdcall calling convention
+// Note: setTargetOS must be called before this API is used.
 CorJitResult compileMethod(ICorJitInfo* comp,                         /* IN */
                            struct CORINFO_METHOD_INFO* info,          /* IN */
                            unsigned /* code:CorJitFlag */ flags,      /* IN */
@@ -47,4 +47,10 @@ void getVersionIdentifier(GUID* versionIdentifier /* OUT */
 // intrinsics, so the EE should use the default size (i.e. the size of the IL implementation).
 unsigned getMaxIntrinsicSIMDVectorLength(CORJIT_FLAGS cpuCompileFlags); /* { return 0; } */
 
+// Some JITs may support multiple OSes. This API provides a means to specify to the JIT which OS it should
+// compile for. This API does not produce any errors; any errors are generated by the compileMethod call,
+// which will call back into the VM to ensure bits are correctly set up.
+//
+// Note: this API MUST be called before compileMethod is called for the first time in the process.
+void setTargetOS(CORINFO_OS os);
 #endif
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
index 7d4a191c51cf62..999ad17a870546 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
@@ -384,11 +384,13 @@ bool MethodContext::Equal(MethodContext* other)
     // Compare MethodInfo's first.
     CORINFO_METHOD_INFO otherInfo;
     unsigned            otherFlags = 0;
-    other->repCompileMethod(&otherInfo, &otherFlags);
+    CORINFO_OS          otherOs    = CORINFO_WINNT;
+    other->repCompileMethod(&otherInfo, &otherFlags, &otherOs);
 
     CORINFO_METHOD_INFO ourInfo;
     unsigned            ourFlags = 0;
-    repCompileMethod(&ourInfo, &ourFlags);
+    CORINFO_OS          ourOs    = CORINFO_WINNT;
+    repCompileMethod(&ourInfo, &ourFlags, &ourOs);
 
     if (otherInfo.ILCodeSize != ourInfo.ILCodeSize)
         return false;
@@ -419,6 +421,8 @@ bool MethodContext::Equal(MethodContext* other)
         return false;
     if (otherFlags != ourFlags)
         return false;
+    if (otherOs != ourOs)
+        return false;
 
     // Now compare the other maps to "estimate" equality.
@@ -643,7 +647,7 @@ unsigned int toCorInfoSize(CorInfoType cit)
     return -1;
 }
 
-void MethodContext::recCompileMethod(CORINFO_METHOD_INFO* info, unsigned flags)
+void MethodContext::recCompileMethod(CORINFO_METHOD_INFO* info, unsigned flags, CORINFO_OS os)
 {
     if (CompileMethod == nullptr)
         CompileMethod = new LightWeightMap();
@@ -662,6 +666,8 @@ void MethodContext::recCompileMethod(CORINFO_METHOD_INFO* info, unsigned flags)
     value.info.args   = SpmiRecordsHelper::StoreAgnostic_CORINFO_SIG_INFO(info->args, CompileMethod, SigInstHandleMap);
     value.info.locals = SpmiRecordsHelper::StoreAgnostic_CORINFO_SIG_INFO(info->locals, CompileMethod, SigInstHandleMap);
 
+    value.os = (DWORD)os;
+
     value.flags = (DWORD)flags;
 
     CompileMethod->Add(0, value);
 }
 
 void MethodContext::dmpCompileMethod(DWORD key, const Agnostic_CompileMethod& value)
 {
-    printf("CompileMethod key %u, value ftn-%016llX scp-%016llX ilo-%u ils-%u ms-%u ehc-%u opt-%u rk-%u args-%s locals-%s flg-%08X",
+    printf("CompileMethod key %u, value ftn-%016llX scp-%016llX ilo-%u ils-%u ms-%u ehc-%u opt-%u rk-%u args-%s locals-%s flg-%08X os-%u",
            key, value.info.ftn, value.info.scope, value.info.ILCode_offset, value.info.ILCodeSize, value.info.maxStack,
            value.info.EHcount, value.info.options, value.info.regionKind,
            SpmiDumpHelper::DumpAgnostic_CORINFO_SIG_INFO(value.info.args, CompileMethod, SigInstHandleMap).c_str(),
           SpmiDumpHelper::DumpAgnostic_CORINFO_SIG_INFO(value.info.locals, CompileMethod, SigInstHandleMap).c_str(),
-           value.flags);
+           value.flags, value.os);
 }
 
-void MethodContext::repCompileMethod(CORINFO_METHOD_INFO* info, unsigned* flags)
+void MethodContext::repCompileMethod(CORINFO_METHOD_INFO* info, unsigned* flags, CORINFO_OS* os)
 {
     AssertMapAndKeyExistNoMessage(CompileMethod, 0);
@@ -699,6 +705,7 @@ void MethodContext::repCompileMethod(CORINFO_METHOD_INFO* info, unsigned* flags)
     info->locals = SpmiRecordsHelper::Restore_CORINFO_SIG_INFO(value.info.locals, CompileMethod, SigInstHandleMap);
 
     *flags = (unsigned)value.flags;
+    *os    = (CORINFO_OS)value.os;
 }
 
 void MethodContext::recGetMethodClass(CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle)
@@ -4188,7 +4195,9 @@ void MethodContext::repGetEEInfo(CORINFO_EE_INFO* pEEInfoOut)
         pEEInfoOut->osPageSize                      = (size_t)0x1000;
         pEEInfoOut->maxUncheckedOffsetForNullObject = (size_t)((32 * 1024) - 1);
         pEEInfoOut->targetAbi                       = CORINFO_DESKTOP_ABI;
-#ifdef TARGET_UNIX
+#ifdef TARGET_OSX
+        pEEInfoOut->osType = CORINFO_MACOS;
+#elif defined(TARGET_UNIX)
         pEEInfoOut->osType = CORINFO_UNIX;
 #else
         pEEInfoOut->osType = CORINFO_WINNT;
@@ -6848,7 +6857,8 @@ int MethodContext::dumpMethodIdentityInfoToBuffer(char* buff, int len, bool igno
     }
     else
     {
-        repCompileMethod(&info, &flags);
+        CORINFO_OS os;
+        repCompileMethod(&info, &flags, &os);
         pInfo = &info;
     }
 
@@ -6982,7 +6992,8 @@ bool MethodContext::hasPgoData(bool& hasEdgeProfile, bool& hasClassProfile, bool
     // Obtain the Method Info structure for this method
     CORINFO_METHOD_INFO info;
     unsigned            flags = 0;
-    repCompileMethod(&info, &flags);
+    CORINFO_OS os;
+    repCompileMethod(&info, &flags, &os);
 
     if ((GetPgoInstrumentationResults != nullptr) &&
         (GetPgoInstrumentationResults->GetIndex(CastHandle(info.ftn)) != -1))
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.h b/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.h
index 4abbef3faf135d..f209a4b5d0ad71 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.h
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shared/methodcontext.h
@@ -109,9 +109,9 @@ class MethodContext
     void dmpEnvironment(DWORD key, const Agnostic_Environment& value);
 
-    void recCompileMethod(CORINFO_METHOD_INFO* info, unsigned flags);
+    void recCompileMethod(CORINFO_METHOD_INFO* info, unsigned flags, CORINFO_OS os);
     void dmpCompileMethod(DWORD key, const Agnostic_CompileMethod& value);
-    void repCompileMethod(CORINFO_METHOD_INFO* info, unsigned* flags);
+    void repCompileMethod(CORINFO_METHOD_INFO* info, unsigned* flags, CORINFO_OS* os);
 
     void recGetMethodClass(CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle);
     void dmpGetMethodClass(DWORDLONG key, DWORDLONG value);
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitcompiler.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitcompiler.cpp
index da110199d8621a..0b0a22476c3bde 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitcompiler.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitcompiler.cpp
@@ -10,6 +10,12 @@
 
 #define fatMC // this is nice to have on so ildump works...
 
+void interceptor_ICJC::setTargetOS(CORINFO_OS os)
+{
+    currentOs = os;
+    original_ICorJitCompiler->setTargetOS(os);
+}
+
 CorJitResult interceptor_ICJC::compileMethod(ICorJitInfo*                comp,     /* IN */
                                              struct CORINFO_METHOD_INFO* info,     /* IN */
                                              unsigned /* code:CorJitFlag */ flags, /* IN */
@@ -24,7 +30,7 @@ CorJitResult interceptor_ICJC::compileMethod(ICorJitInfo* comp,
     our_ICorJitInfo.mc = mc;
     our_ICorJitInfo.mc->cr->recProcessName(GetCommandLineA());
 
-    our_ICorJitInfo.mc->recCompileMethod(info, flags);
+    our_ICorJitInfo.mc->recCompileMethod(info, flags, currentOs);
 
     // force some extra data into our tables..
     // data probably not needed with RyuJIT, but needed in 4.5 and 4.5.1 to help with catching cached values
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitcompiler.h b/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitcompiler.h
index 687f75a2f2d31a..3a711bd0765951 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitcompiler.h
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/icorjitcompiler.h
@@ -15,6 +15,7 @@ class interceptor_ICJC : public ICorJitCompiler
     // Added to help us track the original icjc and be able to easily indirect to it.
     ICorJitCompiler* original_ICorJitCompiler;
     HANDLE           hFile;
+    CORINFO_OS       currentOs;
 };
 
 #endif
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp
index 9cd22007107534..70bbb071229769 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp
@@ -193,6 +193,16 @@ extern "C" DLLEXPORT ICorJitCompiler* getJit()
         pJitInstance = new interceptor_ICJC();
         pJitInstance->original_ICorJitCompiler = tICJI;
 
+#ifdef TARGET_WINDOWS
+        pJitInstance->currentOs = CORINFO_WINNT;
+#elif defined(TARGET_OSX)
+        pJitInstance->currentOs = CORINFO_MACOS;
+#elif defined(TARGET_UNIX)
+        pJitInstance->currentOs = CORINFO_UNIX;
+#else
+#error No target os defined
+#endif
+
         // create our datafile
         pJitInstance->hFile = CreateFileW(g_dataFileName, GENERIC_READ | GENERIC_WRITE, 0, NULL, CREATE_ALWAYS,
                                           FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN, NULL);
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shim-counter/icorjitcompiler.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shim-counter/icorjitcompiler.cpp
index 36f262611d9d63..1f1f1d210ef6e1 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shim-counter/icorjitcompiler.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shim-counter/icorjitcompiler.cpp
@@ -5,6 +5,11 @@
 #include "icorjitcompiler.h"
 #include "icorjitinfo.h"
 
+void interceptor_ICJC::setTargetOS(CORINFO_OS os)
+{
+    original_ICorJitCompiler->setTargetOS(os);
+}
+
 CorJitResult interceptor_ICJC::compileMethod(ICorJitInfo*                comp,     /* IN */
                                              struct CORINFO_METHOD_INFO* info,     /* IN */
                                              unsigned /* code:CorJitFlag */ flags, /* IN */
diff --git a/src/coreclr/ToolBox/superpmi/superpmi-shim-simple/icorjitcompiler.cpp b/src/coreclr/ToolBox/superpmi/superpmi-shim-simple/icorjitcompiler.cpp
index c634843e442896..53442ced042e17 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi-shim-simple/icorjitcompiler.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi-shim-simple/icorjitcompiler.cpp
@@ -5,6 +5,11 @@
 #include "icorjitcompiler.h"
 #include "icorjitinfo.h"
 
+void interceptor_ICJC::setTargetOS(CORINFO_OS os)
+{
+    original_ICorJitCompiler->setTargetOS(os);
+}
+
 CorJitResult interceptor_ICJC::compileMethod(ICorJitInfo*                comp,     /* IN */
                                              struct CORINFO_METHOD_INFO* info,     /* IN */
                                              unsigned /* code:CorJitFlag */ flags, /* IN */
diff --git a/src/coreclr/ToolBox/superpmi/superpmi/jitinstance.cpp b/src/coreclr/ToolBox/superpmi/superpmi/jitinstance.cpp
index c1264b6c6cf492..fafc367669d47c 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi/jitinstance.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi/jitinstance.cpp
@@ -303,14 +303,16 @@ JitInstance::Result JitInstance::CompileMethod(MethodContext* MethodToCompile, i
 
     PAL_TRY(Param*, pParam, &param)
     {
-        uint8_t* NEntryBlock    = nullptr;
-        uint32_t NCodeSizeBlock = 0;
+        uint8_t*   NEntryBlock    = nullptr;
+        uint32_t   NCodeSizeBlock = 0;
+        CORINFO_OS os             = CORINFO_WINNT;
 
-        pParam->pThis->mc->repCompileMethod(&pParam->info, &pParam->flags);
+        pParam->pThis->mc->repCompileMethod(&pParam->info, &pParam->flags, &os);
         if (pParam->collectThroughput)
         {
             pParam->pThis->lt.Start();
         }
+        pParam->pThis->pJitInstance->setTargetOS(os);
         CorJitResult jitResult = pParam->pThis->pJitInstance->compileMethod(pParam->pThis->icji, &pParam->info,
                                                                             pParam->flags, &NEntryBlock, &NCodeSizeBlock);
         if (pParam->collectThroughput)
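Taken together with the collector-shim changes above, the replay side now follows the same contract as the VM: read back the recorded OS, hand it to the JIT, then compile. A condensed sketch of that ordering, with a hypothetical helper name and error handling omitted (MethodContext, ICorJitCompiler and ICorJitInfo are the types used in the hunks above):

    // Sketch only: the replay path must pass the recorded OS to the JIT before compiling.
    CorJitResult ReplayOne(MethodContext* mc, ICorJitCompiler* jit, ICorJitInfo* icji)
    {
        CORINFO_METHOD_INFO info;
        unsigned            flags = 0;
        CORINFO_OS          os    = CORINFO_WINNT; // safe default, overwritten from the collection

        mc->repCompileMethod(&info, &flags, &os); // recover what the collector shim recorded
        jit->setTargetOS(os);                     // contract: call before compileMethod

        uint8_t* nativeEntry      = nullptr;
        uint32_t nativeSizeOfCode = 0;
        return jit->compileMethod(icji, &info, flags, &nativeEntry, &nativeSizeOfCode);
    }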
diff --git a/src/coreclr/ToolBox/superpmi/superpmi/methodstatsemitter.cpp b/src/coreclr/ToolBox/superpmi/superpmi/methodstatsemitter.cpp
index 51bf5b1fddb8c1..51ecfab64fb464 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi/methodstatsemitter.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi/methodstatsemitter.cpp
@@ -60,7 +60,8 @@ void MethodStatsEmitter::Emit(int methodNumber, MethodContext* mc, ULONGLONG fir
         // Obtain the IL code size for this method
         CORINFO_METHOD_INFO info;
         unsigned            flags = 0;
-        mc->repCompileMethod(&info, &flags);
+        CORINFO_OS os = CORINFO_WINNT;
+        mc->repCompileMethod(&info, &flags, &os);
 
         charCount += sprintf_s(rowData + charCount, _countof(rowData) - charCount, "%d,", info.ILCodeSize);
     }
diff --git a/src/coreclr/ToolBox/superpmi/superpmi/neardiffer.cpp b/src/coreclr/ToolBox/superpmi/superpmi/neardiffer.cpp
index cc11b287419d81..b9921e189de285 100644
--- a/src/coreclr/ToolBox/superpmi/superpmi/neardiffer.cpp
+++ b/src/coreclr/ToolBox/superpmi/superpmi/neardiffer.cpp
@@ -930,7 +930,8 @@ bool NearDiffer::compareVars(MethodContext* mc, CompileResult* cr1, CompileResul
 
     CORINFO_METHOD_INFO info;
     unsigned            flags = 0;
-    mc->repCompileMethod(&info, &flags);
+    CORINFO_OS os = CORINFO_WINNT;
+    mc->repCompileMethod(&info, &flags, &os);
 
     bool set1 = cr1->repSetVars(&ftn_1, &cVars_1, &vars_1);
     bool set2 = cr2->repSetVars(&ftn_2, &cVars_2, &vars_2);
@@ -1003,7 +1004,8 @@ bool NearDiffer::compareBoundaries(MethodContext* mc, CompileResult* cr1, Compil
 
     CORINFO_METHOD_INFO info;
     unsigned            flags = 0;
-    mc->repCompileMethod(&info, &flags);
+    CORINFO_OS os = CORINFO_WINNT;
+    mc->repCompileMethod(&info, &flags, &os);
 
     bool set1 = cr1->repSetBoundaries(&ftn_1, &cMap_1, &map_1);
     bool set2 = cr2->repSetBoundaries(&ftn_2, &cMap_2, &map_2);
diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake
index be2335b5910d57..ffadad35521002 100644
--- a/src/coreclr/clrdefinitions.cmake
+++ b/src/coreclr/clrdefinitions.cmake
@@ -244,7 +244,10 @@ function(set_target_definitions_to_custom_os_and_arch)
     if ((TARGETDETAILS_ARCH STREQUAL "arm64") AND (TARGETDETAILS_OS STREQUAL "unix_osx"))
       target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE OSX_ARM64_ABI)
     endif()
-  else()
+    if (TARGETDETAILS_OS STREQUAL "unix_osx")
+      target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_OSX)
+    endif()
+  elseif (TARGETDETAILS_OS STREQUAL "win")
     target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_WINDOWS)
   endif((TARGETDETAILS_OS MATCHES "^unix"))
diff --git a/src/coreclr/crosscomponents.cmake b/src/coreclr/crosscomponents.cmake
index 864bdf9e722184..ceb5b5384e5a74 100644
--- a/src/coreclr/crosscomponents.cmake
+++ b/src/coreclr/crosscomponents.cmake
@@ -2,27 +2,33 @@
 if (CLR_CMAKE_HOST_OS STREQUAL CLR_CMAKE_TARGET_OS)
     install_clr (TARGETS
-        clrjit
-        DESTINATIONS . sharedFramework
-        COMPONENT crosscomponents
-    )
-    install_clr (TARGETS
-        clrjit
         jitinterface_${ARCH_HOST_NAME}
         DESTINATIONS .
         COMPONENT crosscomponents
     )
 
-    if(CLR_CMAKE_TARGET_OSX AND ARCH_TARGET_NAME STREQUAL arm64)
+    if (CLR_CMAKE_TARGET_OSX AND ARCH_TARGET_NAME STREQUAL arm64)
+        install_clr (TARGETS
+            clrjit_universal_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME}
+            DESTINATIONS .
+            COMPONENT crosscomponents
+        )
+    elseif (CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARM64)
         install_clr (TARGETS
-            clrjit_unix_osx_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME}
-            DESTINATIONS . sharedFramework
+            clrjit_universal_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME}
+            DESTINATIONS .
             COMPONENT crosscomponents
         )
     elseif (CLR_CMAKE_TARGET_UNIX)
         install_clr (TARGETS
             clrjit_unix_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME}
-            DESTINATIONS . sharedFramework
+            DESTINATIONS .
+            COMPONENT crosscomponents
+        )
+    else()
+        install_clr (TARGETS
+            clrjit_win_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME}
+            DESTINATIONS .
             COMPONENT crosscomponents
         )
     endif()
diff --git a/src/coreclr/gcinfo/CMakeLists.txt b/src/coreclr/gcinfo/CMakeLists.txt
index 96319fd98b4252..8c966bb3403b5e 100644
--- a/src/coreclr/gcinfo/CMakeLists.txt
+++ b/src/coreclr/gcinfo/CMakeLists.txt
@@ -64,16 +64,18 @@ else()
   set(TARGET_OS_NAME win)
 endif()
 
+# For clrjit we need to build an exactly targeted gcinfo instead of the universal one
+if (CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_ARM)
+  create_gcinfo_lib(TARGET gcinfo_${TARGET_OS_NAME}_${ARCH_TARGET_NAME} OS ${TARGET_OS_NAME} ARCH ${ARCH_TARGET_NAME})
+endif()
+
 if (CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_AMD64)
-  create_gcinfo_lib(TARGET gcinfo_unix_arm64 OS unix ARCH arm64)
+  create_gcinfo_lib(TARGET gcinfo_universal_arm64 OS universal ARCH arm64)
   create_gcinfo_lib(TARGET gcinfo_unix_x64 OS unix ARCH x64)
-  create_gcinfo_lib(TARGET gcinfo_win_arm64 OS win ARCH arm64)
   create_gcinfo_lib(TARGET gcinfo_win_x64 OS win ARCH x64)
 endif (CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_AMD64)
 
-create_gcinfo_lib(TARGET gcinfo_unix_armel OS unix ARCH armel)
-create_gcinfo_lib(TARGET gcinfo_unix_arm OS unix ARCH arm)
-create_gcinfo_lib(TARGET gcinfo_win_arm OS win ARCH arm)
+create_gcinfo_lib(TARGET gcinfo_universal_arm OS universal ARCH arm)
 create_gcinfo_lib(TARGET gcinfo_win_x86 OS win ARCH x86)
 
 if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)
diff --git a/src/coreclr/gcinfo/gcinfoencoder.cpp b/src/coreclr/gcinfo/gcinfoencoder.cpp
index 8f56607e22ba5d..8be8b262f1a4a1 100644
--- a/src/coreclr/gcinfo/gcinfoencoder.cpp
+++ b/src/coreclr/gcinfo/gcinfoencoder.cpp
@@ -10,6 +10,7 @@
 #include
 
 #include "gcinfoencoder.h"
+#include "targetosarch.h"
 
 #ifdef _DEBUG
     #ifndef LOGGING
@@ -577,10 +578,11 @@ GcSlotId GcInfoEncoder::GetStackSlotId( INT32 spOffset, GcSlotFlags flags, GcSta
     _ASSERTE( (flags & (GC_SLOT_IS_REGISTER | GC_SLOT_IS_DELETED)) == 0 );
 
-#if !defined(OSX_ARM64_ABI)
-    // the spOffset for the stack slot is required to be pointer size aligned
-    _ASSERTE((spOffset % TARGET_POINTER_SIZE) == 0);
-#endif
+    if (!(TargetOS::IsMacOS && TargetArchitecture::IsArm64))
+    {
+        // the spOffset for the stack slot is required to be pointer size aligned
+        _ASSERTE((spOffset % TARGET_POINTER_SIZE) == 0);
+    }
 
     m_SlotTable[ m_NumSlots ].Slot.Stack.SpOffset = spOffset;
     m_SlotTable[ m_NumSlots ].Slot.Stack.Base = spBase;
diff --git a/src/coreclr/inc/corinfo.h b/src/coreclr/inc/corinfo.h
index e552d195fbf74c..0db622f21adb43 100644
--- a/src/coreclr/inc/corinfo.h
+++ b/src/coreclr/inc/corinfo.h
@@ -1719,6 +1719,7 @@ enum CORINFO_OS
 {
     CORINFO_WINNT,
     CORINFO_UNIX,
+    CORINFO_MACOS,
 };
 
 struct CORINFO_CPU
diff --git a/src/coreclr/inc/corjit.h b/src/coreclr/inc/corjit.h
index d679c99ce90270..122fcd0d7b0ba2 100644
--- a/src/coreclr/inc/corjit.h
+++ b/src/coreclr/inc/corjit.h
@@ -188,6 +188,7 @@ class ICorJitCompiler
     //
     // * In the 32 bit jit this is implemented by code:CILJit.compileMethod
     // * For the 64 bit jit this is implemented by code:PreJit.compileMethod
+    // Note: setTargetOS must be called before this API is used.
     virtual CorJitResult compileMethod (
             ICorJitInfo                 *comp,               /* IN */
             struct CORINFO_METHOD_INFO  *info,               /* IN */
@@ -211,6 +212,13 @@ class ICorJitCompiler
     //     SIMD vector it supports as an intrinsic type.  Zero means that the JIT does not support SIMD
     //     intrinsics, so the EE should use the default size (i.e. the size of the IL implementation).
     virtual unsigned getMaxIntrinsicSIMDVectorLength(CORJIT_FLAGS cpuCompileFlags) { return 0; }
+
+    // Some JITs may support multiple OSes. This API provides a means to specify to the JIT which OS it should
+    // compile for. This API does not produce any errors; any errors are generated by the compileMethod call,
+    // which will call back into the VM to ensure bits are correctly set up.
+    //
+    // Note: this API MUST be called before compileMethod is called for the first time in the process.
+    virtual void setTargetOS(CORINFO_OS os) = 0;
 };
 
 //------------------------------------------------------------------------------------------
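From a host's perspective, the documented contract amounts to fixing the target OS once, before any compilation request. A minimal hedged sketch (hypothetical host code, not part of this change; jitInfo, methodInfo and flags are assumed to come from the host, and the JIT instance is assumed to have been obtained through the usual getJit export):

    // Hypothetical host-side usage of setTargetOS.
    ICorJitCompiler* jit = getJit();
    jit->setTargetOS(CORINFO_UNIX); // e.g. cross-compiling for Linux; must precede any compileMethod

    uint8_t* nativeEntry      = nullptr;
    uint32_t nativeSizeOfCode = 0;
    CorJitResult res = jit->compileMethod(jitInfo, &methodInfo, flags, &nativeEntry, &nativeSizeOfCode);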
diff --git a/src/coreclr/inc/jiteeversionguid.h b/src/coreclr/inc/jiteeversionguid.h
index 6aca52f5884e54..8f99d9b4bb8608 100644
--- a/src/coreclr/inc/jiteeversionguid.h
+++ b/src/coreclr/inc/jiteeversionguid.h
@@ -43,11 +43,11 @@ typedef const GUID *LPCGUID;
 #define GUID_DEFINED
 #endif // !GUID_DEFINED
 
-constexpr GUID JITEEVersionIdentifier = { /* 802cceb2-2ebd-4ff9-ac31-4c3546a02aa5 */
-    0x802cceb2,
-    0x2ebd,
-    0x4ff9,
-    {0xac, 0x31, 0x4c, 0x35, 0x46, 0xa0, 0x2a, 0xa5}
+constexpr GUID JITEEVersionIdentifier = { /* 017b4b2e-80e1-41eb-afc3-f6f643df6bbc */
+    0x017b4b2e,
+    0x80e1,
+    0x41eb,
+    {0xaf, 0xc3, 0xf6, 0xf6, 0x43, 0xdf, 0x6b, 0xbc}
 };
 
 //////////////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/src/coreclr/inc/switches.h b/src/coreclr/inc/switches.h
index ab75ce0b7f18ba..91947135de94af 100644
--- a/src/coreclr/inc/switches.h
+++ b/src/coreclr/inc/switches.h
@@ -174,5 +174,3 @@
 #if !defined(TARGET_UNIX)
     #define FEATURE_STACK_SAMPLING
 #endif // defined (ALLOW_SXS_JIT)
-
-
diff --git a/src/coreclr/inc/targetosarch.h b/src/coreclr/inc/targetosarch.h
new file mode 100644
index 00000000000000..b2d1c06a22d669
--- /dev/null
+++ b/src/coreclr/inc/targetosarch.h
@@ -0,0 +1,67 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#ifndef targetosarch_h
+#define targetosarch_h
+
+class TargetOS
+{
+public:
+#ifdef TARGET_WINDOWS
+#define TARGET_WINDOWS_POSSIBLY_SUPPORTED
+    static const bool IsWindows = true;
+    static const bool IsUnix    = false;
+    static const bool IsMacOS   = false;
+#elif defined(TARGET_UNIX)
+#define TARGET_UNIX_POSSIBLY_SUPPORTED
+    static const bool IsWindows = false;
+    static const bool IsUnix    = true;
+#if defined(TARGET_OSX)
+    static const bool IsMacOS = true;
+#else
+    static const bool IsMacOS = false;
+#endif
+#else
+#define TARGET_WINDOWS_POSSIBLY_SUPPORTED
+#define TARGET_UNIX_POSSIBLY_SUPPORTED
+#define TARGET_OS_RUNTIMEDETERMINED
+    static bool OSSettingConfigured;
+    static bool IsWindows;
+    static bool IsUnix;
+    static bool IsMacOS;
+#endif
+};
+
+class TargetArchitecture
+{
+public:
+#ifdef TARGET_ARM
+    static const bool IsX86     = false;
+    static const bool IsX64     = false;
+    static const bool IsArm64   = false;
+    static const bool IsArm32   = true;
+    static const bool IsArmArch = true;
+#elif defined(TARGET_ARM64)
+    static const bool IsX86     = false;
+    static const bool IsX64     = false;
+    static const bool IsArm64   = true;
+    static const bool IsArm32   = false;
+    static const bool IsArmArch = true;
+#elif defined(TARGET_AMD64)
+    static const bool IsX86     = false;
+    static const bool IsX64     = true;
+    static const bool IsArm64   = false;
+    static const bool IsArm32   = false;
+    static const bool IsArmArch = false;
+#elif defined(TARGET_X86)
+    static const bool IsX86     = true;
+    static const bool IsX64     = false;
+    static const bool IsArm64   = false;
+    static const bool IsArm32   = false;
+    static const bool IsArmArch = false;
+#else
+#error Unknown architecture
+#endif
+};
+
+#endif // targetosarch_h
\ No newline at end of file
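In the runtime-determined branch of TargetOS (taken when neither TARGET_WINDOWS nor TARGET_UNIX is defined), the flags are plain mutable statics that CILJit::setTargetOS fills in later, so OS-dependent behavior is written as ordinary conditionals instead of #ifdef blocks; that is the pattern the JIT hunks below follow. A minimal illustrative sketch only — the function name and constants here are made up for the example:

    #include "targetosarch.h"

    // Illustrative only: one "universal" binary picks OS-specific behavior at run time.
    static unsigned exampleStackSlotAlignment()
    {
        if (TargetOS::IsMacOS && TargetArchitecture::IsArm64)
        {
            return 4; // Apple arm64 permits smaller-than-pointer stack slot alignment
        }
        return 8;     // pointer-size alignment on the other 64-bit targets sketched here
    }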
diff --git a/src/coreclr/inc/winwrap.h b/src/coreclr/inc/winwrap.h
index 570ca5ba19c03d..15c193ee36720d 100644
--- a/src/coreclr/inc/winwrap.h
+++ b/src/coreclr/inc/winwrap.h
@@ -259,11 +259,11 @@ inline DWORD GetMaxDBCSCharByteSize()
 #endif // HOST_UNIX
 }
 
-#ifndef TARGET_UNIX
+#ifndef HOST_UNIX
 BOOL RunningInteractive();
-#else // !TARGET_UNIX
+#else // !HOST_UNIX
 #define RunningInteractive() FALSE
-#endif // !TARGET_UNIX
+#endif // !HOST_UNIX
 
 #ifndef Wsz_mbstowcs
 #define Wsz_mbstowcs(szOut, szIn, iSize) WszMultiByteToWideChar(CP_ACP, 0, szIn, -1, szOut, iSize)
diff --git a/src/coreclr/jit/CMakeLists.txt b/src/coreclr/jit/CMakeLists.txt
index a3f8a99380e3df..e468b3a59c7c75 100644
--- a/src/coreclr/jit/CMakeLists.txt
+++ b/src/coreclr/jit/CMakeLists.txt
@@ -500,16 +500,13 @@ install_clr(TARGETS clrjit DESTINATIONS . sharedFramework COMPONENT jit)
 add_pgo(clrjit)
 
 if (CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_AMD64)
-  create_standalone_jit(TARGET clrjit_unix_arm64_${ARCH_HOST_NAME} OS unix ARCH arm64 DESTINATIONS .)
-  create_standalone_jit(TARGET clrjit_unix_osx_arm64_${ARCH_HOST_NAME} OS unix_osx ARCH arm64 DESTINATIONS .)
+  create_standalone_jit(TARGET clrjit_universal_arm64_${ARCH_HOST_NAME} OS universal ARCH arm64 DESTINATIONS .)
   create_standalone_jit(TARGET clrjit_unix_x64_${ARCH_HOST_NAME} OS unix ARCH x64 DESTINATIONS .)
-  create_standalone_jit(TARGET clrjit_win_arm64_${ARCH_HOST_NAME} OS win ARCH arm64 DESTINATIONS .)
   create_standalone_jit(TARGET clrjit_win_x64_${ARCH_HOST_NAME} OS win ARCH x64 DESTINATIONS .)
 endif (CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_AMD64)
 
-create_standalone_jit(TARGET clrjit_unix_arm_${ARCH_HOST_NAME} OS unix ARCH arm DESTINATIONS .)
-target_compile_definitions(clrjit_unix_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP CONFIGURABLE_ARM_ABI)
-create_standalone_jit(TARGET clrjit_win_arm_${ARCH_HOST_NAME} OS win ARCH arm DESTINATIONS .)
+create_standalone_jit(TARGET clrjit_universal_arm_${ARCH_HOST_NAME} OS universal ARCH arm DESTINATIONS .)
+target_compile_definitions(clrjit_universal_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP CONFIGURABLE_ARM_ABI)
 create_standalone_jit(TARGET clrjit_win_x86_${ARCH_HOST_NAME} OS win ARCH x86 DESTINATIONS .)
 
 if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)
@@ -518,11 +515,12 @@ endif (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)
 
 if (CLR_CMAKE_TARGET_UNIX)
     if (NOT ARCH_TARGET_NAME STREQUAL s390x)
-        install_clr(TARGETS clrjit_unix_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit)
+        if(CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARM64)
+            install_clr(TARGETS clrjit_universal_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit)
+        else()
+            install_clr(TARGETS clrjit_unix_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit)
+        endif()
     endif(NOT ARCH_TARGET_NAME STREQUAL s390x)
-    if (ARCH_TARGET_NAME STREQUAL arm)
-        target_compile_definitions(clrjit_unix_arm_${ARCH_HOST_NAME} PRIVATE ARM_SOFTFP CONFIGURABLE_ARM_ABI)
-    endif (ARCH_TARGET_NAME STREQUAL arm)
 endif()
 
 if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_PGO_INSTRUMENT)
diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp
index 1baed06c632642..90ad7f5ded5110 100644
--- a/src/coreclr/jit/codegenarm64.cpp
+++ b/src/coreclr/jit/codegenarm64.cpp
@@ -246,12 +246,10 @@ void CodeGen::genPrologSaveRegPair(regNumber reg1,
         assert((spOffset % 8) == 0);
         GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
 
-#if defined(TARGET_UNIX)
-        if (compiler->generateCFIUnwindCodes())
+        if (TargetOS::IsUnix && compiler->generateCFIUnwindCodes())
         {
             useSaveNextPair = false;
         }
-#endif // TARGET_UNIX
 
         if (useSaveNextPair)
         {
@@ -381,12 +379,10 @@ void CodeGen::genEpilogRestoreRegPair(regNumber reg1,
     {
         GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
 
-#if defined(TARGET_UNIX)
-        if (compiler->generateCFIUnwindCodes())
+        if (TargetOS::IsUnix && compiler->generateCFIUnwindCodes())
         {
             useSaveNextPair = false;
         }
-#endif // TARGET_UNIX
 
         if (useSaveNextPair)
         {
diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp
index cc26945cd18ebe..d540c522af52f7 100644
--- a/src/coreclr/jit/codegenarmarch.cpp
+++ b/src/coreclr/jit/codegenarmarch.cpp
@@ -398,11 +398,9 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
             genPutArgReg(treeNode->AsOp());
             break;
 
-#if FEATURE_ARG_SPLIT
         case GT_PUTARG_SPLIT:
             genPutArgSplit(treeNode->AsPutArgSplit());
             break;
-#endif // FEATURE_ARG_SPLIT
 
         case GT_CALL:
             genCall(treeNode->AsCall());
@@ -681,12 +679,17 @@ void CodeGen::genIntrinsic(GenTree* treeNode)
 void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
 {
     assert(treeNode->OperIs(GT_PUTARG_STK));
-    GenTree*  source     = treeNode->gtOp1;
-#if !defined(OSX_ARM64_ABI)
-    var_types targetType = genActualType(source->TypeGet());
-#else
-    var_types targetType = source->TypeGet();
-#endif
+    GenTree*  source = treeNode->gtOp1;
+    var_types targetType;
+
+    if (!compMacOsArm64Abi())
+    {
+        targetType = genActualType(source->TypeGet());
+    }
+    else
+    {
+        targetType = source->TypeGet();
+    }
     emitter* emit = GetEmitter();
 
     // This is the varNum for our store operations,
@@ -741,17 +744,17 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
             regNumber srcReg = genConsumeReg(source);
             assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
 
-#if !defined(OSX_ARM64_ABI)
-            assert(treeNode->GetStackByteSize() % TARGET_POINTER_SIZE == 0);
-#else  // OSX_ARM64_ABI
-            if (treeNode->GetStackByteSize() == 12)
+            assert(compMacOsArm64Abi() || treeNode->GetStackByteSize() % TARGET_POINTER_SIZE == 0);
+
+#ifdef TARGET_ARM64
+            if (compMacOsArm64Abi() && (treeNode->GetStackByteSize() == 12))
             {
                 regNumber tmpReg = treeNode->GetSingleTempReg();
                 GetEmitter()->emitStoreSIMD12ToLclOffset(varNumOut, argOffsetOut, srcReg, tmpReg);
                 argOffsetOut += 12;
             }
             else
-#endif // OSX_ARM64_ABI
+#endif // TARGET_ARM64
             {
                 emitAttr storeAttr = emitTypeSize(targetType);
                 emit->emitIns_S_R(INS_str, storeAttr, srcReg, varNumOut, argOffsetOut);
@@ -761,20 +764,21 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
             return;
         }
 
-#if defined(OSX_ARM64_ABI)
-        switch (treeNode->GetStackByteSize())
+        if (compMacOsArm64Abi())
         {
-            case 1:
-                targetType = TYP_BYTE;
-                break;
-            case 2:
-                targetType = TYP_SHORT;
-                break;
-            default:
-                assert(treeNode->GetStackByteSize() >= 4);
-                break;
+            switch (treeNode->GetStackByteSize())
+            {
+                case 1:
+                    targetType = TYP_BYTE;
+                    break;
+                case 2:
+                    targetType = TYP_SHORT;
+                    break;
+                default:
+                    assert(treeNode->GetStackByteSize() >= 4);
+                    break;
+            }
         }
-#endif
 
         instruction storeIns  = ins_Store(targetType);
         emitAttr    storeAttr = emitTypeSize(targetType);
@@ -1136,7 +1140,6 @@ void CodeGen::genPutArgReg(GenTreeOp* tree)
     genProduceReg(tree);
 }
 
-#if FEATURE_ARG_SPLIT
 //---------------------------------------------------------------------
 // genPutArgSplit - generate code for a GT_PUTARG_SPLIT node
 //
@@ -1357,7 +1360,6 @@ void CodeGen::genPutArgSplit(GenTreePutArgSplit* treeNode)
     }
     genProduceReg(treeNode);
 }
-#endif // FEATURE_ARG_SPLIT
 
 #ifdef FEATURE_SIMD
 //----------------------------------------------------------------------------------
@@ -2300,9 +2302,9 @@ void CodeGen::genCall(GenTreeCall* call)
 #endif // TARGET_ARM
             }
         }
-#if FEATURE_ARG_SPLIT
         else if (curArgTabEntry->IsSplit())
         {
+            assert(compFeatureArgSplit());
             assert(curArgTabEntry->numRegs >= 1);
             genConsumeArgSplitStruct(argNode->AsPutArgSplit());
             for (unsigned idx = 0; idx < curArgTabEntry->numRegs; idx++)
@@ -2313,7 +2315,6 @@ void CodeGen::genCall(GenTreeCall* call)
                          emitActualTypeSize(TYP_I_IMPL));
             }
         }
-#endif // FEATURE_ARG_SPLIT
         else
         {
             regNumber argReg = curArgTabEntry->GetRegNum();
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index 2fbdee1f6be381..0156bf4adddfa2 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -2181,11 +2181,18 @@ void CodeGen::genGenerateMachineCode()
             printf("unknown architecture");
         }
 
-#if defined(TARGET_WINDOWS)
-        printf(" - Windows");
-#elif defined(TARGET_UNIX)
-        printf(" - Unix");
-#endif
+        if (TargetOS::IsWindows)
+        {
+            printf(" - Windows");
+        }
+        else if (TargetOS::IsMacOS)
+        {
+            printf(" - MacOS");
+        }
+        else if (TargetOS::IsUnix)
+        {
+            printf(" - Unix");
+        }
 
         printf("\n");
 
@@ -3393,11 +3400,9 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
         // Check if this is an HFA register arg and return the HFA type
         if (varDsc.lvIsHfaRegArg())
         {
-#if defined(TARGET_WINDOWS)
             // Cannot have hfa types on windows arm targets
             // in vararg methods.
-            assert(!compiler->info.compIsVarArgs);
-#endif // defined(TARGET_WINDOWS)
+            assert(!TargetOS::IsWindows || !compiler->info.compIsVarArgs);
             return varDsc.GetHfaType();
         }
         return compiler->mangleVarArgsType(varDsc.lvType);
@@ -3464,12 +3469,12 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere
         // Change regType to the HFA type when we have a HFA argument
         if (varDsc->lvIsHfaRegArg())
         {
-#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64)
-            if (compiler->info.compIsVarArgs)
+#if defined(TARGET_ARM64)
+            if (TargetOS::IsWindows && compiler->info.compIsVarArgs)
             {
                 assert(!"Illegal incoming HFA arg encountered in Vararg method.");
             }
-#endif // defined(TARGET_WINDOWS) && defined(TARGET_ARM64)
+#endif // defined(TARGET_ARM64)
             regType = varDsc->GetHfaType();
         }
diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp
index 6e1013b99a6e54..7b9be941ab4e8a 100644
--- a/src/coreclr/jit/codegenlinear.cpp
+++ b/src/coreclr/jit/codegenlinear.cpp
@@ -1850,16 +1850,16 @@ void CodeGen::genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArg
         // Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
         // argument area.
 
-#if defined(FEATURE_SIMD) && defined(OSX_ARM64_ABI)
+#if defined(FEATURE_SIMD) && defined(TARGET_ARM64)
         // storing of TYP_SIMD12 (i.e. Vector3) argument.
-        if (type == TYP_SIMD12)
+        if (compMacOsArm64Abi() && (type == TYP_SIMD12))
         {
             // Need an additional integer register to extract upper 4 bytes from data.
             regNumber tmpReg = nextArgNode->GetSingleTempReg();
             GetEmitter()->emitStoreSIMD12ToLclOffset(outArgVarNum, thisFieldOffset, reg, tmpReg);
         }
         else
-#endif // FEATURE_SIMD && OSX_ARM64_ABI
+#endif // FEATURE_SIMD
         {
             emitAttr attr = emitTypeSize(type);
             GetEmitter()->emitIns_S_R(ins_Store(type), attr, reg, outArgVarNum, thisFieldOffset);
@@ -2136,6 +2136,7 @@ void CodeGen::genProduceReg(GenTree* tree)
 #if FEATURE_ARG_SPLIT
         else if (tree->OperIsPutArgSplit())
         {
+            assert(compFeatureArgSplit());
            GenTreePutArgSplit* argSplit = tree->AsPutArgSplit();
             unsigned            regCount = argSplit->gtNumRegs;
 
@@ -2151,7 +2152,7 @@ void CodeGen::genProduceReg(GenTree* tree)
             }
         }
 #ifdef TARGET_ARM
-        else if (tree->OperIsMultiRegOp())
+        else if (compFeatureArgSplit() && tree->OperIsMultiRegOp())
         {
             GenTreeMultiRegOp* multiReg = tree->AsMultiRegOp();
             unsigned           regCount = multiReg->GetRegCount();
diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp
index 54c41b3d5ba13a..6fcbf0da75c48a 100644
--- a/src/coreclr/jit/codegenxarch.cpp
+++ b/src/coreclr/jit/codegenxarch.cpp
@@ -1978,30 +1978,33 @@ void CodeGen::genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode)
         inst_RV_RV_IV(INS_shufpd, EA_16BYTE, targetReg, targetReg, 0x01);
     }
     genProduceReg(lclNode);
-#elif defined(TARGET_X86) && defined(TARGET_WINDOWS)
-    assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(0)));
-    assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(1)));
-    assert(lclNode->TypeIs(TYP_SIMD8));
+#elif defined(TARGET_X86)
+    if (TargetOS::IsWindows)
+    {
+        assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(0)));
+        assert(varTypeIsIntegral(retTypeDesc->GetReturnRegType(1)));
+        assert(lclNode->TypeIs(TYP_SIMD8));
 
-    // This is a case where a SIMD8 struct returned as [EAX, EDX]
-    // and needs to be assembled into a single xmm register,
-    // note we can't check reg0=EAX, reg1=EDX because they could be already moved.
+        // This is a case where a SIMD8 struct is returned as [EAX, EDX]
+        // and needs to be assembled into a single xmm register,
+        // note we can't check reg0=EAX, reg1=EDX because they could be already moved.
 
-    inst_Mov(TYP_FLOAT, targetReg, reg0, /* canSkip */ false);
-    const emitAttr size = emitTypeSize(TYP_SIMD8);
-    if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
-    {
-        GetEmitter()->emitIns_SIMD_R_R_R_I(INS_pinsrd, size, targetReg, targetReg, reg1, 1);
-    }
-    else
-    {
-        regNumber tempXmm = lclNode->GetSingleTempReg();
-        assert(tempXmm != targetReg);
-        inst_Mov(TYP_FLOAT, tempXmm, reg1, /* canSkip */ false);
-        GetEmitter()->emitIns_SIMD_R_R_R(INS_punpckldq, size, targetReg, targetReg, tempXmm);
+        inst_Mov(TYP_FLOAT, targetReg, reg0, /* canSkip */ false);
+        const emitAttr size = emitTypeSize(TYP_SIMD8);
+        if (compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41))
+        {
+            GetEmitter()->emitIns_SIMD_R_R_R_I(INS_pinsrd, size, targetReg, targetReg, reg1, 1);
+        }
+        else
+        {
+            regNumber tempXmm = lclNode->GetSingleTempReg();
+            assert(tempXmm != targetReg);
+            inst_Mov(TYP_FLOAT, tempXmm, reg1, /* canSkip */ false);
+            GetEmitter()->emitIns_SIMD_R_R_R(INS_punpckldq, size, targetReg, targetReg, tempXmm);
+        }
     }
-#elif defined(TARGET_WINDOWS) && defined(TARGET_AMD64)
-    assert(!"Multireg store to SIMD reg not supported on Windows x64");
+#elif defined(TARGET_AMD64)
+    assert(!TargetOS::IsWindows || !"Multireg store to SIMD reg not supported on Windows x64");
 #else
 #error Unsupported or unset target architecture
 #endif
@@ -5221,18 +5224,16 @@ void CodeGen::genCall(GenTreeCall* call)
                              emitActualTypeSize(TYP_I_IMPL));
             }
 
-#if FEATURE_VARARG
             // In the case of a varargs call,
             // the ABI dictates that if we have floating point args,
             // we must pass the enregistered arguments in both the
             // integer and floating point registers so, let's do that.
-            if (call->IsVarargs() && varTypeIsFloating(argNode))
+            if (compFeatureVarArg() && call->IsVarargs() && varTypeIsFloating(argNode))
             {
                 regNumber srcReg    = argNode->GetRegNum();
                 regNumber targetReg = compiler->getCallArgIntRegister(argNode->GetRegNum());
                 inst_Mov(TYP_LONG, targetReg, srcReg, /* canSkip */ false, emitActualTypeSize(TYP_I_IMPL));
             }
-#endif // FEATURE_VARARG
         }
 
 #if defined(TARGET_X86) || defined(UNIX_AMD64_ABI)
@@ -5978,12 +5979,12 @@ void CodeGen::genJmpMethod(GenTree* jmp)
             }
         }
 
-#if FEATURE_VARARG && defined(TARGET_AMD64)
+#if defined(TARGET_AMD64)
         // In case of a jmp call to a vararg method also pass the float/double arg in the corresponding int arg
         // register. This is due to the AMD64 ABI which requires floating point values passed to varargs functions to
         // be passed in both integer and floating point registers. It doesn't apply to x86, which passes floating point
         // values on the stack.
-        if (compiler->info.compIsVarArgs)
+        if (compFeatureVarArg() && compiler->info.compIsVarArgs)
         {
             regNumber intArgReg;
             var_types loadType = varDsc->GetRegisterType();
@@ -6007,10 +6008,10 @@ void CodeGen::genJmpMethod(GenTree* jmp)
                 firstArgVarNum = varNum;
             }
         }
-#endif // FEATURE_VARARG
+#endif // TARGET_AMD64
     }
 
-#if FEATURE_VARARG && defined(TARGET_AMD64)
+#if defined(TARGET_AMD64)
     // Jmp call to a vararg method - if the method has fewer than 4 fixed arguments,
     // load the remaining arg registers (both int and float) from the corresponding
    // shadow stack slots. This is for the reason that we don't know the number and type
@@ -6023,7 +6024,7 @@ void CodeGen::genJmpMethod(GenTree* jmp)
     // The caller could have passed gc-ref/byref type var args.  Since these are var args
     // the callee no way of knowing their gc-ness.  Therefore, mark the region that loads
     // remaining arg registers from shadow stack slots as non-gc interruptible.
-    if (fixedIntArgMask != RBM_NONE)
+    if (compFeatureVarArg() && fixedIntArgMask != RBM_NONE)
     {
         assert(compiler->info.compIsVarArgs);
         assert(firstArgVarNum != BAD_VAR_NUM);
@@ -6052,7 +6053,7 @@ void CodeGen::genJmpMethod(GenTree* jmp)
             GetEmitter()->emitEnableGC();
         }
     }
-#endif // FEATURE_VARARG
+#endif // TARGET_AMD64
 }
 
 // produce code for a GT_LEA subnode
@@ -8854,13 +8855,11 @@ void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
 
             GetEmitter()->emitIns_R_S(load_ins, emitTypeSize(loadType), argReg, varNum, 0);
 
-#if FEATURE_VARARG
-            if (compiler->info.compIsVarArgs && varTypeIsFloating(loadType))
+            if (compFeatureVarArg() && compiler->info.compIsVarArgs && varTypeIsFloating(loadType))
             {
                 regNumber intArgReg = compiler->getCallArgIntRegister(argReg);
                 inst_Mov(TYP_LONG, intArgReg, argReg, /* canSkip */ false, emitActualTypeSize(loadType));
             }
-#endif // FEATURE_VARARG
         }
 
         // If initReg is one of RBM_CALLEE_TRASH, then it needs to be zero'ed before using.
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index 1141feee1e20da..019efffbefbdd7 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -615,11 +615,9 @@ var_types Compiler::getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS
     // Start by determining if we have an HFA/HVA with a single element.
     if (GlobalJitOptions::compFeatureHfa)
     {
-#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64)
         // Arm64 Windows VarArg methods arguments will not classify HFA types, they will need to be treated
         // as if they are not HFA types.
-        if (!isVarArg)
-#endif // defined(TARGET_WINDOWS) && defined(TARGET_ARM64)
+        if (!(TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg))
         {
             switch (structSize)
             {
@@ -810,13 +808,11 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
         // Arm64 Windows VarArg methods arguments will not classify HFA/HVA types, they will need to be treated
         // as if they are not HFA/HVA types.
         var_types hfaType;
-#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64)
-        if (isVarArg)
+        if (TargetArchitecture::IsArm64 && TargetOS::IsWindows && isVarArg)
         {
             hfaType = TYP_UNDEF;
         }
         else
-#endif // defined(TARGET_WINDOWS) && defined(TARGET_ARM64)
         {
             hfaType = GetHfaType(clsHnd);
         }
@@ -1028,14 +1024,14 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
         howToReturnStruct = SPK_ByReference;
         useType           = TYP_UNKNOWN;
     }
-#elif defined(TARGET_WINDOWS) && !defined(TARGET_ARM)
-    if (callConvIsInstanceMethodCallConv(callConv) && !isNativePrimitiveStructType(clsHnd))
+#endif
+    if (TargetOS::IsWindows && !TargetArchitecture::IsArm32 && callConvIsInstanceMethodCallConv(callConv) &&
+        !isNativePrimitiveStructType(clsHnd))
     {
         canReturnInRegister = false;
         howToReturnStruct   = SPK_ByReference;
         useType             = TYP_UNKNOWN;
     }
-#endif
 
     // Check for cases where a small struct is returned in a register
     // via a primitive type.
@@ -3190,7 +3186,7 @@ void Compiler::compInitOptions(JitFlags* jitFlags)
         if (verbose)
         {
             printf("****** START compiling %s (MethodHash=%08x)\n", info.compFullName, info.compMethodHash());
-            printf("Generating code for %s %s\n", Target::g_tgtPlatformName, Target::g_tgtCPUName);
+            printf("Generating code for %s %s\n", Target::g_tgtPlatformName(), Target::g_tgtCPUName);
             printf("");         // in our logic this causes a flush
         }
 
@@ -5190,13 +5186,14 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl
     m_pLowering = new (this, CMK_LSRA) Lowering(this, m_pLinearScan); // PHASE_LOWERING
     m_pLowering->Run();
 
-#if !defined(OSX_ARM64_ABI)
-    // Set stack levels; this information is necessary for x86
-    // but on other platforms it is used only in asserts.
-    // TODO: do not run it in release on other platforms, see https://github.com/dotnet/runtime/issues/42673.
-    StackLevelSetter stackLevelSetter(this);
-    stackLevelSetter.Run();
-#endif // !OSX_ARM64_ABI
+    if (!compMacOsArm64Abi())
+    {
+        // Set stack levels; this information is necessary for x86
+        // but on other platforms it is used only in asserts.
+        // TODO: do not run it in release on other platforms, see https://github.com/dotnet/runtime/issues/42673.
+        StackLevelSetter stackLevelSetter(this);
+        stackLevelSetter.Run();
+    }
 
     // We can not add any new tracked variables after this point.
     lvaTrackedFixed = true;
@@ -5540,12 +5537,33 @@ int Compiler::compCompile(CORINFO_MODULE_HANDLE classPtr,
 
     // Match OS for compMatchedVM
     CORINFO_EE_INFO* eeInfo = eeGetEEInfo();
-#ifdef TARGET_UNIX
-    info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_UNIX);
-#else
-    info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_WINNT);
+
+#ifdef TARGET_OS_RUNTIMEDETERMINED
+    noway_assert(TargetOS::OSSettingConfigured);
 #endif
+    if (TargetOS::IsMacOS)
+    {
+        info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_MACOS);
+    }
+    else if (TargetOS::IsUnix)
+    {
+        if (TargetArchitecture::IsX64)
+        {
+            // MacOS x64 uses the Unix jit variant in crossgen2, not a special jit
+            info.compMatchedVM =
+                info.compMatchedVM && ((eeInfo->osType == CORINFO_UNIX) || (eeInfo->osType == CORINFO_MACOS));
+        }
+        else
+        {
+            info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_UNIX);
+        }
+    }
+    else if (TargetOS::IsWindows)
+    {
+        info.compMatchedVM = info.compMatchedVM && (eeInfo->osType == CORINFO_WINNT);
+    }
+
     // If we are not compiling for a matched VM, then we are getting JIT flags that don't match our target
     // architecture. The two main examples here are an ARM targeting altjit hosted on x86 and an ARM64
     // targeting altjit hosted on x64. (Though with cross-bitness work, the host doesn't necessarily need
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 99e8629a15ddde..c01a086d813786 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -1602,12 +1602,10 @@ struct FuncInfoDsc
 
 #elif defined(TARGET_X86)
 
-#if defined(TARGET_UNIX)
     emitLocation* startLoc;
     emitLocation* endLoc;
     emitLocation* coldStartLoc; // locations for the cold section, if there is one.
     emitLocation* coldEndLoc;
-#endif // TARGET_UNIX
 
 #elif defined(TARGET_ARMARCH)
 
@@ -1619,18 +1617,16 @@ struct FuncInfoDsc
     //       Note 2: we currently don't support hot/cold splitting in functions
     //       with EH, so uwiCold will be NULL for all funclets.
 
-#if defined(TARGET_UNIX)
     emitLocation* startLoc;
     emitLocation* endLoc;
     emitLocation* coldStartLoc; // locations for the cold section, if there is one.
     emitLocation* coldEndLoc;
-#endif // TARGET_UNIX
 
 #endif // TARGET_ARMARCH
 
-#if defined(TARGET_UNIX)
+#if defined(FEATURE_CFI_SUPPORT)
     jitstd::vector* cfiCodes;
-#endif // TARGET_UNIX
+#endif // FEATURE_CFI_SUPPORT
 
     // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else
     // that isn't shared between the main function body and funclets.
@@ -1728,7 +1724,7 @@ struct fgArgTabEntry
     bool isStruct : 1;    // True if this is a struct arg
     bool _isVararg : 1;   // True if the argument is in a vararg context.
     bool passedByRef : 1; // True iff the argument is passed by reference.
-#ifdef FEATURE_ARG_SPLIT
+#if FEATURE_ARG_SPLIT
     bool _isSplit : 1; // True when this argument is split between the registers and OutArg area
 #endif // FEATURE_ARG_SPLIT
 #ifdef FEATURE_HFA_FIELDS_PRESENT
@@ -1823,32 +1819,29 @@ struct fgArgTabEntry
 
     bool IsSplit() const
     {
-#ifdef FEATURE_ARG_SPLIT
-        return _isSplit;
+#if FEATURE_ARG_SPLIT
+        return compFeatureArgSplit() && _isSplit;
 #else // FEATURE_ARG_SPLIT
         return false;
 #endif
     }
     void SetSplit(bool value)
     {
-#ifdef FEATURE_ARG_SPLIT
+#if FEATURE_ARG_SPLIT
         _isSplit = value;
 #endif
     }
 
     bool IsVararg() const
     {
-#ifdef FEATURE_VARARG
-        return _isVararg;
-#else
-        return false;
-#endif
+        return compFeatureVarArg() && _isVararg;
     }
     void SetIsVararg(bool value)
     {
-#ifdef FEATURE_VARARG
-        _isVararg = value;
-#endif // FEATURE_VARARG
+        if (compFeatureVarArg())
+        {
+            _isVararg = value;
+        }
     }
 
     bool IsHfaArg() const
@@ -2114,20 +2107,23 @@ struct fgArgTabEntry
 
     void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa)
     {
-#ifdef OSX_ARM64_ABI
         unsigned roundedByteSize;
-        // Only struct types need extension or rounding to pointer size, but HFA does not.
-        if (isStruct && !isFloatHfa)
+        if (compMacOsArm64Abi())
         {
-            roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
+            // Only struct types need extension or rounding to pointer size, but HFA does not.
+            if (isStruct && !isFloatHfa)
+            {
+                roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
+            }
+            else
+            {
+                roundedByteSize = byteSize;
+            }
         }
         else
         {
-            roundedByteSize = byteSize;
+            roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
         }
-#else // OSX_ARM64_ABI
-        unsigned roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE);
-#endif // OSX_ARM64_ABI
 
 #if !defined(TARGET_ARM)
         // Arm32 could have a struct with 8 byte alignment
@@ -2137,7 +2133,7 @@ struct fgArgTabEntry
 #endif // TARGET_ARM
 
 #if defined(DEBUG_ARG_SLOTS)
-        if (!isStruct)
+        if (!compMacOsArm64Abi() && !isStruct)
         {
             assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE);
         }
@@ -4045,8 +4041,8 @@ class Compiler
         assert(varDsc->lvType == TYP_SIMD12);
         assert(varDsc->lvExactSize == 12);
 
-#if defined(TARGET_64BIT) && !defined(OSX_ARM64_ABI)
-        assert(varDsc->lvSize() == 16);
+#if defined(TARGET_64BIT)
+        assert(compMacOsArm64Abi() || varDsc->lvSize() == 16);
 #endif // defined(TARGET_64BIT)
 
         // We make local variable SIMD12 types 16 bytes instead of just 12.
@@ -4739,10 +4735,8 @@ class Compiler
     bool impIsValueType(typeInfo* pTypeInfo);
     var_types mangleVarArgsType(var_types type);
 
-#if FEATURE_VARARG
     regNumber getCallArgIntRegister(regNumber floatReg);
     regNumber getCallArgFloatRegister(regNumber intReg);
-#endif // FEATURE_VARARG
 
 #if defined(DEBUG)
     static unsigned jitTotalMethodCompiled;
@@ -8051,8 +8045,8 @@ class Compiler
 
     bool generateCFIUnwindCodes()
     {
-#if defined(TARGET_UNIX)
-        return IsTargetAbi(CORINFO_CORERT_ABI);
+#if defined(FEATURE_CFI_SUPPORT)
+        return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI);
 #else
         return false;
 #endif
@@ -8494,7 +8488,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 
 #endif // TARGET_ARM
 
-#if defined(TARGET_UNIX)
+#if defined(FEATURE_CFI_SUPPORT)
     short mapRegNumToDwarfReg(regNumber reg);
     void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0);
     void unwindPushPopCFI(regNumber reg);
@@ -8511,7 +8505,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
                           const CFI_CODE* const pCfiCode);
 #endif
 
-#endif // TARGET_UNIX
+#endif // FEATURE_CFI_SUPPORT
 
 #if !defined(__GNUC__)
 #pragma endregion // Note: region is NOT under !defined(__GNUC__)
@@ -9993,13 +9987,16 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
     //     3. Windows ARM64 native instance calling convention requires the address of RetBuff
     //        to be returned in x0.
     CLANG_FORMAT_COMMENT_ANCHOR;
-#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64)
-    auto callConv = info.compCallConv;
-    if (callConvIsInstanceMethodCallConv(callConv))
+#if defined(TARGET_ARM64)
+    if (TargetOS::IsWindows)
     {
-        return (info.compRetBuffArg != BAD_VAR_NUM);
+        auto callConv = info.compCallConv;
+        if (callConvIsInstanceMethodCallConv(callConv))
+        {
+            return (info.compRetBuffArg != BAD_VAR_NUM);
+        }
     }
-#endif // TARGET_WINDOWS && TARGET_ARM64
+#endif // TARGET_ARM64
 
     // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax.
     CLANG_FORMAT_COMMENT_ANCHOR;
 #if defined(TARGET_X86)
@@ -11874,12 +11871,12 @@ const instruction INS_SQRT = INS_vsqrt;
 
 #ifdef TARGET_ARM64
 
-const instruction INS_MULADD = INS_madd;
-#if defined(TARGET_UNIX)
-const instruction INS_BREAKPOINT = INS_brk;
-#else
-const instruction INS_BREAKPOINT = INS_bkpt;
-#endif
+const instruction        INS_MULADD = INS_madd;
+inline const instruction INS_BREAKPOINT_osHelper()
+{
+    return TargetOS::IsUnix ? INS_brk : INS_bkpt;
+}
+#define INS_BREAKPOINT INS_BREAKPOINT_osHelper()
 
 const instruction INS_ABS  = INS_fabs;
 const instruction INS_SQRT = INS_fsqrt;
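Because INS_BREAKPOINT changes from a constant into a helper call hidden behind a macro, existing arm64 call sites compile unchanged while the brk/bkpt choice is deferred until the target OS is known. Conceptually (illustrative snippet, not part of the patch):

    // Illustrative: the old spelling still works at every call site.
    instruction ins = INS_BREAKPOINT; // expands to INS_BREAKPOINT_osHelper(),
                                      // yielding INS_brk on Unix targets and INS_bkpt otherwise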
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 50da76b5111173..bea5af5d6baa6c 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -2357,11 +2357,7 @@ inline unsigned Compiler::compMapILargNum(unsigned ILargNum)
 inline var_types Compiler::mangleVarArgsType(var_types type)
 {
 #if defined(TARGET_ARMARCH)
-    if (opts.compUseSoftFP
-#if defined(TARGET_WINDOWS)
-        || info.compIsVarArgs
-#endif // defined(TARGET_WINDOWS)
-        )
+    if (opts.compUseSoftFP || (TargetOS::IsWindows && info.compIsVarArgs))
     {
         switch (type)
         {
@@ -2378,9 +2374,9 @@ inline var_types Compiler::mangleVarArgsType(var_types type)
 }
 
 // For CORECLR there is no vararg on System V systems.
-#if FEATURE_VARARG
 inline regNumber Compiler::getCallArgIntRegister(regNumber floatReg)
 {
+    assert(compFeatureVarArg());
 #ifdef TARGET_AMD64
     switch (floatReg)
     {
@@ -2404,6 +2400,7 @@ inline regNumber Compiler::getCallArgIntRegister(regNumber floatReg)
 
 inline regNumber Compiler::getCallArgFloatRegister(regNumber intReg)
 {
+    assert(compFeatureVarArg());
 #ifdef TARGET_AMD64
     switch (intReg)
     {
@@ -2424,7 +2421,6 @@ inline regNumber Compiler::getCallArgFloatRegister(regNumber intReg)
     return REG_NA;
 #endif // !TARGET_AMD64
 }
-#endif // FEATURE_VARARG
 
 /*
 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
diff --git a/src/coreclr/jit/ee_il_dll.cpp b/src/coreclr/jit/ee_il_dll.cpp
index 8904992524f8b5..097d20debbbb95 100644
--- a/src/coreclr/jit/ee_il_dll.cpp
+++ b/src/coreclr/jit/ee_il_dll.cpp
@@ -300,6 +300,27 @@ void CILJit::getVersionIdentifier(GUID* versionIdentifier)
     memcpy(versionIdentifier, &JITEEVersionIdentifier, sizeof(GUID));
 }
 
+#ifdef TARGET_OS_RUNTIMEDETERMINED
+bool TargetOS::OSSettingConfigured = false;
+bool TargetOS::IsWindows           = false;
+bool TargetOS::IsUnix              = false;
+bool TargetOS::IsMacOS             = false;
+#endif
+
+/*****************************************************************************
+ * Set the OS that this JIT should be generating code for. The contract with the VM
+ * is that this must be called before compileMethod is called.
+ */
+void CILJit::setTargetOS(CORINFO_OS os)
+{
+#ifdef TARGET_OS_RUNTIMEDETERMINED
+    TargetOS::IsMacOS             = os == CORINFO_MACOS;
+    TargetOS::IsUnix              = (os == CORINFO_UNIX) || (os == CORINFO_MACOS);
+    TargetOS::IsWindows           = os == CORINFO_WINNT;
+    TargetOS::OSSettingConfigured = true;
+#endif
+}
+
 /*****************************************************************************
  * Determine the maximum length of SIMD vector supported by this JIT.
 */
@@ -418,15 +439,13 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO*
 
         if (structSize > (2 * TARGET_POINTER_SIZE))
         {
-#ifndef TARGET_UNIX
-            if (info.compIsVarArgs)
+            if (TargetOS::IsWindows && info.compIsVarArgs)
             {
                 // Arm64 Varargs ABI requires passing in general purpose
                 // registers. Force the decision of whether this is an HFA
                 // to false to correctly pass as if it was not an HFA.
isHfa = false; } -#endif // TARGET_UNIX if (!isHfa) { // This struct is passed by reference using a single 'slot' @@ -471,22 +490,25 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* // static unsigned Compiler::eeGetArgAlignment(var_types type, bool isFloatHfa) { -#if defined(OSX_ARM64_ABI) - if (isFloatHfa) + if (compMacOsArm64Abi()) { - assert(varTypeIsStruct(type)); - return sizeof(float); + if (isFloatHfa) + { + assert(varTypeIsStruct(type)); + return sizeof(float); + } + if (varTypeIsStruct(type)) + { + return TARGET_POINTER_SIZE; + } + const unsigned argSize = genTypeSize(type); + assert((0 < argSize) && (argSize <= TARGET_POINTER_SIZE)); + return argSize; } - if (varTypeIsStruct(type)) + else { return TARGET_POINTER_SIZE; } - const unsigned argSize = genTypeSize(type); - assert((0 < argSize) && (argSize <= TARGET_POINTER_SIZE)); - return argSize; -#else - return TARGET_POINTER_SIZE; -#endif } /*****************************************************************************/ diff --git a/src/coreclr/jit/ee_il_dll.hpp b/src/coreclr/jit/ee_il_dll.hpp index e9dd74e96db833..bc6503bc4f8bbb 100644 --- a/src/coreclr/jit/ee_il_dll.hpp +++ b/src/coreclr/jit/ee_il_dll.hpp @@ -18,6 +18,8 @@ class CILJit : public ICorJitCompiler ); unsigned getMaxIntrinsicSIMDVectorLength(CORJIT_FLAGS cpuCompileFlags); + + void setTargetOS(CORINFO_OS os); }; /***************************************************************************** diff --git a/src/coreclr/jit/error.h b/src/coreclr/jit/error.h index a63643c0ee5b8b..126a8665a34e80 100644 --- a/src/coreclr/jit/error.h +++ b/src/coreclr/jit/error.h @@ -213,7 +213,7 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line); // clang-format on -#if defined(HOST_X86) && !defined(TARGET_UNIX) +#if defined(HOST_X86) && !defined(HOST_UNIX) // While debugging in an Debugger, the "int 3" will cause the program to break // Outside, the exception handler will just filter out the "int 3". 
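The hunks above replace compile-time OS checks (TARGET_UNIX/TARGET_WINDOWS, FEATURE_VARARG, OSX_ARM64_ABI) with runtime queries on TargetOS plus small predicates such as compFeatureVarArg() and compMacOsArm64Abi(). A minimal standalone sketch of that pattern follows, mirroring the TargetOS statics and CILJit::setTargetOS shown above; the real declarations live in targetosarch.h (which this patch only includes), the single-OS fallback under #else is an assumption, and the enum re-declaration and printf driver exist only to make the sketch compile on its own.

#include <cstdio>

#define TARGET_OS_RUNTIMEDETERMINED 1 // defined for the "universal" cross-target JIT build

class TargetOS
{
public:
#ifdef TARGET_OS_RUNTIMEDETERMINED
    // Mutable flags, filled in once by the host before any compilation (see setTargetOS).
    static bool OSSettingConfigured;
    static bool IsWindows;
    static bool IsUnix;
    static bool IsMacOS;
#else
    // Assumed single-OS fallback: compile-time constants, so the former '#ifdef' code
    // still folds away in non-universal builds.
    static const bool OSSettingConfigured = true;
    static const bool IsWindows           = true;
    static const bool IsUnix              = false;
    static const bool IsMacOS             = false;
#endif
};

#ifdef TARGET_OS_RUNTIMEDETERMINED
bool TargetOS::OSSettingConfigured = false;
bool TargetOS::IsWindows           = false;
bool TargetOS::IsUnix              = false;
bool TargetOS::IsMacOS             = false;
#endif

enum CORINFO_OS { CORINFO_WINNT, CORINFO_UNIX, CORINFO_MACOS };

// Mirrors CILJit::setTargetOS: the host must call this before the first compileMethod.
void setTargetOS(CORINFO_OS os)
{
    TargetOS::IsMacOS             = (os == CORINFO_MACOS);
    TargetOS::IsUnix              = (os == CORINFO_UNIX) || (os == CORINFO_MACOS);
    TargetOS::IsWindows           = (os == CORINFO_WINNT);
    TargetOS::OSSettingConfigured = true;
}

int main()
{
    setTargetOS(CORINFO_MACOS);
    // A former '#if defined(TARGET_UNIX)' check becomes an ordinary branch,
    // e.g. the INS_BREAKPOINT_osHelper() choice between brk and bkpt:
    std::printf("breakpoint instruction: %s\n", TargetOS::IsUnix ? "brk" : "bkpt");
    return 0;
}

The CORINFO_OS value itself is supplied by the host, which is why later hunks thread it through the VM (getClrVmOs) and crossgen2 (CorInfoImpl.TargetToOs) and add CORINFO_MACOS to the managed enum.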
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 48a1e04d189b48..7c31b76142ed1b 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -770,7 +770,7 @@ regMaskTP GenTree::gtGetRegMask() const } } #if FEATURE_ARG_SPLIT - else if (OperIsPutArgSplit()) + else if (compFeatureArgSplit() && OperIsPutArgSplit()) { const GenTreePutArgSplit* splitArg = AsPutArgSplit(); const unsigned regCount = splitArg->gtNumRegs; @@ -11810,8 +11810,15 @@ void Compiler::gtDispTree(GenTree* tree, #if !defined(DEBUG_ARG_SLOTS) printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset()); #else - printf(" (%d slots), (%d stackByteSize), (%d slot), (%d byteOffset)", putArg->gtNumSlots, - putArg->GetStackByteSize(), putArg->gtSlotNum, putArg->getArgOffset()); + if (compMacOsArm64Abi()) + { + printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset()); + } + else + { + printf(" (%d slots), (%d stackByteSize), (%d slot), (%d byteOffset)", putArg->gtNumSlots, + putArg->GetStackByteSize(), putArg->gtSlotNum, putArg->getArgOffset()); + } #endif if (putArg->gtPutArgStkKind != GenTreePutArgStk::Kind::Invalid) { @@ -11841,8 +11848,15 @@ void Compiler::gtDispTree(GenTree* tree, #if !defined(DEBUG_ARG_SLOTS) printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs); #else - printf(" (%d slots), (%d stackByteSize), (%d numRegs)", putArg->gtNumSlots, putArg->GetStackByteSize(), - putArg->gtNumRegs); + if (compMacOsArm64Abi()) + { + printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs); + } + else + { + printf(" (%d slots), (%d stackByteSize), (%d numRegs)", putArg->gtNumSlots, putArg->GetStackByteSize(), + putArg->gtNumRegs); + } #endif } #endif // FEATURE_ARG_SPLIT diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index b54475bd2ce88b..619fd9fa551580 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -1287,6 +1287,7 @@ struct GenTree bool OperIsPutArgSplit() const { #if FEATURE_ARG_SPLIT + assert((gtOper != GT_PUTARG_SPLIT) || compFeatureArgSplit()); return gtOper == GT_PUTARG_SPLIT; #else // !FEATURE_ARG_SPLIT return false; @@ -4496,10 +4497,14 @@ struct GenTreeCall final : public GenTree bool HasFixedRetBufArg() const { -#if defined(TARGET_WINDOWS) && !defined(TARGET_ARM) - return hasFixedRetBuffReg() && HasRetBufArg() && !callConvIsInstanceMethodCallConv(GetUnmanagedCallConv()); + if (!(hasFixedRetBuffReg() && HasRetBufArg())) + { + return false; + } +#if !defined(TARGET_ARM) + return !TargetOS::IsWindows || !callConvIsInstanceMethodCallConv(GetUnmanagedCallConv()); #else - return hasFixedRetBuffReg() && HasRetBufArg(); + return true; #endif } diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index edc7075afb58b4..512e0eeb5bfc53 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -1300,9 +1300,9 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, // Case of call returning a struct via hidden retbuf arg CLANG_FORMAT_COMMENT_ANCHOR; -#if (defined(TARGET_WINDOWS) && !defined(TARGET_ARM)) || defined(UNIX_X86_ABI) - // Unmanaged instance methods on Windows need the retbuf arg after the first (this) parameter - if (srcCall->IsUnmanaged()) +#if !defined(TARGET_ARM) + // Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter + if ((TargetOS::IsWindows || compUnixX86Abi()) && 
srcCall->IsUnmanaged()) { if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv())) { @@ -1366,7 +1366,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, } } else -#endif // (defined(TARGET_WINDOWS) && !defined(TARGET_ARM)) || defined(UNIX_X86_ABI) +#endif // !defined(TARGET_ARM) { // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); @@ -8798,14 +8798,12 @@ var_types Compiler::impImportCall(OPCODE opcode, CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass; actualMethodRetTypeSigClass = sig->retTypeSigClass; -#if !FEATURE_VARARG /* Check for varargs */ - if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || - (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) + if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || + (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)) { BADCODE("Varargs not supported."); } -#endif // !FEATURE_VARARG if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) @@ -17519,10 +17517,10 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } -#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64) +#if defined(TARGET_ARM64) // On ARM64, the native instance calling convention variant // requires the implicit ByRef to be explicitly returned. - else if (callConvIsInstanceMethodCallConv(info.compCallConv)) + else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } diff --git a/src/coreclr/jit/jit.h b/src/coreclr/jit/jit.h index da7cbed98c879c..e212226851c7c3 100644 --- a/src/coreclr/jit/jit.h +++ b/src/coreclr/jit/jit.h @@ -183,6 +183,7 @@ #include "utilcode.h" // this defines assert as _ASSERTE #include "host.h" // this redefines assert for the JIT to use assertAbort #include "utils.h" +#include "targetosarch.h" #ifdef DEBUG #define INDEBUG(x) x @@ -206,17 +207,17 @@ #define UNIX_AMD64_ABI_ONLY(x) #endif // defined(UNIX_AMD64_ABI) -#if defined(DEBUG) && !defined(OSX_ARM64_ABI) -// On all platforms except Arm64 OSX arguments on the stack are taking -// register size slots. On these platforms we could check that stack slots count -// matches our new byte size calculations. +#if defined(DEBUG) #define DEBUG_ARG_SLOTS #endif #if defined(DEBUG_ARG_SLOTS) #define DEBUG_ARG_SLOTS_ARG(x) , x #define DEBUG_ARG_SLOTS_ONLY(x) x -#define DEBUG_ARG_SLOTS_ASSERT(x) assert(x) +// On all platforms except Arm64 OSX arguments on the stack are taking +// register size slots. On these platforms we could check that stack slots count +// matches our new byte size calculations. +#define DEBUG_ARG_SLOTS_ASSERT(x) assert(compMacOsArm64Abi() || (x)) #else #define DEBUG_ARG_SLOTS_ARG(x) #define DEBUG_ARG_SLOTS_ONLY(x) @@ -248,11 +249,11 @@ // Arm64 Windows supports FEATURE_ARG_SPLIT, note this is different from // the official Arm64 ABI. 
// Case: splitting 16 byte struct between x7 and stack -#if (defined(TARGET_ARM) || (defined(TARGET_WINDOWS) && defined(TARGET_ARM64))) +#if defined(TARGET_ARM) || defined(TARGET_ARM64) #define FEATURE_ARG_SPLIT 1 #else #define FEATURE_ARG_SPLIT 0 -#endif // (defined(TARGET_ARM) || (defined(TARGET_WINDOWS) && defined(TARGET_ARM64))) +#endif // To get rid of warning 4701 : local variable may be used without being initialized #define DUMMY_INIT(x) (x) diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 3ca855ec0651ef..142dfc653ea062 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -378,8 +378,8 @@ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo) unsigned numUserArgsToSkip = 0; unsigned numUserArgs = info.compMethodInfo->args.numArgs; -#if defined(TARGET_WINDOWS) && !defined(TARGET_ARM) - if (callConvIsInstanceMethodCallConv(info.compCallConv)) +#if !defined(TARGET_ARM) + if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { // If we are a native instance method, handle the first user arg // (the unmanaged this parameter) and then handle the hidden @@ -688,13 +688,9 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un bool isHfaArg = false; var_types hfaType = TYP_UNDEF; -#if defined(TARGET_ARM64) && defined(TARGET_UNIX) + // Methods that use VarArg or SoftFP cannot have HFA arguments except // Native varargs on arm64 unix use the regular calling convention. - if (!opts.compUseSoftFP) -#else - // Methods that use VarArg or SoftFP cannot have HFA arguments - if (!info.compIsVarArgs && !opts.compUseSoftFP) -#endif // defined(TARGET_ARM64) && defined(TARGET_UNIX) + if (((TargetOS::IsUnix && TargetArchitecture::IsArm64) || !info.compIsVarArgs) && !opts.compUseSoftFP) { // If the argType is a struct, then check if it is an HFA if (varTypeIsStruct(argType)) @@ -706,14 +702,15 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un } else if (info.compIsVarArgs) { -#ifdef TARGET_UNIX // Currently native varargs is not implemented on non windows targets. // // Note that some targets like Arm64 Unix should not need much work as // the ABI is the same. While other targets may only need small changes // such as amd64 Unix, which just expects RAX to pass numFPArguments. - NYI("InitUserArgs for Vararg callee is not yet implemented on non Windows targets."); -#endif + if (TargetOS::IsUnix) + { + NYI("InitUserArgs for Vararg callee is not yet implemented on non Windows targets."); + } } if (isHfaArg) @@ -731,23 +728,26 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un // it enregistered, as long as we can split the rest onto the stack. unsigned cSlotsToEnregister = cSlots; -#if defined(TARGET_ARM64) && FEATURE_ARG_SPLIT +#if defined(TARGET_ARM64) - // On arm64 Windows we will need to properly handle the case where a >8byte <=16byte - // struct is split between register r7 and virtual stack slot s[0] - // We will only do this for calls to vararg methods on Windows Arm64 - // - // !!This does not affect the normal arm64 calling convention or Unix Arm64!! 
- if (this->info.compIsVarArgs && argType == TYP_STRUCT) + if (compFeatureArgSplit()) { - if (varDscInfo->canEnreg(TYP_INT, 1) && // The beginning of the struct can go in a register - !varDscInfo->canEnreg(TYP_INT, cSlots)) // The end of the struct can't fit in a register + // On arm64 Windows we will need to properly handle the case where a >8byte <=16byte + // struct is split between register r7 and virtual stack slot s[0] + // We will only do this for calls to vararg methods on Windows Arm64 + // + // !!This does not affect the normal arm64 calling convention or Unix Arm64!! + if (this->info.compIsVarArgs && argType == TYP_STRUCT) { - cSlotsToEnregister = 1; // Force the split + if (varDscInfo->canEnreg(TYP_INT, 1) && // The beginning of the struct can go in a register + !varDscInfo->canEnreg(TYP_INT, cSlots)) // The end of the struct can't fit in a register + { + cSlotsToEnregister = 1; // Force the split + } } } -#endif // defined(TARGET_ARM64) && FEATURE_ARG_SPLIT +#endif // defined(TARGET_ARM64) #ifdef TARGET_ARM // On ARM we pass the first 4 words of integer arguments and non-HFA structs in registers. @@ -1087,9 +1087,10 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un #if FEATURE_FASTTAILCALL const unsigned argAlignment = eeGetArgAlignment(origArgType, (hfaType == TYP_FLOAT)); -#if defined(OSX_ARM64_ABI) - varDscInfo->stackArgSize = roundUp(varDscInfo->stackArgSize, argAlignment); -#endif // OSX_ARM64_ABI + if (compMacOsArm64Abi()) + { + varDscInfo->stackArgSize = roundUp(varDscInfo->stackArgSize, argAlignment); + } assert((argSize % argAlignment) == 0); assert((varDscInfo->stackArgSize % argAlignment) == 0); @@ -2889,14 +2890,14 @@ void Compiler::makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int lev void Compiler::lvaSetStructUsedAsVarArg(unsigned varNum) { - if (GlobalJitOptions::compFeatureHfa) + if (GlobalJitOptions::compFeatureHfa && TargetOS::IsWindows) { -#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64) +#if defined(TARGET_ARM64) LclVarDsc* varDsc = &lvaTable[varNum]; // For varargs methods incoming and outgoing arguments should not be treated // as HFA. varDsc->SetHfaType(TYP_UNDEF); -#endif // defined(TARGET_WINDOWS) && defined(TARGET_ARM64) +#endif // defined(TARGET_ARM64) } } @@ -5462,9 +5463,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() /* Update the argOffs to reflect arguments that are passed in registers */ noway_assert(codeGen->intRegState.rsCalleeRegArgCount <= MAX_REG_ARG); -#if !defined(OSX_ARM64_ABI) - noway_assert(compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES); -#endif + noway_assert(compMacOsArm64Abi() || compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES); if (info.compArgOrder == Target::ARG_ORDER_L2R) { @@ -5487,19 +5486,19 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() } unsigned userArgsToSkip = 0; -#if defined(TARGET_WINDOWS) && !defined(TARGET_ARM) +#if !defined(TARGET_ARM) // In the native instance method calling convention on Windows, // the this parameter comes before the hidden return buffer parameter. // So, we want to process the native "this" parameter before we process // the native return buffer parameter. 
- if (callConvIsInstanceMethodCallConv(info.compCallConv)) + if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { #ifdef TARGET_X86 if (!lvaTable[lclNum].lvIsRegArg) { argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); } -#else +#elif !defined(UNIX_AMD64_ABI) argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); #endif // TARGET_X86 lclNum++; @@ -5613,14 +5612,12 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() } lclNum += argLcls; -#else // !TARGET_ARM +#else // !TARGET_ARM for (unsigned i = 0; i < argSigLen; i++) { unsigned argumentSize = eeGetArgSize(argLst, &info.compMethodInfo->args); -#if !defined(OSX_ARM64_ABI) - assert(argumentSize % TARGET_POINTER_SIZE == 0); -#endif // !defined(OSX_ARM64_ABI) + assert(compMacOsArm64Abi() || argumentSize % TARGET_POINTER_SIZE == 0); argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, argumentSize, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); @@ -5791,10 +5788,9 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, varDsc->SetStackOffset(argOffs); argOffs += TARGET_POINTER_SIZE; #elif defined(TARGET_ARM64) -// Register arguments on ARM64 only take stack space when they have a frame home. -// Unless on windows and in a vararg method. -#if FEATURE_ARG_SPLIT - if (this->info.compIsVarArgs) + // Register arguments on ARM64 only take stack space when they have a frame home. + // Unless on windows and in a vararg method. + if (compFeatureArgSplit() && this->info.compIsVarArgs) { if (varDsc->lvType == TYP_STRUCT && varDsc->GetOtherArgReg() >= MAX_REG_ARG && varDsc->GetOtherArgReg() != REG_NA) @@ -5805,7 +5801,6 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, argOffs += TARGET_POINTER_SIZE; } } -#endif // FEATURE_ARG_SPLIT #elif defined(TARGET_ARM) // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg @@ -6002,9 +5997,10 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, #endif // TARGET_ARM const bool isFloatHfa = (varDsc->lvIsHfa() && (varDsc->GetHfaType() == TYP_FLOAT)); const unsigned argAlignment = eeGetArgAlignment(varDsc->lvType, isFloatHfa); -#if defined(OSX_ARM64_ABI) - argOffs = roundUp(argOffs, argAlignment); -#endif // OSX_ARM64_ABI + if (compMacOsArm64Abi()) + { + argOffs = roundUp(argOffs, argAlignment); + } assert((argSize % argAlignment) == 0); assert((argOffs % argAlignment) == 0); diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 2205d229778acd..92388eb0ea8864 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -593,17 +593,18 @@ GenTree* Lowering::LowerSwitch(GenTree* node) bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt; -#if defined(TARGET_UNIX) && defined(TARGET_ARM) - // Force using an inlined jumping instead switch table generation. - // Switch jump table is generated with incorrect values in CoreRT case, - // so any large switch will crash after loading to PC any such value. - // I think this is due to the fact that we use absolute addressing - // instead of relative. But in CoreRT is used as a rule relative - // addressing when we generate an executable. 
- // See also https://github.com/dotnet/runtime/issues/8683 - // Also https://github.com/dotnet/coreclr/pull/13197 - useJumpSequence = useJumpSequence || comp->IsTargetAbi(CORINFO_CORERT_ABI); -#endif // defined(TARGET_UNIX) && defined(TARGET_ARM) + if (TargetOS::IsUnix && TargetArchitecture::IsArm32) + { + // Force using an inlined jumping instead switch table generation. + // Switch jump table is generated with incorrect values in CoreRT case, + // so any large switch will crash after loading to PC any such value. + // I think this is due to the fact that we use absolute addressing + // instead of relative. But in CoreRT is used as a rule relative + // addressing when we generate an executable. + // See also https://github.com/dotnet/runtime/issues/8683 + // Also https://github.com/dotnet/coreclr/pull/13197 + useJumpSequence = useJumpSequence || comp->IsTargetAbi(CORINFO_CORERT_ABI); + } // If we originally had 2 unique successors, check to see whether there is a unique // non-default case, in which case we can eliminate the switch altogether. @@ -1045,7 +1046,7 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf #if FEATURE_ARG_SPLIT // Struct can be split into register(s) and stack on ARM - if (info->IsSplit()) + if (compFeatureArgSplit() && info->IsSplit()) { assert(arg->OperGet() == GT_OBJ || arg->OperGet() == GT_FIELD_LIST); // TODO: Need to check correctness for FastTailCall @@ -1166,11 +1167,18 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* inf #if defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK) if (type == TYP_SIMD12) { -#if !defined(TARGET_64BIT) || defined(OSX_ARM64_ABI) +#if !defined(TARGET_64BIT) assert(info->GetByteSize() == 12); -#else // TARGET_64BIT && !OSX_ARM64_ABI - assert(info->GetByteSize() == 16); -#endif // FEATURE_SIMD && FEATURE_PUT_STRUCT_ARG_STK +#else // TARGET_64BIT + if (compMacOsArm64Abi()) + { + assert(info->GetByteSize() == 12); + } + else + { + assert(info->GetByteSize() == 16); + } +#endif // TARGET_64BIT } else #endif // defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK) @@ -1934,9 +1942,7 @@ void Lowering::LowerFastTailCall(GenTreeCall* call) unsigned int overwrittenStart = put->getArgOffset(); unsigned int overwrittenEnd = overwrittenStart + put->GetStackByteSize(); -#if !(defined(TARGET_WINDOWS) && defined(TARGET_AMD64)) - int baseOff = -1; // Stack offset of first arg on stack -#endif + int baseOff = -1; // Stack offset of first arg on stack for (unsigned callerArgLclNum = 0; callerArgLclNum < comp->info.compArgsCount; callerArgLclNum++) { @@ -1947,27 +1953,34 @@ void Lowering::LowerFastTailCall(GenTreeCall* call) continue; } -#if defined(TARGET_WINDOWS) && defined(TARGET_AMD64) - // On Windows x64, the argument position determines the stack slot uniquely, and even the - // register args take up space in the stack frame (shadow space). - unsigned int argStart = callerArgLclNum * TARGET_POINTER_SIZE; - unsigned int argEnd = argStart + static_cast(callerArgDsc->lvArgStackSize()); -#else - assert(callerArgDsc->GetStackOffset() != BAD_STK_OFFS); - - if (baseOff == -1) + unsigned int argStart; + unsigned int argEnd; +#if defined(TARGET_AMD64) + if (TargetOS::IsWindows) { - baseOff = callerArgDsc->GetStackOffset(); + // On Windows x64, the argument position determines the stack slot uniquely, and even the + // register args take up space in the stack frame (shadow space). 
+ argStart = callerArgLclNum * TARGET_POINTER_SIZE; + argEnd = argStart + static_cast(callerArgDsc->lvArgStackSize()); } + else +#endif // TARGET_AMD64 + { + assert(callerArgDsc->GetStackOffset() != BAD_STK_OFFS); - // On all ABIs where we fast tail call the stack args should come in order. - assert(baseOff <= callerArgDsc->GetStackOffset()); + if (baseOff == -1) + { + baseOff = callerArgDsc->GetStackOffset(); + } - // Compute offset of this stack argument relative to the first stack arg. - // This will be its offset into the incoming arg space area. - unsigned int argStart = static_cast(callerArgDsc->GetStackOffset() - baseOff); - unsigned int argEnd = argStart + comp->lvaLclSize(callerArgLclNum); -#endif + // On all ABIs where we fast tail call the stack args should come in order. + assert(baseOff <= callerArgDsc->GetStackOffset()); + + // Compute offset of this stack argument relative to the first stack arg. + // This will be its offset into the incoming arg space area. + argStart = static_cast(callerArgDsc->GetStackOffset() - baseOff); + argEnd = argStart + comp->lvaLclSize(callerArgLclNum); + } // If ranges do not overlap then this PUTARG_STK will not mess up the arg. if ((overwrittenEnd <= argStart) || (overwrittenStart >= argEnd)) @@ -2542,8 +2555,8 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) op2->SetIconValue(0xff); op2->gtType = castOp->gtType; #else - castOp->gtType = castToType; - op2->gtType = castToType; + castOp->gtType = castToType; + op2->gtType = castToType; #endif // If we have any contained memory ops on castOp, they must now not be contained. if (castOp->OperIsLogical()) diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index 7a2c1f942aa02b..aa22146de03242 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -6919,7 +6919,7 @@ void LinearScan::resolveRegisters() splitArg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); } #ifdef TARGET_ARM - else if (treeNode->OperIsMultiRegOp()) + else if (compFeatureArgSplit() && treeNode->OperIsMultiRegOp()) { GenTreeMultiRegOp* multiReg = treeNode->AsMultiRegOp(); multiReg->SetRegSpillFlagByIdx(GTF_SPILL, currentRefPosition->getMultiRegIdx()); diff --git a/src/coreclr/jit/lsraarm64.cpp b/src/coreclr/jit/lsraarm64.cpp index 88644ead77225f..4e3fc015ae98a7 100644 --- a/src/coreclr/jit/lsraarm64.cpp +++ b/src/coreclr/jit/lsraarm64.cpp @@ -484,7 +484,7 @@ int LinearScan::BuildNode(GenTree* tree) srcCount = BuildPutArgSplit(tree->AsPutArgSplit()); dstCount = tree->AsPutArgSplit()->gtNumRegs; break; -#endif // FEATURE _SPLIT_ARG +#endif // FEATURE_ARG_SPLIT case GT_PUTARG_STK: srcCount = BuildPutArgStk(tree->AsPutArgStk()); diff --git a/src/coreclr/jit/lsraarmarch.cpp b/src/coreclr/jit/lsraarmarch.cpp index 7cde2796b5a1d1..1b439f0a4b7839 100644 --- a/src/coreclr/jit/lsraarmarch.cpp +++ b/src/coreclr/jit/lsraarmarch.cpp @@ -280,7 +280,6 @@ int LinearScan::BuildCall(GenTreeCall* call) srcCount++; } } -#if FEATURE_ARG_SPLIT else if (argNode->OperGet() == GT_PUTARG_SPLIT) { unsigned regCount = argNode->AsPutArgSplit()->gtNumRegs; @@ -291,7 +290,6 @@ int LinearScan::BuildCall(GenTreeCall* call) } srcCount += regCount; } -#endif // FEATURE_ARG_SPLIT else { assert(argNode->OperIs(GT_PUTARG_REG)); @@ -334,11 +332,9 @@ int LinearScan::BuildCall(GenTreeCall* call) { fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, arg); assert(curArgTabEntry != nullptr); -#if FEATURE_ARG_SPLIT // PUTARG_SPLIT nodes must be in the gtCallLateArgs list, since they // define registers 
used by the call. assert(arg->OperGet() != GT_PUTARG_SPLIT); -#endif // FEATURE_ARG_SPLIT if (arg->gtOper == GT_PUTARG_STK) { assert(curArgTabEntry->GetRegNum() == REG_STK); @@ -411,15 +407,18 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* argNode) BuildUse(use.GetNode()); srcCount++; -#if defined(FEATURE_SIMD) && defined(OSX_ARM64_ABI) - if (use.GetType() == TYP_SIMD12) +#if defined(FEATURE_SIMD) + if (compMacOsArm64Abi()) { - // Vector3 is read/written as two reads/writes: 8 byte and 4 byte. - // To assemble the vector properly we would need an additional int register. - // The other platforms can write it as 16-byte using 1 write. - buildInternalIntRegisterDefForNode(use.GetNode()); + if (use.GetType() == TYP_SIMD12) + { + // Vector3 is read/written as two reads/writes: 8 byte and 4 byte. + // To assemble the vector properly we would need an additional int register. + // The other platforms can write it as 16-byte using 1 write. + buildInternalIntRegisterDefForNode(use.GetNode()); + } } -#endif // FEATURE_SIMD && OSX_ARM64_ABI +#endif // FEATURE_SIMD } } else @@ -460,21 +459,20 @@ int LinearScan::BuildPutArgStk(GenTreePutArgStk* argNode) { assert(!putArgChild->isContained()); srcCount = BuildOperandUses(putArgChild); -#if defined(FEATURE_SIMD) && defined(OSX_ARM64_ABI) - if (argNode->GetStackByteSize() == 12) +#if defined(FEATURE_SIMD) + if (compMacOsArm64Abi() && argNode->GetStackByteSize() == 12) { // Vector3 is read/written as two reads/writes: 8 byte and 4 byte. // To assemble the vector properly we would need an additional int register. // The other platforms can write it as 16-byte using 1 write. buildInternalIntRegisterDefForNode(argNode); } -#endif // FEATURE_SIMD && OSX_ARM64_ABI +#endif // FEATURE_SIMD } buildInternalRegisterUses(); return srcCount; } -#if FEATURE_ARG_SPLIT //------------------------------------------------------------------------ // BuildPutArgSplit: Set the NodeInfo for a GT_PUTARG_SPLIT node // @@ -576,7 +574,6 @@ int LinearScan::BuildPutArgSplit(GenTreePutArgSplit* argNode) BuildDefs(argNode, dstCount, argMask); return srcCount; } -#endif // FEATURE_ARG_SPLIT //------------------------------------------------------------------------ // BuildBlockStore: Build the RefPositions for a block store node. diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp index c894ed7479c300..0287746e452dab 100644 --- a/src/coreclr/jit/lsrabuild.cpp +++ b/src/coreclr/jit/lsrabuild.cpp @@ -3406,8 +3406,8 @@ int LinearScan::BuildStoreLoc(GenTreeLclVarCommon* storeLoc) { BuildUse(op1, RBM_NONE, i); } -#if defined(FEATURE_SIMD) && defined(TARGET_X86) && defined(TARGET_WINDOWS) - if (!compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41)) +#if defined(FEATURE_SIMD) && defined(TARGET_X86) + if (TargetOS::IsWindows && !compiler->compOpportunisticallyDependsOn(InstructionSet_SSE41)) { if (varTypeIsSIMD(storeLoc) && op1->IsCall()) { @@ -3417,7 +3417,7 @@ int LinearScan::BuildStoreLoc(GenTreeLclVarCommon* storeLoc) setInternalRegsDelayFree = true; } } -#endif // FEATURE_SIMD && TARGET_X86 && TARGET_WINDOWS +#endif // FEATURE_SIMD && TARGET_X86 } else if (op1->isContained() && op1->OperIs(GT_BITCAST)) { @@ -3838,8 +3838,7 @@ int LinearScan::BuildPutArgReg(GenTreeUnOp* node) // (e.g. for the target). 
void LinearScan::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* callHasFloatRegArgs) { -#if FEATURE_VARARG - if (call->IsVarargs() && varTypeIsFloating(argNode)) + if (compFeatureVarArg() && call->IsVarargs() && varTypeIsFloating(argNode)) { *callHasFloatRegArgs = true; @@ -3849,7 +3848,6 @@ void LinearScan::HandleFloatVarArgs(GenTreeCall* call, GenTree* argNode, bool* c buildInternalIntRegisterDefForNode(call, genRegMask(targetReg)); } -#endif // FEATURE_VARARG } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp index e230601b60fdee..06e46b94e45e6a 100644 --- a/src/coreclr/jit/lsraxarch.cpp +++ b/src/coreclr/jit/lsraxarch.cpp @@ -1235,17 +1235,15 @@ int LinearScan::BuildCall(GenTreeCall* call) } #endif // TARGET_X86 -#if FEATURE_VARARG // If it is a fast tail call, it is already preferenced to use RAX. // Therefore, no need set src candidates on call tgt again. - if (call->IsVarargs() && callHasFloatRegArgs && !call->IsFastTailCall()) + if (compFeatureVarArg() && call->IsVarargs() && callHasFloatRegArgs && !call->IsFastTailCall()) { // Don't assign the call target to any of the argument registers because // we will use them to also pass floating point arguments as required // by Amd64 ABI. ctrlExprCandidates = allRegs(TYP_INT) & ~(RBM_ARG_REGS); } -#endif // !FEATURE_VARARG srcCount += BuildOperandUses(ctrlExpr, ctrlExprCandidates); } diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 32a910348f8f24..26b34c574376fc 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -1198,7 +1198,10 @@ fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum, fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; #if defined(DEBUG_ARG_SLOTS) - nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE); + if (!compMacOsArm64Abi()) + { + nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE); + } #endif nextStackByteOffset = roundUp(nextStackByteOffset, byteAlignment); @@ -1297,9 +1300,12 @@ void fgArgInfo::UpdateStkArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool assert((curArgTabEntry->GetRegNum() == REG_STK) || curArgTabEntry->IsSplit()); assert(curArgTabEntry->use->GetNode() == node); #if defined(DEBUG_ARG_SLOTS) - nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE); - assert(curArgTabEntry->slotNum == nextSlotNum); - nextSlotNum += curArgTabEntry->numSlots; + if (!compMacOsArm64Abi()) + { + nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE); + assert(curArgTabEntry->slotNum == nextSlotNum); + nextSlotNum += curArgTabEntry->numSlots; + } #endif nextStackByteOffset = roundUp(nextStackByteOffset, curArgTabEntry->GetByteAlignment()); @@ -1520,7 +1526,7 @@ void fgArgInfo::ArgsComplete() { prevArgTabEntry->needPlace = true; } -#endif // TARGET_ARM +#endif // FEATURE_ARG_SPLIT #endif } } @@ -2542,8 +2548,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) // At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined. assert(call->gtCallLateArgs == nullptr); -#ifdef TARGET_UNIX - if (callIsVararg) + if (TargetOS::IsUnix && callIsVararg) { // Currently native varargs is not implemented on non windows targets. // @@ -2552,7 +2557,6 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) // such as amd64 Unix, which just expects RAX to pass numFPArguments. 
NYI("Morphing Vararg call not yet implemented on non Windows targets."); } -#endif // TARGET_UNIX // Data structure for keeping track of non-standard args. Non-standard args are those that are not passed // following the normal calling convention or in the normal argument registers. We either mark existing @@ -3049,22 +3053,23 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) var_types hfaType = TYP_UNDEF; unsigned hfaSlots = 0; - bool passUsingFloatRegs; -#if !defined(OSX_ARM64_ABI) + bool passUsingFloatRegs; unsigned argAlignBytes = TARGET_POINTER_SIZE; -#endif - unsigned size = 0; - unsigned byteSize = 0; + unsigned size = 0; + unsigned byteSize = 0; if (GlobalJitOptions::compFeatureHfa) { hfaType = GetHfaType(argx); isHfaArg = varTypeIsValidHfaType(hfaType); -#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64) - // Make sure for vararg methods isHfaArg is not true. - isHfaArg = callIsVararg ? false : isHfaArg; -#endif // defined(TARGET_WINDOWS) && defined(TARGET_ARM64) +#if defined(TARGET_ARM64) + if (TargetOS::IsWindows) + { + // Make sure for vararg methods isHfaArg is not true. + isHfaArg = callIsVararg ? false : isHfaArg; + } +#endif // defined(TARGET_ARM64) if (isHfaArg) { @@ -3299,12 +3304,13 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) assert(size != 0); assert(byteSize != 0); -#if defined(OSX_ARM64_ABI) - // Arm64 Apple has a special ABI for passing small size arguments on stack, - // bytes are aligned to 1-byte, shorts to 2-byte, int/float to 4-byte, etc. - // It means passing 8 1-byte arguments on stack can take as small as 8 bytes. - unsigned argAlignBytes = eeGetArgAlignment(argType, isFloatHfa); -#endif + if (compMacOsArm64Abi()) + { + // Arm64 Apple has a special ABI for passing small size arguments on stack, + // bytes are aligned to 1-byte, shorts to 2-byte, int/float to 4-byte, etc. + // It means passing 8 1-byte arguments on stack can take as small as 8 bytes. + argAlignBytes = eeGetArgAlignment(argType, isFloatHfa); + } // // Figure out if the argument will be passed in a register. @@ -3384,16 +3390,14 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) // if (!isRegArg && (size > 1)) { -#if defined(TARGET_WINDOWS) // Arm64 windows native varargs allows splitting a 16 byte struct between stack // and the last general purpose register. - if (callIsVararg) + if (TargetOS::IsWindows && callIsVararg) { // Override the decision and force a split. 
isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs; } else -#endif // defined(TARGET_WINDOWS) { // We also must update intArgRegNum so that we no longer try to // allocate any new general purpose registers for args @@ -3402,7 +3406,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) } } } -#else // not TARGET_ARM or TARGET_ARM64 +#else // not TARGET_ARM or TARGET_ARM64 #if defined(UNIX_AMD64_ABI) @@ -3577,7 +3581,7 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) { #if FEATURE_ARG_SPLIT // Check for a split (partially enregistered) struct - if (!passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG)) + if (compFeatureArgSplit() && !passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG)) { // This indicates a partial enregistration of a struct type assert((isStructArg) || argx->OperIs(GT_FIELD_LIST) || argx->OperIsCopyBlkOp() || @@ -3781,11 +3785,14 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) CORINFO_CLASS_HANDLE copyBlkClass = NO_CLASS_HANDLE; #if defined(DEBUG_ARG_SLOTS) - if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE) + if (!compMacOsArm64Abi()) { - if (argSlots % 2 == 1) + if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE) { - argSlots++; + if (argSlots % 2 == 1) + { + argSlots++; + } } } #endif // DEBUG @@ -4237,8 +4244,12 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) const unsigned outgoingArgSpaceSize = GetOutgoingArgByteSize(call->fgArgInfo->GetNextSlotByteOffset()); #if defined(DEBUG_ARG_SLOTS) - unsigned preallocatedArgCount = call->fgArgInfo->GetNextSlotNum(); - assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES); + unsigned preallocatedArgCount = 0; + if (!compMacOsArm64Abi()) + { + preallocatedArgCount = call->fgArgInfo->GetNextSlotNum(); + assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES); + } #endif call->fgArgInfo->SetOutArgSize(max(outgoingArgSpaceSize, MIN_ARG_AREA_FOR_CALL)); @@ -4247,10 +4258,18 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) { const fgArgInfo* argInfo = call->fgArgInfo; #if defined(DEBUG_ARG_SLOTS) - printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, " - "outgoingArgSpaceSize=%d\n", - argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(), - outgoingArgSpaceSize); + if (!compMacOsArm64Abi()) + { + printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, " + "outgoingArgSpaceSize=%d\n", + argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(), + outgoingArgSpaceSize); + } + else + { + printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), + outgoingArgSpaceSize); + } #else printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); @@ -6984,13 +7003,11 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason) // work required to shuffle arguments to the correct locations. 
CLANG_FORMAT_COMMENT_ANCHOR; -#if (defined(TARGET_WINDOWS) && defined(TARGET_ARM)) || (defined(TARGET_WINDOWS) && defined(TARGET_ARM64)) - if (info.compIsVarArgs || callee->IsVarargs()) + if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs())) { reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64"); return false; } -#endif // (defined(TARGET_WINDOWS) && defined(TARGET_ARM)) || defined(TARGET_WINDOWS) && defined(TARGET_ARM64)) if (compLocallocUsed) { diff --git a/src/coreclr/jit/target.h b/src/coreclr/jit/target.h index 16986b16ca71c9..4c339fb3b80f3e 100644 --- a/src/coreclr/jit/target.h +++ b/src/coreclr/jit/target.h @@ -5,13 +5,42 @@ #ifndef TARGET_H_ #define TARGET_H_ -// Native Varargs are not supported on Unix (all architectures) and Windows ARM -#if defined(TARGET_WINDOWS) && !defined(TARGET_ARM) -#define FEATURE_VARARG 1 -#else -#define FEATURE_VARARG 0 +#ifdef TARGET_UNIX_POSSIBLY_SUPPORTED +#define FEATURE_CFI_SUPPORT +#endif + +// Undefine all of the target OS macros +// Within the JIT codebase we use the TargetOS features +#ifdef TARGET_UNIX +#undef TARGET_UNIX +#endif + +#ifdef TARGET_OSX +#undef TARGET_OSX +#endif + +#ifdef TARGET_WINDOWS +#undef TARGET_WINDOWS #endif +// Native Varargs are not supported on Unix (all architectures) and Windows ARM +inline bool compFeatureVarArg() +{ + return TargetOS::IsWindows && !TargetArchitecture::IsArm32; +} +inline bool compMacOsArm64Abi() +{ + return TargetArchitecture::IsArm64 && TargetOS::IsMacOS; +} +inline bool compFeatureArgSplit() +{ + return TargetArchitecture::IsArm32 || (TargetOS::IsWindows && TargetArchitecture::IsArm64); +} +inline bool compUnixX86Abi() +{ + return TargetArchitecture::IsX86 && TargetOS::IsUnix; +} + /*****************************************************************************/ // The following are human readable names for the target architectures #if defined(TARGET_X86) @@ -268,7 +297,10 @@ class Target { public: static const char* g_tgtCPUName; - static const char* g_tgtPlatformName; + static const char* g_tgtPlatformName() + { + return TargetOS::IsWindows ? 
"Windows" : "Unix"; + }; enum ArgOrder { diff --git a/src/coreclr/jit/targetamd64.h b/src/coreclr/jit/targetamd64.h index a3d41541e02db1..76d4d903884a72 100644 --- a/src/coreclr/jit/targetamd64.h +++ b/src/coreclr/jit/targetamd64.h @@ -404,10 +404,10 @@ #define REG_STACK_PROBE_HELPER_ARG REG_R11 #define RBM_STACK_PROBE_HELPER_ARG RBM_R11 -#ifdef TARGET_UNIX +#ifdef UNIX_AMD64_ABI #define RBM_STACK_PROBE_HELPER_TRASH RBM_NONE -#else // !TARGET_UNIX +#else // !UNIX_AMD64_ABI #define RBM_STACK_PROBE_HELPER_TRASH RBM_RAX -#endif // !TARGET_UNIX +#endif // !UNIX_AMD64_ABI // clang-format on diff --git a/src/coreclr/jit/unwind.cpp b/src/coreclr/jit/unwind.cpp index f616cba2a2ac3b..8d5efd0051906d 100644 --- a/src/coreclr/jit/unwind.cpp +++ b/src/coreclr/jit/unwind.cpp @@ -117,7 +117,7 @@ void Compiler::unwindGetFuncLocations(FuncInfoDsc* func, #endif // FEATURE_EH_FUNCLETS -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) void Compiler::createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR cfiOpcode, short dwarfReg, INT offset) { @@ -389,7 +389,7 @@ void Compiler::DumpCfiInfo(bool isHotCode, } #endif // DEBUG -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT //------------------------------------------------------------------------ // Compiler::unwindGetCurrentOffset: Calculate the current byte offset of the @@ -411,12 +411,16 @@ UNATIVE_OFFSET Compiler::unwindGetCurrentOffset(FuncInfoDsc* func) } else { -#if defined(TARGET_AMD64) || (defined(TARGET_UNIX) && (defined(TARGET_ARMARCH) || defined(TARGET_X86))) - assert(func->startLoc != nullptr); - offset = func->startLoc->GetFuncletPrologOffset(GetEmitter()); -#else - offset = 0; // TODO ??? -#endif + if (TargetArchitecture::IsX64 || + (TargetOS::IsUnix && (TargetArchitecture::IsArmArch || TargetArchitecture::IsX86))) + { + assert(func->startLoc != nullptr); + offset = func->startLoc->GetFuncletPrologOffset(GetEmitter()); + } + else + { + offset = 0; // TODO ??? 
+ } } return offset; diff --git a/src/coreclr/jit/unwindarm.cpp b/src/coreclr/jit/unwindarm.cpp index da4219b1d53bd3..2b25ed82942cc5 100644 --- a/src/coreclr/jit/unwindarm.cpp +++ b/src/coreclr/jit/unwindarm.cpp @@ -15,7 +15,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #pragma hdrstop #endif -#if defined(TARGET_ARM) && defined(TARGET_UNIX) +#if defined(TARGET_ARM) && defined(FEATURE_CFI_SUPPORT) short Compiler::mapRegNumToDwarfReg(regNumber reg) { short dwarfReg = DWARF_REG_ILLEGAL; @@ -124,7 +124,7 @@ short Compiler::mapRegNumToDwarfReg(regNumber reg) return dwarfReg; } -#endif // TARGET_ARM && TARGET_UNIX +#endif // TARGET_ARM && FEATURE_CFI_SUPPORT #ifdef TARGET_ARMARCH @@ -141,13 +141,13 @@ void Compiler::unwindBegProlog() { assert(compGeneratingProlog); -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { unwindBegPrologCFI(); return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT FuncInfoDsc* func = funCurrentFunc(); @@ -173,12 +173,12 @@ void Compiler::unwindBegEpilog() { assert(compGeneratingEpilog); -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT funCurrentFunc()->uwi.AddEpilog(); } @@ -319,7 +319,7 @@ void Compiler::unwindPushMaskInt(regMaskTP maskInt) ~(RBM_R0 | RBM_R1 | RBM_R2 | RBM_R3 | RBM_R4 | RBM_R5 | RBM_R6 | RBM_R7 | RBM_R8 | RBM_R9 | RBM_R10 | RBM_R11 | RBM_R12 | RBM_LR)) == 0); -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { // If we are pushing LR, we should give unwind codes in terms of caller's PC @@ -330,7 +330,7 @@ void Compiler::unwindPushMaskInt(regMaskTP maskInt) unwindPushPopMaskCFI(maskInt, false); return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT bool useOpsize16 = ((maskInt & (RBM_LOW_REGS | RBM_LR)) == maskInt); // Can PUSH use the 16-bit encoding? 
unwindPushPopMaskInt(maskInt, useOpsize16); @@ -341,25 +341,25 @@ void Compiler::unwindPushMaskFloat(regMaskTP maskFloat) // Only floating point registers should be in maskFloat assert((maskFloat & RBM_ALLFLOAT) == maskFloat); -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { unwindPushPopMaskCFI(maskFloat, true); return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT unwindPushPopMaskFloat(maskFloat); } void Compiler::unwindPopMaskInt(regMaskTP maskInt) { -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT // Only r0-r12 and lr and pc are supported (pc is mapped to lr when encoding) assert((maskInt & @@ -382,12 +382,12 @@ void Compiler::unwindPopMaskInt(regMaskTP maskInt) void Compiler::unwindPopMaskFloat(regMaskTP maskFloat) { -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT // Only floating point registers should be in maskFloat assert((maskFloat & RBM_ALLFLOAT) == maskFloat); @@ -396,7 +396,7 @@ void Compiler::unwindPopMaskFloat(regMaskTP maskFloat) void Compiler::unwindAllocStack(unsigned size) { -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { if (compGeneratingProlog) @@ -405,7 +405,7 @@ void Compiler::unwindAllocStack(unsigned size) } return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -450,7 +450,7 @@ void Compiler::unwindAllocStack(unsigned size) void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset) { -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { if (compGeneratingProlog) @@ -459,7 +459,7 @@ void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset) } return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -478,12 +478,12 @@ void Compiler::unwindSaveReg(regNumber reg, unsigned offset) void Compiler::unwindBranch16() { -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -494,12 +494,12 @@ void Compiler::unwindBranch16() void Compiler::unwindNop(unsigned codeSizeInBytes) // codeSizeInBytes is 2 or 4 bytes for Thumb2 instruction { -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -535,12 +535,12 @@ void Compiler::unwindNop(unsigned codeSizeInBytes) // codeSizeInBytes is 2 or 4 // for them. void Compiler::unwindPadding() { -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT UnwindInfo* pu = &funCurrentFunc()->uwi; GetEmitter()->emitUnwindNopPadding(pu->GetCurrentEmitterLocation(), this); @@ -565,7 +565,7 @@ void Compiler::unwindReserveFunc(FuncInfoDsc* func) BOOL isFunclet = (func->funKind == FUNC_ROOT) ? 
FALSE : TRUE; bool funcHasColdSection = false; -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { DWORD unwindCodeBytes = 0; @@ -578,7 +578,7 @@ void Compiler::unwindReserveFunc(FuncInfoDsc* func) return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT // If there is cold code, split the unwind data between the hot section and the // cold section. This needs to be done before we split into fragments, as each @@ -638,13 +638,13 @@ void Compiler::unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode static_assert_no_msg(FUNC_HANDLER == (FuncKind)CORJIT_FUNC_HANDLER); static_assert_no_msg(FUNC_FILTER == (FuncKind)CORJIT_FUNC_FILTER); -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { unwindEmitFuncCFI(func, pHotCode, pColdCode); return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT func->uwi.Allocate((CorJitFuncKind)func->funKind, pHotCode, pColdCode, true); diff --git a/src/coreclr/jit/unwindarm64.cpp b/src/coreclr/jit/unwindarm64.cpp index a027097cd19610..0725eb41dfdba5 100644 --- a/src/coreclr/jit/unwindarm64.cpp +++ b/src/coreclr/jit/unwindarm64.cpp @@ -17,7 +17,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #if defined(TARGET_ARM64) -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) short Compiler::mapRegNumToDwarfReg(regNumber reg) { short dwarfReg = DWARF_REG_ILLEGAL; @@ -223,7 +223,7 @@ short Compiler::mapRegNumToDwarfReg(regNumber reg) return dwarfReg; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT void Compiler::unwindPush(regNumber reg) { @@ -232,7 +232,7 @@ void Compiler::unwindPush(regNumber reg) void Compiler::unwindAllocStack(unsigned size) { -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { if (compGeneratingProlog) @@ -242,7 +242,7 @@ void Compiler::unwindAllocStack(unsigned size) return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -276,7 +276,7 @@ void Compiler::unwindAllocStack(unsigned size) void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset) { -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { if (compGeneratingProlog) @@ -286,7 +286,7 @@ void Compiler::unwindSetFrameReg(regNumber reg, unsigned offset) return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -348,7 +348,7 @@ void Compiler::unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset) assert(0 <= offset && offset <= 504); assert((offset % 8) == 0); -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { if (compGeneratingProlog) @@ -362,7 +362,7 @@ void Compiler::unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset) return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -430,7 +430,7 @@ void Compiler::unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int o assert(offset < 0); assert((offset % 8) == 0); -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { if (compGeneratingProlog) @@ -445,7 +445,7 @@ void Compiler::unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int o return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -519,7 +519,7 @@ void Compiler::unwindSaveReg(regNumber reg, int offset) assert(0 <= offset && offset <= 504); 
assert((offset % 8) == 0); -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { if (compGeneratingProlog) @@ -532,7 +532,7 @@ void Compiler::unwindSaveReg(regNumber reg, int offset) return; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT int z = offset / 8; assert(0 <= z && z <= 0x3F); @@ -573,7 +573,7 @@ void Compiler::unwindSaveRegPreindexed(regNumber reg, int offset) assert(-256 <= offset && offset < 0); assert((offset % 8) == 0); -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) if (generateCFIUnwindCodes()) { if (compGeneratingProlog) @@ -587,7 +587,7 @@ void Compiler::unwindSaveRegPreindexed(regNumber reg, int offset) return; } -#endif // _TARGET_UNIX_ +#endif // FEATURE_CFI_SUPPORT UnwindInfo* pu = &funCurrentFunc()->uwi; @@ -622,10 +622,10 @@ void Compiler::unwindSaveRegPreindexed(regNumber reg, int offset) void Compiler::unwindSaveNext() { -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) // do not use unwindSaveNext when generating CFI codes as there is no code for this assert(!generateCFIUnwindCodes()); -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT UnwindInfo* pu = &funCurrentFunc()->uwi; diff --git a/src/coreclr/jit/unwindx86.cpp b/src/coreclr/jit/unwindx86.cpp index aba58bc93b322d..0cd88fd29ba1e5 100644 --- a/src/coreclr/jit/unwindx86.cpp +++ b/src/coreclr/jit/unwindx86.cpp @@ -19,7 +19,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #error "This should be included only for x86" #endif // TARGET_X86 -#if defined(TARGET_UNIX) +#if defined(FEATURE_CFI_SUPPORT) short Compiler::mapRegNumToDwarfReg(regNumber reg) { short dwarfReg = DWARF_REG_ILLEGAL; @@ -28,7 +28,7 @@ short Compiler::mapRegNumToDwarfReg(regNumber reg) return dwarfReg; } -#endif // TARGET_UNIX +#endif // FEATURE_CFI_SUPPORT void Compiler::unwindBegProlog() { diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp index b8e7cfe7e6fdb7..673ee0c8c4176a 100644 --- a/src/coreclr/jit/utils.cpp +++ b/src/coreclr/jit/utils.cpp @@ -23,19 +23,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "opcode.h" -/*****************************************************************************/ -// Define the string platform name based on compilation #ifdefs. This is the -// same code for all platforms, hence it is here instead of in the targetXXX.cpp -// files. - -#ifdef TARGET_UNIX -// Should we distinguish Mac? Can we? -// Should we distinguish flavors of Unix? Can we? -const char* Target::g_tgtPlatformName = "Unix"; -#else // !TARGET_UNIX -const char* Target::g_tgtPlatformName = "Windows"; -#endif // !TARGET_UNIX - /*****************************************************************************/ #define DECLARE_DATA @@ -1927,7 +1914,7 @@ double FloatingPointUtils::convertUInt64ToDouble(unsigned __int64 uIntVal) uint64_t adjHex = 0x43F0000000000000UL; d = (double)s64 + *(double*)&adjHex; #else - d = (double)uIntVal; + d = (double)uIntVal; #endif } else @@ -1975,7 +1962,7 @@ unsigned __int64 FloatingPointUtils::convertDoubleToUInt64(double d) u64 = UINT64(INT64(d)); #else - u64 = UINT64(d); + u64 = UINT64(d); #endif // TARGET_XARCH return u64; diff --git a/src/coreclr/scripts/jitrollingbuild.py b/src/coreclr/scripts/jitrollingbuild.py index c5bf6d065b590e..0003805b49960d 100644 --- a/src/coreclr/scripts/jitrollingbuild.py +++ b/src/coreclr/scripts/jitrollingbuild.py @@ -309,8 +309,8 @@ def upload_blob(file, blob_name): # Next, look for any and all cross-compilation JITs. 
These are named, e.g.: # clrjit_unix_x64_x64.dll - # clrjit_win_arm_x64.dll - # clrjit_win_arm64_x64.dll + # clrjit_universal_arm_x64.dll + # clrjit_universal_arm64_x64.dll # and so on, and live in the same product directory as the primary JIT. # # Note that the expression below explicitly filters out the primary JIT since we added that above. diff --git a/src/coreclr/scripts/superpmi-replay.py b/src/coreclr/scripts/superpmi-replay.py index 5b10e569de8510..158c50162340c9 100644 --- a/src/coreclr/scripts/superpmi-replay.py +++ b/src/coreclr/scripts/superpmi-replay.py @@ -92,6 +92,7 @@ def main(main_args): os_name = "win" if platform_name.lower() == "windows" else "unix" arch_name = coreclr_args.arch host_arch_name = "x64" if arch_name.endswith("64") else "x86" + os_name = "universal" if arch_name.startswith("arm") else os_name jit_path = path.join(coreclr_args.jit_directory, 'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name)) print("Running superpmi.py download") diff --git a/src/coreclr/scripts/superpmi.py b/src/coreclr/scripts/superpmi.py index cf7edff7801f32..4b502f0bffa4f5 100755 --- a/src/coreclr/scripts/superpmi.py +++ b/src/coreclr/scripts/superpmi.py @@ -254,7 +254,7 @@ superpmi_common_parser.add_argument("--skip_cleanup", action="store_true", help=skip_cleanup_help) superpmi_common_parser.add_argument("--sequential", action="store_true", help="Run SuperPMI in sequential mode. Default is to run in parallel for faster runs.") superpmi_common_parser.add_argument("-spmi_log_file", help=spmi_log_file_help) -superpmi_common_parser.add_argument("-jit_name", help="Specify the filename of the jit to use, e.g., 'clrjit_win_arm64_x64.dll'. Default is clrjit.dll/libclrjit.so") +superpmi_common_parser.add_argument("-jit_name", help="Specify the filename of the jit to use, e.g., 'clrjit_universal_arm64_x64.dll'. 
Default is clrjit.dll/libclrjit.so") superpmi_common_parser.add_argument("--altjit", action="store_true", help="Set the altjit variables on replay.") superpmi_common_parser.add_argument("-error_limit", help=error_limit_help) diff --git a/src/coreclr/scripts/superpmi_replay_setup.py b/src/coreclr/scripts/superpmi_replay_setup.py index 34cc8301fc5fd4..c6c6fcb507f892 100644 --- a/src/coreclr/scripts/superpmi_replay_setup.py +++ b/src/coreclr/scripts/superpmi_replay_setup.py @@ -94,7 +94,7 @@ def match_correlation_files(full_path): file_name = os.path.basename(full_path) if file_name.startswith("clrjit_") and file_name.endswith(".dll") and file_name.find( - "osx") == -1 and file_name.find("armel") == -1: + "osx") == -1: return True if file_name == "superpmi.exe" or file_name == "mcs.exe": diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index 60455f67ca6dec..9ecec3bd9dd234 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -128,6 +128,12 @@ "clrjit_win_x86_arm64.dll", "clrjit_win_x86_x64.dll", "clrjit_win_x86_x86.dll", + "clrjit_universal_arm_arm.dll", + "clrjit_universal_arm_arm64.dll", + "clrjit_universal_arm_x64.dll", + "clrjit_universal_arm_x86.dll", + "clrjit_universal_arm64_arm64.dll", + "clrjit_universal_arm64_x64.dll", "coreclr.dll", "CoreConsole.exe", "coredistools.dll", diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs index ba2cc59af8bcd4..7bdfabc0f5c29f 100644 --- a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs +++ b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs @@ -140,6 +140,9 @@ private extern static CorJitResult JitCompileMethod(out IntPtr exception, [DllImport(JitSupportLibrary)] private extern static IntPtr AllocException([MarshalAs(UnmanagedType.LPWStr)]string message, int messageLength); + [DllImport(JitSupportLibrary)] + private extern static void JitSetOs(IntPtr jit, CORINFO_OS os); + private IntPtr AllocException(Exception ex) { _lastException = ExceptionDispatchInfo.Capture(ex); @@ -160,9 +163,10 @@ private IntPtr AllocException(Exception ex) [DllImport(JitSupportLibrary)] private extern static char* GetExceptionMessage(IntPtr obj); - public static void Startup() + public static void Startup(CORINFO_OS os) { jitStartup(GetJitHost(JitConfigProvider.Instance.UnmanagedInstance)); + JitSetOs(JitPointerAccessor.Get(), os); } public CorInfoImpl() @@ -2910,6 +2914,12 @@ private void ThrowExceptionForJitResult(HRESULT result) private void ThrowExceptionForHelper(ref CORINFO_HELPER_DESC throwHelper) { throw new NotImplementedException("ThrowExceptionForHelper"); } + public static CORINFO_OS TargetToOs(TargetDetails target) + { + return target.IsWindows ? CORINFO_OS.CORINFO_WINNT : + target.IsOSX ? CORINFO_OS.CORINFO_MACOS : CORINFO_OS.CORINFO_UNIX; + } + private void getEEInfo(ref CORINFO_EE_INFO pEEInfoOut) { pEEInfoOut = new CORINFO_EE_INFO(); @@ -2936,7 +2946,7 @@ private void getEEInfo(ref CORINFO_EE_INFO pEEInfoOut) new UIntPtr(32 * 1024 - 1) : new UIntPtr((uint)pEEInfoOut.osPageSize / 2 - 1); pEEInfoOut.targetAbi = TargetABI; - pEEInfoOut.osType = _compilation.NodeFactory.Target.IsWindows ? 
diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs b/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs
index 087ab9a5c2730c..4ace5372053a78 100644
--- a/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs
+++ b/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs
@@ -827,6 +827,7 @@ public enum CORINFO_OS
     {
         CORINFO_WINNT,
         CORINFO_UNIX,
+        CORINFO_MACOS,
     }
 
     public enum CORINFO_RUNTIME_ABI
diff --git a/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs b/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs
index 2dc973c709e8ae..0fad745ec6701e 100644
--- a/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs
+++ b/src/coreclr/tools/Common/JitInterface/JitConfigProvider.cs
@@ -62,7 +62,7 @@ public static void Initialize(
                 return libHandle;
             });
 
-            CorInfoImpl.Startup();
+            CorInfoImpl.Startup(CorInfoImpl.TargetToOs(target));
         }
 
         public IntPtr UnmanagedInstance
@@ -141,9 +141,9 @@ private static string GetTargetSpec(TargetDetails target)
                 _ => throw new NotImplementedException(target.Architecture.ToString())
             };
 
-            if ((target.Architecture == TargetArchitecture.ARM64) && (target.OperatingSystem == TargetOS.OSX))
+            if ((target.Architecture == TargetArchitecture.ARM64) || (target.Architecture == TargetArchitecture.ARM))
             {
-                targetOSComponent = "unix_osx";
+                targetOSComponent = "universal";
             }
 
             return targetOSComponent + '_' + targetArchComponent + "_" + RuntimeInformation.ProcessArchitecture.ToString().ToLowerInvariant();
diff --git a/src/coreclr/tools/aot/crossgen2/crossgen2.props b/src/coreclr/tools/aot/crossgen2/crossgen2.props
index 7f38cdc728a750..e69b66ceecb3e2 100644
--- a/src/coreclr/tools/aot/crossgen2/crossgen2.props
+++ b/src/coreclr/tools/aot/crossgen2/crossgen2.props
@@ -82,17 +82,6 @@
     />
-
-
-
-
-
-
+    setTargetOS(os);
+}
+
 DLL_EXPORT void JitProcessShutdownWork(ICorJitCompiler * pJit)
 {
     return pJit->ProcessShutdownWork(nullptr);
diff --git a/src/coreclr/utilcode/winfix.cpp b/src/coreclr/utilcode/winfix.cpp
index acc91c6d5ba382..d572c8edb26ef6 100644
--- a/src/coreclr/utilcode/winfix.cpp
+++ b/src/coreclr/utilcode/winfix.cpp
@@ -181,7 +181,7 @@ WszCreateProcess(
     return fResult;
 }
 
-#ifndef TARGET_UNIX
+#ifndef HOST_UNIX
 
 #include "psapi.h"
 
@@ -290,11 +290,11 @@ HRESULT SetThreadName(HANDLE hThread, PCWSTR lpThreadDescription)
     return g_pfnSetThreadDescription(hThread, lpThreadDescription);
 }
 
-#else //!TARGET_UNIX
+#else //!HOST_UNIX
 
 HRESULT SetThreadName(HANDLE hThread, PCWSTR lpThreadDescription)
 {
     return SetThreadDescription(hThread, lpThreadDescription);
 }
 
-#endif //!TARGET_UNIX
+#endif //!HOST_UNIX
diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp
index 4e8117c7b7c6f6..f95419134084bc 100644
--- a/src/coreclr/vm/codeman.cpp
+++ b/src/coreclr/vm/codeman.cpp
@@ -1566,6 +1566,8 @@ static bool ValidateJitName(LPCWSTR pwzJitName)
     return true;
 }
 
+CORINFO_OS getClrVmOs();
+
 // LoadAndInitializeJIT: load the JIT dll into the process, and initialize it (call the UtilCode initialization function,
 // check the JIT-EE interface GUID, etc.)
 //
@@ -1673,6 +1675,9 @@
             {
                 pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_VERSION_CHECK;
 
+                // Specify to the JIT that it is working with the OS that we are compiled against
+                pICorJitCompiler->setTargetOS(getClrVmOs());
+
                 // The JIT has loaded and passed the version identifier test, so publish the JIT interface to the caller.
                 *ppICorJitCompiler = pICorJitCompiler;
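The codeman.cpp change above establishes the ordering on the VM side: the JIT-EE version check runs first, the freshly loaded JIT is then told which OS the VM was compiled against, and only after that is the interface published to callers. A small self-contained sketch of that sequence, with the VM and JIT types reduced to illustrative stand-ins (StubJit, initializeLoadedJit, and getVmOs are not product names):

#include <cstdio>

// Stand-ins for the real VM/JIT types; illustration only.
enum CORINFO_OS { CORINFO_WINNT, CORINFO_UNIX, CORINFO_MACOS };

struct StubJit
{
    CORINFO_OS targetOS = CORINFO_WINNT;
    bool versionCheckPassed = true;                 // stands in for the JIT-EE GUID comparison
    void setTargetOS(CORINFO_OS os) { targetOS = os; }
};

// Mirrors the build-time selection getClrVmOs() performs in jitinterface.cpp below.
static CORINFO_OS getVmOs()
{
#if defined(__APPLE__)
    return CORINFO_MACOS;
#elif defined(__unix__)
    return CORINFO_UNIX;
#else
    return CORINFO_WINNT;
#endif
}

// Ordering used by LoadAndInitializeJIT: version check, then setTargetOS,
// then publish the interface to the caller.
static StubJit* initializeLoadedJit(StubJit* pJit)
{
    if (!pJit->versionCheckPassed)
        return nullptr;                             // version mismatch: do not use this JIT
    pJit->setTargetOS(getVmOs());                   // the step added by this change
    return pJit;                                    // "publish" the interface
}

int main()
{
    StubJit jit;
    StubJit* published = initializeLoadedJit(&jit);
    printf("target OS enum value: %d\n", published != nullptr ? (int)published->targetOS : -1);
    return 0;
}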
diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp
index e2b3089826ed08..71f197444c9b69 100644
--- a/src/coreclr/vm/jitinterface.cpp
+++ b/src/coreclr/vm/jitinterface.cpp
@@ -9911,6 +9911,17 @@ void InlinedCallFrame::GetEEInfo(CORINFO_EE_INFO::InlinedCallFrameInfo *pInfo)
 #endif // TARGET_ARM
 }
 
+CORINFO_OS getClrVmOs()
+{
+#ifdef TARGET_OSX
+    return CORINFO_MACOS;
+#elif defined(TARGET_UNIX)
+    return CORINFO_UNIX;
+#else
+    return CORINFO_WINNT;
+#endif
+}
+
 /*********************************************************************/
 // Return details about EE internal data structures
 void CEEInfo::getEEInfo(CORINFO_EE_INFO *pEEInfoOut)
@@ -9956,12 +9967,7 @@ void CEEInfo::getEEInfo(CORINFO_EE_INFO *pEEInfoOut)
     pEEInfoOut->osPageSize = GetOsPageSize();
     pEEInfoOut->maxUncheckedOffsetForNullObject = MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT;
     pEEInfoOut->targetAbi = CORINFO_CORECLR_ABI;
-
-#ifdef TARGET_UNIX
-    pEEInfoOut->osType = CORINFO_UNIX;
-#else
-    pEEInfoOut->osType = CORINFO_WINNT;
-#endif
+    pEEInfoOut->osType = getClrVmOs();
 
     EE_TO_JIT_TRANSITION();
 }
diff --git a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj
index a463c32e904a5f..81990fdeea9388 100644
--- a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj
+++ b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj
@@ -37,13 +37,10 @@
-
-
+
-
-
-
+