diff --git a/src/coreclr/src/inc/corinfo.h b/src/coreclr/src/inc/corinfo.h
index fa9db2c759394..8105e520fc8b8 100644
--- a/src/coreclr/src/inc/corinfo.h
+++ b/src/coreclr/src/inc/corinfo.h
@@ -733,17 +733,25 @@ inline bool IsCallerPop(CorInfoCallConv callConv)
}
#endif // UNIX_X86_ABI
+// Represents the calling conventions supported with the extensible calling convention syntax
+// as well as the original metadata-encoded calling conventions.
enum CorInfoUnmanagedCallConv
{
// These correspond to CorUnmanagedCallingConvention
-
CORINFO_UNMANAGED_CALLCONV_UNKNOWN,
CORINFO_UNMANAGED_CALLCONV_C,
CORINFO_UNMANAGED_CALLCONV_STDCALL,
CORINFO_UNMANAGED_CALLCONV_THISCALL,
CORINFO_UNMANAGED_CALLCONV_FASTCALL
+ // New calling conventions supported with the extensible calling convention encoding go here.
};
+// Determines whether or not this calling convention is an instance method calling convention.
+inline bool callConvIsInstanceMethodCallConv(CorInfoUnmanagedCallConv callConv)
+{
+ return callConv == CORINFO_UNMANAGED_CALLCONV_THISCALL;
+}
+
// These are returned from getMethodOptions
enum CorInfoOptions
{
diff --git a/src/coreclr/src/jit/codegencommon.cpp b/src/coreclr/src/jit/codegencommon.cpp
index 1cbf0aa7232f3..3613312f75fcb 100644
--- a/src/coreclr/src/jit/codegencommon.cpp
+++ b/src/coreclr/src/jit/codegencommon.cpp
@@ -10070,7 +10070,7 @@ void CodeGen::genVzeroupperIfNeeded(bool check256bitOnly /* = true*/)
// Return Value:
// true if type is returned in multiple registers, false otherwise.
//
-bool Compiler::IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass)
+bool Compiler::IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv)
{
if (hClass == NO_CLASS_HANDLE)
{
@@ -10078,7 +10078,7 @@ bool Compiler::IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass)
}
structPassingKind howToReturnStruct;
- var_types returnType = getReturnTypeForStruct(hClass, &howToReturnStruct);
+ var_types returnType = getReturnTypeForStruct(hClass, callConv, &howToReturnStruct);
#ifdef TARGET_ARM64
return (varTypeIsStruct(returnType) && (howToReturnStruct != SPK_PrimitiveType));
@@ -11534,7 +11534,11 @@ void CodeGen::genReturn(GenTree* treeNode)
}
else // we must have a struct return type
{
- retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass);
+ CorInfoCallConvExtension callConv =
+ compiler->compMethodInfoGetEntrypointCallConv(compiler->info.compMethodInfo);
+
+ retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass,
+ callConv);
}
regCount = retTypeDesc.GetReturnRegCount();
}
@@ -11659,7 +11663,9 @@ void CodeGen::genStructReturn(GenTree* treeNode)
if (actualOp1->OperIs(GT_LCL_VAR))
{
varDsc = compiler->lvaGetDesc(actualOp1->AsLclVar()->GetLclNum());
- retTypeDesc.InitializeStructReturnType(compiler, varDsc->GetStructHnd());
+ retTypeDesc.InitializeStructReturnType(compiler, varDsc->GetStructHnd(),
+ compiler->compMethodInfoGetEntrypointCallConv(
+ compiler->info.compMethodInfo));
assert(varDsc->lvIsMultiRegRet);
}
else
@@ -11839,7 +11845,8 @@ void CodeGen::genMultiRegStoreToLocal(GenTreeLclVar* lclNode)
hasRegs = true;
if (varReg != reg)
{
- inst_RV_RV(ins_Copy(type), varReg, reg, type);
+ // We may need a cross register-file copy here.
+ inst_RV_RV(ins_Copy(reg, type), varReg, reg, type);
}
fieldVarDsc->SetRegNum(varReg);
}
diff --git a/src/coreclr/src/jit/codegenxarch.cpp b/src/coreclr/src/jit/codegenxarch.cpp
index 6828c99cced12..6f1ba9ab9bb8b 100644
--- a/src/coreclr/src/jit/codegenxarch.cpp
+++ b/src/coreclr/src/jit/codegenxarch.cpp
@@ -140,7 +140,9 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg)
}
else // we must have a struct return type
{
- retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass);
+ retTypeDesc.InitializeStructReturnType(compiler, compiler->info.compMethodInfo->args.retTypeClass,
+ compiler->compMethodInfoGetEntrypointCallConv(
+ compiler->info.compMethodInfo));
}
const unsigned regCount = retTypeDesc.GetReturnRegCount();
@@ -5688,12 +5690,29 @@ void CodeGen::genJmpMethod(GenTree* jmp)
#endif // !defined(UNIX_AMD64_ABI)
{
// Register argument
+ CLANG_FORMAT_COMMENT_ANCHOR;
+#ifdef TARGET_X86
+ noway_assert(
+ isRegParamType(genActualType(varDsc->TypeGet())) ||
+ (varTypeIsStruct(varDsc->TypeGet()) && compiler->isTrivialPointerSizedStruct(varDsc->GetStructHnd())));
+#else
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
+#endif // TARGET_X86
// Is register argument already in the right register?
// If not load it from its stack location.
var_types loadType = varDsc->lvaArgType();
- regNumber argReg = varDsc->GetArgReg(); // incoming arg register
+
+#ifdef TARGET_X86
+ if (varTypeIsStruct(varDsc->TypeGet()))
+ {
+ // Treat trivial pointer-sized structs as a pointer-sized primitive
+ // for the purposes of registers.
+ loadType = TYP_I_IMPL;
+ }
+#endif
+
+ regNumber argReg = varDsc->GetArgReg(); // incoming arg register
if (varDsc->GetRegNum() != argReg)
{
diff --git a/src/coreclr/src/jit/compiler.cpp b/src/coreclr/src/jit/compiler.cpp
index 3a6ea5006b268..1b76513211484 100644
--- a/src/coreclr/src/jit/compiler.cpp
+++ b/src/coreclr/src/jit/compiler.cpp
@@ -511,6 +511,55 @@ bool Compiler::isSingleFloat32Struct(CORINFO_CLASS_HANDLE clsHnd)
}
#endif // ARM_SOFTFP
+#ifdef TARGET_X86
+//---------------------------------------------------------------------------
+// isTrivialPointerSizedStruct:
+// Check if the given struct type contains only one pointer-sized integer value type
+//
+// Arguments:
+// clsHnd - the handle for the struct type.
+//
+// Return Value:
+// true if the given struct type contains only one pointer-sized integer value type,
+// false otherwise.
+//
+bool Compiler::isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const
+{
+ assert(info.compCompHnd->isValueClass(clsHnd));
+ if (info.compCompHnd->getClassSize(clsHnd) != TARGET_POINTER_SIZE)
+ {
+ return false;
+ }
+ for (;;)
+ {
+ // all of class chain must be of value type and must have only one field
+ if (!info.compCompHnd->isValueClass(clsHnd) || info.compCompHnd->getClassNumInstanceFields(clsHnd) != 1)
+ {
+ return false;
+ }
+
+ CORINFO_CLASS_HANDLE* pClsHnd = &clsHnd;
+ CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
+ CorInfoType fieldType = info.compCompHnd->getFieldType(fldHnd, pClsHnd);
+
+ var_types vt = JITtype2varType(fieldType);
+
+ if (fieldType == CORINFO_TYPE_VALUECLASS)
+ {
+ clsHnd = *pClsHnd;
+ }
+ else if (varTypeIsI(vt) && !varTypeIsGC(vt))
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+}
+#endif // TARGET_X86
+
//-----------------------------------------------------------------------------
// getPrimitiveTypeForStruct:
// Get the "primitive" type that is is used for a struct
@@ -692,7 +741,7 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
assert(structSize != 0);
// Determine if we can pass the struct as a primitive type.
-// Note that on x86 we never pass structs as primitive types (unless the VM unwraps them for us).
+// Note that on x86 we only pass specific pointer-sized structs that satisfy isTrivialPointerSizedStruct checks.
#ifndef TARGET_X86
#ifdef UNIX_AMD64_ABI
@@ -727,7 +776,11 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// and also examine the clsHnd to see if it is an HFA of count one
useType = getPrimitiveTypeForStruct(structSize, clsHnd, isVarArg);
}
-
+#else
+ if (isTrivialPointerSizedStruct(clsHnd))
+ {
+ useType = TYP_I_IMPL;
+ }
#endif // !TARGET_X86
// Did we change this struct type into a simple "primitive" type?
@@ -875,6 +928,8 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
//
// Arguments:
// clsHnd - the handle for the struct type
+// callConv - the calling convention of the function
+// that returns this struct.
// wbReturnStruct - An "out" argument with information about how
// the struct is to be returned
// structSize - the size of the struct type,
@@ -909,9 +964,10 @@ var_types Compiler::getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// Whenever this method's return value is TYP_STRUCT it always means
// that multiple registers are used to return this struct.
//
-var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
- structPassingKind* wbReturnStruct /* = nullptr */,
- unsigned structSize /* = 0 */)
+var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
+ CorInfoCallConvExtension callConv,
+ structPassingKind* wbReturnStruct /* = nullptr */,
+ unsigned structSize /* = 0 */)
{
var_types useType = TYP_UNKNOWN;
structPassingKind howToReturnStruct = SPK_Unknown; // We must change this before we return
@@ -949,9 +1005,28 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
{
// Return classification is not always size based...
canReturnInRegister = structDesc.passedInRegisters;
+ if (!canReturnInRegister)
+ {
+ assert(structDesc.eightByteCount == 0);
+ howToReturnStruct = SPK_ByReference;
+ useType = TYP_UNKNOWN;
+ }
}
-
-#endif // UNIX_AMD64_ABI
+#elif UNIX_X86_ABI
+ if (callConv != CorInfoCallConvExtension::Managed)
+ {
+ canReturnInRegister = false;
+ howToReturnStruct = SPK_ByReference;
+ useType = TYP_UNKNOWN;
+ }
+#elif defined(TARGET_WINDOWS) && !defined(TARGET_ARM)
+ if (callConvIsInstanceMethodCallConv(callConv))
+ {
+ canReturnInRegister = false;
+ howToReturnStruct = SPK_ByReference;
+ useType = TYP_UNKNOWN;
+ }
+#endif
// Check for cases where a small struct is returned in a register
// via a primitive type.
@@ -1007,7 +1082,7 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
// If so, we should have already set howToReturnStruct, too.
assert(howToReturnStruct != SPK_Unknown);
}
- else // We can't replace the struct with a "primitive" type
+ else if (canReturnInRegister) // We can't replace the struct with a "primitive" type
{
// See if we can return this struct by value, possibly in multiple registers
// or if we should return it using a return buffer register
@@ -1030,24 +1105,13 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
#ifdef UNIX_AMD64_ABI
- // The case of (structDesc.eightByteCount == 1) should have already been handled
- if (structDesc.eightByteCount > 1)
- {
- // setup wbPassType and useType indicate that this is returned by value in multiple registers
- howToReturnStruct = SPK_ByValue;
- useType = TYP_STRUCT;
- assert(structDesc.passedInRegisters == true);
- }
- else
- {
- assert(structDesc.eightByteCount == 0);
- // Otherwise we return this struct using a return buffer
- // setup wbPassType and useType indicate that this is return using a return buffer register
- // (reference to a return buffer)
- howToReturnStruct = SPK_ByReference;
- useType = TYP_UNKNOWN;
- assert(structDesc.passedInRegisters == false);
- }
+ // The cases of (structDesc.eightByteCount == 1) and (structDesc.eightByteCount == 0)
+ // should have already been handled
+ assert(structDesc.eightByteCount > 1);
+ // setup wbPassType and useType indicate that this is returned by value in multiple registers
+ howToReturnStruct = SPK_ByValue;
+ useType = TYP_STRUCT;
+ assert(structDesc.passedInRegisters == true);
#elif defined(TARGET_ARM64)
@@ -1070,8 +1134,26 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
howToReturnStruct = SPK_ByReference;
useType = TYP_UNKNOWN;
}
+#elif defined(TARGET_X86)
-#elif defined(TARGET_ARM) || defined(TARGET_X86)
+ // Only 8-byte structs are returned in multiple registers.
+ // We also only support multireg struct returns on x86 to match the native calling convention.
+ // So return 8-byte structs only when the calling convention is a native calling convention.
+ if (structSize == MAX_RET_MULTIREG_BYTES && callConv != CorInfoCallConvExtension::Managed)
+ {
+ // setup wbPassType and useType indicate that this is returned by value in multiple registers
+ howToReturnStruct = SPK_ByValue;
+ useType = TYP_STRUCT;
+ }
+ else
+ {
+ // Otherwise we return this struct using a return buffer
+ // setup wbPassType and useType indicate that this is returned using a return buffer register
+ // (reference to a return buffer)
+ howToReturnStruct = SPK_ByReference;
+ useType = TYP_UNKNOWN;
+ }
+#elif defined(TARGET_ARM)
// Otherwise we return this struct using a return buffer
// setup wbPassType and useType indicate that this is returned using a return buffer register
@@ -1973,6 +2055,22 @@ unsigned Compiler::compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd)
return sigSize;
}
+CorInfoCallConvExtension Compiler::compMethodInfoGetEntrypointCallConv(CORINFO_METHOD_INFO* mthInfo)
+{
+ CorInfoCallConv callConv = mthInfo->args.getCallConv();
+ if (callConv == CORINFO_CALLCONV_DEFAULT || callConv == CORINFO_CALLCONV_VARARG)
+ {
+ // Both the default and the varargs calling conventions represent a managed callconv.
+ return CorInfoCallConvExtension::Managed;
+ }
+
+ static_assert_no_msg((unsigned)CorInfoCallConvExtension::C == (unsigned)CORINFO_CALLCONV_C);
+ static_assert_no_msg((unsigned)CorInfoCallConvExtension::Stdcall == (unsigned)CORINFO_CALLCONV_STDCALL);
+ static_assert_no_msg((unsigned)CorInfoCallConvExtension::Thiscall == (unsigned)CORINFO_CALLCONV_THISCALL);
+
+ return (CorInfoCallConvExtension)callConv;
+}
+
#ifdef DEBUG
static bool DidComponentUnitTests = false;
@@ -2207,7 +2305,7 @@ void Compiler::compSetProcessor()
#elif defined(TARGET_ARM64)
info.genCPU = CPU_ARM64;
#elif defined(TARGET_AMD64)
- info.genCPU = CPU_X64;
+ info.genCPU = CPU_X64;
#elif defined(TARGET_X86)
if (jitFlags.IsSet(JitFlags::JIT_FLAG_TARGET_P4))
info.genCPU = CPU_X86_PENTIUM_4;
@@ -6015,6 +6113,9 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
case CORINFO_CALLCONV_NATIVEVARARG:
info.compIsVarArgs = true;
break;
+ case CORINFO_CALLCONV_C:
+ case CORINFO_CALLCONV_STDCALL:
+ case CORINFO_CALLCONV_THISCALL:
case CORINFO_CALLCONV_DEFAULT:
info.compIsVarArgs = false;
break;
diff --git a/src/coreclr/src/jit/compiler.h b/src/coreclr/src/jit/compiler.h
index 15e7db0570df8..1bf93fbce8468 100644
--- a/src/coreclr/src/jit/compiler.h
+++ b/src/coreclr/src/jit/compiler.h
@@ -2249,7 +2249,8 @@ class Compiler
#endif
#if FEATURE_MULTIREG_RET
- GenTree* impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass);
+ GenTree* impAssignMultiRegTypeToVar(GenTree* op,
+ CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET
GenTree* impAssignSmallStructTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass);
@@ -2258,6 +2259,10 @@ class Compiler
bool isSingleFloat32Struct(CORINFO_CLASS_HANDLE hClass);
#endif // ARM_SOFTFP
+#ifdef TARGET_X86
+ bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
+#endif // TARGET_X86
+
//-------------------------------------------------------------------------
// Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
// HFAs are one to four element structs where each element is the same
@@ -2276,7 +2281,7 @@ class Compiler
var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass);
- bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass);
+ bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);
//-------------------------------------------------------------------------
// The following is used for validating format of EH table
@@ -3433,8 +3438,8 @@ class Compiler
void lvaInitArgs(InitVarDscInfo* varDscInfo);
void lvaInitThisPtr(InitVarDscInfo* varDscInfo);
- void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo);
- void lvaInitUserArgs(InitVarDscInfo* varDscInfo);
+ void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg);
+ void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs);
void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo);
void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo);
@@ -3846,7 +3851,9 @@ class Compiler
GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd);
- GenTree* impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd);
+ GenTree* impFixupStructReturnType(GenTree* op,
+ CORINFO_CLASS_HANDLE retClsHnd,
+ CorInfoCallConvExtension unmgdCallConv);
#ifdef DEBUG
var_types impImportJitTestLabelMark(int numArgs);
@@ -4078,8 +4085,12 @@ class Compiler
GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass);
- bool VarTypeIsMultiByteAndCanEnreg(
- var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg);
+ bool VarTypeIsMultiByteAndCanEnreg(var_types type,
+ CORINFO_CLASS_HANDLE typeClass,
+ unsigned* typeSize,
+ bool forReturn,
+ bool isVarArg,
+ CorInfoCallConvExtension callConv);
bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName);
bool IsTargetIntrinsic(NamedIntrinsic intrinsicName);
@@ -4385,10 +4396,12 @@ class Compiler
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo);
- bool impTailCallRetTypeCompatible(var_types callerRetType,
- CORINFO_CLASS_HANDLE callerRetTypeClass,
- var_types calleeRetType,
- CORINFO_CLASS_HANDLE calleeRetTypeClass);
+ bool impTailCallRetTypeCompatible(var_types callerRetType,
+ CORINFO_CLASS_HANDLE callerRetTypeClass,
+ CorInfoCallConvExtension callerCallConv,
+ var_types calleeRetType,
+ CORINFO_CLASS_HANDLE calleeRetTypeClass,
+ CorInfoCallConvExtension calleeCallConv);
bool impIsTailCallILPattern(
bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive);
@@ -5057,9 +5070,10 @@ class Compiler
// Get the type that is used to return values of the given struct type.
// If the size is unknown, pass 0 and it will be determined from 'clsHnd'.
- var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
- structPassingKind* wbPassStruct = nullptr,
- unsigned structSize = 0);
+ var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
+ CorInfoCallConvExtension callConv,
+ structPassingKind* wbPassStruct = nullptr,
+ unsigned structSize = 0);
#ifdef DEBUG
// Print a representation of "vnp" or "vn" on standard output.
@@ -9336,24 +9350,36 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// There are cases where implicit RetBuf argument should be explicitly returned in a register.
// In such cases the return type is changed to TYP_BYREF and appropriate IR is generated.
// These cases are:
- // 1. Profiler Leave calllback expects the address of retbuf as return value for
+ CLANG_FORMAT_COMMENT_ANCHOR;
+#ifdef TARGET_AMD64
+ // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by
+ // methods with hidden RetBufArg in RAX. In such case GT_RETURN is of TYP_BYREF,
+ // returning the address of RetBuf.
+ return (info.compRetBuffArg != BAD_VAR_NUM);
+#else // TARGET_AMD64
+#ifdef PROFILING_SUPPORTED
+ // 2. Profiler Leave callback expects the address of retbuf as return value for
// methods with hidden RetBuf argument. impReturnInstruction() when profiler
// callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for
// methods with hidden RetBufArg.
- //
- // 2. As per the System V ABI, the address of RetBuf needs to be returned by
- // methods with hidden RetBufArg in RAX. In such case GT_RETURN is of TYP_BYREF,
- // returning the address of RetBuf.
- //
- // 3. Windows 64-bit native calling convention also requires the address of RetBuff
- // to be returned in RAX.
+ if (compIsProfilerHookNeeded())
+ {
+ return (info.compRetBuffArg != BAD_VAR_NUM);
+ }
+#endif
+ // 3. Windows ARM64 native instance calling convention requires the address of RetBuff
+ // to be returned in x0.
CLANG_FORMAT_COMMENT_ANCHOR;
+#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64)
+ auto callConv = compMethodInfoGetEntrypointCallConv(info.compMethodInfo);
+ if (callConvIsInstanceMethodCallConv(callConv))
+ {
+ return (info.compRetBuffArg != BAD_VAR_NUM);
+ }
+#endif // TARGET_WINDOWS && TARGET_ARM64
-#ifdef TARGET_AMD64
- return (info.compRetBuffArg != BAD_VAR_NUM);
-#else // !TARGET_AMD64
- return (compIsProfilerHookNeeded()) && (info.compRetBuffArg != BAD_VAR_NUM);
-#endif // !TARGET_AMD64
+ return false;
+#endif // TARGET_AMD64
}
bool compDoOldStructRetyping()
@@ -9368,8 +9394,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
{
#if FEATURE_MULTIREG_RET
#if defined(TARGET_X86)
- // On x86 only 64-bit longs are returned in multiple registers
- return varTypeIsLong(info.compRetNativeType);
+ // On x86, 64-bit longs and structs are returned in multiple registers
+ return varTypeIsLong(info.compRetNativeType) ||
+ (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM));
#else // targets: X64-UNIX, ARM64 or ARM32
// On all other targets that support multireg return values:
// Methods returning a struct in multiple registers have a return value of TYP_STRUCT.
@@ -9393,8 +9420,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
{
#if FEATURE_MULTIREG_RET
#if defined(TARGET_X86)
- // On x86 only 64-bit longs are returned in multiple registers
- return varTypeIsLong(info.compRetNativeType);
+ // On x86, 64-bit longs and structs are returned in multiple registers
+ return varTypeIsLong(info.compRetNativeType) ||
+ (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM));
#else // targets: X64-UNIX, ARM64 or ARM32
#if defined(TARGET_ARM64)
// TYP_SIMD* are returned in one register.
@@ -9549,6 +9577,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// size of the type these describe.
unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd);
+ // Gets the calling convention the method's entry point should have.
+ CorInfoCallConvExtension compMethodInfoGetEntrypointCallConv(CORINFO_METHOD_INFO* mthInfo);
+
#ifdef DEBUG
// Components used by the compiler may write unit test suites, and
// have them run within this method. They will be run only once per process, and only
diff --git a/src/coreclr/src/jit/compiler.hpp b/src/coreclr/src/jit/compiler.hpp
index 8d3207c2112ee..972a66052e202 100644
--- a/src/coreclr/src/jit/compiler.hpp
+++ b/src/coreclr/src/jit/compiler.hpp
@@ -692,9 +692,14 @@ inline bool isRegParamType(var_types type)
// isVarArg - whether or not this is a vararg fixed arg or variable argument
// - if so on arm64 windows getArgTypeForStruct will ignore HFA
// - types
+// callConv - the calling convention of the call
//
-inline bool Compiler::VarTypeIsMultiByteAndCanEnreg(
- var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg)
+inline bool Compiler::VarTypeIsMultiByteAndCanEnreg(var_types type,
+ CORINFO_CLASS_HANDLE typeClass,
+ unsigned* typeSize,
+ bool forReturn,
+ bool isVarArg,
+ CorInfoCallConvExtension callConv)
{
bool result = false;
unsigned size = 0;
@@ -706,7 +711,7 @@ inline bool Compiler::VarTypeIsMultiByteAndCanEnreg(
if (forReturn)
{
structPassingKind howToReturnStruct;
- type = getReturnTypeForStruct(typeClass, &howToReturnStruct, size);
+ type = getReturnTypeForStruct(typeClass, callConv, &howToReturnStruct, size);
}
else
{
diff --git a/src/coreclr/src/jit/flowgraph.cpp b/src/coreclr/src/jit/flowgraph.cpp
index 7dc9865f16d9a..c1a5f3c602b8a 100644
--- a/src/coreclr/src/jit/flowgraph.cpp
+++ b/src/coreclr/src/jit/flowgraph.cpp
@@ -23087,8 +23087,9 @@ Compiler::fgWalkResult Compiler::fgUpdateInlineReturnExpressionPlaceHolder(GenTr
if (retClsHnd != NO_CLASS_HANDLE)
{
structPassingKind howToReturnStruct;
- var_types returnType = comp->getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
- GenTree* parent = data->parent;
+ var_types returnType =
+ comp->getReturnTypeForStruct(retClsHnd, CorInfoCallConvExtension::Managed, &howToReturnStruct);
+ GenTree* parent = data->parent;
switch (howToReturnStruct)
{
@@ -23201,7 +23202,8 @@ Compiler::fgWalkResult Compiler::fgUpdateInlineReturnExpressionPlaceHolder(GenTr
GenTree* effectiveValue = value->gtEffectiveVal(/*commaOnly*/ true);
noway_assert(!varTypeIsStruct(effectiveValue) || (effectiveValue->OperGet() != GT_RET_EXPR) ||
- !comp->IsMultiRegReturnedType(effectiveValue->AsRetExpr()->gtRetClsHnd));
+ !comp->IsMultiRegReturnedType(effectiveValue->AsRetExpr()->gtRetClsHnd,
+ CorInfoCallConvExtension::Managed));
}
}
diff --git a/src/coreclr/src/jit/gcencode.cpp b/src/coreclr/src/jit/gcencode.cpp
index d315ef9a430db..0147932eb1712 100644
--- a/src/coreclr/src/jit/gcencode.cpp
+++ b/src/coreclr/src/jit/gcencode.cpp
@@ -50,7 +50,9 @@ ReturnKind GCInfo::getReturnKind()
case TYP_STRUCT:
{
CORINFO_CLASS_HANDLE structType = compiler->info.compMethodInfo->args.retTypeClass;
- var_types retType = compiler->getReturnTypeForStruct(structType);
+ var_types retType =
+ compiler->getReturnTypeForStruct(structType, compiler->compMethodInfoGetEntrypointCallConv(
+ compiler->info.compMethodInfo));
switch (retType)
{
diff --git a/src/coreclr/src/jit/gentree.cpp b/src/coreclr/src/jit/gentree.cpp
index a55d157714c7e..d850e56af9a28 100644
--- a/src/coreclr/src/jit/gentree.cpp
+++ b/src/coreclr/src/jit/gentree.cpp
@@ -15655,7 +15655,7 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr,
#if FEATURE_MULTIREG_RET
if (varTypeIsStruct(call))
{
- call->InitializeStructReturnType(this, structType);
+ call->InitializeStructReturnType(this, structType, call->GetUnmanagedCallConv());
}
#endif // FEATURE_MULTIREG_RET
@@ -19152,7 +19152,9 @@ GenTree* Compiler::gtNewMustThrowException(unsigned helper, var_types type, CORI
// Return Value
// None
//
-void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd)
+void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp,
+ CORINFO_CLASS_HANDLE retClsHnd,
+ CorInfoCallConvExtension callConv)
{
assert(!m_inited);
@@ -19162,7 +19164,7 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA
unsigned structSize = comp->info.compCompHnd->getClassSize(retClsHnd);
Compiler::structPassingKind howToReturnStruct;
- var_types returnType = comp->getReturnTypeForStruct(retClsHnd, &howToReturnStruct, structSize);
+ var_types returnType = comp->getReturnTypeForStruct(retClsHnd, callConv, &howToReturnStruct, structSize);
switch (howToReturnStruct)
{
@@ -19234,6 +19236,18 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HA
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
+#elif defined(TARGET_X86)
+
+ // an 8-byte struct returned using two registers
+ assert(structSize == 8);
+
+ BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
+ comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
+ for (unsigned i = 0; i < 2; ++i)
+ {
+ m_regType[i] = comp->getJitGCType(gcPtrs[i]);
+ }
+
#else // TARGET_XXX
// This target needs support here!
diff --git a/src/coreclr/src/jit/gentree.h b/src/coreclr/src/jit/gentree.h
index 7391a8be15f4f..334a62f537449 100644
--- a/src/coreclr/src/jit/gentree.h
+++ b/src/coreclr/src/jit/gentree.h
@@ -3637,7 +3637,7 @@ struct ReturnTypeDesc
}
// Initialize the Return Type Descriptor for a method that returns a struct type
- void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd);
+ void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv);
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
// Only needed for X86 and arm32.
@@ -3954,7 +3954,11 @@ struct GenTreeCall final : public GenTree
CORINFO_SIG_INFO* callSig;
#endif
- TailCallSiteInfo* tailCallInfo;
+ union {
+ TailCallSiteInfo* tailCallInfo;
+ // Only used for unmanaged calls, which cannot be tail-called
+ CorInfoCallConvExtension unmgdCallConv;
+ };
#if FEATURE_MULTIREG_RET
@@ -3997,10 +4001,10 @@ struct GenTreeCall final : public GenTree
#endif
}
- void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd)
+ void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv)
{
#if FEATURE_MULTIREG_RET
- gtReturnTypeDesc.InitializeStructReturnType(comp, retClsHnd);
+ gtReturnTypeDesc.InitializeStructReturnType(comp, retClsHnd, callConv);
#endif
}
@@ -4162,7 +4166,7 @@ struct GenTreeCall final : public GenTree
// importer has performed tail call checks
#define GTF_CALL_M_TAILCALL 0x00000002 // GT_CALL -- the call is a tailcall
#define GTF_CALL_M_VARARGS 0x00000004 // GT_CALL -- the call uses varargs ABI
-#define GTF_CALL_M_RETBUFFARG 0x00000008 // GT_CALL -- first parameter is the return buffer argument
+#define GTF_CALL_M_RETBUFFARG 0x00000008 // GT_CALL -- call has a return buffer argument
#define GTF_CALL_M_DELEGATE_INV 0x00000010 // GT_CALL -- call to Delegate.Invoke
#define GTF_CALL_M_NOGCCHECK 0x00000020 // GT_CALL -- not a call for computing full interruptability and therefore no GC check is required.
#define GTF_CALL_M_SPECIAL_INTRINSIC 0x00000040 // GT_CALL -- function that could be optimized as an intrinsic
@@ -4277,6 +4281,15 @@ struct GenTreeCall final : public GenTree
//
bool TreatAsHasRetBufArg(Compiler* compiler) const;
+ bool HasFixedRetBufArg() const
+ {
+#if defined(TARGET_WINDOWS) && !defined(TARGET_ARM)
+ return hasFixedRetBuffReg() && HasRetBufArg() && !callConvIsInstanceMethodCallConv(GetUnmanagedCallConv());
+#else
+ return hasFixedRetBuffReg() && HasRetBufArg();
+#endif
+ }
+
//-----------------------------------------------------------------------------------------
// HasMultiRegRetVal: whether the call node returns its value in multiple return registers.
//
@@ -4561,6 +4574,11 @@ struct GenTreeCall final : public GenTree
bool AreArgsComplete() const;
+ CorInfoCallConvExtension GetUnmanagedCallConv() const
+ {
+ return IsUnmanaged() ? unmgdCallConv : CorInfoCallConvExtension::Managed;
+ }
+
static bool Equals(GenTreeCall* c1, GenTreeCall* c2);
GenTreeCall(var_types type) : GenTree(GT_CALL, type)
diff --git a/src/coreclr/src/jit/importer.cpp b/src/coreclr/src/jit/importer.cpp
index fb9e10c6a9809..88e60351aaae6 100644
--- a/src/coreclr/src/jit/importer.cpp
+++ b/src/coreclr/src/jit/importer.cpp
@@ -1299,12 +1299,50 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
if (src->gtOper == GT_CALL)
{
- if (src->AsCall()->TreatAsHasRetBufArg(this))
+ GenTreeCall* srcCall = src->AsCall();
+ if (srcCall->TreatAsHasRetBufArg(this))
{
// Case of call returning a struct via hidden retbuf arg
+ CLANG_FORMAT_COMMENT_ANCHOR;
- // insert the return value buffer into the argument list as first byref parameter
- src->AsCall()->gtCallArgs = gtPrependNewCallArg(destAddr, src->AsCall()->gtCallArgs);
+#if defined(TARGET_WINDOWS) && !defined(TARGET_ARM)
+ // Unmanaged instance methods on Windows need the retbuf arg after the first (this) parameter
+ if (srcCall->IsUnmanaged())
+ {
+ if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv()))
+ {
+ GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs);
+ }
+ else
+ {
+#ifdef TARGET_X86
+ // The argument list has already been reversed.
+ // Insert the return buffer as the last node so it will be pushed onto the stack last
+ // as required by the native ABI.
+ assert(srcCall->gtCallType == CT_INDIRECT);
+ GenTreeCall::Use* lastArg = srcCall->gtCallArgs;
+ if (lastArg == nullptr)
+ {
+ srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
+ }
+ else
+ {
+ for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext())
+ ;
+ gtInsertNewCallArgAfter(destAddr, lastArg);
+ }
+#else
+ // insert the return value buffer into the argument list as first byref parameter
+ srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
+#endif
+ }
+ }
+ else
+#endif
+ {
+ // insert the return value buffer into the argument list as first byref parameter
+ srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
+ }
// now returns void, not a struct
src->gtType = TYP_VOID;
@@ -1316,7 +1354,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
{
// Case of call returning a struct in one or more registers.
- var_types returnType = (var_types)src->AsCall()->gtReturnType;
+ var_types returnType = (var_types)srcCall->gtReturnType;
if (compDoOldStructRetyping())
{
@@ -7083,7 +7121,12 @@ void Compiler::impCheckForPInvokeCall(
JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
+ static_assert_no_msg((unsigned)CorInfoCallConvExtension::C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
+ static_assert_no_msg((unsigned)CorInfoCallConvExtension::Stdcall == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
+ static_assert_no_msg((unsigned)CorInfoCallConvExtension::Thiscall == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
+
call->gtFlags |= GTF_CALL_UNMANAGED;
+ call->unmgdCallConv = CorInfoCallConvExtension(unmanagedCallConv);
if (!call->IsSuppressGCTransition())
{
info.compUnmanagedCallCountWithGCTransition++;
@@ -7629,10 +7672,12 @@ void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
// so that callee can be tail called. Note that here we don't check
// compatibility in IL Verifier sense, but on the lines of return type
// sizes are equal and get returned in the same return register.
-bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
- CORINFO_CLASS_HANDLE callerRetTypeClass,
- var_types calleeRetType,
- CORINFO_CLASS_HANDLE calleeRetTypeClass)
+bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
+ CORINFO_CLASS_HANDLE callerRetTypeClass,
+ CorInfoCallConvExtension callerCallConv,
+ var_types calleeRetType,
+ CORINFO_CLASS_HANDLE calleeRetTypeClass,
+ CorInfoCallConvExtension calleeCallConv)
{
// Note that we can not relax this condition with genActualType() as the
// calling convention dictates that the caller of a function with a small
@@ -7669,10 +7714,10 @@ bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
// trust code can make those tail calls.
unsigned callerRetTypeSize = 0;
unsigned calleeRetTypeSize = 0;
- bool isCallerRetTypMBEnreg =
- VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true, info.compIsVarArgs);
- bool isCalleeRetTypMBEnreg =
- VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true, info.compIsVarArgs);
+ bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize,
+ true, info.compIsVarArgs, callerCallConv);
+ bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize,
+ true, info.compIsVarArgs, calleeCallConv);
if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
{
@@ -8868,8 +8913,9 @@ var_types Compiler::impImportCall(OPCODE opcode,
// a small-typed return value is responsible for normalizing the return val
if (canTailCall &&
- !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
- sig->retTypeClass))
+ !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass,
+ compMethodInfoGetEntrypointCallConv(info.compMethodInfo), callRetTyp,
+ sig->retTypeClass, call->AsCall()->GetUnmanagedCallConv()))
{
canTailCall = false;
szCanTailCallFailReason = "Return types are not tail call compatible";
@@ -9219,10 +9265,11 @@ bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
{
// We have some kind of STRUCT being returned
-
structPassingKind howToReturnStruct = SPK_Unknown;
- var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
+ var_types returnType =
+ getReturnTypeForStruct(methInfo->args.retTypeClass, compMethodInfoGetEntrypointCallConv(methInfo),
+ &howToReturnStruct);
if (howToReturnStruct == SPK_ByReference)
{
@@ -9313,7 +9360,7 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN
call->gtRetClsHnd = retClsHnd;
#if FEATURE_MULTIREG_RET
- call->InitializeStructReturnType(this, retClsHnd);
+ call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv());
#endif // FEATURE_MULTIREG_RET
#ifdef UNIX_AMD64_ABI
@@ -9370,7 +9417,7 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN
// No need to assign a multi-reg struct to a local var if:
// - It is a tail call or
// - The call is marked for in-lining later
- return impAssignMultiRegTypeToVar(call, retClsHnd);
+ return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv()));
}
}
@@ -9381,7 +9428,9 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN
// and we change the return type on those calls here.
//
structPassingKind howToReturnStruct;
- var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
+ var_types returnType;
+
+ returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
if (howToReturnStruct == SPK_ByReference)
{
@@ -9447,7 +9496,7 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN
// No need to assign a multi-reg struct to a local var if:
// - It is a tail call or
// - The call is marked for in-lining later
- return impAssignMultiRegTypeToVar(call, retClsHnd);
+ return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv()));
}
}
#endif // FEATURE_MULTIREG_RET
@@ -9461,10 +9510,11 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN
/*****************************************************************************
For struct return values, re-type the operand in the case where the ABI
does not use a struct return buffer
- Note that this method is only call for !TARGET_X86
*/
-GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
+GenTree* Compiler::impFixupStructReturnType(GenTree* op,
+ CORINFO_CLASS_HANDLE retClsHnd,
+ CorInfoCallConvExtension unmgdCallConv)
{
assert(varTypeIsStruct(info.compRetType));
assert(info.compRetBuffArg == BAD_VAR_NUM);
@@ -9474,12 +9524,12 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
#if defined(TARGET_XARCH)
-#ifdef UNIX_AMD64_ABI
+#if FEATURE_MULTIREG_RET
// No VarArgs for CoreCLR on x64 Unix
- assert(!info.compIsVarArgs);
+ UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs));
// Is method returning a multi-reg struct?
- if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
+ if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv))
{
// In case of multi-reg struct return, we force IR to be one of the following:
// GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
@@ -9487,7 +9537,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
if (op->gtOper == GT_LCL_VAR)
{
- // Make sure that this struct stays in memory and doesn't get promoted.
+ // Note that this is a multi-reg return.
unsigned lclNum = op->AsLclVarCommon()->GetLclNum();
lvaTable[lclNum].lvIsMultiRegRet = true;
@@ -9502,11 +9552,11 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
return op;
}
- return impAssignMultiRegTypeToVar(op, retClsHnd);
+ return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
}
-#else // !UNIX_AMD64_ABI
+#else
assert(info.compRetNativeType != TYP_STRUCT);
-#endif // !UNIX_AMD64_ABI
+#endif // defined(UNIX_AMD64_ABI) || defined(TARGET_X86)
#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM)
@@ -9539,13 +9589,13 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
return op;
}
}
- return impAssignMultiRegTypeToVar(op, retClsHnd);
+ return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
}
#elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64)
// Is method returning a multi-reg struct?
- if (IsMultiRegReturnedType(retClsHnd))
+ if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv))
{
if (op->gtOper == GT_LCL_VAR)
{
@@ -9578,7 +9628,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
return op;
}
}
- return impAssignMultiRegTypeToVar(op, retClsHnd);
+ return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv));
}
#endif // FEATURE_MULTIREG_RET && FEATURE_HFA
@@ -9664,7 +9714,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
}
else if (op->gtOper == GT_COMMA)
{
- op->AsOp()->gtOp2 = impFixupStructReturnType(op->AsOp()->gtOp2, retClsHnd);
+ op->AsOp()->gtOp2 = impFixupStructReturnType(op->AsOp()->gtOp2, retClsHnd, unmgdCallConv);
}
op->gtType = info.compRetNativeType;
@@ -13498,7 +13548,8 @@ void Compiler::impImportBlockCode(BasicBlock* block)
// Calls with large struct return value have to go through this.
// Helper calls with small struct return value also have to go
// through this since they do not follow Unix calling convention.
- if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
+ if (op1->gtOper != GT_CALL ||
+ !IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) ||
op1->AsCall()->gtCallType == CT_HELPER)
#endif // UNIX_AMD64_ABI
{
@@ -15591,7 +15642,8 @@ void Compiler::impImportBlockCode(BasicBlock* block)
{
op1->AsCall()->gtRetClsHnd = classHandle;
#if FEATURE_MULTIREG_RET
- op1->AsCall()->InitializeStructReturnType(this, classHandle);
+ op1->AsCall()->InitializeStructReturnType(this, classHandle,
+ op1->AsCall()->GetUnmanagedCallConv());
#endif
}
@@ -15638,7 +15690,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
if (!compDoOldStructRetyping())
{
#if FEATURE_MULTIREG_RET
- op1->AsCall()->InitializeStructReturnType(this, tokenType);
+ op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv());
#endif
op1->AsCall()->gtRetClsHnd = tokenType;
}
@@ -15892,7 +15944,8 @@ void Compiler::impImportBlockCode(BasicBlock* block)
#if FEATURE_MULTIREG_RET
- if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
+ if (varTypeIsStruct(op1) &&
+ IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed))
{
// Unbox nullable helper returns a TYP_STRUCT.
// For the multi-reg case we need to spill it to a temp so that
@@ -16768,7 +16821,8 @@ GenTree* Compiler::impAssignSmallStructTypeToVar(GenTree* op, CORINFO_CLASS_HAND
// Returns:
// Tree with reference to struct local to use as call return value.
-GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
+GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op,
+ CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv))
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return"));
impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
@@ -16777,7 +16831,7 @@ GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE
// TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
ret->gtFlags |= GTF_DONT_CSE;
- assert(IsMultiRegReturnedType(hClass));
+ assert(IsMultiRegReturnedType(hClass, callConv));
// Mark the var so that fields are not promoted and stay together.
lvaTable[tmpNum].lvIsMultiRegRet = true;
@@ -16931,7 +16985,8 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
// adjust the type away from struct to integral
// and no normalizing
- op2 = impFixupStructReturnType(op2, retClsHnd);
+ op2 = impFixupStructReturnType(op2, retClsHnd,
+ compMethodInfoGetEntrypointCallConv(info.compMethodInfo));
}
else
{
@@ -17131,7 +17186,8 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
// Same as !IsHfa but just don't bother with impAssignStructPtr.
#else // defined(UNIX_AMD64_ABI)
ReturnTypeDesc retTypeDesc;
- retTypeDesc.InitializeStructReturnType(this, retClsHnd);
+ retTypeDesc.InitializeStructReturnType(this, retClsHnd,
+ compMethodInfoGetEntrypointCallConv(info.compMethodInfo));
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
if (retRegCount != 0)
@@ -17165,7 +17221,8 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
else
#elif defined(TARGET_ARM64)
ReturnTypeDesc retTypeDesc;
- retTypeDesc.InitializeStructReturnType(this, retClsHnd);
+ retTypeDesc.InitializeStructReturnType(this, retClsHnd,
+ compMethodInfoGetEntrypointCallConv(info.compMethodInfo));
unsigned retRegCount = retTypeDesc.GetReturnRegCount();
if (retRegCount != 0)
@@ -17187,6 +17244,31 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
}
}
else
+#elif defined(TARGET_X86)
+ ReturnTypeDesc retTypeDesc;
+ retTypeDesc.InitializeStructReturnType(this, retClsHnd,
+ compMethodInfoGetEntrypointCallConv(info.compMethodInfo));
+ unsigned retRegCount = retTypeDesc.GetReturnRegCount();
+
+ if (retRegCount != 0)
+ {
+ assert(!iciCall->HasRetBufArg());
+ assert(retRegCount == MAX_RET_REG_COUNT);
+ if (fgNeedReturnSpillTemp())
+ {
+ if (!impInlineInfo->retExpr)
+ {
+ // The inlinee compiler has figured out the type of the temp already. Use it here.
+ impInlineInfo->retExpr =
+ gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
+ }
+ }
+ else
+ {
+ impInlineInfo->retExpr = op2;
+ }
+ }
+ else
#endif // defined(TARGET_ARM64)
{
assert(iciCall->HasRetBufArg());
@@ -17243,7 +17325,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
// return the implicit return buffer explicitly (in RAX).
// Change the return type to be BYREF.
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
-#else // !defined(TARGET_AMD64)
+#else // !defined(TARGET_AMD64)
// In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX).
// In such case the return value of the function is changed to BYREF.
// If profiler hook is not needed the return type of the function is TYP_VOID.
@@ -17251,6 +17333,14 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
{
op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
}
+#if defined(TARGET_WINDOWS) && defined(TARGET_ARM64)
+ // On ARM64, the native instance calling convention variant
+ // requires the implicit ByRef to be explicitly returned.
+ else if (callConvIsInstanceMethodCallConv(compMethodInfoGetEntrypointCallConv(info.compMethodInfo)))
+ {
+ op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
+ }
+#endif
else
{
// return void
@@ -17265,7 +17355,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
// Also on System V AMD64 the multireg structs returns are also left as structs.
noway_assert(info.compRetNativeType != TYP_STRUCT);
#endif
- op2 = impFixupStructReturnType(op2, retClsHnd);
+ op2 = impFixupStructReturnType(op2, retClsHnd, compMethodInfoGetEntrypointCallConv(info.compMethodInfo));
// return op2
var_types returnType;
if (compDoOldStructRetyping())
diff --git a/src/coreclr/src/jit/lclvars.cpp b/src/coreclr/src/jit/lclvars.cpp
index 8ea469a6e3684..37f7c29bb6d3a 100644
--- a/src/coreclr/src/jit/lclvars.cpp
+++ b/src/coreclr/src/jit/lclvars.cpp
@@ -145,7 +145,9 @@ void Compiler::lvaInitTypeRef()
CORINFO_CLASS_HANDLE retClsHnd = info.compMethodInfo->args.retTypeClass;
Compiler::structPassingKind howToReturnStruct;
- var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
+ var_types returnType =
+ getReturnTypeForStruct(retClsHnd, compMethodInfoGetEntrypointCallConv(info.compMethodInfo),
+ &howToReturnStruct);
// We can safely widen the return type for enclosed structs.
if ((howToReturnStruct == SPK_PrimitiveType) || (howToReturnStruct == SPK_EnclosingType))
@@ -348,8 +350,27 @@ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo)
/* Is there a "this" pointer ? */
lvaInitThisPtr(varDscInfo);
- /* If we have a hidden return-buffer parameter, that comes here */
- lvaInitRetBuffArg(varDscInfo);
+ unsigned numUserArgsToSkip = 0;
+ unsigned numUserArgs = info.compMethodInfo->args.numArgs;
+#if defined(TARGET_WINDOWS) && !defined(TARGET_ARM)
+ if (callConvIsInstanceMethodCallConv(compMethodInfoGetEntrypointCallConv(info.compMethodInfo)))
+ {
+ // If we are a native instance method, handle the first user arg
+ // (the unmanaged this parameter) and then handle the hidden
+ // return buffer parameter.
+ assert(numUserArgs >= 1);
+ lvaInitUserArgs(varDscInfo, 0, 1);
+ numUserArgsToSkip++;
+ numUserArgs--;
+
+ lvaInitRetBuffArg(varDscInfo, false);
+ }
+ else
+#endif
+ {
+ /* If we have a hidden return-buffer parameter, that comes here */
+ lvaInitRetBuffArg(varDscInfo, true);
+ }
//======================================================================
@@ -365,7 +386,7 @@ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo)
//-------------------------------------------------------------------------
// Now walk the function signature for the explicit user arguments
//-------------------------------------------------------------------------
- lvaInitUserArgs(varDscInfo);
+ lvaInitUserArgs(varDscInfo, numUserArgsToSkip, numUserArgs);
#if !USER_ARGS_COME_LAST
//@GENERICS: final instantiation-info argument for shared generic methods
@@ -481,7 +502,7 @@ void Compiler::lvaInitThisPtr(InitVarDscInfo* varDscInfo)
}
/*****************************************************************************/
-void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo)
+void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg)
{
LclVarDsc* varDsc = varDscInfo->varDsc;
bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(info.compMethodInfo);
@@ -496,7 +517,7 @@ void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo)
varDsc->lvIsParam = 1;
varDsc->lvIsRegArg = 1;
- if (hasFixedRetBuffReg())
+ if (useFixedRetBufReg && hasFixedRetBuffReg())
{
varDsc->SetArgReg(theFixedRetBuffReg());
}
@@ -555,8 +576,16 @@ void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo)
}
}
-/*****************************************************************************/
-void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
+//-----------------------------------------------------------------------------
+// lvaInitUserArgs:
+// Initialize local var descriptions for incoming user arguments
+//
+// Arguments:
+// varDscInfo - the local var descriptions
+// skipArgs - the number of user args to skip processing.
+// takeArgs - the number of user args to process (after skipping skipArgs number of args)
+//
+void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs)
{
//-------------------------------------------------------------------------
// Walk the function signature for the explicit arguments
@@ -574,11 +603,26 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
const unsigned argSigLen = info.compMethodInfo->args.numArgs;
+ // We will process at most takeArgs arguments from the signature after skipping skipArgs arguments
+ const int64_t numUserArgs = min(takeArgs, (argSigLen - (int64_t)skipArgs));
+
+ // If there are no user args or less than skipArgs args, return here since there's no work to do.
+ if (numUserArgs <= 0)
+ {
+ return;
+ }
+
#ifdef TARGET_ARM
regMaskTP doubleAlignMask = RBM_NONE;
#endif // TARGET_ARM
- for (unsigned i = 0; i < argSigLen;
+ // Skip skipArgs arguments from the signature.
+ for (unsigned i = 0; i < skipArgs; i++, argLst = info.compCompHnd->getArgNext(argLst))
+ {
+ ;
+ }
+
+ for (unsigned i = 0; i < numUserArgs;
i++, varDscInfo->varNum++, varDscInfo->varDsc++, argLst = info.compCompHnd->getArgNext(argLst))
{
LclVarDsc* varDsc = varDscInfo->varDsc;
@@ -805,6 +849,12 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo)
canPassArgInRegisters = structDesc.passedInRegisters;
}
else
+#elif defined(TARGET_X86)
+ if (varTypeIsStruct(argType) && isTrivialPointerSizedStruct(typeHnd))
+ {
+ canPassArgInRegisters = varDscInfo->canEnreg(TYP_I_IMPL, cSlotsToEnregister);
+ }
+ else
#endif // defined(UNIX_AMD64_ABI)
{
canPassArgInRegisters = varDscInfo->canEnreg(argType, cSlotsToEnregister);
@@ -2282,8 +2332,6 @@ void Compiler::StructPromotionHelper::PromoteStructVar(unsigned lclNum)
#endif
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM)
-
// Do we have a parameter that can be enregistered?
//
if (varDsc->lvIsRegArg)
@@ -2322,7 +2370,6 @@ void Compiler::StructPromotionHelper::PromoteStructVar(unsigned lclNum)
fieldVarDsc->SetArgReg(parentArgReg);
}
}
-#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM)
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(pFieldInfo->fldType))
@@ -5052,12 +5099,18 @@ void Compiler::lvaFixVirtualFrameOffsets()
// Is this a non-param promoted struct field?
// if so then set doAssignStkOffs to false.
//
- if (varDsc->lvIsStructField && !varDsc->lvIsParam)
+ if (varDsc->lvIsStructField)
{
LclVarDsc* parentvarDsc = &lvaTable[varDsc->lvParentLcl];
lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc);
- if (promotionType == PROMOTION_TYPE_DEPENDENT)
+#if defined(TARGET_X86)
+ // On x86, we set the stack offset for a promoted field
+ // to match a struct parameter in lvAssignFrameOffsetsToPromotedStructs.
+ if ((!varDsc->lvIsParam || parentvarDsc->lvIsParam) && promotionType == PROMOTION_TYPE_DEPENDENT)
+#else
+ if (!varDsc->lvIsParam && promotionType == PROMOTION_TYPE_DEPENDENT)
+#endif
{
doAssignStkOffs = false; // Assigned later in lvaAssignFrameOffsetsToPromotedStructs()
}
@@ -5255,6 +5308,23 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
lclNum++;
}
+ unsigned userArgsToSkip = 0;
+#if defined(TARGET_WINDOWS) && !defined(TARGET_ARM)
+ // In the native instance method calling convention on Windows,
+ // the this parameter comes before the hidden return buffer parameter.
+ // So, we want to process the native "this" parameter before we process
+ // the native return buffer parameter.
+ if (callConvIsInstanceMethodCallConv(compMethodInfoGetEntrypointCallConv(info.compMethodInfo)))
+ {
+ noway_assert(lvaTable[lclNum].lvIsRegArg);
+#ifndef TARGET_X86
+ argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs);
+#endif // TARGET_X86
+ lclNum++;
+ userArgsToSkip++;
+ }
+#endif
+
/* if we have a hidden buffer parameter, that comes here */
if (info.compRetBuffArg != BAD_VAR_NUM)
@@ -5288,6 +5358,13 @@ void Compiler::lvaAssignVirtualFrameOffsetsToArgs()
CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args;
unsigned argSigLen = info.compMethodInfo->args.numArgs;
+ // Skip any user args that we've already processed.
+ assert(userArgsToSkip <= argSigLen);
+ argSigLen -= userArgsToSkip;
+ for (unsigned i = 0; i < userArgsToSkip; i++, argLst = info.compCompHnd->getArgNext(argLst))
+ {
+ ;
+ }
#ifdef TARGET_ARM
//
@@ -6862,17 +6939,16 @@ void Compiler::lvaAssignFrameOffsetsToPromotedStructs()
// outgoing args space. Assign the dependently promoted fields properly.
//
if (varDsc->lvIsStructField
-#ifndef UNIX_AMD64_ABI
-#if !defined(TARGET_ARM)
+#if !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARM) && !defined(TARGET_X86)
// ARM: lo/hi parts of a promoted long arg need to be updated.
// For System V platforms there is no outgoing args space.
- // A register passed struct arg is homed on the stack in a separate local var.
+
+ // For System V and x86, a register passed struct arg is homed on the stack in a separate local var.
// The offset of these structs is already calculated in lvaAssignVirtualFrameOffsetToArg methos.
// Make sure the code below is not executed for these structs and the offset is not changed.
&& !varDsc->lvIsParam
-#endif // !defined(TARGET_ARM)
-#endif // !UNIX_AMD64_ABI
+#endif // !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARM) && !defined(TARGET_X86)
)
{
LclVarDsc* parentvarDsc = &lvaTable[varDsc->lvParentLcl];
diff --git a/src/coreclr/src/jit/lower.cpp b/src/coreclr/src/jit/lower.cpp
index 96a14f9428956..176eac0d15a99 100644
--- a/src/coreclr/src/jit/lower.cpp
+++ b/src/coreclr/src/jit/lower.cpp
@@ -3016,7 +3016,9 @@ void Lowering::LowerRet(GenTreeUnOp* ret)
ReturnTypeDesc retTypeDesc;
LclVarDsc* varDsc = nullptr;
varDsc = comp->lvaGetDesc(retVal->AsLclVar()->GetLclNum());
- retTypeDesc.InitializeStructReturnType(comp, varDsc->GetStructHnd());
+ retTypeDesc.InitializeStructReturnType(comp, varDsc->GetStructHnd(),
+ comp->compMethodInfoGetEntrypointCallConv(
+ comp->info.compMethodInfo));
if (retTypeDesc.GetReturnRegCount() > 1)
{
CheckMultiRegLclVar(retVal->AsLclVar(), &retTypeDesc);
@@ -3477,7 +3479,7 @@ void Lowering::LowerCallStruct(GenTreeCall* call)
assert(!comp->compDoOldStructRetyping());
CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
Compiler::structPassingKind howToReturnStruct;
- var_types returnType = comp->getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
+ var_types returnType = comp->getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
assert(returnType != TYP_STRUCT && returnType != TYP_UNKNOWN);
var_types origType = call->TypeGet();
call->gtType = genActualType(returnType);
diff --git a/src/coreclr/src/jit/lsrabuild.cpp b/src/coreclr/src/jit/lsrabuild.cpp
index 49e486b70f81e..c231a6e72c06e 100644
--- a/src/coreclr/src/jit/lsrabuild.cpp
+++ b/src/coreclr/src/jit/lsrabuild.cpp
@@ -3481,7 +3481,9 @@ int LinearScan::BuildReturn(GenTree* tree)
assert(compiler->lvaEnregMultiRegVars);
LclVarDsc* varDsc = compiler->lvaGetDesc(op1->AsLclVar()->GetLclNum());
ReturnTypeDesc retTypeDesc;
- retTypeDesc.InitializeStructReturnType(compiler, varDsc->GetStructHnd());
+ retTypeDesc.InitializeStructReturnType(compiler, varDsc->GetStructHnd(),
+ compiler->compMethodInfoGetEntrypointCallConv(
+ compiler->info.compMethodInfo));
pRetTypeDesc = &retTypeDesc;
assert(compiler->lvaGetDesc(op1->AsLclVar()->GetLclNum())->lvFieldCnt ==
retTypeDesc.GetReturnRegCount());
diff --git a/src/coreclr/src/jit/morph.cpp b/src/coreclr/src/jit/morph.cpp
index 71f905a79cc44..8780d46d1e7c8 100644
--- a/src/coreclr/src/jit/morph.cpp
+++ b/src/coreclr/src/jit/morph.cpp
@@ -2662,9 +2662,14 @@ void Compiler::fgInitArgInfo(GenTreeCall* call)
// If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling
// convention for x86/SSE.
- // If we have a Fixed Return Buffer argument register then we setup a non-standard argument for it
+ // If we have a Fixed Return Buffer argument register then we setup a non-standard argument for it.
//
- if (hasFixedRetBuffReg() && call->HasRetBufArg())
+ // We don't use the fixed return buffer argument if we have the special unmanaged instance call convention.
+ // That convention doesn't use the fixed return buffer register.
+ //
+ CLANG_FORMAT_COMMENT_ANCHOR;
+
+ if (call->HasFixedRetBufArg())
{
args = call->gtCallArgs;
assert(args != nullptr);
@@ -2830,10 +2835,11 @@ void Compiler::fgInitArgInfo(GenTreeCall* call)
{
maxRegArgs = 0;
}
-
+#ifdef UNIX_X86_ABI
// Add in the ret buff arg
if (callHasRetBuffArg)
maxRegArgs++;
+#endif
}
#endif // TARGET_X86
@@ -3218,6 +3224,8 @@ void Compiler::fgInitArgInfo(GenTreeCall* call)
if (isRegParamType(genActualType(argx->TypeGet()))
#ifdef UNIX_AMD64_ABI
&& (!isStructArg || structDesc.passedInRegisters)
+#elif defined(TARGET_X86)
+ || (isStructArg && isTrivialPointerSizedStruct(objClass))
#endif
)
{
@@ -3746,8 +3754,6 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
}
else // This is passed by value.
{
-
-#ifndef TARGET_X86
// Check to see if we can transform this into load of a primitive type.
// 'size' must be the number of pointer sized items
DEBUG_ARG_SLOTS_ASSERT(size == roundupSize / TARGET_POINTER_SIZE);
@@ -3939,7 +3945,6 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
assert(varTypeIsEnregisterable(argObj->TypeGet()) ||
((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType)));
}
-#endif // !TARGET_X86
#ifndef UNIX_AMD64_ABI
// We still have a struct unless we converted the GT_OBJ into a GT_IND above...
@@ -5146,7 +5151,7 @@ void Compiler::fgFixupStructReturn(GenTree* callNode)
}
else
{
- returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
+ returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
}
if (howToReturnStruct == SPK_ByReference)
@@ -6810,7 +6815,9 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason)
{
var_types retType = (compDoOldStructRetyping() ? info.compRetNativeType : info.compRetType);
assert(impTailCallRetTypeCompatible(retType, info.compMethodInfo->args.retTypeClass,
- (var_types)callee->gtReturnType, callee->gtRetClsHnd));
+ compMethodInfoGetEntrypointCallConv(info.compMethodInfo),
+ (var_types)callee->gtReturnType, callee->gtRetClsHnd,
+ callee->GetUnmanagedCallConv()));
}
#endif
@@ -7733,7 +7740,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
{
CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
Compiler::structPassingKind howToReturnStruct;
- callType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
+ callType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
assert((howToReturnStruct != SPK_Unknown) && (howToReturnStruct != SPK_ByReference));
if (howToReturnStruct == SPK_ByValue)
{
@@ -8096,7 +8103,7 @@ GenTree* Compiler::fgCreateCallDispatcherAndGetResult(GenTreeCall* orig
if (varTypeIsStruct(origCall->gtType))
{
- retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd);
+ retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd, origCall->GetUnmanagedCallConv());
}
}
else
diff --git a/src/coreclr/src/jit/target.h b/src/coreclr/src/jit/target.h
index 41d1afea12415..3e133f0eba32e 100644
--- a/src/coreclr/src/jit/target.h
+++ b/src/coreclr/src/jit/target.h
@@ -2026,6 +2026,24 @@ typedef target_ssize_t cnsval_ssize_t;
typedef target_size_t cnsval_size_t;
#endif
+// Represents the calling conventions supported with the extensible calling convention syntax
+// as well as the original metadata-encoded calling conventions.
+enum class CorInfoCallConvExtension
+{
+ Managed,
+ C,
+ Stdcall,
+ Thiscall,
+ Fastcall
+ // New calling conventions supported with the extensible calling convention encoding go here.
+};
+
+// Determines whether or not this calling convention is an instance method calling convention.
+inline bool callConvIsInstanceMethodCallConv(CorInfoCallConvExtension callConv)
+{
+ return callConv == CorInfoCallConvExtension::Thiscall;
+}
+
/*****************************************************************************/
#endif // TARGET_H_
/*****************************************************************************/
diff --git a/src/coreclr/src/tools/Common/JitInterface/CorInfoImpl.cs b/src/coreclr/src/tools/Common/JitInterface/CorInfoImpl.cs
index 99ced50ec73d3..b73d9d13f15bf 100644
--- a/src/coreclr/src/tools/Common/JitInterface/CorInfoImpl.cs
+++ b/src/coreclr/src/tools/Common/JitInterface/CorInfoImpl.cs
@@ -683,21 +683,6 @@ private CorInfoType asCorInfoType(TypeDesc type, out TypeDesc typeIfNotPrimitive
{
throw new RequiresRuntimeJitException(type);
}
-#endif
-#if READYTORUN
- if (elementSize.AsInt == 4)
- {
- var normalizedCategory = _compilation.TypeSystemContext.NormalizedCategoryFor4ByteStructOnX86(type);
- if (normalizedCategory != type.Category)
- {
- if (NeedsTypeLayoutCheck(type))
- {
- ISymbolNode node = _compilation.SymbolNodeFactory.CheckTypeLayout(type);
- _methodCodeNode.Fixups.Add(node);
- }
- return (CorInfoType)normalizedCategory;
- }
- }
#endif
}
return CorInfoType.CORINFO_TYPE_VALUECLASS;
diff --git a/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/ArgIterator.cs b/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/ArgIterator.cs
index f5e324e04b6e8..de5cb85bfad42 100644
--- a/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/ArgIterator.cs
+++ b/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/ArgIterator.cs
@@ -120,17 +120,7 @@ public CorElementType GetCorElementType()
return CorElementType.ELEMENT_TYPE_BYREF;
}
- Internal.TypeSystem.TypeFlags category;
- switch (_type.Context.Target.Architecture)
- {
- case TargetArchitecture.X86:
- category = ((CompilerTypeSystemContext)_type.Context).NormalizedCategoryFor4ByteStructOnX86(_type.UnderlyingType);
- break;
-
- default:
- category = _type.UnderlyingType.Category;
- break;
- }
+ Internal.TypeSystem.TypeFlags category = _type.UnderlyingType.Category;
// We use the UnderlyingType to handle Enums properly
return category switch
{
diff --git a/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/TransitionBlock.cs b/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/TransitionBlock.cs
index fac7217077b82..bd89459beeca9 100644
--- a/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/TransitionBlock.cs
+++ b/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/TransitionBlock.cs
@@ -198,6 +198,14 @@ public bool IsArgumentInRegister(ref int pNumRegistersUsed, CorElementType typ,
}
break;
}
+#elif READYTORUN
+ case CorElementType.ELEMENT_TYPE_VALUETYPE:
+ if (IsTrivialPointerSizedStruct(thArgType))
+ {
+ pNumRegistersUsed++;
+ return true;
+ }
+ break;
#endif
}
}
@@ -205,6 +213,46 @@ public bool IsArgumentInRegister(ref int pNumRegistersUsed, CorElementType typ,
return false;
}
+ private bool IsTrivialPointerSizedStruct(TypeHandle thArgType)
+ {
+ Debug.Assert(IsX86);
+ Debug.Assert(thArgType.IsValueType());
+ if (thArgType.GetSize() != 4)
+ {
+ // Type does not have trivial layout or has the wrong size.
+ return false;
+ }
+ TypeDesc typeOfEmbeddedField = null;
+ foreach (var field in thArgType.GetRuntimeTypeHandle().GetFields())
+ {
+ if (field.IsStatic)
+ continue;
+ if (typeOfEmbeddedField != null)
+ {
+ // Type has more than one instance field
+ return false;
+ }
+
+ typeOfEmbeddedField = field.FieldType;
+ }
+
+ if ((typeOfEmbeddedField != null) && ((typeOfEmbeddedField.IsValueType) || (typeOfEmbeddedField.IsPointer)))
+ {
+ switch (typeOfEmbeddedField.UnderlyingType.Category)
+ {
+ case TypeFlags.IntPtr:
+ case TypeFlags.UIntPtr:
+ case TypeFlags.Int32:
+ case TypeFlags.UInt32:
+ case TypeFlags.Pointer:
+ return true;
+ case TypeFlags.ValueType:
+ return IsTrivialPointerSizedStruct(new TypeHandle(typeOfEmbeddedField));
+ }
+ }
+ return false;
+ }
+
///
/// This overload should only be used in AMD64-specific code only.
///
diff --git a/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCompilerContext.cs b/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCompilerContext.cs
index 86b179a0264aa..5b8c09852f7ec 100644
--- a/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCompilerContext.cs
+++ b/src/coreclr/src/tools/aot/ILCompiler.ReadyToRun/Compiler/ReadyToRunCompilerContext.cs
@@ -17,70 +17,6 @@ public CompilerTypeSystemContext(TargetDetails details, SharedGenericsMode gener
{
_genericsMode = genericsMode;
}
-
- private object _normalizedLock = new object();
- private HashSet _nonNormalizedTypes = new HashSet();
- private Dictionary _normalizedTypeCategory = new Dictionary();
-
- public TypeFlags NormalizedCategoryFor4ByteStructOnX86(TypeDesc type)
- {
- // Fast early out for cases which don't need normalization
- var typeCategory = type.Category;
-
- if (((typeCategory != TypeFlags.ValueType) && (typeCategory != TypeFlags.Enum)) || (type.GetElementSize().AsInt != 4))
- {
- return typeCategory;
- }
-
- lock(_normalizedLock)
- {
- if (_nonNormalizedTypes.Contains(type))
- return typeCategory;
-
- if (_normalizedTypeCategory.TryGetValue(type, out TypeFlags category))
- return category;
-
- if (Target.Architecture != TargetArchitecture.X86)
- {
- throw new NotSupportedException();
- }
-
- TypeDesc typeOfEmbeddedField = null;
- foreach (var field in type.GetFields())
- {
- if (field.IsStatic)
- continue;
- if (typeOfEmbeddedField != null)
- {
- // Type has more than one instance field
- _nonNormalizedTypes.Add(type);
- return typeCategory;
- }
-
- typeOfEmbeddedField = field.FieldType;
- }
-
- if ((typeOfEmbeddedField != null) && ((typeOfEmbeddedField.IsValueType) || (typeOfEmbeddedField.IsPointer)))
- {
- TypeFlags singleElementFieldType = NormalizedCategoryFor4ByteStructOnX86(typeOfEmbeddedField);
- if (singleElementFieldType == TypeFlags.Pointer)
- singleElementFieldType = TypeFlags.UIntPtr;
-
- switch (singleElementFieldType)
- {
- case TypeFlags.IntPtr:
- case TypeFlags.UIntPtr:
- case TypeFlags.Int32:
- case TypeFlags.UInt32:
- _normalizedTypeCategory.Add(type, singleElementFieldType);
- return singleElementFieldType;
- }
- }
-
- _nonNormalizedTypes.Add(type);
- return typeCategory;
- }
- }
}
public partial class ReadyToRunCompilerContext : CompilerTypeSystemContext
diff --git a/src/coreclr/src/vm/callingconvention.h b/src/coreclr/src/vm/callingconvention.h
index 51b46e5efaf9e..129970032a0a4 100644
--- a/src/coreclr/src/vm/callingconvention.h
+++ b/src/coreclr/src/vm/callingconvention.h
@@ -387,11 +387,56 @@ class ArgIteratorTemplate : public ARGITERATOR_BASE
//
// typ: the signature type
//=========================================================================
- static BOOL IsArgumentInRegister(int * pNumRegistersUsed, CorElementType typ)
+ static BOOL IsArgumentInRegister(int * pNumRegistersUsed, CorElementType typ, TypeHandle hnd)
{
LIMITED_METHOD_CONTRACT;
- if ( (*pNumRegistersUsed) < NUM_ARGUMENT_REGISTERS) {
- if (gElementTypeInfo[typ].m_enregister) {
+ if ( (*pNumRegistersUsed) < NUM_ARGUMENT_REGISTERS)
+ {
+ if (typ == ELEMENT_TYPE_VALUETYPE)
+ {
+ // The JIT enables passing trivial pointer sized structs in registers.
+ MethodTable* pMT = hnd.GetMethodTable();
+
+ while (typ == ELEMENT_TYPE_VALUETYPE &&
+ pMT->GetNumInstanceFields() == 1 && (!pMT->HasLayout() ||
+ pMT->GetNumInstanceFieldBytes() == 4
+ )) // Don't do the optimization if we're getting specified anything but the trivial layout.
+ {
+ FieldDesc * pFD = pMT->GetApproxFieldDescListRaw();
+ CorElementType type = pFD->GetFieldType();
+
+ bool exitLoop = false;
+ switch (type)
+ {
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ //@todo: Is it more apropos to call LookupApproxFieldTypeHandle() here?
+ TypeHandle fldHnd = pFD->GetApproxFieldTypeHandleThrowing();
+ CONSISTENCY_CHECK(!fldHnd.IsNull());
+ pMT = fldHnd.GetMethodTable();
+ }
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ {
+ typ = type;
+ break;
+ }
+ default:
+ exitLoop = true;
+ break;
+ }
+
+ if (exitLoop)
+ {
+ break;
+ }
+ }
+ }
+ if (gElementTypeInfo[typ].m_enregister)
+ {
(*pNumRegistersUsed)++;
return(TRUE);
}
@@ -1050,7 +1095,7 @@ int ArgIteratorTemplate::GetNextOffset()
return argOfs;
}
#endif
- if (IsArgumentInRegister(&m_numRegistersUsed, argType))
+ if (IsArgumentInRegister(&m_numRegistersUsed, argType, thValueType))
{
return TransitionBlock::GetOffsetOfArgumentRegisters() + (NUM_ARGUMENT_REGISTERS - m_numRegistersUsed) * sizeof(void *);
}
@@ -1548,7 +1593,7 @@ void ArgIteratorTemplate::ComputeReturnFlags()
flags |= RETURN_HAS_RET_BUFFER;
break;
}
-#endif
+#endif // defined(TARGET_X86) || defined(TARGET_AMD64)
if (size <= ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE)
break;
@@ -1627,7 +1672,7 @@ void ArgIteratorTemplate::ForceSigWalk()
TypeHandle thValueType;
CorElementType type = this->GetNextArgumentType(i, &thValueType);
- if (!IsArgumentInRegister(&numRegistersUsed, type))
+ if (!IsArgumentInRegister(&numRegistersUsed, type, thValueType))
{
int structSize = MetaSig::GetElemSize(type, thValueType);
@@ -1832,6 +1877,17 @@ class ArgIterator : public ArgIteratorTemplate
return FALSE;
#endif
}
+
+ BOOL HasValueTypeReturn()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ TypeHandle thValueType;
+ CorElementType type = m_pSig->GetReturnTypeNormalized(&thValueType);
+ // Enums are normalized to their underlying type when passing to and from functions.
+ // This occurs in both managed and native calling conventions.
+ return type == ELEMENT_TYPE_VALUETYPE && !thValueType.IsEnum();
+ }
};
// Conventience helper
diff --git a/src/coreclr/src/vm/class.cpp b/src/coreclr/src/vm/class.cpp
index a853e7a2d8957..bb7725d63545d 100644
--- a/src/coreclr/src/vm/class.cpp
+++ b/src/coreclr/src/vm/class.cpp
@@ -1335,73 +1335,6 @@ void ClassLoader::PropagateCovariantReturnMethodImplSlots(MethodTable* pMT)
}
-//*******************************************************************************
-// This is the routine that computes the internal type of a given type. It normalizes
-// structs that have only one field (of int/ptr sized values), to be that underlying type.
-//
-// * see code:MethodTable#KindsOfElementTypes for more
-// * It get used by code:TypeHandle::GetInternalCorElementType
-CorElementType EEClass::ComputeInternalCorElementTypeForValueType(MethodTable * pMT)
-{
- CONTRACTL {
- THROWS;
- GC_TRIGGERS;
- } CONTRACTL_END;
-
- if (pMT->GetNumInstanceFields() == 1 && (!pMT->HasLayout()
- || pMT->GetNumInstanceFieldBytes() == 4
-#ifdef TARGET_64BIT
- || pMT->GetNumInstanceFieldBytes() == 8
-#endif // TARGET_64BIT
- )) // Don't do the optimization if we're getting specified anything but the trivial layout.
- {
- FieldDesc * pFD = pMT->GetApproxFieldDescListRaw();
- CorElementType type = pFD->GetFieldType();
-
- if (type == ELEMENT_TYPE_VALUETYPE)
- {
- //@todo: Is it more apropos to call LookupApproxFieldTypeHandle() here?
- TypeHandle fldHnd = pFD->GetApproxFieldTypeHandleThrowing();
- CONSISTENCY_CHECK(!fldHnd.IsNull());
-
- type = fldHnd.GetInternalCorElementType();
- }
-
- switch (type)
- {
- // "DDB 20951: vc8 unmanaged pointer bug."
- // If ELEMENT_TYPE_PTR were returned, Compiler::verMakeTypeInfo would have problem
- // creating a TI_STRUCT out of CORINFO_TYPE_PTR.
- // As a result, the importer would not be able to realize that the thing on the stack
- // is an instance of a valuetype (that contains one single "void*" field), rather than
- // a pointer to a valuetype.
- // Returning ELEMENT_TYPE_U allows verMakeTypeInfo to go down the normal code path
- // for creating a TI_STRUCT.
- case ELEMENT_TYPE_PTR:
- type = ELEMENT_TYPE_U;
- FALLTHROUGH;
-
- case ELEMENT_TYPE_I:
- case ELEMENT_TYPE_U:
- case ELEMENT_TYPE_I4:
- case ELEMENT_TYPE_U4:
-#ifdef TARGET_64BIT
- case ELEMENT_TYPE_I8:
- case ELEMENT_TYPE_U8:
-#endif // TARGET_64BIT
-
- {
- return type;
- }
-
- default:
- break;
- }
- }
-
- return ELEMENT_TYPE_VALUETYPE;
-}
-
//*******************************************************************************
//
// Debugger notification
diff --git a/src/coreclr/src/vm/class.h b/src/coreclr/src/vm/class.h
index 179a082199a31..300ed225ebc28 100644
--- a/src/coreclr/src/vm/class.h
+++ b/src/coreclr/src/vm/class.h
@@ -794,8 +794,6 @@ class EEClass // DO NOT CREATE A NEW EEClass USING NEW!
void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, MethodTable *pMT);
#endif
- static CorElementType ComputeInternalCorElementTypeForValueType(MethodTable * pMT);
-
/************************************
* INSTANCE MEMBER VARIABLES
************************************/
diff --git a/src/coreclr/src/vm/classnames.h b/src/coreclr/src/vm/classnames.h
index 59e516d3e3a42..94e9b1025c7a4 100644
--- a/src/coreclr/src/vm/classnames.h
+++ b/src/coreclr/src/vm/classnames.h
@@ -81,6 +81,7 @@
#define g_ReflectionReflectItfName "System.Reflection.IReflect"
#define g_RuntimeArgumentHandleName "RuntimeArgumentHandle"
#define g_RuntimeFieldHandleClassName "System.RuntimeFieldHandle"
+#define g_RuntimeFieldHandleInternalName "RuntimeFieldHandleInternal"
#define g_RuntimeMethodHandleClassName "System.RuntimeMethodHandle"
#define g_RuntimeMethodHandleInternalName "RuntimeMethodHandleInternal"
#define g_RuntimeTypeHandleClassName "System.RuntimeTypeHandle"
diff --git a/src/coreclr/src/vm/clrtocomcall.cpp b/src/coreclr/src/vm/clrtocomcall.cpp
index dc574467724ee..914d28f7c5983 100644
--- a/src/coreclr/src/vm/clrtocomcall.cpp
+++ b/src/coreclr/src/vm/clrtocomcall.cpp
@@ -307,7 +307,7 @@ I4ARRAYREF SetUpWrapperInfo(MethodDesc *pMD)
MarshalInfo Info(msig.GetModule(), msig.GetArgProps(), msig.GetSigTypeContext(), params[iParam],
MarshalInfo::MARSHAL_SCENARIO_COMINTEROP, (CorNativeLinkType)0, (CorNativeLinkFlags)0,
- TRUE, iParam, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, TRUE, pMD, TRUE
+ TRUE, iParam, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, pMD, TRUE
#ifdef _DEBUG
, pMD->m_pszDebugMethodName, pMD->m_pszDebugClassName, iParam
#endif
diff --git a/src/coreclr/src/vm/comtoclrcall.cpp b/src/coreclr/src/vm/comtoclrcall.cpp
index 9e6774f4a9e9f..6a68c2c2009d1 100644
--- a/src/coreclr/src/vm/comtoclrcall.cpp
+++ b/src/coreclr/src/vm/comtoclrcall.cpp
@@ -862,7 +862,7 @@ void ComCallMethodDesc::InitRuntimeNativeInfo(MethodDesc *pStubMD)
UINT cbSize = MetaSig::GetElemSize(type, thValueType);
- if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
+ if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type, thValueType))
{
wSourceSlotEDX = wInputStack / STACK_ELEM_SIZE;
wInputStack += STACK_ELEM_SIZE;
@@ -1022,7 +1022,7 @@ void ComCallMethodDesc::InitNativeInfo()
MarshalInfo info(fsig.GetModule(), fsig.GetArgProps(), fsig.GetSigTypeContext(), pFD->GetMemberDef(), MarshalInfo::MARSHAL_SCENARIO_COMINTEROP,
(CorNativeLinkType)0, (CorNativeLinkFlags)0,
- FALSE, 0, fsig.NumFixedArgs(), BestFit, ThrowOnUnmappableChar, FALSE, TRUE, NULL, FALSE
+ FALSE, 0, fsig.NumFixedArgs(), BestFit, ThrowOnUnmappableChar, FALSE, NULL, FALSE
#ifdef _DEBUG
, szDebugName, szDebugClassName, 0
#endif
@@ -1120,7 +1120,7 @@ void ComCallMethodDesc::InitNativeInfo()
MarshalInfo info(msig.GetModule(), msig.GetArgProps(), msig.GetSigTypeContext(), params[iArg],
MarshalInfo::MARSHAL_SCENARIO_COMINTEROP,
(CorNativeLinkType)0, (CorNativeLinkFlags)0,
- TRUE, iArg, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, TRUE, pMD, FALSE
+ TRUE, iArg, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, pMD, FALSE
#ifdef _DEBUG
, szDebugName, szDebugClassName, iArg
#endif
@@ -1176,7 +1176,7 @@ void ComCallMethodDesc::InitNativeInfo()
MarshalInfo info(msig.GetModule(), msig.GetReturnProps(), msig.GetSigTypeContext(), params[0],
MarshalInfo::MARSHAL_SCENARIO_COMINTEROP,
(CorNativeLinkType)0, (CorNativeLinkFlags)0,
- FALSE, 0, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, TRUE, pMD, FALSE
+ FALSE, 0, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, pMD, FALSE
#ifdef _DEBUG
,szDebugName, szDebugClassName, 0
#endif
diff --git a/src/coreclr/src/vm/dispatchinfo.cpp b/src/coreclr/src/vm/dispatchinfo.cpp
index efa942d24abdb..e3c88987614c6 100644
--- a/src/coreclr/src/vm/dispatchinfo.cpp
+++ b/src/coreclr/src/vm/dispatchinfo.cpp
@@ -902,7 +902,7 @@ void DispatchMemberInfo::SetUpMethodMarshalerInfo(MethodDesc *pMD, BOOL bReturnV
MarshalInfo Info(msig.GetModule(), msig.GetArgProps(), msig.GetSigTypeContext(), paramDef, MarshalInfo::MARSHAL_SCENARIO_COMINTEROP,
(CorNativeLinkType)0, (CorNativeLinkFlags)0,
- TRUE, iParam, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, TRUE, pMD, TRUE
+ TRUE, iParam, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, pMD, TRUE
#ifdef _DEBUG
, pMD->m_pszDebugMethodName, pMD->m_pszDebugClassName, iParam
#endif
@@ -939,7 +939,7 @@ void DispatchMemberInfo::SetUpMethodMarshalerInfo(MethodDesc *pMD, BOOL bReturnV
{
MarshalInfo Info(msig.GetModule(), msig.GetReturnProps(), msig.GetSigTypeContext(), returnParamDef, MarshalInfo::MARSHAL_SCENARIO_COMINTEROP,
(CorNativeLinkType)0, (CorNativeLinkFlags)0,
- FALSE, 0, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, TRUE, pMD, TRUE
+ FALSE, 0, numArgs, BestFit, ThrowOnUnmappableChar, FALSE, pMD, TRUE
#ifdef _DEBUG
, pMD->m_pszDebugMethodName, pMD->m_pszDebugClassName, 0
#endif
diff --git a/src/coreclr/src/vm/dllimport.cpp b/src/coreclr/src/vm/dllimport.cpp
index 259af70a1e62d..f21eacf3e4535 100644
--- a/src/coreclr/src/vm/dllimport.cpp
+++ b/src/coreclr/src/vm/dllimport.cpp
@@ -1356,10 +1356,17 @@ class PInvoke_ILStubState : public ILStubState
{
STANDARD_VM_CONTRACT;
+#if defined(TARGET_X86) && !defined(FEATURE_STUBS_AS_IL)
+ // x86 with non-IL stubs manually handles calling conventions
+ // for reverse P/Invokes with the x86 stub linker.
+ // Don't use the JIT calling convention support on reverse P/Invokes.
if (SF_IsForwardStub(dwStubFlags))
{
m_slIL.SetCallingConvention(unmgdCallConv, SF_IsVarArgStub(dwStubFlags));
}
+#else
+ m_slIL.SetCallingConvention(unmgdCallConv, SF_IsVarArgStub(dwStubFlags));
+#endif
}
private:
@@ -3307,7 +3314,6 @@ static MarshalInfo::MarshalType DoMarshalReturnValue(MetaSig& msig,
CorNativeLinkFlags nlFlags,
UINT argidx, // this is used for reverse pinvoke hresult swapping
StubState* pss,
- BOOL isInstanceMethod,
int argOffset,
DWORD dwStubFlags,
MethodDesc *pMD,
@@ -3357,7 +3363,6 @@ static MarshalInfo::MarshalType DoMarshalReturnValue(MetaSig& msig,
SF_IsBestFit(dwStubFlags),
SF_IsThrowOnUnmappableChar(dwStubFlags),
TRUE,
- isInstanceMethod,
pMD,
TRUE
DEBUG_ARG(pDebugName)
@@ -3543,52 +3548,6 @@ static void CreateNDirectStubWorker(StubState* pss,
UINT nativeStackSize = (SF_IsCOMStub(dwStubFlags) ? TARGET_POINTER_SIZE : 0);
bool fStubNeedsCOM = SF_IsCOMStub(dwStubFlags);
- // Normally we would like this to be false so that we use the correct signature
- // in the IL_STUB, (i.e if it returns a value class then the signature will use that)
- // When this bool is true we change the return type to void and explicitly add a
- // return buffer argument as the first argument so as to match the native calling convention correctly.
- BOOL fMarshalReturnValueFirst = FALSE;
-
- BOOL fReverseWithReturnBufferArg = FALSE;
- // Only consider ThisCall methods to be instance methods.
- // Techinically COM methods are also instance methods, but we don't want to change the behavior of the built-in
- // COM abi work because there are many users that rely on the current behavior (for example WPF).
- bool isInstanceMethod = fThisCall;
-
- // We can only change fMarshalReturnValueFirst to true when we are NOT doing HRESULT-swapping!
- // When we are HRESULT-swapping, the managed return type is actually the type of the last parameter and not the return type.
- // The native return type of an HRESULT-swapped function is an HRESULT, which never uses a return-buffer argument.
- // Since the managed return type is actually the last parameter, we need to marshal it after the last parameter in the managed signature
- // to make sure we match the native signature correctly (when marshalling parameters, we add them to the native stub signature).
- if (!SF_IsHRESULTSwapping(dwStubFlags))
- {
- // We cannot just use pSig.GetReturnType() here since it will return ELEMENT_TYPE_VALUETYPE for enums.
- bool isReturnTypeValueType = msig.GetRetTypeHandleThrowing().GetVerifierCorElementType() == ELEMENT_TYPE_VALUETYPE;
-#if defined(TARGET_X86) || defined(TARGET_ARM)
- // JIT32 has problems in generating code for pinvoke ILStubs which do a return in return buffer.
- // Therefore instead we change the signature of calli to return void and make the return buffer as first
- // argument. This matches the ABI i.e. return buffer is passed as first arg. So native target will get the
- // return buffer in correct register.
- // The return structure secret arg comes first, however byvalue return is processed at
- // the end because it could be the HRESULT-swapped argument which always comes last.
-
-#ifdef UNIX_X86_ABI
- // For functions with value type class, managed and unmanaged calling convention differ
- fMarshalReturnValueFirst = HasRetBuffArgUnmanagedFixup(&msig);
-#elif defined(TARGET_ARM)
- fMarshalReturnValueFirst = (isInstanceMethod && isReturnTypeValueType) && HasRetBuffArg(&msig);
-#else
- // On Windows-X86, the native signature might need a return buffer when the managed doesn't (specifically when the native signature is a member function).
- fMarshalReturnValueFirst = (!SF_IsReverseStub(dwStubFlags) && HasRetBuffArg(&msig)) || (isInstanceMethod && isReturnTypeValueType);
-#endif // UNIX_X86_ABI
-#elif defined(TARGET_AMD64) || defined (TARGET_ARM64)
- fMarshalReturnValueFirst = isInstanceMethod && isReturnTypeValueType;
-#endif // defined(TARGET_X86) || defined(TARGET_ARM)
-#ifdef _WIN32
- fReverseWithReturnBufferArg = fMarshalReturnValueFirst && SF_IsReverseStub(dwStubFlags);
-#endif
- }
-
//
// Marshal the arguments
//
@@ -3631,7 +3590,6 @@ static void CreateNDirectStubWorker(StubState* pss,
SF_IsBestFit(dwStubFlags),
SF_IsThrowOnUnmappableChar(dwStubFlags),
TRUE,
- isInstanceMethod ? TRUE : FALSE,
pMD,
TRUE
DEBUG_ARG(pSigDesc->m_pDebugName)
@@ -3643,76 +3601,6 @@ static void CreateNDirectStubWorker(StubState* pss,
int argidx = 1;
int nativeArgIndex = 0;
- // If we are generating a return buffer on a member function that is marked as thiscall (as opposed to being a COM method)
- // then we need to marshal the this parameter first and the return buffer second.
- // We don't need to do this for COM methods because the "this" is implied as argument 0 by the signature of the stub.
- if (fThisCall && fMarshalReturnValueFirst)
- {
- msig.NextArg();
-
- MarshalInfo &info = pParamMarshalInfo[argidx - 1];
- pss->MarshalArgument(&info, argOffset, GetStackOffsetFromStackSize(nativeStackSize, fThisCall));
- nativeStackSize += info.GetNativeArgSize();
-
- fStubNeedsCOM |= info.MarshalerRequiresCOM();
-
- // make sure that the first parameter is enregisterable
- if (info.GetNativeArgSize() > TARGET_POINTER_SIZE)
- COMPlusThrow(kMarshalDirectiveException, IDS_EE_NDIRECT_BADNATL_THISCALL);
-
- argidx++;
- }
-
- // If we're doing a native->managed call and are generating a return buffer,
- // we need to move all of the actual arguments over one and have the return value be the first argument (after the this pointer if applicable).
- if (fReverseWithReturnBufferArg)
- {
- ++argOffset;
- }
-
- if (fMarshalReturnValueFirst)
- {
- marshalType = DoMarshalReturnValue(msig,
- pParamTokenArray,
- nlType,
- nlFlags,
- 1, // Indicating as the first argument
- pss,
- isInstanceMethod,
- argOffset,
- dwStubFlags,
- pMD,
- nativeStackSize,
- fStubNeedsCOM,
- 0
- DEBUG_ARG(pSigDesc->m_pDebugName)
- DEBUG_ARG(pSigDesc->m_pDebugClassName)
- );
-
- if (marshalType == MarshalInfo::MARSHAL_TYPE_DATE ||
- marshalType == MarshalInfo::MARSHAL_TYPE_CURRENCY ||
- marshalType == MarshalInfo::MARSHAL_TYPE_ARRAYWITHOFFSET ||
- marshalType == MarshalInfo::MARSHAL_TYPE_HANDLEREF ||
- marshalType == MarshalInfo::MARSHAL_TYPE_ARGITERATOR
-#ifdef FEATURE_COMINTEROP
- || marshalType == MarshalInfo::MARSHAL_TYPE_OLECOLOR
-#endif // FEATURE_COMINTEROP
- )
- {
- // These are special non-blittable types returned by-ref in managed,
- // but marshaled as primitive values returned by-value in unmanaged.
- }
- else
- {
- // This is an ordinary value type - see if it is returned by-ref.
- MethodTable *pRetMT = msig.GetRetTypeHandleThrowing().AsMethodTable();
- if (IsUnmanagedValueTypeReturnedByRef(pRetMT->GetNativeSize()))
- {
- nativeStackSize += TARGET_POINTER_SIZE;
- }
- }
- }
-
while (argidx <= numArgs)
{
//
@@ -3758,40 +3646,64 @@ static void CreateNDirectStubWorker(StubState* pss,
argOffset++;
}
- if (!fMarshalReturnValueFirst)
- {
- // This could be a HRESULT-swapped argument so it must come last.
- marshalType = DoMarshalReturnValue(msig,
- pParamTokenArray,
- nlType,
- nlFlags,
- argidx,
- pss,
- isInstanceMethod,
- argOffset,
- dwStubFlags,
- pMD,
- nativeStackSize,
- fStubNeedsCOM,
- nativeArgIndex
- DEBUG_ARG(pSigDesc->m_pDebugName)
- DEBUG_ARG(pSigDesc->m_pDebugClassName)
- );
-
- // If the return value is a SafeHandle or CriticalHandle, mark the stub method.
- // Interop methods that use this stub will have an implicit reliability contract
- // (see code:TAStackCrawlCallBack).
- if (!SF_IsHRESULTSwapping(dwStubFlags))
- {
- if (marshalType == MarshalInfo::MARSHAL_TYPE_SAFEHANDLE ||
- marshalType == MarshalInfo::MARSHAL_TYPE_CRITICALHANDLE)
- {
- if (pMD->IsDynamicMethod())
- pMD->AsDynamicMethodDesc()->SetUnbreakable(true);
- }
+ marshalType = DoMarshalReturnValue(msig,
+ pParamTokenArray,
+ nlType,
+ nlFlags,
+ argidx,
+ pss,
+ argOffset,
+ dwStubFlags,
+ pMD,
+ nativeStackSize,
+ fStubNeedsCOM,
+ nativeArgIndex
+ DEBUG_ARG(pSigDesc->m_pDebugName)
+ DEBUG_ARG(pSigDesc->m_pDebugClassName)
+ );
+
+ // If the return value is a SafeHandle or CriticalHandle, mark the stub method.
+ // Interop methods that use this stub will have an implicit reliability contract
+ // (see code:TAStackCrawlCallBack).
+ if (!SF_IsHRESULTSwapping(dwStubFlags))
+ {
+ if (marshalType == MarshalInfo::MARSHAL_TYPE_SAFEHANDLE ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_CRITICALHANDLE)
+ {
+ if (pMD->IsDynamicMethod())
+ pMD->AsDynamicMethodDesc()->SetUnbreakable(true);
}
}
+ if (marshalType == MarshalInfo::MARSHAL_TYPE_DATE ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_CURRENCY ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_ARRAYWITHOFFSET ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_HANDLEREF ||
+ marshalType == MarshalInfo::MARSHAL_TYPE_ARGITERATOR
+#ifdef FEATURE_COMINTEROP
+ || marshalType == MarshalInfo::MARSHAL_TYPE_OLECOLOR
+#endif // FEATURE_COMINTEROP
+ )
+ {
+ // These are special non-blittable types returned by-ref in managed,
+ // but marshaled as primitive values returned by-value in unmanaged.
+ }
+ else
+ {
+ // This is an ordinary value type - see if it is returned by-ref.
+ TypeHandle retType = msig.GetRetTypeHandleThrowing();
+ if (retType.IsValueType() && !retType.IsEnum() && IsUnmanagedValueTypeReturnedByRef(retType.MakeNativeValueType().GetSize()))
+ {
+ nativeStackSize += sizeof(LPVOID);
+ }
+#if defined(TARGET_WINDOWS) && !defined(TARGET_ARM)
+ else if (fThisCall && !retType.IsEnum())
+ {
+ nativeStackSize += sizeof(LPVOID);
+ }
+#endif
+ }
+
if (SF_IsHRESULTSwapping(dwStubFlags))
{
if (msig.GetReturnType() != ELEMENT_TYPE_VOID)
@@ -3914,7 +3826,6 @@ static void CreateStructStub(ILStubState* pss,
SF_IsBestFit(dwStubFlags),
SF_IsThrowOnUnmappableChar(dwStubFlags),
TRUE,
- FALSE,
pMD,
TRUE
DEBUG_ARG(pSigDesc->m_pDebugName)
diff --git a/src/coreclr/src/vm/dllimportcallback.cpp b/src/coreclr/src/vm/dllimportcallback.cpp
index f25da147690eb..e33e08849f979 100644
--- a/src/coreclr/src/vm/dllimportcallback.cpp
+++ b/src/coreclr/src/vm/dllimportcallback.cpp
@@ -169,7 +169,7 @@ VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
{
// exchange ecx ( "this") with the hidden structure return buffer
// xchg ecx, [esp]
- pcpusl->X86EmitOp(0x87, kECX, (X86Reg)4 /*ESP*/);
+ pcpusl->X86EmitOp(0x87, kECX, (X86Reg)kESP_Unsafe);
}
// jam ecx (the "this" param onto stack. Now it looks like a normal stdcall.)
@@ -178,6 +178,25 @@ VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
// push edx - repush the return address
pcpusl->X86EmitPushReg(kEDX);
}
+
+ // The native signature doesn't have a return buffer
+ // but the managed signature does.
+ // Set up the return buffer address here.
+ if (pInfo->m_wFlags & umtmlBufRetValToEnreg)
+ {
+ // Calculate the return buffer address
+ // Calculate the offset to the return buffer we establish for EAX:EDX below.
+ // lea edx [esp - offset to EAX:EDX return buffer]
+ pcpusl->X86EmitEspOffset(0x8d, kEDX, -0xc /* skip return addr, EBP, EBX */ -0x8 /* point to start of EAX:EDX return buffer */ );
+
+ // exchange edx (which has the return buffer address)
+ // with the return address
+ // xchg edx, [esp]
+ pcpusl->X86EmitOp(0x87, kEDX, (X86Reg)kESP_Unsafe);
+
+ // push edx
+ pcpusl->X86EmitPushReg(kEDX);
+ }
// Setup the EBP frame
pcpusl->X86EmitPushEBPframe();
@@ -257,6 +276,9 @@ VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
// push fs:[0]
const static BYTE codeSEH1[] = { 0x64, 0xFF, 0x35, 0x0, 0x0, 0x0, 0x0};
pcpusl->EmitBytes(codeSEH1, sizeof(codeSEH1));
+ // EmitBytes doesn't know to increase the stack size
+ // so we do so manually
+ pcpusl->SetStackSize(pcpusl->GetStackSize() + 4);
// link in the exception frame
// mov dword ptr fs:[0], esp
@@ -292,9 +314,9 @@ VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
//
// | |
// +-------------------------+
- // EBX - 20 | Saved Result: EDX/ST(0) |
+ // EBX - 20 | Saved Result: EAX/ST(0) |
// +- - - - - - - - - - - - -+
- // EBX - 16 | Saved Result: EAX/ST(0) |
+ // EBX - 16 | Saved Result: EDX/ST(0) |
// +-------------------------+
// EBX - 12 | Caller's EBX |
// +-------------------------+
@@ -471,11 +493,26 @@ VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
// save EDX:EAX
if (retbufofs == UNUSED_STACK_OFFSET)
{
- pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0x8 /* skip saved EBP, EBX */, kEAX);
- pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX, EAX */, kEDX);
+ pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX, EDX */, kEAX);
+ pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0x8 /* skip saved EBP, EBX */, kEDX);
}
- else
+ // In the umtmlBufRetValToEnreg case,
+ // we set up the return buffer to output
+ // into the EDX:EAX buffer we set up for the register return case.
+ // So we don't need to do more work here.
+ else if ((pInfo->m_wFlags & umtmlBufRetValToEnreg) == 0)
{
+ if (pInfo->m_wFlags & umtmlEnregRetValToBuf)
+ {
+ pcpusl->X86EmitPushReg(kEDI); // Save EDI register
+ // Move the return value from the enregistered return from the JIT
+ // to the return buffer that the native calling convention expects.
+ // NOTE: Since the managed calling convention does not enregister 8-byte
+ // struct returns on x86, we only need to handle the single-register 4-byte case.
+ pcpusl->X86EmitIndexRegLoad(kEDI, kEBX, retbufofs);
+ pcpusl->X86EmitIndexRegStore(kEDI, 0x0, kEAX);
+ pcpusl->X86EmitPopReg(kEDI); // Restore EDI register
+ }
// pretend that the method returned the ret buf hidden argument
// (the structure ptr); C++ compiler seems to rely on this
@@ -483,7 +520,7 @@ VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
pcpusl->X86EmitIndexRegLoad(kEAX, kEBX, retbufofs);
// save it as the return value
- pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0x8 /* skip saved EBP, EBX */, kEAX);
+ pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX, EDX */, kEAX);
}
}
@@ -555,8 +592,8 @@ VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
}
else
{
- pcpusl->X86EmitPopReg(kEDX);
pcpusl->X86EmitPopReg(kEAX);
+ pcpusl->X86EmitPopReg(kEDX);
}
// Restore EBX, which was saved in prolog
@@ -769,6 +806,13 @@ Stub *UMThunkMarshInfo::CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStat
UINT nOffset = 0;
int numRegistersUsed = 0;
int numStackSlotsIndex = nStackBytes / STACK_ELEM_SIZE;
+
+ // This could have been set in the UnmanagedCallersOnly scenario.
+ if (m_callConv == UINT16_MAX)
+ m_callConv = static_cast(pSigInfo->GetCallConv());
+
+ UMThunkStubInfo stubInfo;
+ memset(&stubInfo, 0, sizeof(stubInfo));
// process this
if (!fIsStatic)
@@ -778,13 +822,27 @@ Stub *UMThunkMarshInfo::CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStat
}
// process the return buffer parameter
- if (argit.HasRetBuffArg())
+ if (argit.HasRetBuffArg() || (m_callConv == pmCallConvThiscall && argit.HasValueTypeReturn()))
{
- numRegistersUsed++;
- _ASSERTE(numRegistersUsed - 1 < NUM_ARGUMENT_REGISTERS);
- psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed] = nOffset;
+ // Only copy the retbuf arg from the src call when both the managed call and native call
+ // have a return buffer.
+ if (argit.HasRetBuffArg())
+ {
+ // managed has a return buffer
+ if (m_callConv != pmCallConvThiscall &&
+ argit.HasValueTypeReturn() &&
+ pMetaSig->GetReturnTypeSize() == ENREGISTERED_RETURNTYPE_MAXSIZE)
+ {
+ // Only managed has a return buffer.
+ // Native returns in registers.
+ // We add a flag so the stub correctly sets up the return buffer.
+ stubInfo.m_wFlags |= umtmlBufRetValToEnreg;
+ }
+ numRegistersUsed++;
+ _ASSERTE(numRegistersUsed - 1 < NUM_ARGUMENT_REGISTERS);
+ psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed] = nOffset;
+ }
retbufofs = nOffset;
-
nOffset += StackElemSize(sizeof(LPVOID));
}
@@ -810,7 +868,7 @@ Stub *UMThunkMarshInfo::CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStat
fPassPointer = TRUE;
}
- if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
+ if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type, thValueType))
{
_ASSERTE(numRegistersUsed - 1 < NUM_ARGUMENT_REGISTERS);
psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed] =
@@ -849,13 +907,6 @@ Stub *UMThunkMarshInfo::CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStat
m_cbActualArgSize = cbActualArgSize;
- // This could have been set in the UnmanagedCallersOnly scenario.
- if (m_callConv == UINT16_MAX)
- m_callConv = static_cast<UINT16>(pSigInfo->GetCallConv());
-
- UMThunkStubInfo stubInfo;
- memset(&stubInfo, 0, sizeof(stubInfo));
-
if (!FitsInU2(m_cbActualArgSize))
COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX);
@@ -879,8 +930,17 @@ Stub *UMThunkMarshInfo::CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStat
{
stubInfo.m_wFlags |= umtmlThisCallHiddenArg;
}
+ else if (argit.HasValueTypeReturn())
+ {
+ stubInfo.m_wFlags |= umtmlThisCallHiddenArg | umtmlEnregRetValToBuf;
+ // When the native signature has a return buffer but the
+ // managed one does not, we need to handle popping the
+ // the return buffer of the stack manually, which we do here.
+ m_cbRetPop += 4;
+ }
}
}
+
stubInfo.m_cbRetPop = m_cbRetPop;
if (fIsStatic) stubInfo.m_wFlags |= umtmlIsStatic;
@@ -1396,7 +1456,7 @@ VOID UMThunkMarshInfo::RunTimeInit()
TypeHandle thValueType;
CorElementType type = sig.NextArgNormalized(&thValueType);
int cbSize = sig.GetElemSize(type, thValueType);
- if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
+ if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type, thValueType))
{
offs += STACK_ELEM_SIZE;
}
@@ -1481,7 +1541,7 @@ VOID UMThunkMarshInfo::SetupArguments(char *pSrc, ArgumentRegisters *pArgRegs, c
int cbSize = sig.GetElemSize(type, thValueType);
int elemSize = StackElemSize(cbSize);
- if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
+ if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type, thValueType))
{
_ASSERTE(elemSize == STACK_ELEM_SIZE);
diff --git a/src/coreclr/src/vm/dllimportcallback.h b/src/coreclr/src/vm/dllimportcallback.h
index 8a483808d3d28..1bdb5d5cbebd3 100644
--- a/src/coreclr/src/vm/dllimportcallback.h
+++ b/src/coreclr/src/vm/dllimportcallback.h
@@ -18,13 +18,15 @@
enum UMThunkStubFlags
{
- umtmlIsStatic = 0x0001,
- umtmlThisCall = 0x0002,
- umtmlThisCallHiddenArg = 0x0004,
- umtmlFpu = 0x0008,
+ umtmlIsStatic = 0x0001,
+ umtmlThisCall = 0x0002,
+ umtmlThisCallHiddenArg = 0x0004,
+ umtmlFpu = 0x0008,
+ umtmlEnregRetValToBuf = 0x0010,
+ umtmlBufRetValToEnreg = 0x0020,
#ifdef TARGET_X86
// the signature is trivial so stub need not be generated and the target can be called directly
- umtmlSkipStub = 0x0080,
+ umtmlSkipStub = 0x0080,
#endif // TARGET_X86
};
diff --git a/src/coreclr/src/vm/fieldmarshaler.cpp b/src/coreclr/src/vm/fieldmarshaler.cpp
index 6f114f2df7f82..17ded4ec02b19 100644
--- a/src/coreclr/src/vm/fieldmarshaler.cpp
+++ b/src/coreclr/src/vm/fieldmarshaler.cpp
@@ -70,7 +70,6 @@ VOID ParseNativeType(Module* pModule,
FALSE, // We only need validation of the native signature and the MARSHAL_TYPE_*
FALSE, // so we don't need to accurately get the BestFitCustomAttribute data for this construction.
FALSE, /* fEmitsIL */
- FALSE, /* onInstanceMethod */
nullptr,
FALSE /* fUseCustomMarshal */
#ifdef _DEBUG
diff --git a/src/coreclr/src/vm/ilmarshalers.h b/src/coreclr/src/vm/ilmarshalers.h
index c08f4f3efc49b..12a1824ca7ba3 100644
--- a/src/coreclr/src/vm/ilmarshalers.h
+++ b/src/coreclr/src/vm/ilmarshalers.h
@@ -352,12 +352,6 @@ class ILMarshaler
return (0 != (dwMarshalFlags & MARSHAL_FLAG_RETVAL));
}
- static inline bool IsInMemberFunction(DWORD dwMarshalFlags)
- {
- LIMITED_METHOD_CONTRACT;
- return (0 != (dwMarshalFlags & MARSHAL_FLAG_IN_MEMBER_FUNCTION));
- }
-
static inline bool IsFieldMarshal(DWORD dwMarshalFlags)
{
LIMITED_METHOD_CONTRACT;
@@ -598,99 +592,20 @@ class ILMarshaler
LocalDesc nativeType = GetNativeType();
LocalDesc managedType = GetManagedType();
- bool byrefNativeReturn = false;
- CorElementType typ = ELEMENT_TYPE_VOID;
- UINT32 nativeSize = 0;
- bool nativeMethodIsMemberFunction = IsInMemberFunction(dwMarshalFlags);
-
- // we need to convert value type return types to primitives as
- // JIT does not inline P/Invoke calls that return structures
- if (nativeType.IsValueClass())
- {
- if (wNativeSize == VARIABLESIZE)
- {
- // the unmanaged type size is variable
- nativeSize = m_pargs->m_pMT->GetNativeSize();
- }
- else
- {
- // the unmanaged type size is fixed
- nativeSize = wNativeSize;
- }
-
-#if defined(TARGET_WINDOWS)
- // JIT32 and JIT64 (which is only used on the Windows Desktop CLR) has a problem generating
- // code for the pinvoke ILStubs which do a return using a struct type. Therefore, we
- // change the signature of calli to return void and make the return buffer as first argument.
-
- // For Windows, we need to use a return buffer for native member functions returning structures.
- // On Windows arm we need to respect HFAs and not use a return buffer if the return type is an HFA
- // for X86 Windows non-member functions we bash the return type from struct to U1, U2, U4 or U8
- // and use byrefNativeReturn for all other structs.
- if (nativeMethodIsMemberFunction)
- {
-#ifdef TARGET_ARM
- byrefNativeReturn = !nativeType.InternalToken.GetMethodTable()->IsNativeHFA();
-#else
- byrefNativeReturn = true;
-#endif
- }
- else
- {
-#ifdef TARGET_X86
- switch (nativeSize)
- {
- case 1: typ = ELEMENT_TYPE_U1; break;
- case 2: typ = ELEMENT_TYPE_U2; break;
- case 4: typ = ELEMENT_TYPE_U4; break;
- case 8: typ = ELEMENT_TYPE_U8; break;
- default: byrefNativeReturn = true; break;
- }
-#endif // TARGET_X86
- }
-#endif // defined(TARGET_WINDOWS)
-
- // for UNIX_X86_ABI, we always need a return buffer argument for any size of structs.
-#ifdef UNIX_X86_ABI
- byrefNativeReturn = true;
-#endif
- }
-
- if (IsHresultSwap(dwMarshalFlags) || (byrefNativeReturn && (IsCLRToNative(m_dwMarshalFlags) || nativeMethodIsMemberFunction)))
+ if (IsHresultSwap(dwMarshalFlags))
{
LocalDesc extraParamType = nativeType;
extraParamType.MakeByRef();
m_pcsMarshal->SetStubTargetArgType(&extraParamType, false);
- if (IsHresultSwap(dwMarshalFlags))
- {
- // HRESULT swapping: the original return value is transformed into an extra
- // byref parameter and the target is expected to return an HRESULT
- m_pcsMarshal->SetStubTargetReturnType(ELEMENT_TYPE_I4); // native method returns an HRESULT
- }
- else
- {
- // byref structure return: the original return value is transformed into an
- // extra byref parameter and the target is not expected to return anything
- //
- // note: we do this only for forward calls because [unmanaged calling conv.
- // uses byref return] implies [managed calling conv. uses byref return]
- m_pcsMarshal->SetStubTargetReturnType(ELEMENT_TYPE_VOID);
- }
+ // HRESULT swapping: the original return value is transformed into an extra
+ // byref parameter and the target is expected to return an HRESULT
+ m_pcsMarshal->SetStubTargetReturnType(ELEMENT_TYPE_I4); // native method returns an HRESULT
}
else
{
- if (typ != ELEMENT_TYPE_VOID)
- {
- // small structure return: the original return value is transformed into
- // ELEMENT_TYPE_U1, ELEMENT_TYPE_U2, ELEMENT_TYPE_U4, or ELEMENT_TYPE_U8
- m_pcsMarshal->SetStubTargetReturnType(typ);
- }
- else
- {
- m_pcsMarshal->SetStubTargetReturnType(&nativeType);
- }
+ m_pcsMarshal->SetStubTargetReturnType(&nativeType);
}
m_managedHome.InitHome(ILStubMarshalHome::HomeType_ILLocal, m_pcsMarshal->NewLocal(managedType));
@@ -700,31 +615,14 @@ class ILMarshaler
if (IsCLRToNative(dwMarshalFlags))
{
- if (IsHresultSwap(dwMarshalFlags) || byrefNativeReturn)
+ if (IsHresultSwap(dwMarshalFlags))
{
EmitReInitNative(m_pcsMarshal);
EmitLoadNativeHomeAddrForByRefDispatch(pcsDispatch); // load up the byref native type as an extra arg
}
else
{
- if (typ != ELEMENT_TYPE_VOID)
- {
- // small structure forward: the returned integer is memcpy'd into native home
- // of the structure
-
- DWORD dwTempLocalNum = m_pcsUnmarshal->NewLocal(typ);
- m_pcsUnmarshal->EmitSTLOC(dwTempLocalNum);
-
- // cpblk
- m_nativeHome.EmitLoadHomeAddr(m_pcsUnmarshal);
- m_pcsUnmarshal->EmitLDLOCA(dwTempLocalNum);
- m_pcsUnmarshal->EmitLDC(nativeSize);
- m_pcsUnmarshal->EmitCPBLK();
- }
- else
- {
- EmitStoreNativeValue(m_pcsUnmarshal);
- }
+ EmitStoreNativeValue(m_pcsUnmarshal);
}
if (NeedsMarshalCleanupIndex())
@@ -778,31 +676,9 @@ class ILMarshaler
m_nativeHome.EmitCopyToByrefArgWithNullCheck(m_pcsUnmarshal, &nativeType, argidx);
m_pcsUnmarshal->EmitLDC(S_OK);
}
- else if (byrefNativeReturn && nativeMethodIsMemberFunction)
- {
- m_nativeHome.EmitCopyToByrefArg(m_pcsUnmarshal, &nativeType, argidx);
- }
else
{
- if (typ != ELEMENT_TYPE_VOID)
- {
- // small structure return (reverse): native home of the structure is memcpy'd
- // into the integer to be returned from the stub
-
- DWORD dwTempLocalNum = m_pcsUnmarshal->NewLocal(typ);
-
- // cpblk
- m_pcsUnmarshal->EmitLDLOCA(dwTempLocalNum);
- m_nativeHome.EmitLoadHomeAddr(m_pcsUnmarshal);
- m_pcsUnmarshal->EmitLDC(nativeSize);
- m_pcsUnmarshal->EmitCPBLK();
-
- m_pcsUnmarshal->EmitLDLOC(dwTempLocalNum);
- }
- else
- {
- EmitLoadNativeValue(m_pcsUnmarshal);
- }
+ EmitLoadNativeValue(m_pcsUnmarshal);
}
// make sure we free (and zero) the return value if an exception is thrown
diff --git a/src/coreclr/src/vm/methodtablebuilder.cpp b/src/coreclr/src/vm/methodtablebuilder.cpp
index 5e09e619b9e8b..3fa72fe5a71b4 100644
--- a/src/coreclr/src/vm/methodtablebuilder.cpp
+++ b/src/coreclr/src/vm/methodtablebuilder.cpp
@@ -9652,19 +9652,6 @@ void MethodTableBuilder::CheckForSystemTypes()
_ASSERTE(g_pByReferenceClass != NULL);
_ASSERTE(g_pByReferenceClass->IsByRefLike());
-#ifdef TARGET_X86
- if (GetCl() == g_pByReferenceClass->GetCl())
- {
- // x86 by default treats the type of ByReference as the actual type of its IntPtr field, see calls to
- // ComputeInternalCorElementTypeForValueType in this file. This is a special case where the struct needs to be
- // treated as a value type so that its field can be considered as a byref pointer.
- _ASSERTE(pMT->GetFlag(MethodTable::enum_flag_Category_Mask) == MethodTable::enum_flag_Category_PrimitiveValueType);
- pMT->ClearFlag(MethodTable::enum_flag_Category_Mask);
- pMT->SetInternalCorElementType(ELEMENT_TYPE_VALUETYPE);
- return;
- }
-#endif
-
_ASSERTE(g_pNullableClass->IsNullable());
// Pre-compute whether the class is a Nullable so that code:Nullable::IsNullableType is efficient
@@ -9732,18 +9719,6 @@ void MethodTableBuilder::CheckForSystemTypes()
{
pMT->SetIsNullable();
}
-#ifdef TARGET_X86
- else if (strcmp(name, g_ByReferenceName) == 0)
- {
- // x86 by default treats the type of ByReference as the actual type of its IntPtr field, see calls to
- // ComputeInternalCorElementTypeForValueType in this file. This is a special case where the struct needs to be
- // treated as a value type so that its field can be considered as a byref pointer.
- _ASSERTE(pMT->GetFlag(MethodTable::enum_flag_Category_Mask) == MethodTable::enum_flag_Category_PrimitiveValueType);
- pMT->ClearFlag(MethodTable::enum_flag_Category_Mask);
- pMT->SetInternalCorElementType(ELEMENT_TYPE_VALUETYPE);
- }
-#endif
-#ifndef TARGET_X86
else if (strcmp(name, g_RuntimeArgumentHandleName) == 0)
{
pMT->SetInternalCorElementType (ELEMENT_TYPE_I);
@@ -9752,7 +9727,10 @@ void MethodTableBuilder::CheckForSystemTypes()
{
pMT->SetInternalCorElementType (ELEMENT_TYPE_I);
}
-#endif
+ else if (strcmp(name, g_RuntimeFieldHandleInternalName) == 0)
+ {
+ pMT->SetInternalCorElementType (ELEMENT_TYPE_I);
+ }
}
else
{
@@ -10379,15 +10357,7 @@ MethodTableBuilder::SetupMethodTable2(
}
else
{
-#ifdef TARGET_X86
- // JIT64 is not aware of normalized value types and this
- // optimization (return small value types by value in registers)
- // is already done in JIT64.
- OVERRIDE_TYPE_LOAD_LEVEL_LIMIT(CLASS_LOADED);
- normalizedType = EEClass::ComputeInternalCorElementTypeForValueType(pMT);
-#else
normalizedType = ELEMENT_TYPE_VALUETYPE;
-#endif
}
}
pMT->SetInternalCorElementType(normalizedType);
diff --git a/src/coreclr/src/vm/mlinfo.cpp b/src/coreclr/src/vm/mlinfo.cpp
index 9a684a41d0ee9..1097bbea2a6ad 100644
--- a/src/coreclr/src/vm/mlinfo.cpp
+++ b/src/coreclr/src/vm/mlinfo.cpp
@@ -659,102 +659,6 @@ BOOL ParseNativeTypeInfo(NativeTypeParamInfo* pParamInfo,
return TRUE;
}
-//==========================================================================
-// Determines whether *pManagedElemType is really normalized (i.e. differs
-// from what sigPtr points to modulo generic instantiation). If it is the
-// case, all types that have been normalized away are checked for valid
-// managed/unmanaged type combination, and *pNativeType is updated to contain
-// the native type of the primitive type field inside. On error (a generic
-// type is encountered or managed/unmanaged type mismatch) or non-default
-// native type of the primitive type inside, *pManagedElemType is un-normalized
-// so that the calling code can deal with the situation in its own way.
-//==========================================================================
-void VerifyAndAdjustNormalizedType(
- Module * pModule,
- SigPointer sigPtr,
- const SigTypeContext * pTypeContext,
- CorElementType * pManagedElemType,
- CorNativeType * pNativeType)
-{
- CorElementType sigElemType = sigPtr.PeekElemTypeClosed(pModule, pTypeContext);
-
- if (*pManagedElemType != sigElemType)
- {
- // Normalized element type differs from closed element type, which means that
- // normalization has occurred.
- _ASSERTE(sigElemType == ELEMENT_TYPE_VALUETYPE);
-
- // Now we know that this is a normalized value type - we have to verify the removed
- // value type(s) and get to the true primitive type inside.
- TypeHandle th = sigPtr.GetTypeHandleThrowing(pModule,
- pTypeContext,
- ClassLoader::LoadTypes,
- CLASS_LOAD_UNRESTORED,
- TRUE);
- _ASSERTE(!th.IsNull() && !th.IsTypeDesc());
-
- CorNativeType ntype = *pNativeType;
-
- if (!th.AsMethodTable()->IsTruePrimitive() &&
- !th.IsEnum())
- {
- // This is a trivial (yet non-primitive) value type that has been normalized.
- // Loop until we eventually hit the primitive type or enum inside.
- do
- {
- if (th.HasInstantiation())
- {
- // generic structures are either not marshalable or special-cased - the caller needs to know either way
- *pManagedElemType = sigElemType;
- return;
- }
-
- // verify the native type of the value type (must be default or Struct)
- if (!(ntype == NATIVE_TYPE_DEFAULT || ntype == NATIVE_TYPE_STRUCT))
- {
- *pManagedElemType = sigElemType;
- return;
- }
-
- MethodTable *pMT = th.GetMethodTable();
- _ASSERTE(pMT != NULL && pMT->IsValueType() && pMT->GetNumInstanceFields() == 1);
-
- // get the only instance field
- PTR_FieldDesc fieldDesc = pMT->GetApproxFieldDescListRaw();
-
- // retrieve the MarshalAs of the field
- NativeTypeParamInfo paramInfo;
- if (!ParseNativeTypeInfo(fieldDesc->GetMemberDef(), th.GetModule()->GetMDImport(), &paramInfo))
- {
- *pManagedElemType = sigElemType;
- return;
- }
-
- ntype = paramInfo.m_NativeType;
-
- th = fieldDesc->GetApproxFieldTypeHandleThrowing();
- }
- while (!th.IsTypeDesc() &&
- !th.AsMethodTable()->IsTruePrimitive() &&
- !th.IsEnum());
-
- // now ntype contains the native type of *pManagedElemType
- if (ntype == NATIVE_TYPE_DEFAULT)
- {
- // Let's update the caller's native type with default type only.
- // Updating with a non-default native type that is not allowed
- // for the given managed type would result in confusing exception
- // messages.
- *pNativeType = ntype;
- }
- else
- {
- *pManagedElemType = sigElemType;
- }
- }
- }
-}
-
VOID ThrowInteropParamException(UINT resID, UINT paramIdx)
{
CONTRACTL
@@ -1187,7 +1091,6 @@ MarshalInfo::MarshalInfo(Module* pModule,
BOOL BestFit,
BOOL ThrowOnUnmappableChar,
BOOL fEmitsIL,
- BOOL onInstanceMethod,
MethodDesc* pMD,
BOOL fLoadCustomMarshal
#ifdef _DEBUG
@@ -1230,7 +1133,6 @@ MarshalInfo::MarshalInfo(Module* pModule,
CorElementType corElemType = ELEMENT_TYPE_END;
m_pMT = NULL;
m_pMD = pMD;
- m_onInstanceMethod = onInstanceMethod;
#ifdef FEATURE_COMINTEROP
m_fDispItf = FALSE;
@@ -1367,38 +1269,6 @@ MarshalInfo::MarshalInfo(Module* pModule,
}
}
- // System primitive types (System.Int32, et.al.) will be marshaled as expected
- // because the mtype CorElementType is normalized (e.g. ELEMENT_TYPE_I4).
-#ifdef TARGET_X86
- // We however need to detect if such a normalization occurred for non-system
- // trivial value types, because we hold CorNativeType belonging to the original
- // "un-normalized" signature type. It has to be verified that all the value types
- // that have been normalized away have default marshaling or MarshalAs(Struct).
- // In addition, the nativeType must be updated with the type of the real primitive inside.
- // We don't normalize on return values of member functions since struct return values need to be treated as structures.
- if (isParam || !onInstanceMethod)
- {
- VerifyAndAdjustNormalizedType(pModule, sig, pTypeContext, &mtype, &nativeType);
- }
- else
- {
- SigPointer sigtmp = sig;
- CorElementType closedElemType = sigtmp.PeekElemTypeClosed(pModule, pTypeContext);
- if (closedElemType == ELEMENT_TYPE_VALUETYPE)
- {
- TypeHandle th = sigtmp.GetTypeHandleThrowing(pModule, pTypeContext);
- // If the return type of an instance method is a value-type we need the actual return type.
- // However, if the return type is an enum, we can normalize it.
- if (!th.IsEnum())
- {
- mtype = closedElemType;
- }
- }
-
- }
-#endif // TARGET_X86
-
-
if (nativeType == NATIVE_TYPE_CUSTOMMARSHALER)
{
if (IsFieldScenario())
@@ -2366,23 +2236,6 @@ MarshalInfo::MarshalInfo(Module* pModule,
m_type = MARSHAL_TYPE_BLITTABLEVALUECLASSWITHCOPYCTOR;
}
else
-#ifdef TARGET_X86
- // JIT64 is not aware of normalized value types and this optimization
- // (returning small value types by value in registers) is already done in JIT64.
- if ( !m_byref // Permit register-sized structs as return values
- && !isParam
- && !onInstanceMethod
- && CorIsPrimitiveType(m_pMT->GetInternalCorElementType())
- && !IsUnmanagedValueTypeReturnedByRef(nativeSize)
- && managedSize <= TARGET_POINTER_SIZE
- && nativeSize <= TARGET_POINTER_SIZE
- && !IsFieldScenario())
- {
- m_type = MARSHAL_TYPE_GENERIC_4;
- m_args.m_pMT = m_pMT;
- }
- else
-#endif // TARGET_X86
{
m_args.m_pMT = m_pMT;
m_type = MARSHAL_TYPE_BLITTABLEVALUECLASS;
@@ -2777,7 +2630,7 @@ DWORD CalculateArgumentMarshalFlags(BOOL byref, BOOL in, BOOL out, BOOL fMngToNa
return dwMarshalFlags;
}
-DWORD CalculateReturnMarshalFlags(BOOL hrSwap, BOOL fMngToNative, BOOL onInstanceMethod)
+DWORD CalculateReturnMarshalFlags(BOOL hrSwap, BOOL fMngToNative)
{
LIMITED_METHOD_CONTRACT;
DWORD dwMarshalFlags = MARSHAL_FLAG_RETVAL;
@@ -2792,11 +2645,6 @@ DWORD CalculateReturnMarshalFlags(BOOL hrSwap, BOOL fMngToNative, BOOL onInstanc
dwMarshalFlags |= MARSHAL_FLAG_CLR_TO_NATIVE;
}
- if (onInstanceMethod)
- {
- dwMarshalFlags |= MARSHAL_FLAG_IN_MEMBER_FUNCTION;
- }
-
return dwMarshalFlags;
}
@@ -2940,7 +2788,7 @@ void MarshalInfo::GenerateReturnIL(NDirectStubLinker* psl,
}
NewHolder<ILMarshaler> pMarshaler = CreateILMarshaler(m_type, psl);
- DWORD dwMarshalFlags = CalculateReturnMarshalFlags(retval, fMngToNative, m_onInstanceMethod);
+ DWORD dwMarshalFlags = CalculateReturnMarshalFlags(retval, fMngToNative);
if (!pMarshaler->SupportsReturnMarshal(dwMarshalFlags, &resID))
{
diff --git a/src/coreclr/src/vm/mlinfo.h b/src/coreclr/src/vm/mlinfo.h
index 23ec668115ac9..792d078220f56 100644
--- a/src/coreclr/src/vm/mlinfo.h
+++ b/src/coreclr/src/vm/mlinfo.h
@@ -51,8 +51,7 @@ enum MarshalFlags
MARSHAL_FLAG_HRESULT_SWAP = 0x010,
MARSHAL_FLAG_RETVAL = 0x020,
// unused = 0x040,
- MARSHAL_FLAG_FIELD = 0x080,
- MARSHAL_FLAG_IN_MEMBER_FUNCTION = 0x100
+ MARSHAL_FLAG_FIELD = 0x080
};
#include <pshpack1.h>
@@ -189,13 +188,6 @@ BOOL ParseNativeTypeInfo(mdToken token,
IMDInternalImport* pScope,
NativeTypeParamInfo* pParamInfo);
-void VerifyAndAdjustNormalizedType(
- Module * pModule,
- SigPointer sigPtr,
- const SigTypeContext * pTypeContext,
- CorElementType * pManagedElemType,
- CorNativeType * pNativeType);
-
#ifdef _DEBUG
BOOL IsFixedBuffer(mdFieldDef field, IMDInternalImport* pInternalImport);
#endif
@@ -321,7 +313,6 @@ class MarshalInfo
BOOL BestFit,
BOOL ThrowOnUnmappableChar,
BOOL fEmitsIL,
- BOOL onInstanceMethod,
MethodDesc* pMD = NULL,
BOOL fUseCustomMarshal = TRUE
#ifdef _DEBUG
@@ -519,7 +510,6 @@ class MarshalInfo
VARTYPE m_arrayElementType;
int m_iArrayRank;
BOOL m_nolowerbounds; // if managed type is SZARRAY, don't allow lower bounds
- BOOL m_onInstanceMethod;
// for NT_ARRAY only
UINT32 m_multiplier; // multipler for "sizeis"
diff --git a/src/tests/Interop/PInvoke/Miscellaneous/ThisCall/ThisCallNative.cpp b/src/tests/Interop/PInvoke/Miscellaneous/ThisCall/ThisCallNative.cpp
index 99a73baeca20b..cf2569fb75386 100644
--- a/src/tests/Interop/PInvoke/Miscellaneous/ThisCall/ThisCallNative.cpp
+++ b/src/tests/Interop/PInvoke/Miscellaneous/ThisCall/ThisCallNative.cpp
@@ -21,9 +21,14 @@ struct IntWrapper
int i;
};
+enum E : unsigned int
+{
+ Value = 42
+};
+
class C
{
- int dummy = 0xcccccccc;
+ E dummy = E::Value;
float width;
float height;
@@ -47,6 +52,11 @@ class C
{
return {(int)height};
}
+
+ virtual E GetE()
+ {
+ return dummy;
+ }
};
@@ -54,3 +64,23 @@ extern "C" DLL_EXPORT C* STDMETHODCALLTYPE CreateInstanceOfC(float width, float
{
return new C(width, height);
}
+
+extern "C" DLL_EXPORT SizeF STDMETHODCALLTYPE GetSizeFromManaged(C* c)
+{
+ return c->GetSize();
+}
+
+extern "C" DLL_EXPORT Width STDMETHODCALLTYPE GetWidthFromManaged(C* c)
+{
+ return c->GetWidth();
+}
+
+extern "C" DLL_EXPORT IntWrapper STDMETHODCALLTYPE GetHeightAsIntFromManaged(C* c)
+{
+ return c->GetHeightAsInt();
+}
+
+extern "C" DLL_EXPORT E STDMETHODCALLTYPE GetEFromManaged(C* c)
+{
+ return c->GetE();
+}
diff --git a/src/tests/Interop/PInvoke/Miscellaneous/ThisCall/ThisCallTest.cs b/src/tests/Interop/PInvoke/Miscellaneous/ThisCall/ThisCallTest.cs
index 5162195b51057..66d40546e91b3 100644
--- a/src/tests/Interop/PInvoke/Miscellaneous/ThisCall/ThisCallTest.cs
+++ b/src/tests/Interop/PInvoke/Miscellaneous/ThisCall/ThisCallTest.cs
@@ -16,12 +16,13 @@ public struct VtableLayout
public IntPtr getSize;
public IntPtr getWidth;
public IntPtr getHeightAsInt;
+ public IntPtr getE;
}
public VtableLayout* vtable;
- private int c;
- public readonly float width;
- public readonly float height;
+ public E dummy;
+ public float width;
+ public float height;
}
public struct SizeF
@@ -40,6 +41,11 @@ public struct IntWrapper
public int i;
}
+ public enum E : uint
+ {
+ Value = 42
+ }
+
[UnmanagedFunctionPointer(CallingConvention.ThisCall)]
public delegate SizeF GetSizeFn(C* c);
[UnmanagedFunctionPointer(CallingConvention.ThisCall)]
@@ -47,13 +53,25 @@ public struct IntWrapper
[UnmanagedFunctionPointer(CallingConvention.ThisCall)]
public delegate IntWrapper GetHeightAsIntFn(C* c);
+ [UnmanagedFunctionPointer(CallingConvention.ThisCall)]
+ public delegate E GetEFn(C* c);
+
[DllImport(nameof(ThisCallNative))]
public static extern C* CreateInstanceOfC(float width, float height);
+
+ [DllImport(nameof(ThisCallNative))]
+ public static extern SizeF GetSizeFromManaged(C* c);
+ [DllImport(nameof(ThisCallNative))]
+ public static extern Width GetWidthFromManaged(C* c);
+ [DllImport(nameof(ThisCallNative))]
+ public static extern IntWrapper GetHeightAsIntFromManaged(C* c);
+ [DllImport(nameof(ThisCallNative))]
+ public static extern E GetEFromManaged(C* c);
}
-class ThisCallTest
+unsafe class ThisCallTest
{
- public unsafe static int Main(string[] args)
+ public static int Main(string[] args)
{
try
{
@@ -63,6 +81,11 @@ public unsafe static int Main(string[] args)
Test8ByteHFA(instance);
Test4ByteHFA(instance);
Test4ByteNonHFA(instance);
+ TestEnum(instance);
+ Test8ByteHFAReverse();
+ Test4ByteHFAReverse();
+ Test4ByteNonHFAReverse();
+ TestEnumReverse();
}
catch (System.Exception ex)
{
@@ -72,7 +95,7 @@ public unsafe static int Main(string[] args)
return 100;
}
- private static unsafe void Test8ByteHFA(ThisCallNative.C* instance)
+ private static void Test8ByteHFA(ThisCallNative.C* instance)
{
ThisCallNative.GetSizeFn callback = Marshal.GetDelegateForFunctionPointer<ThisCallNative.GetSizeFn>(instance->vtable->getSize);
@@ -82,7 +105,7 @@ private static unsafe void Test8ByteHFA(ThisCallNative.C* instance)
Assert.AreEqual(instance->height, result.height);
}
- private static unsafe void Test4ByteHFA(ThisCallNative.C* instance)
+ private static void Test4ByteHFA(ThisCallNative.C* instance)
{
ThisCallNative.GetWidthFn callback = Marshal.GetDelegateForFunctionPointer<ThisCallNative.GetWidthFn>(instance->vtable->getWidth);
@@ -91,7 +114,7 @@ private static unsafe void Test4ByteHFA(ThisCallNative.C* instance)
Assert.AreEqual(instance->width, result.width);
}
- private static unsafe void Test4ByteNonHFA(ThisCallNative.C* instance)
+ private static void Test4ByteNonHFA(ThisCallNative.C* instance)
{
ThisCallNative.GetHeightAsIntFn callback = Marshal.GetDelegateForFunctionPointer<ThisCallNative.GetHeightAsIntFn>(instance->vtable->getHeightAsInt);
@@ -99,4 +122,79 @@ private static unsafe void Test4ByteNonHFA(ThisCallNative.C* instance)
Assert.AreEqual((int)instance->height, result.i);
}
+
+ private static void TestEnum(ThisCallNative.C* instance)
+ {
+ ThisCallNative.GetEFn callback = Marshal.GetDelegateForFunctionPointer<ThisCallNative.GetEFn>(instance->vtable->getE);
+
+ ThisCallNative.E result = callback(instance);
+
+ Assert.AreEqual(instance->dummy, result);
+ }
+
+ private static void Test8ByteHFAReverse()
+ {
+ ThisCallNative.C c = CreateCWithManagedVTable(2.0f, 3.0f);
+ ThisCallNative.SizeF result = ThisCallNative.GetSizeFromManaged(&c);
+
+ Assert.AreEqual(c.width, result.width);
+ Assert.AreEqual(c.height, result.height);
+ }
+
+ private static void Test4ByteHFAReverse()
+ {
+ ThisCallNative.C c = CreateCWithManagedVTable(2.0f, 3.0f);
+ ThisCallNative.Width result = ThisCallNative.GetWidthFromManaged(&c);
+
+ Assert.AreEqual(c.width, result.width);
+ }
+
+ private static void Test4ByteNonHFAReverse()
+ {
+ ThisCallNative.C c = CreateCWithManagedVTable(2.0f, 3.0f);
+ ThisCallNative.IntWrapper result = ThisCallNative.GetHeightAsIntFromManaged(&c);
+
+ Assert.AreEqual((int)c.height, result.i);
+ }
+
+ private static void TestEnumReverse()
+ {
+ ThisCallNative.C c = CreateCWithManagedVTable(2.0f, 3.0f);
+ ThisCallNative.E result = ThisCallNative.GetEFromManaged(&c);
+
+ Assert.AreEqual(c.dummy, result);
+ }
+
+ private static ThisCallNative.C CreateCWithManagedVTable(float width, float height)
+ {
+ return new ThisCallNative.C
+ {
+ vtable = ManagedVtable,
+ dummy = ThisCallNative.E.Value,
+ width = width,
+ height = height
+ };
+ }
+
+ private static ThisCallNative.C.VtableLayout* managedVtable;
+
+ private static ThisCallNative.C.VtableLayout* ManagedVtable
+ {
+ get
+ {
+ if (managedVtable == null)
+ {
+ managedVtable = (ThisCallNative.C.VtableLayout*)Marshal.AllocHGlobal(sizeof(ThisCallNative.C.VtableLayout));
+ managedVtable->getSize = Marshal.GetFunctionPointerForDelegate(
+ (ThisCallNative.GetSizeFn)((ThisCallNative.C* c) => new ThisCallNative.SizeF { width = c->width, height = c->height} ));
+ managedVtable->getWidth = Marshal.GetFunctionPointerForDelegate(
+ (ThisCallNative.GetWidthFn)((ThisCallNative.C* c) => new ThisCallNative.Width { width = c->width} ));
+ managedVtable->getHeightAsInt = Marshal.GetFunctionPointerForDelegate(
+ (ThisCallNative.GetHeightAsIntFn)((ThisCallNative.C* c) => new ThisCallNative.IntWrapper { i = (int)c->height} ));
+ managedVtable->getE = Marshal.GetFunctionPointerForDelegate(
+ (ThisCallNative.GetEFn)((ThisCallNative.C* c) => c->dummy ));
+ }
+ return managedVtable;
+ }
+ }
}
diff --git a/src/tests/Interop/StructMarshalling/PInvoke/MarshalStructAsLayoutExp.cs b/src/tests/Interop/StructMarshalling/PInvoke/MarshalStructAsLayoutExp.cs
index 9cffdfaccc475..fb2d2cf5cde7a 100644
--- a/src/tests/Interop/StructMarshalling/PInvoke/MarshalStructAsLayoutExp.cs
+++ b/src/tests/Interop/StructMarshalling/PInvoke/MarshalStructAsLayoutExp.cs
@@ -211,6 +211,8 @@ public static int Main()
#endregion
[DllImport("MarshalStructAsParam")]
static extern LongStructPack16Explicit GetLongStruct(long l1, long l2);
+ [DllImport("MarshalStructAsParam")]
+ static extern IntStructPack8Explicit GetIntStruct(int i, int j);
[DllImport("MarshalStructAsParam")]
static extern bool MarshalStructAsParam_AsExpByValOverlappingLongFloat(OverlappingLongFloat str, long expected);
@@ -1680,5 +1682,12 @@ private static void RunMarshalStructAsReturn()
Console.WriteLine("Failed to return LongStructPack16Explicit.");
failures++;
}
+
+ IntStructPack8Explicit intStruct = GetIntStruct(12345, 678910);
+ if(intStruct.i1 != 12345 || intStruct.i2 != 678910)
+ {
+ Console.WriteLine("Failed to return IntStructPack8Explicit.");
+ failures++;
+ }
}
}
diff --git a/src/tests/Interop/StructMarshalling/PInvoke/MarshalStructAsParamDLL.cpp b/src/tests/Interop/StructMarshalling/PInvoke/MarshalStructAsParamDLL.cpp
index 74ab359cd67b5..a7f5d17de6c48 100644
--- a/src/tests/Interop/StructMarshalling/PInvoke/MarshalStructAsParamDLL.cpp
+++ b/src/tests/Interop/StructMarshalling/PInvoke/MarshalStructAsParamDLL.cpp
@@ -1263,6 +1263,11 @@ extern "C" DLL_EXPORT MultipleBools STDMETHODCALLTYPE GetBools(BOOL b1, BOOL b2)
return {b1, b2};
}
+extern "C" DLL_EXPORT IntStructPack8Explicit STDMETHODCALLTYPE GetIntStruct(int i, int j)
+{
+ return {i, j};
+}
+
using IntIntDelegate = int (STDMETHODCALLTYPE*)(int a);
struct DelegateFieldMarshaling
diff --git a/src/tests/issues.targets b/src/tests/issues.targets
index ba9cc1368ac47..1e060bb592a2f 100644
--- a/src/tests/issues.targets
+++ b/src/tests/issues.targets
@@ -414,6 +414,9 @@
<Issue>https://github.com/dotnet/runtime/issues/12299</Issue>
+ <ExcludeList Include="$(XunitTestBinBase)/Interop/PInvoke/Miscellaneous/ThisCall/*">
+ <Issue>Thiscall not supported on Windows ARM32.</Issue>
+ </ExcludeList>
<Issue>https://github.com/dotnet/runtime/issues/12979</Issue>