Skip to content

Commit

Permalink
Simplify NativeAOT PAL (dotnet#111730)
Browse files Browse the repository at this point in the history
* Simplify NativeAOT PAL

- Delete xplat Windows CONTEXT definition since it is not actually used for anything
- Delete hijacking callback abstraction. We introduced cycles between the PAL layer and the runtime layer of thread suspension a while ago, so we can take full advantage of the tight coupling and avoid going through extra indirections.
- Delete HANDLE abstraction for Thread object (enabled by deleting hijacking callback abstraction)

* More cleanup

* Fix Arm64 build break

* Delete RhpPInvokeExceptionGuard

* Fix win x86 build break

* More win x86 build breaks
  • Loading branch information
jkotas authored Jan 23, 2025
1 parent 546482d commit 92e1cdd
Show file tree
Hide file tree
Showing 27 changed files with 389 additions and 1,111 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -133,16 +133,14 @@ internal static void FailFastViaClasslib(RhFailFastReason reason, object? unhand
FallbackFailFast(reason, unhandledException);
}

#if TARGET_WINDOWS

#if TARGET_AMD64
[StructLayout(LayoutKind.Explicit, Size = 0x4d0)]
#elif TARGET_ARM
[StructLayout(LayoutKind.Explicit, Size = 0x1a0)]
#elif TARGET_X86
[StructLayout(LayoutKind.Explicit, Size = 0x2cc)]
#elif TARGET_ARM64
[StructLayout(LayoutKind.Explicit, Size = 0x390)]
#else
[StructLayout(LayoutKind.Explicit, Size = 0x10)] // this is small enough that it should trip an assert in RhpCopyContextFromExInfo
#endif
private struct OSCONTEXT
{
Expand All @@ -151,13 +149,11 @@ private struct OSCONTEXT
internal static void* PointerAlign(void* ptr, int alignmentInBytes)
{
int alignMask = alignmentInBytes - 1;
#if TARGET_64BIT
return (void*)((((long)ptr) + alignMask) & ~alignMask);
#else
return (void*)((((int)ptr) + alignMask) & ~alignMask);
#endif
return (void*)((((nint)ptr) + alignMask) & ~alignMask);
}

#endif // TARGET_WINDOWS

#if NATIVEAOT
private static void OnFirstChanceExceptionViaClassLib(object exception)
{
Expand Down Expand Up @@ -220,12 +216,16 @@ internal static void UnhandledExceptionFailFastViaClasslib(
classlibAddress);
}

#if TARGET_WINDOWS
// 16-byte align the context. This is overkill on x86 and ARM, but simplifies things slightly.
const int contextAlignment = 16;
byte* pbBuffer = stackalloc byte[sizeof(OSCONTEXT) + contextAlignment];
void* pContext = PointerAlign(pbBuffer, contextAlignment);

InternalCalls.RhpCopyContextFromExInfo(pContext, sizeof(OSCONTEXT), exInfo._pExContext);
#else
void* pContext = null; // Fatal crash handler does not use the context on non-Windows
#endif

try
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -247,9 +247,11 @@ internal static extern unsafe IntPtr RhpCallPropagateExceptionCallback(
[MethodImpl(MethodImplOptions.InternalCall)]
internal static extern void RhpValidateExInfoStack();

#if TARGET_WINDOWS
[RuntimeImport(Redhawk.BaseName, "RhpCopyContextFromExInfo")]
[MethodImpl(MethodImplOptions.InternalCall)]
internal static extern unsafe void RhpCopyContextFromExInfo(void* pOSContext, int cbOSContext, EH.PAL_LIMITED_CONTEXT* pPalContext);
#endif

[RuntimeImport(Redhawk.BaseName, "RhpGetThreadAbortException")]
[MethodImpl(MethodImplOptions.InternalCall)]
Expand Down
10 changes: 5 additions & 5 deletions src/coreclr/nativeaot/Runtime/AsmOffsets.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,12 +50,12 @@ ASM_OFFSET( 0, 0, Thread, m_eeAllocContext)
ASM_OFFSET( 2c, 40, Thread, m_ThreadStateFlags)
ASM_OFFSET( 30, 48, Thread, m_pTransitionFrame)
ASM_OFFSET( 34, 50, Thread, m_pDeferredTransitionFrame)
ASM_OFFSET( 44, 70, Thread, m_ppvHijackedReturnAddressLocation)
ASM_OFFSET( 48, 78, Thread, m_pvHijackedReturnAddress)
ASM_OFFSET( 4c, 80, Thread, m_pExInfoStackHead)
ASM_OFFSET( 50, 88, Thread, m_threadAbortException)
ASM_OFFSET( 40, 68, Thread, m_ppvHijackedReturnAddressLocation)
ASM_OFFSET( 44, 70, Thread, m_pvHijackedReturnAddress)
ASM_OFFSET( 48, 78, Thread, m_pExInfoStackHead)
ASM_OFFSET( 4c, 80, Thread, m_threadAbortException)
#ifdef TARGET_X86
ASM_OFFSET( 54, FF, Thread, m_uHijackedReturnValueFlags)
ASM_OFFSET( 50, FF, Thread, m_uHijackedReturnValueFlags)
#endif

ASM_SIZEOF( 14, 20, EHEnum)
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/nativeaot/Runtime/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ else()
list(APPEND FULL_RUNTIME_SOURCES
unix/cgroupcpu.cpp
unix/HardwareExceptions.cpp
unix/UnixContext.cpp
unix/NativeContext.cpp
unix/UnixSignals.cpp
unix/UnwindHelpers.cpp
unix/UnixNativeCodeManager.cpp
Expand Down
156 changes: 24 additions & 132 deletions src/coreclr/nativeaot/Runtime/EHHelpers.cpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "common.h"
#ifdef HOST_WINDOWS
#include <windows.h>
#endif
#ifndef DACCESS_COMPILE
#include "CommonTypes.h"
#include "CommonMacros.h"
Expand Down Expand Up @@ -28,6 +31,7 @@
#include "MethodTable.h"
#include "MethodTable.inl"
#include "CommonMacros.inl"
#include "NativeContext.h"

struct MethodRegionInfo
{
Expand Down Expand Up @@ -110,33 +114,21 @@ FCIMPL2(int32_t, RhGetModuleFileName, HANDLE moduleHandle, _Out_ const TCHAR** p
}
FCIMPLEND

#ifdef TARGET_WINDOWS
FCIMPL3(void, RhpCopyContextFromExInfo, void * pOSContext, int32_t cbOSContext, PAL_LIMITED_CONTEXT * pPalContext)
{
ASSERT((size_t)cbOSContext >= sizeof(CONTEXT));
CONTEXT* pContext = (CONTEXT *)pOSContext;

#ifndef HOST_WASM

#if defined(HOST_X86) || defined(HOST_AMD64)
memset(pOSContext, 0, cbOSContext);
pContext->ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER;

// Fill in CONTEXT_CONTROL registers that were not captured in PAL_LIMITED_CONTEXT.
PopulateControlSegmentRegisters(pContext);
#endif // HOST_X86 || HOST_AMD64

#endif // !HOST_WASM

#if defined(UNIX_AMD64_ABI)
pContext->Rip = pPalContext->IP;
pContext->Rsp = pPalContext->Rsp;
pContext->Rbp = pPalContext->Rbp;
pContext->Rdx = pPalContext->Rdx;
pContext->Rax = pPalContext->Rax;
pContext->Rbx = pPalContext->Rbx;
pContext->R12 = pPalContext->R12;
pContext->R13 = pPalContext->R13;
pContext->R14 = pPalContext->R14;
pContext->R15 = pPalContext->R15;
#elif defined(HOST_AMD64)
#if defined(HOST_AMD64)
pContext->Rip = pPalContext->IP;
pContext->Rsp = pPalContext->Rsp;
pContext->Rbp = pPalContext->Rbp;
Expand All @@ -156,19 +148,6 @@ FCIMPL3(void, RhpCopyContextFromExInfo, void * pOSContext, int32_t cbOSContext,
pContext->Esi = pPalContext->Rsi;
pContext->Eax = pPalContext->Rax;
pContext->Ebx = pPalContext->Rbx;
#elif defined(HOST_ARM)
pContext->R0 = pPalContext->R0;
pContext->R4 = pPalContext->R4;
pContext->R5 = pPalContext->R5;
pContext->R6 = pPalContext->R6;
pContext->R7 = pPalContext->R7;
pContext->R8 = pPalContext->R8;
pContext->R9 = pPalContext->R9;
pContext->R10 = pPalContext->R10;
pContext->R11 = pPalContext->R11;
pContext->Sp = pPalContext->SP;
pContext->Lr = pPalContext->LR;
pContext->Pc = pPalContext->IP;
#elif defined(HOST_ARM64)
pContext->X0 = pPalContext->X0;
pContext->X1 = pPalContext->X1;
Expand All @@ -187,109 +166,20 @@ FCIMPL3(void, RhpCopyContextFromExInfo, void * pOSContext, int32_t cbOSContext,
pContext->Sp = pPalContext->SP;
pContext->Lr = pPalContext->LR;
pContext->Pc = pPalContext->IP;
#elif defined(HOST_LOONGARCH64)
pContext->R4 = pPalContext->R4;
pContext->R5 = pPalContext->R5;
pContext->R23 = pPalContext->R23;
pContext->R24 = pPalContext->R24;
pContext->R25 = pPalContext->R25;
pContext->R26 = pPalContext->R26;
pContext->R27 = pPalContext->R27;
pContext->R28 = pPalContext->R28;
pContext->R29 = pPalContext->R29;
pContext->R30 = pPalContext->R30;
pContext->R31 = pPalContext->R31;
pContext->Fp = pPalContext->FP;
pContext->Sp = pPalContext->SP;
pContext->Ra = pPalContext->RA;
pContext->Pc = pPalContext->IP;
#elif defined(HOST_RISCV64)
pContext->A0 = pPalContext->A0;
pContext->A1 = pPalContext->A1;
pContext->S1 = pPalContext->S1;
pContext->S2 = pPalContext->S2;
pContext->S3 = pPalContext->S3;
pContext->S4 = pPalContext->S4;
pContext->S5 = pPalContext->S5;
pContext->S6 = pPalContext->S6;
pContext->S7 = pPalContext->S7;
pContext->S8 = pPalContext->S8;
pContext->S9 = pPalContext->S9;
pContext->S10 = pPalContext->S10;
pContext->S11 = pPalContext->S11;
pContext->Fp = pPalContext->FP;
pContext->Sp = pPalContext->SP;
pContext->Ra = pPalContext->RA;
pContext->Pc = pPalContext->IP;
#elif defined(HOST_WASM)
// No registers, no work to do yet
#else
#error Not Implemented for this architecture -- RhpCopyContextFromExInfo
#endif
}
FCIMPLEND

struct DISPATCHER_CONTEXT
{
uintptr_t ControlPc;
// N.B. There is more here (so this struct isn't the right size), but we ignore everything else
};

#ifdef HOST_X86
struct EXCEPTION_REGISTRATION_RECORD
{
uintptr_t Next;
uintptr_t Handler;
};
#endif // HOST_X86

EXTERN_C void QCALLTYPE RhpFailFastForPInvokeExceptionPreemp(intptr_t PInvokeCallsiteReturnAddr,
void* pExceptionRecord, void* pContextRecord);
FCDECL3(void, RhpFailFastForPInvokeExceptionCoop, intptr_t PInvokeCallsiteReturnAddr,
void* pExceptionRecord, void* pContextRecord);
EXTERN_C int32_t __stdcall RhpVectoredExceptionHandler(PEXCEPTION_POINTERS pExPtrs);

EXTERN_C int32_t __stdcall RhpPInvokeExceptionGuard(PEXCEPTION_RECORD pExceptionRecord,
uintptr_t EstablisherFrame,
PCONTEXT pContextRecord,
DISPATCHER_CONTEXT * pDispatcherContext)
{
UNREFERENCED_PARAMETER(EstablisherFrame);

Thread * pThread = ThreadStore::GetCurrentThread();

// A thread in DoNotTriggerGc mode has many restrictions that will become increasingly likely to be violated as
// exception dispatch kicks off. So we just address this as early as possible with a FailFast.
// The most likely case where this occurs is in GC-callouts -- in that case, we have
// managed code that runs on behalf of GC, which might have a bug that causes an AV.
if (pThread->IsDoNotTriggerGcSet())
RhFailFast();

// We promote exceptions that were not converted to managed exceptions to a FailFast. However, we have to
// be careful because we got here via OS SEH infrastructure and, therefore, don't know what GC mode we're
// currently in. As a result, since we're calling back into managed code to handle the FailFast, we must
// correctly call either a UnmanagedCallersOnly or a RuntimeExport version of the same method.
if (pThread->IsCurrentThreadInCooperativeMode())
{
// Cooperative mode -- Typically, RhpVectoredExceptionHandler will handle this because the faulting IP will be
// in managed code. But sometimes we AV on a bad call indirect or something similar. In that situation, we can
// use the dispatcher context or exception registration record to find the relevant classlib.
#ifdef HOST_X86
intptr_t classlibBreadcrumb = ((EXCEPTION_REGISTRATION_RECORD*)EstablisherFrame)->Handler;
#else
intptr_t classlibBreadcrumb = pDispatcherContext->ControlPc;
#endif
RhpFailFastForPInvokeExceptionCoop(classlibBreadcrumb, pExceptionRecord, pContextRecord);
}
else
{
// Preemptive mode -- the classlib associated with the last pinvoke owns the fail fast behavior.
intptr_t pinvokeCallsiteReturnAddr = (intptr_t)pThread->GetCurrentThreadPInvokeReturnAddress();
RhpFailFastForPInvokeExceptionPreemp(pinvokeCallsiteReturnAddr, pExceptionRecord, pContextRecord);
}
LONG WINAPI RhpVectoredExceptionHandler(PEXCEPTION_POINTERS pExPtrs);

return 0;
}
#endif // TARGET_WINDOWS

FCDECL2(void, RhpThrowHwEx, int exceptionCode, TADDR faultingIP);

Expand Down Expand Up @@ -413,7 +303,7 @@ static uintptr_t UnwindSimpleHelperToCaller(
#ifdef TARGET_UNIX
PAL_LIMITED_CONTEXT * pContext
#else
_CONTEXT * pContext
NATIVE_CONTEXT * pContext
#endif
)
{
Expand Down Expand Up @@ -516,7 +406,7 @@ EXTERN_C void RhpContinueOnFatalErrors()
g_ContinueOnFatalErrors = true;
}

int32_t __stdcall RhpVectoredExceptionHandler(PEXCEPTION_POINTERS pExPtrs)
LONG WINAPI RhpVectoredExceptionHandler(PEXCEPTION_POINTERS pExPtrs)
{
uintptr_t faultCode = pExPtrs->ExceptionRecord->ExceptionCode;

Expand Down Expand Up @@ -550,7 +440,7 @@ int32_t __stdcall RhpVectoredExceptionHandler(PEXCEPTION_POINTERS pExPtrs)
RhFailFast();
}

PCONTEXT interruptedContext = pExPtrs->ContextRecord;
NATIVE_CONTEXT* interruptedContext = (NATIVE_CONTEXT*)pExPtrs->ContextRecord;
bool areShadowStacksEnabled = PalAreShadowStacksEnabled();
if (areShadowStacksEnabled)
{
Expand All @@ -560,8 +450,8 @@ int32_t __stdcall RhpVectoredExceptionHandler(PEXCEPTION_POINTERS pExPtrs)
// When the CET is enabled, the interruption happens on the ret instruction in the callee.
// We need to "pop" rsp to the caller, as if the ret has consumed it.
interruptedContext->SetSp(interruptedContext->GetSp() + 8);
uintptr_t ssp = GetSSP(interruptedContext);
SetSSP(interruptedContext, ssp + 8);
uintptr_t ssp = GetSSP(&interruptedContext->ctx);
SetSSP(&interruptedContext->ctx, ssp + 8);
}

// Change the IP to be at the original return site, as if we have returned to the caller.
Expand All @@ -575,7 +465,7 @@ int32_t __stdcall RhpVectoredExceptionHandler(PEXCEPTION_POINTERS pExPtrs)
}
#endif // TARGET_AMD64 (support for STATUS_RETURN_ADDRESS_HIJACK_ATTEMPT)

uintptr_t faultingIP = pExPtrs->ContextRecord->GetIp();
uintptr_t faultingIP = ((NATIVE_CONTEXT*)pExPtrs->ContextRecord)->GetIp();

ICodeManager * pCodeManager = GetRuntimeInstance()->GetCodeManagerForAddress((PTR_VOID)faultingIP);
bool translateToManagedException = false;
Expand All @@ -601,7 +491,7 @@ int32_t __stdcall RhpVectoredExceptionHandler(PEXCEPTION_POINTERS pExPtrs)

// Do not use ASSERT_UNCONDITIONALLY here. It will crash because of it consumes too much stack.
PalPrintFatalError("\nProcess is terminating due to StackOverflowException.\n");
PalRaiseFailFastException(pExPtrs->ExceptionRecord, pExPtrs->ContextRecord, 0);
RaiseFailFastException(pExPtrs->ExceptionRecord, pExPtrs->ContextRecord, 0);
}

translateToManagedException = true;
Expand All @@ -621,17 +511,19 @@ int32_t __stdcall RhpVectoredExceptionHandler(PEXCEPTION_POINTERS pExPtrs)
}

// we were AV-ing in a helper - unwind our way to our caller
faultingIP = UnwindSimpleHelperToCaller(pExPtrs->ContextRecord);
faultingIP = UnwindSimpleHelperToCaller((NATIVE_CONTEXT*)(pExPtrs->ContextRecord));

translateToManagedException = true;
}
}

if (translateToManagedException)
{
pExPtrs->ContextRecord->SetIp(PCODEToPINSTR((PCODE)&RhpThrowHwEx));
pExPtrs->ContextRecord->SetArg0Reg(faultCode);
pExPtrs->ContextRecord->SetArg1Reg(faultingIP);
NATIVE_CONTEXT* pCtx = (NATIVE_CONTEXT*)pExPtrs->ContextRecord;

pCtx->SetIp(PCODEToPINSTR((PCODE)&RhpThrowHwEx));
pCtx->SetArg0Reg(faultCode);
pCtx->SetArg1Reg(faultingIP);

return EXCEPTION_CONTINUE_EXECUTION;
}
Expand Down Expand Up @@ -664,7 +556,7 @@ int32_t __stdcall RhpVectoredExceptionHandler(PEXCEPTION_POINTERS pExPtrs)
if (((uint8_t*)faultingIP >= s_pbRuntimeModuleLower) && ((uint8_t*)faultingIP < s_pbRuntimeModuleUpper))
{
ASSERT_UNCONDITIONALLY("Hardware exception raised inside the runtime.");
PalRaiseFailFastException(pExPtrs->ExceptionRecord, pExPtrs->ContextRecord, 0);
RaiseFailFastException(pExPtrs->ExceptionRecord, pExPtrs->ContextRecord, 0);
}
}

Expand Down
Loading

0 comments on commit 92e1cdd

Please sign in to comment.