From d1f3fb1c76ad72155d3becc8f9bed7d70e9485a9 Mon Sep 17 00:00:00 2001 From: Eliot Miranda Date: Wed, 25 Jul 2018 19:06:07 -0700 Subject: [PATCH] CogVM source as per VMMaker.oscog-eem.2424 Cogits. Fix bad bug in pc mapping of methods containing primitives. methodUsesPrimitiveErrorCode:header: used the initialPC inst var, rather than deriving the initialPC of the method parameter. initialPC is stale/over ripe, being that of the last JITTED method. So many methods containing primitives get their PC mapping wrong. We haven't noticed this much because primitives typically succeed. Nuke an obsolete version of the method. Have the Spur immutability store generators voidReceiverOptStatus, since with immutability, stores are suspension points (when an immutability check fails) and so receiverResultReg cannot be assumed to be live after a store. Remove ssAllocateRequiredRegMask:upThrough:, inlining it into its callers, ssAllocateCall/RequiredReg:... et al. Clean up the computation of the register mask in SistaCogit>>genForwardersInlinedIdenticalOrNotIf: et al. 
--- nsspur64src/vm/cogit.h | 2 +- nsspur64src/vm/cogitX64SysV.c | 174 ++++------- nsspur64src/vm/cogitX64WIN64.c | 170 ++++------ nsspur64src/vm/cointerp.c | 28 +- nsspur64src/vm/cointerp.h | 2 +- nsspur64src/vm/gcc3x-cointerp.c | 28 +- nsspursrc/vm/cogit.h | 2 +- nsspursrc/vm/cogitARMv5.c | 180 ++++------- nsspursrc/vm/cogitIA32.c | 176 ++++------- nsspursrc/vm/cogitMIPSEL.c | 178 ++++------- nsspursrc/vm/cointerp.c | 40 ++- nsspursrc/vm/cointerp.h | 4 +- nsspursrc/vm/gcc3x-cointerp.c | 40 ++- spur64src/vm/cogit.h | 2 +- spur64src/vm/cogitX64SysV.c | 165 ++++------ spur64src/vm/cogitX64WIN64.c | 161 ++++------ spur64src/vm/cointerp.c | 34 +- spur64src/vm/cointerp.h | 2 +- spur64src/vm/cointerpmt.c | 28 +- spur64src/vm/cointerpmt.h | 2 +- spur64src/vm/gcc3x-cointerp.c | 34 +- spur64src/vm/gcc3x-cointerpmt.c | 28 +- spurlowcode64src/vm/cogit.h | 2 +- spurlowcode64src/vm/cogitX64SysV.c | 399 +++++++++++------------- spurlowcode64src/vm/cogitX64WIN64.c | 399 +++++++++++------------- spurlowcode64src/vm/cointerp.c | 36 ++- spurlowcode64src/vm/cointerp.h | 2 +- spurlowcode64src/vm/gcc3x-cointerp.c | 36 ++- spurlowcodesrc/vm/cogit.h | 2 +- spurlowcodesrc/vm/cogitARMv5.c | 449 ++++++++++++--------------- spurlowcodesrc/vm/cogitIA32.c | 401 +++++++++++------------- spurlowcodesrc/vm/cogitMIPSEL.c | 413 +++++++++++------------- spurlowcodesrc/vm/cointerp.c | 38 ++- spurlowcodesrc/vm/cointerp.h | 2 +- spurlowcodesrc/vm/gcc3x-cointerp.c | 38 ++- spursista64src/vm/cogit.h | 2 +- spursista64src/vm/cogitX64SysV.c | 204 +++++------- spursista64src/vm/cogitX64WIN64.c | 204 +++++------- spursista64src/vm/cointerp.c | 28 +- spursista64src/vm/cointerp.h | 2 +- spursista64src/vm/gcc3x-cointerp.c | 28 +- spursistasrc/vm/cogit.h | 2 +- spursistasrc/vm/cogitARMv5.c | 214 ++++++------- spursistasrc/vm/cogitIA32.c | 214 ++++++------- spursistasrc/vm/cogitMIPSEL.c | 220 ++++++------- spursistasrc/vm/cointerp.c | 40 ++- spursistasrc/vm/cointerp.h | 4 +- spursistasrc/vm/gcc3x-cointerp.c | 40 
++- spursrc/vm/cogit.h | 2 +- spursrc/vm/cogitARMv5.c | 155 ++++----- spursrc/vm/cogitIA32.c | 155 ++++----- spursrc/vm/cogitMIPSEL.c | 165 ++++------ spursrc/vm/cointerp.c | 32 +- spursrc/vm/cointerp.h | 2 +- spursrc/vm/cointerpmt.c | 36 ++- spursrc/vm/cointerpmt.h | 2 +- spursrc/vm/gcc3x-cointerp.c | 32 +- spursrc/vm/gcc3x-cointerpmt.c | 36 ++- src/vm/cogit.h | 2 +- src/vm/cogitARMv5.c | 145 +++------ src/vm/cogitIA32.c | 139 +++------ src/vm/cogitMIPSEL.c | 145 +++------ src/vm/cointerp.c | 36 ++- src/vm/cointerp.h | 2 +- src/vm/cointerpmt.c | 36 ++- src/vm/cointerpmt.h | 2 +- src/vm/gcc3x-cointerp.c | 36 ++- src/vm/gcc3x-cointerpmt.c | 36 ++- 68 files changed, 2595 insertions(+), 3500 deletions(-) diff --git a/nsspur64src/vm/cogit.h b/nsspur64src/vm/cogit.h index 626872f7c7..909d1a9cf8 100644 --- a/nsspur64src/vm/cogit.h +++ b/nsspur64src/vm/cogit.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/nsspur64src/vm/cogitX64SysV.c b/nsspur64src/vm/cogitX64SysV.c index f97f574563..fe8c52dde7 100644 --- a/nsspur64src/vm/cogitX64SysV.c +++ b/nsspur64src/vm/cogitX64SysV.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -583,7 
+583,7 @@ static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static CogMethod * NoDbgRegParms fillInMethodHeadersizeselector(CogMethod *method, sqInt size, sqInt selector); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); -static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod *cogMethod); static sqInt NoDbgRegParms findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -1227,13 +1227,7 @@ static sqInt receiverIsInReceiverResultReg(void); static void NoDbgRegParms reinitializeFixupsFromthrough(sqInt start, sqInt end); static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms 
ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -5510,7 +5504,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -5818,7 +5812,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -6093,6 +6087,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. 
*/ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -17617,6 +17614,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -17668,6 +17667,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -24381,7 +24382,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -27524,7 +27525,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { 
compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27661,7 +27662,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27923,7 +27924,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -28062,7 +28064,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -28227,7 +28230,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; 
i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29068,7 +29072,8 @@ genNSSendnumArgsdepthsendTable(sqInt selectorIndex, sqInt numArgs, sqInt depth, (nsSendCache1->numArgs = numArgs); (nsSendCache1->depth = depth); (nsSendCache1->classTag = 0 /* illegalClassTag */); - ssAllocateCallReg(SendNumArgsReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << SendNumArgsReg), simStackPtr, simNativeStackPtr); marshallAbsentReceiverSendArguments(numArgs); /* begin uniqueLiteral:forInstruction: */ anInstruction = genoperandoperand(MoveCwR, nsSendCache, SendNumArgsReg); @@ -29235,7 +29240,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -29264,7 +29270,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29285,7 +29292,8 @@ 
genPushEnclosingObjectAt(sqInt level) AbstractInstruction *anInstruction; voidReceiverResultRegContainsSelf(); - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); /* begin checkQuickConstant:forInstruction: */ anInstruction = genoperandoperand(MoveCqR, level, SendNumArgsReg); /* begin CallRT: */ @@ -29359,7 +29367,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -29415,7 +29424,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -29840,16 +29850,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1ULL << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + 
ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -30081,7 +30095,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -30095,7 +30110,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30169,7 +30185,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -30215,7 +30232,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin 
ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30252,7 +30270,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -30539,7 +30558,8 @@ marshallAbsentReceiverSendArguments(sqInt numArgs) sqInt numSpilled; assert(needsFrame); - ssAllocateCallReg(ReceiverResultReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << ReceiverResultReg), simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); /* begin ssFlushTo: */ @@ -30591,11 +30611,14 @@ marshallAbsentReceiverSendArguments(sqInt numArgs) else { if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -30679,11 +30702,14 @@ marshallSendArguments(sqInt numArgs) Also check for any 
arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -31189,45 +31215,6 @@ scanMethod(void) return numBlocks; } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1ULL << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1ULL << requiredReg1) | (1ULL << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. 
flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1ULL << requiredReg1) | (1ULL << requiredReg2)) | (1ULL << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -31266,39 +31253,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1ULL << requiredReg1) | (1ULL << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - 
ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/nsspur64src/vm/cogitX64WIN64.c b/nsspur64src/vm/cogitX64WIN64.c index a5b1de10ac..a4c4124cd9 100644 --- a/nsspur64src/vm/cogitX64WIN64.c +++ b/nsspur64src/vm/cogitX64WIN64.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -1227,13 +1227,7 @@ static sqInt receiverIsInReceiverResultReg(void); static void NoDbgRegParms reinitializeFixupsFromthrough(sqInt start, sqInt end); static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt 
stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -5518,7 +5512,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -6101,6 +6095,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -17625,6 +17622,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -17676,6 +17675,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -24410,7 +24411,7 @@ 
compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -27553,7 +27554,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27690,7 +27691,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27952,7 +27953,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -28091,7 +28093,8 @@ 
freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -28256,7 +28259,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29097,7 +29101,8 @@ genNSSendnumArgsdepthsendTable(sqInt selectorIndex, sqInt numArgs, sqInt depth, (nsSendCache1->numArgs = numArgs); (nsSendCache1->depth = depth); (nsSendCache1->classTag = 0 /* illegalClassTag */); - ssAllocateCallReg(SendNumArgsReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << SendNumArgsReg), simStackPtr, simNativeStackPtr); marshallAbsentReceiverSendArguments(numArgs); /* begin uniqueLiteral:forInstruction: */ anInstruction = genoperandoperand(MoveCwR, nsSendCache, SendNumArgsReg); @@ -29264,7 +29269,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, 
simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -29293,7 +29299,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29314,7 +29321,8 @@ genPushEnclosingObjectAt(sqInt level) AbstractInstruction *anInstruction; voidReceiverResultRegContainsSelf(); - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); /* begin checkQuickConstant:forInstruction: */ anInstruction = genoperandoperand(MoveCqR, level, SendNumArgsReg); /* begin CallRT: */ @@ -29388,7 +29396,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -29444,7 +29453,8 @@ 
genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -29869,16 +29879,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1ULL << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -30110,7 +30124,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -30124,7 +30139,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + 
ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30198,7 +30214,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -30244,7 +30261,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30281,7 +30299,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -30568,7 +30587,8 @@ marshallAbsentReceiverSendArguments(sqInt numArgs) sqInt numSpilled; assert(needsFrame); - ssAllocateCallReg(ReceiverResultReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << ReceiverResultReg), simStackPtr, simNativeStackPtr); /* begin 
putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); /* begin ssFlushTo: */ @@ -30620,11 +30640,14 @@ marshallAbsentReceiverSendArguments(sqInt numArgs) else { if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -30708,11 +30731,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -31218,45 +31244,6 @@ scanMethod(void) return numBlocks; } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1ULL << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1ULL << requiredReg1) | (1ULL << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1ULL << requiredReg1) | (1ULL << requiredReg2)) | (1ULL << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -31295,39 +31282,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1ULL << requiredReg1) | (1ULL << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored 
(which is the top of stack) diff --git a/nsspur64src/vm/cointerp.c b/nsspur64src/vm/cointerp.c index 0d74733fe0..4180c6dc62 100644 --- a/nsspur64src/vm/cointerp.c +++ b/nsspur64src/vm/cointerp.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2591,7 +2591,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Newspeak Virtual Machine CoInterpreterPrimitives_VMMaker.oscog-eem.2420"; +const char *interpreterVersion = "Newspeak Virtual Machine CoInterpreterPrimitives_VMMaker.oscog-eem.2424"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -18056,12 +18056,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. 
But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. */ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void diff --git a/nsspur64src/vm/cointerp.h b/nsspur64src/vm/cointerp.h index 81b373212e..3dd346ae8a 100644 --- a/nsspur64src/vm/cointerp.h +++ b/nsspur64src/vm/cointerp.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/nsspur64src/vm/gcc3x-cointerp.c b/nsspur64src/vm/gcc3x-cointerp.c index c8e90a266b..b732b0c644 100644 --- a/nsspur64src/vm/gcc3x-cointerp.c +++ b/nsspur64src/vm/gcc3x-cointerp.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2594,7 +2594,7 @@ static signed char 
primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Newspeak Virtual Machine CoInterpreterPrimitives_VMMaker.oscog-eem.2420"; +const char *interpreterVersion = "Newspeak Virtual Machine CoInterpreterPrimitives_VMMaker.oscog-eem.2424"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -18065,12 +18065,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void diff --git a/nsspursrc/vm/cogit.h b/nsspursrc/vm/cogit.h index 626872f7c7..909d1a9cf8 100644 --- a/nsspursrc/vm/cogit.h +++ b/nsspursrc/vm/cogit.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/nsspursrc/vm/cogitARMv5.c b/nsspursrc/vm/cogitARMv5.c index fcd349c44e..7cee47242f 100644 --- a/nsspursrc/vm/cogitARMv5.c +++ b/nsspursrc/vm/cogitARMv5.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -651,7 +651,7 @@ static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static CogMethod * NoDbgRegParms fillInMethodHeadersizeselector(CogMethod *method, sqInt size, sqInt selector); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); -static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt 
targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod *cogMethod); static sqInt NoDbgRegParms findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -725,7 +725,7 @@ static BytecodeDescriptor * loadBytesAndGetDescriptor(void); static void NoDbgRegParms loadSubsequentBytesForDescriptorat(BytecodeDescriptor *descriptor, sqInt pc); static AbstractInstruction * NoDbgRegParms gMoveAwR(sqInt address, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveCwR(sqInt wordConstant, sqInt reg); -static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); static sqInt NoDbgRegParms mapForperformUntilarg(CogMethod *cogMethod, sqInt (*functionSymbol)(sqInt annotation, char *mcpc, sqInt arg), sqInt arg); static sqInt NoDbgRegParms mapObjectReferencesInClosedPIC(CogMethod *cPIC); static void mapObjectReferencesInGeneratedRuntime(void); @@ -1233,13 +1233,7 @@ static sqInt receiverIsInReceiverResultReg(void); static void NoDbgRegParms reinitializeFixupsFromthrough(sqInt start, sqInt end); static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms 
ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -7428,7 +7422,7 @@ static sqInt NoDbgRegParms blockDispatchTargetsForperformarg(CogMethod *cogMethod, usqInt (*binaryFunction)(sqInt mcpc, sqInt arg), sqInt arg) { sqInt blockEntry; - usqInt end; + sqInt end; sqInt pc; sqInt result; usqInt targetpc; @@ -10183,7 +10177,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -10493,7 +10487,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -10767,6 +10761,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -12377,7 +12374,7 @@ gMoveCwR(sqInt wordConstant, sqInt reg) /* Answer the address of the null byte at the end of the method map. 
*/ /* Cogit>>#mapEndFor: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod) { usqInt end; @@ -22857,6 +22854,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -22910,6 +22909,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -24833,7 +24834,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -27689,7 +27690,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { 
compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27835,7 +27836,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -28103,7 +28104,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -28242,7 +28244,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -28407,7 +28410,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i 
+= 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29290,7 +29294,8 @@ genNSSendnumArgsdepthsendTable(sqInt selectorIndex, sqInt numArgs, sqInt depth, (nsSendCache1->numArgs = numArgs); (nsSendCache1->depth = depth); (nsSendCache1->classTag = 2 /* illegalClassTag */); - ssAllocateCallReg(SendNumArgsReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << SendNumArgsReg), simStackPtr, simNativeStackPtr); marshallAbsentReceiverSendArguments(numArgs); /* begin uniqueLiteral:forInstruction: */ anInstruction = genoperandoperand(MoveCwR, nsSendCache, SendNumArgsReg); @@ -29475,7 +29480,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -29504,7 +29510,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29525,7 +29532,8 @@ 
genPushEnclosingObjectAt(sqInt level) AbstractInstruction *anInstruction; voidReceiverResultRegContainsSelf(); - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); /* begin checkQuickConstant:forInstruction: */ anInstruction = genoperandoperand(MoveCqR, level, SendNumArgsReg); if (usesOutOfLineLiteral(anInstruction)) { @@ -29605,7 +29613,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -29666,7 +29675,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -30110,16 +30120,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + 
ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -30357,7 +30371,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(gMoveCwR(association, ReceiverResultReg), association); @@ -30374,7 +30389,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30451,7 +30467,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -30500,7 +30517,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + 
ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30537,7 +30555,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -30829,7 +30848,8 @@ marshallAbsentReceiverSendArguments(sqInt numArgs) sqInt numSpilled; assert(needsFrame); - ssAllocateCallReg(ReceiverResultReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << ReceiverResultReg), simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); /* begin ssFlushTo: */ @@ -30890,11 +30910,14 @@ marshallAbsentReceiverSendArguments(sqInt numArgs) else { if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -30978,11 +31001,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. 
*/ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -31488,45 +31514,6 @@ scanMethod(void) return numBlocks; } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). 
Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -31565,39 +31552,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, 
stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/nsspursrc/vm/cogitIA32.c b/nsspursrc/vm/cogitIA32.c index dd2df706c7..6497e6e466 100644 --- a/nsspursrc/vm/cogitIA32.c +++ b/nsspursrc/vm/cogitIA32.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -712,7 +712,7 @@ static BytecodeDescriptor * loadBytesAndGetDescriptor(void); static void NoDbgRegParms loadSubsequentBytesForDescriptorat(BytecodeDescriptor *descriptor, sqInt pc); static AbstractInstruction * NoDbgRegParms gMoveAwR(sqInt address, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveCwR(sqInt wordConstant, sqInt reg); -static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); +static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); static sqInt NoDbgRegParms mapForperformUntilarg(CogMethod *cogMethod, sqInt (*functionSymbol)(sqInt annotation, char *mcpc, sqInt arg), sqInt arg); static sqInt NoDbgRegParms mapObjectReferencesInClosedPIC(CogMethod *cPIC); static void mapObjectReferencesInGeneratedRuntime(void); @@ -1196,13 +1196,7 @@ static sqInt receiverIsInReceiverResultReg(void); static void NoDbgRegParms reinitializeFixupsFromthrough(sqInt start, sqInt end); static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt 
scanMethod(void); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -7478,7 +7472,7 @@ static sqInt NoDbgRegParms blockDispatchTargetsForperformarg(CogMethod *cogMethod, usqInt (*binaryFunction)(sqInt mcpc, sqInt arg), sqInt arg) { sqInt blockEntry; - sqInt end; + usqInt end; sqInt pc; sqInt result; usqInt targetpc; @@ -10172,7 +10166,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -10756,6 +10750,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. 
*/ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -12310,7 +12307,7 @@ gMoveCwR(sqInt wordConstant, sqInt reg) /* Answer the address of the null byte at the end of the method map. */ /* Cogit>>#mapEndFor: */ -static sqInt NoDbgRegParms +static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod) { usqInt end; @@ -21878,6 +21875,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -21929,6 +21928,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -23066,7 +23067,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -25864,7 +25865,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == 
(fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -26001,7 +26002,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -26263,7 +26264,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -26402,7 +26404,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -26561,7 +26564,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << 
SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -27402,7 +27406,8 @@ genNSSendnumArgsdepthsendTable(sqInt selectorIndex, sqInt numArgs, sqInt depth, (nsSendCache1->numArgs = numArgs); (nsSendCache1->depth = depth); (nsSendCache1->classTag = 2 /* illegalClassTag */); - ssAllocateCallReg(SendNumArgsReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << SendNumArgsReg), simStackPtr, simNativeStackPtr); marshallAbsentReceiverSendArguments(numArgs); /* begin uniqueLiteral:forInstruction: */ anInstruction = genoperandoperand(MoveCwR, nsSendCache, SendNumArgsReg); @@ -27569,7 +27574,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -27598,7 +27604,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); 
genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -27619,7 +27626,8 @@ genPushEnclosingObjectAt(sqInt level) AbstractInstruction *anInstruction; voidReceiverResultRegContainsSelf(); - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); /* begin checkQuickConstant:forInstruction: */ anInstruction = genoperandoperand(MoveCqR, level, SendNumArgsReg); /* begin CallRT: */ @@ -27693,7 +27701,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -27748,7 +27757,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -28173,16 +28183,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + 
requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -28414,7 +28428,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -28428,7 +28443,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -28502,7 +28518,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, 
ClassReg); @@ -28548,7 +28565,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -28585,7 +28603,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -28872,7 +28891,8 @@ marshallAbsentReceiverSendArguments(sqInt numArgs) sqInt numSpilled; assert(needsFrame); - ssAllocateCallReg(ReceiverResultReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << ReceiverResultReg), simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); /* begin ssFlushTo: */ @@ -28924,11 +28944,14 @@ marshallAbsentReceiverSendArguments(sqInt numArgs) else { if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin 
ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -29012,11 +29035,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -29522,45 +29548,6 @@ scanMethod(void) return numBlocks; } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -29599,39 +29586,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << 
requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/nsspursrc/vm/cogitMIPSEL.c b/nsspursrc/vm/cogitMIPSEL.c index 86ce4b4a62..0e1c90d189 100644 --- a/nsspursrc/vm/cogitMIPSEL.c +++ b/nsspursrc/vm/cogitMIPSEL.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -586,7 +586,7 @@ static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static CogMethod * NoDbgRegParms fillInMethodHeadersizeselector(CogMethod *method, sqInt size, sqInt selector); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); 
-static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod *cogMethod); static sqInt NoDbgRegParms findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -1343,13 +1343,7 @@ static sqInt receiverIsInReceiverResultReg(void); static void NoDbgRegParms reinitializeFixupsFromthrough(sqInt start, sqInt end); static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -5113,7 +5107,7 @@ configureMNUCPICmethodOperandnumArgsdelta(CogMethod *cPIC, sqInt methodOperand, static sqInt NoDbgRegParms cPICCompactAndIsNowEmpty(CogMethod *cPIC) { - sqInt entryPoint; + usqInt entryPoint; sqInt followingAddress; sqInt i; sqInt methods[MaxCPICCases]; @@ -5214,7 +5208,7 @@ cPICHasForwardedClass(CogMethod *cPIC) static 
sqInt NoDbgRegParms cPICHasFreedTargets(CogMethod *cPIC) { - usqInt entryPoint; + sqInt entryPoint; sqInt i; sqInt pc; CogMethod *targetMethod; @@ -5369,7 +5363,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -5677,7 +5671,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -5951,6 +5945,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. 
*/ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -21188,6 +21185,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -21239,6 +21238,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -22380,7 +22381,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -25162,7 +25163,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { 
compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -25301,7 +25302,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -25563,7 +25564,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -25702,7 +25704,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -25867,7 +25870,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i 
+= 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -26711,7 +26715,8 @@ genNSSendnumArgsdepthsendTable(sqInt selectorIndex, sqInt numArgs, sqInt depth, (nsSendCache1->numArgs = numArgs); (nsSendCache1->depth = depth); (nsSendCache1->classTag = 2 /* illegalClassTag */); - ssAllocateCallReg(SendNumArgsReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << SendNumArgsReg), simStackPtr, simNativeStackPtr); marshallAbsentReceiverSendArguments(numArgs); /* begin uniqueLiteral:forInstruction: */ anInstruction = genoperandoperand(MoveCwR, nsSendCache, SendNumArgsReg); @@ -26878,7 +26883,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -26907,7 +26913,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -26928,7 +26935,8 @@ 
genPushEnclosingObjectAt(sqInt level) AbstractInstruction *anInstruction; voidReceiverResultRegContainsSelf(); - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); /* begin checkQuickConstant:forInstruction: */ anInstruction = genoperandoperand(MoveCqR, level, SendNumArgsReg); /* begin CallRT: */ @@ -27002,7 +27010,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -27057,7 +27066,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -27482,16 +27492,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + 
ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -27723,7 +27737,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -27737,7 +27752,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -27811,7 +27827,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -27857,7 +27874,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin 
ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -27894,7 +27912,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -28183,7 +28202,8 @@ marshallAbsentReceiverSendArguments(sqInt numArgs) sqInt numSpilled; assert(needsFrame); - ssAllocateCallReg(ReceiverResultReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << ReceiverResultReg), simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); /* begin ssFlushTo: */ @@ -28235,11 +28255,14 @@ marshallAbsentReceiverSendArguments(sqInt numArgs) else { if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -28323,11 +28346,14 @@ marshallSendArguments(sqInt numArgs) Also check for any 
arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -28833,45 +28859,6 @@ scanMethod(void) return numBlocks; } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. 
flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -28910,39 +28897,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - 
ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/nsspursrc/vm/cointerp.c b/nsspursrc/vm/cointerp.c index fd01d0bcdb..909ca5e860 100644 --- a/nsspursrc/vm/cointerp.c +++ b/nsspursrc/vm/cointerp.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -925,7 +925,7 @@ static sqInt NoDbgRegParms changeClassOfto(sqInt rcvr, sqInt argClass); static double NoDbgRegParms dbgFloatValueOf(sqInt oop); static sqInt defaultEdenBytes(void); extern sqInt fetchClassTagOf(sqInt oop); -extern sqInt floatObjectOf(double aFloat); +extern usqInt floatObjectOf(double aFloat); extern double floatValueOf(sqInt oop); static sqInt hasSixtyFourBitImmediates(void); extern sqInt headerIndicatesAlternateBytecodeSet(sqInt methodHeader); @@ -2569,7 +2569,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Newspeak Virtual Machine CoInterpreterPrimitives_VMMaker.oscog-eem.2420"; +const char *interpreterVersion = "Newspeak Virtual Machine CoInterpreterPrimitives_VMMaker.oscog-eem.2424"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ 
-17244,12 +17244,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -40889,7 +40897,7 @@ fetchClassTagOf(sqInt oop) } /* Spur32BitMemoryManager>>#floatObjectOf: */ -sqInt +usqInt floatObjectOf(double aFloat) { DECL_MAYBE_SQ_GLOBAL_STRUCT usqInt newFloatObj; @@ -45004,11 +45012,11 @@ becomewithtwoWaycopyHash(sqInt array1, sqInt array2, sqInt twoWayFlag, sqInt cop sp = longAt((array1 + BaseHeaderSize) + (((int)((usqInt)(StackPointerIndex) << (shiftForWord()))))); if (!((sp & 1))) { contextSize = 0; - goto l27; + goto l28; } assert((ReceiverIndex + ((sp >> 1))) < (lengthOf(array1))); contextSize = (sp >> 1); - l27: /* end fetchStackPointerOf: */; + l28: /* end fetchStackPointerOf: */; fieldOffset = (((CtxtTempFrameStart - 1) + contextSize) * BytesPerOop) + BaseHeaderSize; goto l23; } @@ -65169,8 +65177,8 @@ void (*functionPointerForinClass(sqInt primIdx,sqInt theClass))(void) static sqInt getErrorObjectFromPrimFailCode(void) { DECL_MAYBE_SQ_GLOBAL_STRUCT - usqInt clone; - usqInt errObj; + sqInt clone; + sqInt errObj; sqInt fieldIndex; sqInt i; sqInt knownClassIndex; diff --git a/nsspursrc/vm/cointerp.h b/nsspursrc/vm/cointerp.h index 2dbb10384c..21da0f1b49 100644 --- a/nsspursrc/vm/cointerp.h +++ b/nsspursrc/vm/cointerp.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ @@ -148,7 +148,7 @@ extern usqInt scavengeThresholdAddress(void); extern sqInt withoutForwardingOnandwithsendToCogit(sqInt obj1, sqInt obj2, sqInt aBool, sqInt (*selector)(sqInt,sqInt,sqInt)); extern sqInt byteSwapped(sqInt w); extern sqInt fetchClassTagOf(sqInt oop); -extern sqInt floatObjectOf(double aFloat); +extern usqInt floatObjectOf(double aFloat); extern sqInt headerIndicatesAlternateBytecodeSet(sqInt methodHeader); extern sqInt instantiateClassindexableSize(sqInt classObj, usqInt nElements); 
extern sqInt isIntegerValue(sqInt intValue); diff --git a/nsspursrc/vm/gcc3x-cointerp.c b/nsspursrc/vm/gcc3x-cointerp.c index 161d695fbf..28bc251274 100644 --- a/nsspursrc/vm/gcc3x-cointerp.c +++ b/nsspursrc/vm/gcc3x-cointerp.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -928,7 +928,7 @@ static sqInt NoDbgRegParms changeClassOfto(sqInt rcvr, sqInt argClass); static double NoDbgRegParms dbgFloatValueOf(sqInt oop); static sqInt defaultEdenBytes(void); extern sqInt fetchClassTagOf(sqInt oop); -extern sqInt floatObjectOf(double aFloat); +extern usqInt floatObjectOf(double aFloat); extern double floatValueOf(sqInt oop); static sqInt hasSixtyFourBitImmediates(void); extern sqInt headerIndicatesAlternateBytecodeSet(sqInt methodHeader); @@ -2572,7 +2572,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Newspeak Virtual Machine CoInterpreterPrimitives_VMMaker.oscog-eem.2420"; +const char *interpreterVersion = "Newspeak Virtual Machine CoInterpreterPrimitives_VMMaker.oscog-eem.2424"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -17253,12 +17253,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want 
the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. */ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -40898,7 +40906,7 @@ fetchClassTagOf(sqInt oop) } /* Spur32BitMemoryManager>>#floatObjectOf: */ -sqInt +usqInt floatObjectOf(double aFloat) { DECL_MAYBE_SQ_GLOBAL_STRUCT usqInt newFloatObj; @@ -45013,11 +45021,11 @@ becomewithtwoWaycopyHash(sqInt array1, sqInt array2, sqInt twoWayFlag, sqInt cop sp = longAt((array1 + BaseHeaderSize) + (((int)((usqInt)(StackPointerIndex) << (shiftForWord()))))); if (!((sp & 1))) { contextSize = 0; - goto l27; + goto l28; } assert((ReceiverIndex + ((sp >> 1))) < (lengthOf(array1))); contextSize = (sp >> 1); - l27: /* end fetchStackPointerOf: */; + l28: /* end fetchStackPointerOf: */; fieldOffset = (((CtxtTempFrameStart - 1) + contextSize) * BytesPerOop) + BaseHeaderSize; goto l23; } @@ -65178,8 +65186,8 @@ void (*functionPointerForinClass(sqInt primIdx,sqInt theClass))(void) static sqInt getErrorObjectFromPrimFailCode(void) { DECL_MAYBE_SQ_GLOBAL_STRUCT - usqInt clone; - usqInt errObj; + sqInt 
clone; + sqInt errObj; sqInt fieldIndex; sqInt i; sqInt knownClassIndex; diff --git a/spur64src/vm/cogit.h b/spur64src/vm/cogit.h index 08dd7f2e94..e3a3f28dd0 100644 --- a/spur64src/vm/cogit.h +++ b/spur64src/vm/cogit.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spur64src/vm/cogitX64SysV.c b/spur64src/vm/cogitX64SysV.c index ba08dbdc6b..83ceb8dd31 100644 --- a/spur64src/vm/cogitX64SysV.c +++ b/spur64src/vm/cogitX64SysV.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -582,7 +582,7 @@ static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static CogMethod * NoDbgRegParms fillInMethodHeadersizeselector(CogMethod *method, sqInt size, sqInt selector); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); -static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt 
targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod *cogMethod); static sqInt NoDbgRegParms findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -652,7 +652,7 @@ static BytecodeDescriptor * loadBytesAndGetDescriptor(void); static void NoDbgRegParms loadSubsequentBytesForDescriptorat(BytecodeDescriptor *descriptor, sqInt pc); static AbstractInstruction * NoDbgRegParms gMoveAwR(sqInt address, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveCwR(sqInt wordConstant, sqInt reg); -static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); +static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); static sqInt NoDbgRegParms mapForperformUntilarg(CogMethod *cogMethod, sqInt (*functionSymbol)(sqInt annotation, char *mcpc, sqInt arg), sqInt arg); static sqInt NoDbgRegParms mapObjectReferencesInClosedPIC(CogMethod *cPIC); static void mapObjectReferencesInGeneratedRuntime(void); @@ -1220,13 +1220,7 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms 
ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -2776,7 +2770,7 @@ static sqInt NoDbgRegParms blockDispatchTargetsForperformarg(CogMethod *cogMethod, usqInt (*binaryFunction)(sqInt mcpc, sqInt arg), sqInt arg) { sqInt blockEntry; - sqInt end; + usqInt end; sqInt pc; sqInt result; usqInt targetpc; @@ -5487,7 +5481,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -5793,7 +5787,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static sqInt NoDbgRegParms +static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -6074,6 +6068,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -7522,7 +7519,7 @@ gMoveCwR(sqInt wordConstant, sqInt reg) /* Answer the address of the null byte at the end of the method map. 
*/ /* Cogit>>#mapEndFor: */ -static sqInt NoDbgRegParms +static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod) { usqInt end; @@ -17237,6 +17234,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -17288,6 +17287,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -24000,7 +24001,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -27187,7 +27188,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { 
compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27435,7 +27436,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27693,7 +27694,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -27832,7 +27834,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -27997,7 +28000,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; 
i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -28034,7 +28038,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29091,7 +29096,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -29120,7 +29126,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, 
methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29247,7 +29254,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -29303,7 +29311,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -29725,16 +29734,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1ULL << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -29966,7 +29979,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - 
ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -29980,7 +29994,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30054,7 +30069,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -30100,7 +30116,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30137,7 +30154,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; 
assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -30478,11 +30496,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -30999,45 +31020,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1ULL << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). 
Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1ULL << requiredReg1) | (1ULL << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1ULL << requiredReg1) | (1ULL << requiredReg2)) | (1ULL << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -31076,39 +31058,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt 
requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1ULL << requiredReg1) | (1ULL << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spur64src/vm/cogitX64WIN64.c b/spur64src/vm/cogitX64WIN64.c index 7fe5490ecf..66f4aa0186 100644 --- a/spur64src/vm/cogitX64WIN64.c +++ b/spur64src/vm/cogitX64WIN64.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -652,7 +652,7 @@ static BytecodeDescriptor * loadBytesAndGetDescriptor(void); static void NoDbgRegParms loadSubsequentBytesForDescriptorat(BytecodeDescriptor *descriptor, sqInt pc); static AbstractInstruction * NoDbgRegParms gMoveAwR(sqInt address, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveCwR(sqInt wordConstant, sqInt reg); -static usqInt 
NoDbgRegParms mapEndFor(CogMethod *cogMethod); +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); static sqInt NoDbgRegParms mapForperformUntilarg(CogMethod *cogMethod, sqInt (*functionSymbol)(sqInt annotation, char *mcpc, sqInt arg), sqInt arg); static sqInt NoDbgRegParms mapObjectReferencesInClosedPIC(CogMethod *cPIC); static void mapObjectReferencesInGeneratedRuntime(void); @@ -1220,13 +1220,7 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -2776,7 +2770,7 @@ static sqInt NoDbgRegParms blockDispatchTargetsForperformarg(CogMethod *cogMethod, usqInt (*binaryFunction)(sqInt mcpc, sqInt arg), sqInt arg) { sqInt blockEntry; - usqInt end; + sqInt end; sqInt pc; sqInt result; usqInt targetpc; @@ -5495,7 +5489,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return 
(((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -6082,6 +6076,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -7530,7 +7527,7 @@ gMoveCwR(sqInt wordConstant, sqInt reg) /* Answer the address of the null byte at the end of the method map. */ /* Cogit>>#mapEndFor: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod) { usqInt end; @@ -17245,6 +17242,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -17296,6 +17295,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -24029,7 +24030,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && 
(!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -27216,7 +27217,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27464,7 +27465,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27722,7 +27723,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -27861,7 +27863,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } 
assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -28026,7 +28029,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -28063,7 +28067,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29120,7 +29125,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + 
ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -29149,7 +29155,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29276,7 +29283,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -29332,7 +29340,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -29754,16 +29763,20 @@ 
genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1ULL << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -29995,7 +30008,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -30009,7 +30023,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30083,7 +30098,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, 
SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -30129,7 +30145,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30166,7 +30183,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -30507,11 +30525,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. 
*/ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -31028,45 +31049,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1ULL << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1ULL << requiredReg1) | (1ULL << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. 
flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1ULL << requiredReg1) | (1ULL << requiredReg2)) | (1ULL << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -31105,39 +31087,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1ULL << requiredReg1) | (1ULL << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - 
ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spur64src/vm/cointerp.c b/spur64src/vm/cointerp.c index 9009b6b078..176185fbb6 100644 --- a/spur64src/vm/cointerp.c +++ b/spur64src/vm/cointerp.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -1504,7 +1504,7 @@ static sqInt NoDbgRegParms lookupMNUInClass(sqInt class); static sqInt NoDbgRegParms lookupOrdinaryNoMNUEtcInClass(sqInt class); extern sqInt lookupSelectorinClass(sqInt selector, sqInt class); static void NoDbgRegParms makeContextSnapshotSafe(sqInt ctxt); -extern sqInt makePointwithxValueyValue(sqInt xValue, sqInt yValue); +extern usqInt makePointwithxValueyValue(sqInt xValue, sqInt yValue); static void mapInterpreterOops(void); static sqInt NoDbgRegParms markAndTraceStackPages(sqInt fullGCFlag); static void markAndTraceUntracedReachableStackPages(void); @@ -2563,7 +2563,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives 
VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -15949,12 +15949,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -20367,7 +20375,7 @@ printFrameWithSP(char *theFP, char *theSP) usqInt index; sqInt methodField; usqInt numArgs; - sqInt numTemps; + usqInt numTemps; char *rcvrAddress; sqInt rcvrOrClosure; sqInt theMethod; @@ -67275,7 +67283,7 @@ makeContextSnapshotSafe(sqInt ctxt) We know both will be integers so no value nor root checking is needed */ /* StackInterpreter>>#makePointwithxValue:yValue: */ -sqInt +usqInt makePointwithxValueyValue(sqInt xValue, sqInt yValue) { DECL_MAYBE_SQ_GLOBAL_STRUCT sqInt classObj; diff --git a/spur64src/vm/cointerp.h b/spur64src/vm/cointerp.h index 06c73a384f..972702c404 100644 --- a/spur64src/vm/cointerp.h +++ b/spur64src/vm/cointerp.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spur64src/vm/cointerpmt.c b/spur64src/vm/cointerpmt.c index ea9e61820a..ecc6f1a97c 100644 --- a/spur64src/vm/cointerpmt.c +++ b/spur64src/vm/cointerpmt.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreterMT VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2662,7 +2662,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char 
expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -16416,12 +16416,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void diff --git a/spur64src/vm/cointerpmt.h b/spur64src/vm/cointerpmt.h index 3161d89f86..c194c23201 100644 --- a/spur64src/vm/cointerpmt.h +++ b/spur64src/vm/cointerpmt.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spur64src/vm/gcc3x-cointerp.c b/spur64src/vm/gcc3x-cointerp.c index 44f39ce615..0e0fb60587 100644 --- a/spur64src/vm/gcc3x-cointerp.c +++ b/spur64src/vm/gcc3x-cointerp.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -1507,7 +1507,7 @@ static sqInt NoDbgRegParms lookupMNUInClass(sqInt class); static sqInt NoDbgRegParms lookupOrdinaryNoMNUEtcInClass(sqInt class); extern sqInt lookupSelectorinClass(sqInt selector, sqInt class); static void NoDbgRegParms makeContextSnapshotSafe(sqInt ctxt); -extern sqInt makePointwithxValueyValue(sqInt xValue, sqInt yValue); +extern usqInt makePointwithxValueyValue(sqInt xValue, sqInt yValue); static void mapInterpreterOops(void); static sqInt NoDbgRegParms markAndTraceStackPages(sqInt fullGCFlag); static void markAndTraceUntracedReachableStackPages(void); @@ -2566,7 +2566,7 @@ static signed char 
primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -15958,12 +15958,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -20376,7 +20384,7 @@ printFrameWithSP(char *theFP, char *theSP) usqInt index; sqInt methodField; usqInt numArgs; - sqInt numTemps; + usqInt numTemps; char *rcvrAddress; sqInt rcvrOrClosure; sqInt theMethod; @@ -67284,7 +67292,7 @@ makeContextSnapshotSafe(sqInt ctxt) We know both will be integers so no value nor root checking is needed */ /* StackInterpreter>>#makePointwithxValue:yValue: */ -sqInt +usqInt makePointwithxValueyValue(sqInt xValue, sqInt yValue) { DECL_MAYBE_SQ_GLOBAL_STRUCT sqInt classObj; diff --git a/spur64src/vm/gcc3x-cointerpmt.c b/spur64src/vm/gcc3x-cointerpmt.c index 9c2bebd6ab..f86d4d6954 100644 --- a/spur64src/vm/gcc3x-cointerpmt.c +++ b/spur64src/vm/gcc3x-cointerpmt.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreterMT VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2665,7 +2665,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -16425,12 +16425,20 @@ 
ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void diff --git a/spurlowcode64src/vm/cogit.h b/spurlowcode64src/vm/cogit.h index ff2f9991e5..d9b4360f7c 100644 --- a/spurlowcode64src/vm/cogit.h +++ b/spurlowcode64src/vm/cogit.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spurlowcode64src/vm/cogitX64SysV.c b/spurlowcode64src/vm/cogitX64SysV.c index 17ac258cfd..c7668cef39 100644 --- a/spurlowcode64src/vm/cogitX64SysV.c +++ b/spurlowcode64src/vm/cogitX64SysV.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -626,7 +626,7 @@ static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static CogMethod * NoDbgRegParms fillInMethodHeadersizeselector(CogMethod *method, sqInt size, sqInt selector); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); -static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static 
usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod *cogMethod); static sqInt NoDbgRegParms findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -1353,15 +1353,9 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredFloatRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); static void NoDbgRegParms ssAllocateRequiredFloatReg(sqInt requiredReg); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssNativeFlushTo(sqInt index); @@ -6142,7 +6136,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt 
aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -6448,7 +6442,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static sqInt NoDbgRegParms +static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -6729,6 +6723,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -18956,6 +18953,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -19007,6 +19006,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -26251,7 +26252,7 @@ compilePrimitive(void) } if ((code == 
CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -29657,7 +29658,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -29905,7 +29906,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -30189,7 +30190,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -30360,7 +30362,8 @@ freeAnyRegNotConflictingWith(sqInt 
regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -30539,7 +30542,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -30576,7 +30580,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -31361,7 +31366,7 @@ genLowcodeBinaryInlinePrimitive(sqInt prim) sqInt i114; sqInt i115; sqInt i116; - sqInt i117; + sqInt i118; sqInt i119; sqInt i12; sqInt i120; @@ -31899,7 +31904,7 @@ genLowcodeBinaryInlinePrimitive(sqInt prim) /* Ensure we are not using 
a duplicated register. */ rOopTop18 = registerOrNone(ssTop()); index118 = ((simSpillBase < 0) ? 0 : simSpillBase); - for (i117 = index118; i117 <= (simStackPtr); i117 += 1) { + for (i118 = index118; i118 <= (simStackPtr); i118 += 1) { if ((registerOrNone(simStackAt(index118))) == rOopTop18) { goto l54; } @@ -34971,24 +34976,24 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) sqInt rTop11; sqInt rTop110; sqInt rTop111; + sqInt rTop112; sqInt rTop12; sqInt rTop13; sqInt rTop14; sqInt rTop15; sqInt rTop17; sqInt rTop18; - sqInt rTop19; sqInt rTop2; + sqInt rTop20; sqInt rTop21; sqInt rTop22; - sqInt rTop23; + sqInt rTop24; sqInt rTop25; sqInt rTop26; sqInt rTop27; - sqInt rTop28; + sqInt rTop29; sqInt rTop30; sqInt rTop31; - sqInt rTop32; sqInt rTop4; sqInt rTop5; sqInt rTop6; @@ -35246,18 +35251,18 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 129: /* begin genLowcodeMalloc32 */ - rTop28 = NoReg; + rTop27 = NoReg; rResult8 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop28 = nativeRegisterOrNone(ssNativeTop()); + rTop27 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop28 == NoReg) { - rTop28 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop27 == NoReg) { + rTop27 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult8 = allocateRegNotConflictingWith(1ULL << rTop28); - assert(!(((rTop28 == NoReg) + rResult8 = allocateRegNotConflictingWith(1ULL << rTop27); + assert(!(((rTop27 == NoReg) || (rResult8 == NoReg)))); - size1 = rTop28; + size1 = rTop27; pointer7 = rResult8; nativePopToReg(ssNativeTop(), size1); ssNativePop(1); @@ -35287,18 +35292,18 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 130: /* begin genLowcodeMalloc64 */ /* begin allocateRegistersForLowcodeIntegerResultInteger: */ - rTop111 = NoReg; + rTop112 = NoReg; rResult13 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop111 = nativeRegisterOrNone(ssNativeTop()); + rTop112 = nativeRegisterOrNone(ssNativeTop()); } - if 
(rTop111 == NoReg) { - rTop111 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop112 == NoReg) { + rTop112 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult13 = allocateRegNotConflictingWith(1ULL << rTop111); - assert(!(((rTop111 == NoReg) + rResult13 = allocateRegNotConflictingWith(1ULL << rTop112); + assert(!(((rTop112 == NoReg) || (rResult13 == NoReg)))); - size2 = rTop111; + size2 = rTop112; pointer8 = rResult13; nativePopToReg(ssNativeTop(), size2); ssNativePop(1); @@ -35327,10 +35332,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 131: /* begin genLowcodeMemcpy32 */ - rTop30 = (rNext13 = (rNextNext2 = NoReg)); + rTop29 = (rNext13 = (rNextNext2 = NoReg)); nativeValueIndex2 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop30 = nativeRegisterOrNone(ssNativeTop()); + rTop29 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext13 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -35349,7 +35354,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext2 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex2)); } } - if (rTop30 == NoReg) { + if (rTop29 == NoReg) { nextRegisterMask2 = 0; if (rNext13 != NoReg) { /* begin registerMaskFor: */ @@ -35358,11 +35363,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext2 != NoReg) { nextRegisterMask2 = nextRegisterMask2 | (1ULL << rNextNext2); } - rTop30 = allocateRegNotConflictingWith(nextRegisterMask2); + rTop29 = allocateRegNotConflictingWith(nextRegisterMask2); } if (rNext13 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask2 = 1ULL << rTop30; + nextRegisterMask2 = 1ULL << rTop29; if (rNextNext2 != NoReg) { nextRegisterMask2 = nextRegisterMask2 | (1ULL << rNextNext2); } @@ -35370,13 +35375,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext2 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask2 = (1ULL << rTop30) | (1ULL << rNext13); + nextRegisterMask2 = (1ULL 
<< rTop29) | (1ULL << rNext13); rNextNext2 = allocateRegNotConflictingWith(nextRegisterMask2); } - assert(!(((rTop30 == NoReg) + assert(!(((rTop29 == NoReg) || ((rNext13 == NoReg) || (rNextNext2 == NoReg))))); - size3 = rTop30; + size3 = rTop29; source = rNext13; dest = rNextNext2; nativePopToReg(ssNativeTop(), size3); @@ -35401,10 +35406,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 132: /* begin genLowcodeMemcpy64 */ - rTop31 = (rNext14 = (rNextNext3 = NoReg)); + rTop30 = (rNext14 = (rNextNext3 = NoReg)); nativeValueIndex3 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop31 = nativeRegisterOrNone(ssNativeTop()); + rTop30 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext14 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -35423,7 +35428,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext3 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex3)); } } - if (rTop31 == NoReg) { + if (rTop30 == NoReg) { nextRegisterMask3 = 0; if (rNext14 != NoReg) { /* begin registerMaskFor: */ @@ -35432,11 +35437,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext3 != NoReg) { nextRegisterMask3 = nextRegisterMask3 | (1ULL << rNextNext3); } - rTop31 = allocateRegNotConflictingWith(nextRegisterMask3); + rTop30 = allocateRegNotConflictingWith(nextRegisterMask3); } if (rNext14 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask3 = 1ULL << rTop31; + nextRegisterMask3 = 1ULL << rTop30; if (rNextNext3 != NoReg) { nextRegisterMask3 = nextRegisterMask3 | (1ULL << rNextNext3); } @@ -35444,13 +35449,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext3 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask3 = (1ULL << rTop31) | (1ULL << rNext14); + nextRegisterMask3 = (1ULL << rTop30) | (1ULL << rNext14); rNextNext3 = allocateRegNotConflictingWith(nextRegisterMask3); } - assert(!(((rTop31 == NoReg) + assert(!(((rTop30 == NoReg) || ((rNext14 == NoReg) || (rNextNext3 == 
NoReg))))); - size4 = rTop31; + size4 = rTop30; source1 = rNext14; dest1 = rNextNext3; nativePopToReg(ssNativeTop(), size4); @@ -35478,9 +35483,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) size5 = extA; /* begin allocateRegistersForLowcodeInteger2: */ topRegistersMask13 = 0; - rTop32 = (rNext15 = NoReg); + rTop31 = (rNext15 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop32 = nativeRegisterOrNone(ssNativeTop()); + rTop31 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext15 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -35492,15 +35497,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask13 = 1ULL << reg16; } } - if (rTop32 == NoReg) { - rTop32 = allocateRegNotConflictingWith(topRegistersMask13); + if (rTop31 == NoReg) { + rTop31 = allocateRegNotConflictingWith(topRegistersMask13); } if (rNext15 == NoReg) { - rNext15 = allocateRegNotConflictingWith(1ULL << rTop32); + rNext15 = allocateRegNotConflictingWith(1ULL << rTop31); } - assert(!(((rTop32 == NoReg) + assert(!(((rTop31 == NoReg) || (rNext15 == NoReg)))); - source2 = rTop32; + source2 = rTop31; dest2 = rNext15; nativePopToReg(ssNativeTop(), source2); ssNativePop(1); @@ -35922,9 +35927,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) /* begin genLowcodePointerAddOffset64 */ /* begin allocateRegistersForLowcodeInteger2: */ topRegistersMask7 = 0; - rTop19 = (rNext12 = NoReg); + rTop110 = (rNext12 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop19 = nativeRegisterOrNone(ssNativeTop()); + rTop110 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext12 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -35936,15 +35941,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask7 = 1ULL << reg8; } } - if (rTop19 == NoReg) { - rTop19 = allocateRegNotConflictingWith(topRegistersMask7); + if (rTop110 == NoReg) { + rTop110 = 
allocateRegNotConflictingWith(topRegistersMask7); } if (rNext12 == NoReg) { - rNext12 = allocateRegNotConflictingWith(1ULL << rTop19); + rNext12 = allocateRegNotConflictingWith(1ULL << rTop110); } - assert(!(((rTop19 == NoReg) + assert(!(((rTop110 == NoReg) || (rNext12 == NoReg)))); - offset6 = rTop19; + offset6 = rTop110; base2 = rNext12; nativePopToReg(ssNativeTop(), offset6); ssNativePop(1); @@ -35958,9 +35963,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 165: /* begin genLowcodePointerEqual */ topRegistersMask8 = 0; - rTop21 = (rNext8 = NoReg); + rTop20 = (rNext8 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop21 = nativeRegisterOrNone(ssNativeTop()); + rTop20 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext8 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -35972,15 +35977,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask8 = 1ULL << reg9; } } - if (rTop21 == NoReg) { - rTop21 = allocateRegNotConflictingWith(topRegistersMask8); + if (rTop20 == NoReg) { + rTop20 = allocateRegNotConflictingWith(topRegistersMask8); } if (rNext8 == NoReg) { - rNext8 = allocateRegNotConflictingWith(1ULL << rTop21); + rNext8 = allocateRegNotConflictingWith(1ULL << rTop20); } - assert(!(((rTop21 == NoReg) + assert(!(((rTop20 == NoReg) || (rNext8 == NoReg)))); - second4 = rTop21; + second4 = rTop20; first4 = rNext8; nativePopToReg(ssNativeTop(), second4); ssNativePop(1); @@ -36004,9 +36009,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 166: /* begin genLowcodePointerNotEqual */ topRegistersMask9 = 0; - rTop22 = (rNext9 = NoReg); + rTop21 = (rNext9 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop22 = nativeRegisterOrNone(ssNativeTop()); + rTop21 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext9 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -36018,15 +36023,15 @@ 
genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask9 = 1ULL << reg10; } } - if (rTop22 == NoReg) { - rTop22 = allocateRegNotConflictingWith(topRegistersMask9); + if (rTop21 == NoReg) { + rTop21 = allocateRegNotConflictingWith(topRegistersMask9); } if (rNext9 == NoReg) { - rNext9 = allocateRegNotConflictingWith(1ULL << rTop22); + rNext9 = allocateRegNotConflictingWith(1ULL << rTop21); } - assert(!(((rTop22 == NoReg) + assert(!(((rTop21 == NoReg) || (rNext9 == NoReg)))); - second5 = rTop22; + second5 = rTop21; first5 = rNext9; nativePopToReg(ssNativeTop(), second5); ssNativePop(1); @@ -36049,15 +36054,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 167: /* begin genLowcodePointerToInt32 */ - rTop23 = NoReg; + rTop22 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop23 = nativeRegisterOrNone(ssNativeTop()); + rTop22 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop23 == NoReg) { - rTop23 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop22 == NoReg) { + rTop22 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop23 == NoReg))); - pointer5 = rTop23; + assert(!((rTop22 == NoReg))); + pointer5 = rTop22; nativePopToReg(ssNativeTop(), pointer5); ssNativePop(1); ssPushNativeRegister(pointer5); @@ -36066,18 +36071,18 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 168: /* begin genLowcodePointerToInt64 */ /* begin allocateRegistersForLowcodeIntegerResultInteger: */ - rTop110 = NoReg; + rTop111 = NoReg; rResult12 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop110 = nativeRegisterOrNone(ssNativeTop()); + rTop111 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop110 == NoReg) { - rTop110 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop111 == NoReg) { + rTop111 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult12 = allocateRegNotConflictingWith(1ULL << rTop110); - assert(!(((rTop110 == NoReg) + rResult12 = 
allocateRegNotConflictingWith(1ULL << rTop111); + assert(!(((rTop111 == NoReg) || (rResult12 == NoReg)))); - pointer6 = rTop110; + pointer6 = rTop111; result2 = rResult12; nativePopToReg(ssNativeTop(), pointer6); ssNativePop(1); @@ -36118,30 +36123,30 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 171: /* begin genLowcodePopInt32 */ - rTop25 = NoReg; + rTop24 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop25 = nativeRegisterOrNone(ssNativeTop()); + rTop24 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop25 == NoReg) { - rTop25 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop24 == NoReg) { + rTop24 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop25 == NoReg))); - value10 = rTop25; + assert(!((rTop24 == NoReg))); + value10 = rTop24; nativePopToReg(ssNativeTop(), value10); ssNativePop(1); return 0; case 172: /* begin genLowcodePopInt64 */ - rTop26 = NoReg; + rTop25 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop26 = nativeRegisterOrNone(ssNativeTop()); + rTop25 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop26 == NoReg) { - rTop26 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop25 == NoReg) { + rTop25 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop26 == NoReg))); - value11 = rTop26; + assert(!((rTop25 == NoReg))); + value11 = rTop25; nativePopToReg(ssNativeTop(), value11); ssNativePop(1); return 0; @@ -36154,15 +36159,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 174: /* begin genLowcodePopPointer */ - rTop27 = NoReg; + rTop26 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop27 = nativeRegisterOrNone(ssNativeTop()); + rTop26 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop27 == NoReg) { - rTop27 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop26 == NoReg) { + rTop26 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop27 == NoReg))); 
- pointerValue7 = rTop27; + assert(!((rTop26 == NoReg))); + pointerValue7 = rTop26; nativePopToReg(ssNativeTop(), pointerValue7); ssNativePop(1); return 0; @@ -36429,7 +36434,7 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) sqInt rTop13; sqInt rTop14; sqInt rTop15; - sqInt rTop16; + sqInt rTop17; sqInt rTop18; sqInt rTop19; sqInt rTop2; @@ -36969,9 +36974,9 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) /* begin genLowcodeStoreInt64ToMemory */ /* begin allocateRegistersForLowcodeInteger2: */ topRegistersMask8 = 0; - rTop16 = (rNext11 = NoReg); + rTop17 = (rNext11 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop16 = nativeRegisterOrNone(ssNativeTop()); + rTop17 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext11 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -36983,15 +36988,15 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) topRegistersMask8 = 1ULL << reg8; } } - if (rTop16 == NoReg) { - rTop16 = allocateRegNotConflictingWith(topRegistersMask8); + if (rTop17 == NoReg) { + rTop17 = allocateRegNotConflictingWith(topRegistersMask8); } if (rNext11 == NoReg) { - rNext11 = allocateRegNotConflictingWith(1ULL << rTop16); + rNext11 = allocateRegNotConflictingWith(1ULL << rTop17); } - assert(!(((rTop16 == NoReg) + assert(!(((rTop17 == NoReg) || (rNext11 == NoReg)))); - pointer4 = rTop16; + pointer4 = rTop17; value9 = rNext11; nativePopToReg(ssNativeTop(), pointer4); ssNativePop(1); @@ -38276,7 +38281,6 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) AbstractInstruction *anInstruction113; AbstractInstruction *anInstruction114; AbstractInstruction *anInstruction115; - AbstractInstruction *anInstruction116; AbstractInstruction *anInstruction12; AbstractInstruction *anInstruction13; AbstractInstruction *anInstruction14; @@ -38292,6 +38296,7 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) AbstractInstruction *anInstruction23; AbstractInstruction *anInstruction24; AbstractInstruction *anInstruction25; + 
AbstractInstruction *anInstruction26; AbstractInstruction *anInstruction3; AbstractInstruction *anInstruction4; AbstractInstruction *anInstruction5; @@ -39731,12 +39736,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump4 = gJumpFPGreater(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction17 = genoperandoperand(MoveCqR, 1, value11); + anInstruction18 = genoperandoperand(MoveCqR, 1, value11); /* begin Jump: */ contJump4 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump4, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction18 = genoperandoperand(MoveCqR, 0, value11); + anInstruction17 = genoperandoperand(MoveCqR, 0, value11); jmpTarget(contJump4, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value11); return 0; @@ -39834,12 +39839,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump5 = gJumpFPEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction19 = genoperandoperand(MoveCqR, 1, value13); + anInstruction20 = genoperandoperand(MoveCqR, 1, value13); /* begin Jump: */ contJump5 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump5, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction110 = genoperandoperand(MoveCqR, 0, value13); + anInstruction19 = genoperandoperand(MoveCqR, 0, value13); jmpTarget(contJump5, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value13); return 0; @@ -40103,12 +40108,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump6 = gJumpFPNotEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction20 = genoperandoperand(MoveCqR, 1, value19); + anInstruction21 = genoperandoperand(MoveCqR, 1, value19); /* begin Jump: */ contJump6 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump6, genoperandoperand(Label, (labelCounter += 1), 
bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction111 = genoperandoperand(MoveCqR, 0, value19); + anInstruction110 = genoperandoperand(MoveCqR, 0, value19); jmpTarget(contJump6, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value19); return 0; @@ -40149,12 +40154,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump7 = gJumpFPLessOrEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction21 = genoperandoperand(MoveCqR, 1, value20); + anInstruction22 = genoperandoperand(MoveCqR, 1, value20); /* begin Jump: */ contJump7 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump7, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction112 = genoperandoperand(MoveCqR, 0, value20); + anInstruction111 = genoperandoperand(MoveCqR, 0, value20); jmpTarget(contJump7, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value20); return 0; @@ -40195,12 +40200,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump8 = gJumpFPLess(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction22 = genoperandoperand(MoveCqR, 1, value21); + anInstruction23 = genoperandoperand(MoveCqR, 1, value21); /* begin Jump: */ contJump8 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump8, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction113 = genoperandoperand(MoveCqR, 0, value21); + anInstruction112 = genoperandoperand(MoveCqR, 0, value21); jmpTarget(contJump8, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value21); return 0; @@ -40241,12 +40246,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump9 = gJumpFPGreaterOrEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction23 = genoperandoperand(MoveCqR, 1, value22); + anInstruction24 = 
genoperandoperand(MoveCqR, 1, value22); /* begin Jump: */ contJump9 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump9, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction114 = genoperandoperand(MoveCqR, 0, value22); + anInstruction113 = genoperandoperand(MoveCqR, 0, value22); jmpTarget(contJump9, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value22); return 0; @@ -40287,12 +40292,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump10 = gJumpFPGreater(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction24 = genoperandoperand(MoveCqR, 1, value23); + anInstruction25 = genoperandoperand(MoveCqR, 1, value23); /* begin Jump: */ contJump10 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump10, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction115 = genoperandoperand(MoveCqR, 0, value23); + anInstruction114 = genoperandoperand(MoveCqR, 0, value23); jmpTarget(contJump10, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value23); return 0; @@ -40390,12 +40395,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump11 = gJumpFPEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction25 = genoperandoperand(MoveCqR, 1, value25); + anInstruction26 = genoperandoperand(MoveCqR, 1, value25); /* begin Jump: */ contJump11 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump11, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction116 = genoperandoperand(MoveCqR, 0, value25); + anInstruction115 = genoperandoperand(MoveCqR, 0, value25); jmpTarget(contJump11, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value25); return 0; @@ -40753,7 +40758,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); 
voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -40782,7 +40788,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -40909,7 +40916,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -40966,7 +40974,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << 
SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -41389,16 +41398,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1ULL << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -41631,7 +41644,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -41645,7 +41659,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -41720,7 +41735,8 @@ 
genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -41767,7 +41783,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -41805,7 +41822,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -42202,11 +42220,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. 
*/ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -42737,45 +42758,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1ULL << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1ULL << requiredReg1) | (1ULL << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. 
flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1ULL << requiredReg1) | (1ULL << requiredReg2)) | (1ULL << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredFloatRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredFloatRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -42876,39 +42858,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1ULL << requiredReg1) | (1ULL << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; 
- ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spurlowcode64src/vm/cogitX64WIN64.c b/spurlowcode64src/vm/cogitX64WIN64.c index 50f13c8d45..e632a0690d 100644 --- a/spurlowcode64src/vm/cogitX64WIN64.c +++ b/spurlowcode64src/vm/cogitX64WIN64.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -626,7 +626,7 @@ static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static CogMethod * NoDbgRegParms fillInMethodHeadersizeselector(CogMethod *method, sqInt size, sqInt selector); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); -static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod *cogMethod); static sqInt NoDbgRegParms 
findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -1353,15 +1353,9 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredFloatRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); static void NoDbgRegParms ssAllocateRequiredFloatReg(sqInt requiredReg); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssNativeFlushTo(sqInt index); @@ -6174,7 +6168,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), 
aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -6480,7 +6474,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -6761,6 +6755,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -18988,6 +18985,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -19039,6 +19038,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -26304,7 +26305,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), 
methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -29710,7 +29711,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -29958,7 +29959,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -30242,7 +30243,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -30413,7 +30415,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << reg, simStackPtr, simNativeStackPtr); return 
reg; } @@ -30592,7 +30595,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -30629,7 +30633,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -31414,7 +31419,7 @@ genLowcodeBinaryInlinePrimitive(sqInt prim) sqInt i114; sqInt i115; sqInt i116; - sqInt i117; + sqInt i118; sqInt i119; sqInt i12; sqInt i120; @@ -31952,7 +31957,7 @@ genLowcodeBinaryInlinePrimitive(sqInt prim) /* Ensure we are not using a duplicated register. */ rOopTop18 = registerOrNone(ssTop()); index118 = ((simSpillBase < 0) ? 
0 : simSpillBase); - for (i117 = index118; i117 <= (simStackPtr); i117 += 1) { + for (i118 = index118; i118 <= (simStackPtr); i118 += 1) { if ((registerOrNone(simStackAt(index118))) == rOopTop18) { goto l54; } @@ -35024,24 +35029,24 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) sqInt rTop11; sqInt rTop110; sqInt rTop111; + sqInt rTop112; sqInt rTop12; sqInt rTop13; sqInt rTop14; sqInt rTop15; sqInt rTop17; sqInt rTop18; - sqInt rTop19; sqInt rTop2; + sqInt rTop20; sqInt rTop21; sqInt rTop22; - sqInt rTop23; + sqInt rTop24; sqInt rTop25; sqInt rTop26; sqInt rTop27; - sqInt rTop28; + sqInt rTop29; sqInt rTop30; sqInt rTop31; - sqInt rTop32; sqInt rTop4; sqInt rTop5; sqInt rTop6; @@ -35299,18 +35304,18 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 129: /* begin genLowcodeMalloc32 */ - rTop28 = NoReg; + rTop27 = NoReg; rResult8 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop28 = nativeRegisterOrNone(ssNativeTop()); + rTop27 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop28 == NoReg) { - rTop28 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop27 == NoReg) { + rTop27 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult8 = allocateRegNotConflictingWith(1ULL << rTop28); - assert(!(((rTop28 == NoReg) + rResult8 = allocateRegNotConflictingWith(1ULL << rTop27); + assert(!(((rTop27 == NoReg) || (rResult8 == NoReg)))); - size1 = rTop28; + size1 = rTop27; pointer7 = rResult8; nativePopToReg(ssNativeTop(), size1); ssNativePop(1); @@ -35340,18 +35345,18 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 130: /* begin genLowcodeMalloc64 */ /* begin allocateRegistersForLowcodeIntegerResultInteger: */ - rTop111 = NoReg; + rTop112 = NoReg; rResult13 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop111 = nativeRegisterOrNone(ssNativeTop()); + rTop112 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop111 == NoReg) { - rTop111 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if 
(rTop112 == NoReg) { + rTop112 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult13 = allocateRegNotConflictingWith(1ULL << rTop111); - assert(!(((rTop111 == NoReg) + rResult13 = allocateRegNotConflictingWith(1ULL << rTop112); + assert(!(((rTop112 == NoReg) || (rResult13 == NoReg)))); - size2 = rTop111; + size2 = rTop112; pointer8 = rResult13; nativePopToReg(ssNativeTop(), size2); ssNativePop(1); @@ -35380,10 +35385,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 131: /* begin genLowcodeMemcpy32 */ - rTop30 = (rNext13 = (rNextNext2 = NoReg)); + rTop29 = (rNext13 = (rNextNext2 = NoReg)); nativeValueIndex2 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop30 = nativeRegisterOrNone(ssNativeTop()); + rTop29 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext13 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -35402,7 +35407,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext2 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex2)); } } - if (rTop30 == NoReg) { + if (rTop29 == NoReg) { nextRegisterMask2 = 0; if (rNext13 != NoReg) { /* begin registerMaskFor: */ @@ -35411,11 +35416,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext2 != NoReg) { nextRegisterMask2 = nextRegisterMask2 | (1ULL << rNextNext2); } - rTop30 = allocateRegNotConflictingWith(nextRegisterMask2); + rTop29 = allocateRegNotConflictingWith(nextRegisterMask2); } if (rNext13 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask2 = 1ULL << rTop30; + nextRegisterMask2 = 1ULL << rTop29; if (rNextNext2 != NoReg) { nextRegisterMask2 = nextRegisterMask2 | (1ULL << rNextNext2); } @@ -35423,13 +35428,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext2 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask2 = (1ULL << rTop30) | (1ULL << rNext13); + nextRegisterMask2 = (1ULL << rTop29) | (1ULL << rNext13); rNextNext2 = allocateRegNotConflictingWith(nextRegisterMask2); } 
- assert(!(((rTop30 == NoReg) + assert(!(((rTop29 == NoReg) || ((rNext13 == NoReg) || (rNextNext2 == NoReg))))); - size3 = rTop30; + size3 = rTop29; source = rNext13; dest = rNextNext2; nativePopToReg(ssNativeTop(), size3); @@ -35454,10 +35459,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 132: /* begin genLowcodeMemcpy64 */ - rTop31 = (rNext14 = (rNextNext3 = NoReg)); + rTop30 = (rNext14 = (rNextNext3 = NoReg)); nativeValueIndex3 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop31 = nativeRegisterOrNone(ssNativeTop()); + rTop30 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext14 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -35476,7 +35481,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext3 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex3)); } } - if (rTop31 == NoReg) { + if (rTop30 == NoReg) { nextRegisterMask3 = 0; if (rNext14 != NoReg) { /* begin registerMaskFor: */ @@ -35485,11 +35490,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext3 != NoReg) { nextRegisterMask3 = nextRegisterMask3 | (1ULL << rNextNext3); } - rTop31 = allocateRegNotConflictingWith(nextRegisterMask3); + rTop30 = allocateRegNotConflictingWith(nextRegisterMask3); } if (rNext14 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask3 = 1ULL << rTop31; + nextRegisterMask3 = 1ULL << rTop30; if (rNextNext3 != NoReg) { nextRegisterMask3 = nextRegisterMask3 | (1ULL << rNextNext3); } @@ -35497,13 +35502,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext3 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask3 = (1ULL << rTop31) | (1ULL << rNext14); + nextRegisterMask3 = (1ULL << rTop30) | (1ULL << rNext14); rNextNext3 = allocateRegNotConflictingWith(nextRegisterMask3); } - assert(!(((rTop31 == NoReg) + assert(!(((rTop30 == NoReg) || ((rNext14 == NoReg) || (rNextNext3 == NoReg))))); - size4 = rTop31; + size4 = rTop30; source1 = rNext14; dest1 = rNextNext3; 
nativePopToReg(ssNativeTop(), size4); @@ -35531,9 +35536,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) size5 = extA; /* begin allocateRegistersForLowcodeInteger2: */ topRegistersMask13 = 0; - rTop32 = (rNext15 = NoReg); + rTop31 = (rNext15 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop32 = nativeRegisterOrNone(ssNativeTop()); + rTop31 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext15 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -35545,15 +35550,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask13 = 1ULL << reg16; } } - if (rTop32 == NoReg) { - rTop32 = allocateRegNotConflictingWith(topRegistersMask13); + if (rTop31 == NoReg) { + rTop31 = allocateRegNotConflictingWith(topRegistersMask13); } if (rNext15 == NoReg) { - rNext15 = allocateRegNotConflictingWith(1ULL << rTop32); + rNext15 = allocateRegNotConflictingWith(1ULL << rTop31); } - assert(!(((rTop32 == NoReg) + assert(!(((rTop31 == NoReg) || (rNext15 == NoReg)))); - source2 = rTop32; + source2 = rTop31; dest2 = rNext15; nativePopToReg(ssNativeTop(), source2); ssNativePop(1); @@ -35975,9 +35980,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) /* begin genLowcodePointerAddOffset64 */ /* begin allocateRegistersForLowcodeInteger2: */ topRegistersMask7 = 0; - rTop19 = (rNext12 = NoReg); + rTop110 = (rNext12 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop19 = nativeRegisterOrNone(ssNativeTop()); + rTop110 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext12 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -35989,15 +35994,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask7 = 1ULL << reg8; } } - if (rTop19 == NoReg) { - rTop19 = allocateRegNotConflictingWith(topRegistersMask7); + if (rTop110 == NoReg) { + rTop110 = allocateRegNotConflictingWith(topRegistersMask7); } if (rNext12 == NoReg) { - rNext12 = 
allocateRegNotConflictingWith(1ULL << rTop19); + rNext12 = allocateRegNotConflictingWith(1ULL << rTop110); } - assert(!(((rTop19 == NoReg) + assert(!(((rTop110 == NoReg) || (rNext12 == NoReg)))); - offset6 = rTop19; + offset6 = rTop110; base2 = rNext12; nativePopToReg(ssNativeTop(), offset6); ssNativePop(1); @@ -36011,9 +36016,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 165: /* begin genLowcodePointerEqual */ topRegistersMask8 = 0; - rTop21 = (rNext8 = NoReg); + rTop20 = (rNext8 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop21 = nativeRegisterOrNone(ssNativeTop()); + rTop20 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext8 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -36025,15 +36030,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask8 = 1ULL << reg9; } } - if (rTop21 == NoReg) { - rTop21 = allocateRegNotConflictingWith(topRegistersMask8); + if (rTop20 == NoReg) { + rTop20 = allocateRegNotConflictingWith(topRegistersMask8); } if (rNext8 == NoReg) { - rNext8 = allocateRegNotConflictingWith(1ULL << rTop21); + rNext8 = allocateRegNotConflictingWith(1ULL << rTop20); } - assert(!(((rTop21 == NoReg) + assert(!(((rTop20 == NoReg) || (rNext8 == NoReg)))); - second4 = rTop21; + second4 = rTop20; first4 = rNext8; nativePopToReg(ssNativeTop(), second4); ssNativePop(1); @@ -36057,9 +36062,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 166: /* begin genLowcodePointerNotEqual */ topRegistersMask9 = 0; - rTop22 = (rNext9 = NoReg); + rTop21 = (rNext9 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop22 = nativeRegisterOrNone(ssNativeTop()); + rTop21 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext9 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -36071,15 +36076,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask9 = 1ULL << reg10; } } - if (rTop22 == NoReg) { - rTop22 = 
allocateRegNotConflictingWith(topRegistersMask9); + if (rTop21 == NoReg) { + rTop21 = allocateRegNotConflictingWith(topRegistersMask9); } if (rNext9 == NoReg) { - rNext9 = allocateRegNotConflictingWith(1ULL << rTop22); + rNext9 = allocateRegNotConflictingWith(1ULL << rTop21); } - assert(!(((rTop22 == NoReg) + assert(!(((rTop21 == NoReg) || (rNext9 == NoReg)))); - second5 = rTop22; + second5 = rTop21; first5 = rNext9; nativePopToReg(ssNativeTop(), second5); ssNativePop(1); @@ -36102,15 +36107,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 167: /* begin genLowcodePointerToInt32 */ - rTop23 = NoReg; + rTop22 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop23 = nativeRegisterOrNone(ssNativeTop()); + rTop22 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop23 == NoReg) { - rTop23 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop22 == NoReg) { + rTop22 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop23 == NoReg))); - pointer5 = rTop23; + assert(!((rTop22 == NoReg))); + pointer5 = rTop22; nativePopToReg(ssNativeTop(), pointer5); ssNativePop(1); ssPushNativeRegister(pointer5); @@ -36119,18 +36124,18 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 168: /* begin genLowcodePointerToInt64 */ /* begin allocateRegistersForLowcodeIntegerResultInteger: */ - rTop110 = NoReg; + rTop111 = NoReg; rResult12 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop110 = nativeRegisterOrNone(ssNativeTop()); + rTop111 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop110 == NoReg) { - rTop110 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop111 == NoReg) { + rTop111 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult12 = allocateRegNotConflictingWith(1ULL << rTop110); - assert(!(((rTop110 == NoReg) + rResult12 = allocateRegNotConflictingWith(1ULL << rTop111); + assert(!(((rTop111 == NoReg) || (rResult12 == NoReg)))); - pointer6 = rTop110; + pointer6 = 
rTop111; result2 = rResult12; nativePopToReg(ssNativeTop(), pointer6); ssNativePop(1); @@ -36171,30 +36176,30 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 171: /* begin genLowcodePopInt32 */ - rTop25 = NoReg; + rTop24 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop25 = nativeRegisterOrNone(ssNativeTop()); + rTop24 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop25 == NoReg) { - rTop25 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop24 == NoReg) { + rTop24 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop25 == NoReg))); - value10 = rTop25; + assert(!((rTop24 == NoReg))); + value10 = rTop24; nativePopToReg(ssNativeTop(), value10); ssNativePop(1); return 0; case 172: /* begin genLowcodePopInt64 */ - rTop26 = NoReg; + rTop25 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop26 = nativeRegisterOrNone(ssNativeTop()); + rTop25 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop26 == NoReg) { - rTop26 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop25 == NoReg) { + rTop25 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop26 == NoReg))); - value11 = rTop26; + assert(!((rTop25 == NoReg))); + value11 = rTop25; nativePopToReg(ssNativeTop(), value11); ssNativePop(1); return 0; @@ -36207,15 +36212,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 174: /* begin genLowcodePopPointer */ - rTop27 = NoReg; + rTop26 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop27 = nativeRegisterOrNone(ssNativeTop()); + rTop26 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop27 == NoReg) { - rTop27 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop26 == NoReg) { + rTop26 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop27 == NoReg))); - pointerValue7 = rTop27; + assert(!((rTop26 == NoReg))); + pointerValue7 = rTop26; nativePopToReg(ssNativeTop(), pointerValue7); 
ssNativePop(1); return 0; @@ -36482,7 +36487,7 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) sqInt rTop13; sqInt rTop14; sqInt rTop15; - sqInt rTop16; + sqInt rTop17; sqInt rTop18; sqInt rTop19; sqInt rTop2; @@ -37022,9 +37027,9 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) /* begin genLowcodeStoreInt64ToMemory */ /* begin allocateRegistersForLowcodeInteger2: */ topRegistersMask8 = 0; - rTop16 = (rNext11 = NoReg); + rTop17 = (rNext11 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop16 = nativeRegisterOrNone(ssNativeTop()); + rTop17 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext11 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -37036,15 +37041,15 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) topRegistersMask8 = 1ULL << reg8; } } - if (rTop16 == NoReg) { - rTop16 = allocateRegNotConflictingWith(topRegistersMask8); + if (rTop17 == NoReg) { + rTop17 = allocateRegNotConflictingWith(topRegistersMask8); } if (rNext11 == NoReg) { - rNext11 = allocateRegNotConflictingWith(1ULL << rTop16); + rNext11 = allocateRegNotConflictingWith(1ULL << rTop17); } - assert(!(((rTop16 == NoReg) + assert(!(((rTop17 == NoReg) || (rNext11 == NoReg)))); - pointer4 = rTop16; + pointer4 = rTop17; value9 = rNext11; nativePopToReg(ssNativeTop(), pointer4); ssNativePop(1); @@ -38329,7 +38334,6 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) AbstractInstruction *anInstruction113; AbstractInstruction *anInstruction114; AbstractInstruction *anInstruction115; - AbstractInstruction *anInstruction116; AbstractInstruction *anInstruction12; AbstractInstruction *anInstruction13; AbstractInstruction *anInstruction14; @@ -38345,6 +38349,7 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) AbstractInstruction *anInstruction23; AbstractInstruction *anInstruction24; AbstractInstruction *anInstruction25; + AbstractInstruction *anInstruction26; AbstractInstruction *anInstruction3; AbstractInstruction *anInstruction4; AbstractInstruction 
*anInstruction5; @@ -39784,12 +39789,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump4 = gJumpFPGreater(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction17 = genoperandoperand(MoveCqR, 1, value11); + anInstruction18 = genoperandoperand(MoveCqR, 1, value11); /* begin Jump: */ contJump4 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump4, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction18 = genoperandoperand(MoveCqR, 0, value11); + anInstruction17 = genoperandoperand(MoveCqR, 0, value11); jmpTarget(contJump4, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value11); return 0; @@ -39887,12 +39892,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump5 = gJumpFPEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction19 = genoperandoperand(MoveCqR, 1, value13); + anInstruction20 = genoperandoperand(MoveCqR, 1, value13); /* begin Jump: */ contJump5 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump5, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction110 = genoperandoperand(MoveCqR, 0, value13); + anInstruction19 = genoperandoperand(MoveCqR, 0, value13); jmpTarget(contJump5, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value13); return 0; @@ -40156,12 +40161,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump6 = gJumpFPNotEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction20 = genoperandoperand(MoveCqR, 1, value19); + anInstruction21 = genoperandoperand(MoveCqR, 1, value19); /* begin Jump: */ contJump6 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump6, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction111 = genoperandoperand(MoveCqR, 0, value19); + 
anInstruction110 = genoperandoperand(MoveCqR, 0, value19); jmpTarget(contJump6, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value19); return 0; @@ -40202,12 +40207,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump7 = gJumpFPLessOrEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction21 = genoperandoperand(MoveCqR, 1, value20); + anInstruction22 = genoperandoperand(MoveCqR, 1, value20); /* begin Jump: */ contJump7 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump7, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction112 = genoperandoperand(MoveCqR, 0, value20); + anInstruction111 = genoperandoperand(MoveCqR, 0, value20); jmpTarget(contJump7, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value20); return 0; @@ -40248,12 +40253,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump8 = gJumpFPLess(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction22 = genoperandoperand(MoveCqR, 1, value21); + anInstruction23 = genoperandoperand(MoveCqR, 1, value21); /* begin Jump: */ contJump8 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump8, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction113 = genoperandoperand(MoveCqR, 0, value21); + anInstruction112 = genoperandoperand(MoveCqR, 0, value21); jmpTarget(contJump8, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value21); return 0; @@ -40294,12 +40299,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump9 = gJumpFPGreaterOrEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction23 = genoperandoperand(MoveCqR, 1, value22); + anInstruction24 = genoperandoperand(MoveCqR, 1, value22); /* begin Jump: */ contJump9 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump9, 
genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction114 = genoperandoperand(MoveCqR, 0, value22); + anInstruction113 = genoperandoperand(MoveCqR, 0, value22); jmpTarget(contJump9, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value22); return 0; @@ -40340,12 +40345,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump10 = gJumpFPGreater(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction24 = genoperandoperand(MoveCqR, 1, value23); + anInstruction25 = genoperandoperand(MoveCqR, 1, value23); /* begin Jump: */ contJump10 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump10, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction115 = genoperandoperand(MoveCqR, 0, value23); + anInstruction114 = genoperandoperand(MoveCqR, 0, value23); jmpTarget(contJump10, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value23); return 0; @@ -40443,12 +40448,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump11 = gJumpFPEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction25 = genoperandoperand(MoveCqR, 1, value25); + anInstruction26 = genoperandoperand(MoveCqR, 1, value25); /* begin Jump: */ contJump11 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump11, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction116 = genoperandoperand(MoveCqR, 0, value25); + anInstruction115 = genoperandoperand(MoveCqR, 0, value25); jmpTarget(contJump11, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value25); return 0; @@ -40806,7 +40811,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin 
ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -40835,7 +40841,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -40962,7 +40969,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -41019,7 +41027,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -41442,16 
+41451,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1ULL << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -41684,7 +41697,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -41698,7 +41712,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -41773,7 +41788,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, 
SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -41820,7 +41836,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -41858,7 +41875,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -42255,11 +42273,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. 
*/ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -42790,45 +42811,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1ULL << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1ULL << requiredReg1) | (1ULL << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. 
flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1ULL << requiredReg1) | (1ULL << requiredReg2)) | (1ULL << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredFloatRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredFloatRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -42929,39 +42911,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1ULL << requiredReg1) | (1ULL << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; 
- ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spurlowcode64src/vm/cointerp.c b/spurlowcode64src/vm/cointerp.c index fcbcde1f8b..c807dfb455 100644 --- a/spurlowcode64src/vm/cointerp.c +++ b/spurlowcode64src/vm/cointerp.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2589,7 +2589,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -5419,18 +5419,18 @@ interpret(void) fp = (thePage->headFP); if (fp == theFP) { frameAbove = 0; - goto l3221; + goto l3220; } while (((callerFP = pointerForOop(longAt(fp + FoxSavedFP)))) != 0) { if (callerFP == theFP) { frameAbove = fp; - goto l3221; + goto l3220; } fp = callerFP; } error("did not find theFP in stack page"); frameAbove = 0; - l3221: /* end findFrameAbove:inPage: */; + l3220: /* end findFrameAbove:inPage: */; 
/* begin newStackPage */ lruOrFree = (GIV(mostRecentlyUsedPage)->nextPage); if (((lruOrFree->baseFP)) == 0) { @@ -33163,7 +33163,7 @@ interpret(void) sqInt header1; sqInt i; int ignoreContext; - sqInt newClosure; + usqInt newClosure; usqInt newClosure1; usqInt newObj; sqInt numArgs; @@ -35551,12 +35551,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void diff --git a/spurlowcode64src/vm/cointerp.h b/spurlowcode64src/vm/cointerp.h index f4ef3bd9ff..8ca93edd49 100644 --- a/spurlowcode64src/vm/cointerp.h +++ b/spurlowcode64src/vm/cointerp.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spurlowcode64src/vm/gcc3x-cointerp.c b/spurlowcode64src/vm/gcc3x-cointerp.c index 18aa2fb879..ce09b81673 100644 --- a/spurlowcode64src/vm/gcc3x-cointerp.c +++ b/spurlowcode64src/vm/gcc3x-cointerp.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2592,7 +2592,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -5428,18 +5428,18 @@ interpret(void) fp = (thePage->headFP); if (fp == theFP) { 
frameAbove = 0; - goto l3221; + goto l3220; } while (((callerFP = pointerForOop(longAt(fp + FoxSavedFP)))) != 0) { if (callerFP == theFP) { frameAbove = fp; - goto l3221; + goto l3220; } fp = callerFP; } error("did not find theFP in stack page"); frameAbove = 0; - l3221: /* end findFrameAbove:inPage: */; + l3220: /* end findFrameAbove:inPage: */; /* begin newStackPage */ lruOrFree = (GIV(mostRecentlyUsedPage)->nextPage); if (((lruOrFree->baseFP)) == 0) { @@ -33172,7 +33172,7 @@ interpret(void) sqInt header1; sqInt i; int ignoreContext; - sqInt newClosure; + usqInt newClosure; usqInt newClosure1; usqInt newObj; sqInt numArgs; @@ -35560,12 +35560,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void diff --git a/spurlowcodesrc/vm/cogit.h b/spurlowcodesrc/vm/cogit.h index ff2f9991e5..d9b4360f7c 100644 --- a/spurlowcodesrc/vm/cogit.h +++ b/spurlowcodesrc/vm/cogit.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spurlowcodesrc/vm/cogitARMv5.c b/spurlowcodesrc/vm/cogitARMv5.c index 2b8a1e40ad..5131f4311e 100644 --- a/spurlowcodesrc/vm/cogitARMv5.c +++ b/spurlowcodesrc/vm/cogitARMv5.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -1384,15 +1384,9 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static 
void NoDbgRegParms ssAllocateRequiredFloatRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); static void NoDbgRegParms ssAllocateRequiredFloatReg(sqInt requiredReg); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssNativeFlushTo(sqInt index); @@ -10772,7 +10766,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -11360,6 +11354,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. 
*/ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -24253,6 +24250,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -24306,6 +24305,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -26795,7 +26796,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -29956,7 +29957,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { 
compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -30218,7 +30219,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -30511,7 +30512,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -30682,7 +30684,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -30861,7 +30864,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i 
+= 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -30898,7 +30902,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -31709,7 +31714,7 @@ genLowcodeBinaryInlinePrimitive(sqInt prim) sqInt i11; sqInt i110; sqInt i112; - sqInt i114; + sqInt i113; sqInt i115; sqInt i116; sqInt i117; @@ -32268,7 +32273,7 @@ genLowcodeBinaryInlinePrimitive(sqInt prim) /* Ensure we are not using a duplicated register. */ rOopTop13 = registerOrNone(ssTop()); index113 = ((simSpillBase < 0) ? 
0 : simSpillBase); - for (i114 = index113; i114 <= (simStackPtr); i114 += 1) { + for (i113 = index113; i113 <= (simStackPtr); i113 += 1) { if ((registerOrNone(simStackAt(index113))) == rOopTop13) { goto l45; } @@ -35845,6 +35850,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) sqInt rTop16; sqInt rTop17; sqInt rTop18; + sqInt rTop19; sqInt rTop2; sqInt rTop20; sqInt rTop21; @@ -35859,7 +35865,6 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) sqInt rTop3; sqInt rTop30; sqInt rTop31; - sqInt rTop32; sqInt rTop4; sqInt rTop5; sqInt rTop6; @@ -36167,18 +36172,18 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 129: /* begin genLowcodeMalloc32 */ - rTop28 = NoReg; + rTop27 = NoReg; rResult8 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop28 = nativeRegisterOrNone(ssNativeTop()); + rTop27 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop28 == NoReg) { - rTop28 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop27 == NoReg) { + rTop27 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult8 = allocateRegNotConflictingWith(1U << rTop28); - assert(!(((rTop28 == NoReg) + rResult8 = allocateRegNotConflictingWith(1U << rTop27); + assert(!(((rTop27 == NoReg) || (rResult8 == NoReg)))); - size1 = rTop28; + size1 = rTop27; pointer7 = rResult8; nativePopToReg(ssNativeTop(), size1); ssNativePop(1); @@ -36209,10 +36214,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) /* begin genLowcodeMalloc64 */ /* begin allocateRegistersForLowcodeInteger2ResultInteger: */ topRegistersMask12 = 0; - rTop29 = (rNext10 = NoReg); + rTop28 = (rNext10 = NoReg); rResult9 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop29 = nativeRegisterOrNone(ssNativeTop()); + rTop28 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext10 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -36224,17 +36229,17 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask12 = 1U << reg15; } } 
- if (rTop29 == NoReg) { - rTop29 = allocateRegNotConflictingWith(topRegistersMask12); + if (rTop28 == NoReg) { + rTop28 = allocateRegNotConflictingWith(topRegistersMask12); } if (rNext10 == NoReg) { - rNext10 = allocateRegNotConflictingWith(1U << rTop29); + rNext10 = allocateRegNotConflictingWith(1U << rTop28); } - assert(!(((rTop29 == NoReg) + assert(!(((rTop28 == NoReg) || (rNext10 == NoReg)))); - rResult9 = allocateFloatRegNotConflictingWith((1U << rTop29) | (1U << rNext10)); + rResult9 = allocateFloatRegNotConflictingWith((1U << rTop28) | (1U << rNext10)); assert(!((rResult9 == NoReg))); - sizeLow = rTop29; + sizeLow = rTop28; sizeHigh = rNext10; pointer8 = rResult9; nativePopToRegsecondReg(ssNativeTop(), sizeLow, sizeHigh); @@ -36264,10 +36269,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 131: /* begin genLowcodeMemcpy32 */ - rTop30 = (rNext13 = (rNextNext2 = NoReg)); + rTop29 = (rNext13 = (rNextNext2 = NoReg)); nativeValueIndex2 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop30 = nativeRegisterOrNone(ssNativeTop()); + rTop29 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext13 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -36286,7 +36291,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext2 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex2)); } } - if (rTop30 == NoReg) { + if (rTop29 == NoReg) { nextRegisterMask2 = 0; if (rNext13 != NoReg) { /* begin registerMaskFor: */ @@ -36295,11 +36300,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext2 != NoReg) { nextRegisterMask2 = nextRegisterMask2 | (1U << rNextNext2); } - rTop30 = allocateRegNotConflictingWith(nextRegisterMask2); + rTop29 = allocateRegNotConflictingWith(nextRegisterMask2); } if (rNext13 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask2 = 1U << rTop30; + nextRegisterMask2 = 1U << rTop29; if (rNextNext2 != NoReg) { nextRegisterMask2 = nextRegisterMask2 | (1U << rNextNext2); } @@ 
-36307,13 +36312,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext2 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask2 = (1U << rTop30) | (1U << rNext13); + nextRegisterMask2 = (1U << rTop29) | (1U << rNext13); rNextNext2 = allocateRegNotConflictingWith(nextRegisterMask2); } - assert(!(((rTop30 == NoReg) + assert(!(((rTop29 == NoReg) || ((rNext13 == NoReg) || (rNextNext2 == NoReg))))); - size3 = rTop30; + size3 = rTop29; source = rNext13; dest = rNextNext2; nativePopToReg(ssNativeTop(), size3); @@ -36338,10 +36343,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 132: /* begin genLowcodeMemcpy64 */ - rTop31 = (rNext14 = (rNextNext3 = NoReg)); + rTop30 = (rNext14 = (rNextNext3 = NoReg)); nativeValueIndex3 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop31 = nativeRegisterOrNone(ssNativeTop()); + rTop30 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext14 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -36360,7 +36365,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext3 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex3)); } } - if (rTop31 == NoReg) { + if (rTop30 == NoReg) { nextRegisterMask3 = 0; if (rNext14 != NoReg) { /* begin registerMaskFor: */ @@ -36369,11 +36374,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext3 != NoReg) { nextRegisterMask3 = nextRegisterMask3 | (1U << rNextNext3); } - rTop31 = allocateRegNotConflictingWith(nextRegisterMask3); + rTop30 = allocateRegNotConflictingWith(nextRegisterMask3); } if (rNext14 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask3 = 1U << rTop31; + nextRegisterMask3 = 1U << rTop30; if (rNextNext3 != NoReg) { nextRegisterMask3 = nextRegisterMask3 | (1U << rNextNext3); } @@ -36381,13 +36386,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext3 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask3 = (1U << rTop31) | (1U << rNext14); + nextRegisterMask3 = (1U << 
rTop30) | (1U << rNext14); rNextNext3 = allocateRegNotConflictingWith(nextRegisterMask3); } - assert(!(((rTop31 == NoReg) + assert(!(((rTop30 == NoReg) || ((rNext14 == NoReg) || (rNextNext3 == NoReg))))); - size4 = rTop31; + size4 = rTop30; source1 = rNext14; dest1 = rNextNext3; nativePopToReg(ssNativeTop(), size4); @@ -36415,9 +36420,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) size5 = extA; /* begin allocateRegistersForLowcodeInteger2: */ topRegistersMask13 = 0; - rTop32 = (rNext15 = NoReg); + rTop31 = (rNext15 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop32 = nativeRegisterOrNone(ssNativeTop()); + rTop31 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext15 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -36429,15 +36434,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask13 = 1U << reg16; } } - if (rTop32 == NoReg) { - rTop32 = allocateRegNotConflictingWith(topRegistersMask13); + if (rTop31 == NoReg) { + rTop31 = allocateRegNotConflictingWith(topRegistersMask13); } if (rNext15 == NoReg) { - rNext15 = allocateRegNotConflictingWith(1U << rTop32); + rNext15 = allocateRegNotConflictingWith(1U << rTop31); } - assert(!(((rTop32 == NoReg) + assert(!(((rTop31 == NoReg) || (rNext15 == NoReg)))); - source2 = rTop32; + source2 = rTop31; dest2 = rNext15; nativePopToReg(ssNativeTop(), source2); ssNativePop(1); @@ -36971,10 +36976,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 164: /* begin genLowcodePointerAddOffset64 */ /* begin allocateRegistersForLowcodeInteger3: */ - rTop20 = (rNext7 = (rNextNext1 = NoReg)); + rTop19 = (rNext7 = (rNextNext1 = NoReg)); nativeValueIndex1 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop20 = nativeRegisterOrNone(ssNativeTop()); + rTop19 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext7 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -36993,7 +36998,7 @@ 
genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext1 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex1)); } } - if (rTop20 == NoReg) { + if (rTop19 == NoReg) { nextRegisterMask1 = 0; if (rNext7 != NoReg) { /* begin registerMaskFor: */ @@ -37002,11 +37007,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext1 != NoReg) { nextRegisterMask1 = nextRegisterMask1 | (1U << rNextNext1); } - rTop20 = allocateRegNotConflictingWith(nextRegisterMask1); + rTop19 = allocateRegNotConflictingWith(nextRegisterMask1); } if (rNext7 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask1 = 1U << rTop20; + nextRegisterMask1 = 1U << rTop19; if (rNextNext1 != NoReg) { nextRegisterMask1 = nextRegisterMask1 | (1U << rNextNext1); } @@ -37014,13 +37019,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext1 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask1 = (1U << rTop20) | (1U << rNext7); + nextRegisterMask1 = (1U << rTop19) | (1U << rNext7); rNextNext1 = allocateRegNotConflictingWith(nextRegisterMask1); } - assert(!(((rTop20 == NoReg) + assert(!(((rTop19 == NoReg) || ((rNext7 == NoReg) || (rNextNext1 == NoReg))))); - offsetLow = rTop20; + offsetLow = rTop19; offsetHigh = rNext7; base2 = rNextNext1; nativePopToRegsecondReg(ssNativeTop(), offsetLow, offsetHigh); @@ -37035,9 +37040,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 165: /* begin genLowcodePointerEqual */ topRegistersMask8 = 0; - rTop21 = (rNext8 = NoReg); + rTop20 = (rNext8 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop21 = nativeRegisterOrNone(ssNativeTop()); + rTop20 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext8 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -37049,15 +37054,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask8 = 1U << reg9; } } - if (rTop21 == NoReg) { - rTop21 = allocateRegNotConflictingWith(topRegistersMask8); + if (rTop20 == NoReg) { + rTop20 = 
allocateRegNotConflictingWith(topRegistersMask8); } if (rNext8 == NoReg) { - rNext8 = allocateRegNotConflictingWith(1U << rTop21); + rNext8 = allocateRegNotConflictingWith(1U << rTop20); } - assert(!(((rTop21 == NoReg) + assert(!(((rTop20 == NoReg) || (rNext8 == NoReg)))); - second4 = rTop21; + second4 = rTop20; first4 = rNext8; nativePopToReg(ssNativeTop(), second4); ssNativePop(1); @@ -37087,9 +37092,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 166: /* begin genLowcodePointerNotEqual */ topRegistersMask9 = 0; - rTop22 = (rNext9 = NoReg); + rTop21 = (rNext9 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop22 = nativeRegisterOrNone(ssNativeTop()); + rTop21 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext9 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -37101,15 +37106,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask9 = 1U << reg10; } } - if (rTop22 == NoReg) { - rTop22 = allocateRegNotConflictingWith(topRegistersMask9); + if (rTop21 == NoReg) { + rTop21 = allocateRegNotConflictingWith(topRegistersMask9); } if (rNext9 == NoReg) { - rNext9 = allocateRegNotConflictingWith(1U << rTop22); + rNext9 = allocateRegNotConflictingWith(1U << rTop21); } - assert(!(((rTop22 == NoReg) + assert(!(((rTop21 == NoReg) || (rNext9 == NoReg)))); - second5 = rTop22; + second5 = rTop21; first5 = rNext9; nativePopToReg(ssNativeTop(), second5); ssNativePop(1); @@ -37138,15 +37143,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 167: /* begin genLowcodePointerToInt32 */ - rTop23 = NoReg; + rTop22 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop23 = nativeRegisterOrNone(ssNativeTop()); + rTop22 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop23 == NoReg) { - rTop23 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop22 == NoReg) { + rTop22 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop23 == NoReg))); - 
pointer5 = rTop23; + assert(!((rTop22 == NoReg))); + pointer5 = rTop22; nativePopToReg(ssNativeTop(), pointer5); ssNativePop(1); ssPushNativeRegister(pointer5); @@ -37155,20 +37160,20 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 168: /* begin genLowcodePointerToInt64 */ /* begin allocateRegistersForLowcodeIntegerResultInteger2: */ - rTop24 = NoReg; + rTop23 = NoReg; rResult7 = (rResult7 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop24 = nativeRegisterOrNone(ssNativeTop()); + rTop23 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop24 == NoReg) { - rTop24 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop23 == NoReg) { + rTop23 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult7 = allocateRegNotConflictingWith(1U << rTop24); - rResult22 = allocateRegNotConflictingWith((1U << rTop24) | (1U << rResult7)); - assert(!(((rTop24 == NoReg) + rResult7 = allocateRegNotConflictingWith(1U << rTop23); + rResult22 = allocateRegNotConflictingWith((1U << rTop23) | (1U << rResult7)); + assert(!(((rTop23 == NoReg) || ((rResult7 == NoReg) || (rResult22 == NoReg))))); - pointer6 = rTop24; + pointer6 = rTop23; resultLow1 = rResult7; resultHigh1 = rResult22; nativePopToReg(ssNativeTop(), pointer6); @@ -37217,30 +37222,30 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 171: /* begin genLowcodePopInt32 */ - rTop25 = NoReg; + rTop24 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop25 = nativeRegisterOrNone(ssNativeTop()); + rTop24 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop25 == NoReg) { - rTop25 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop24 == NoReg) { + rTop24 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop25 == NoReg))); - value10 = rTop25; + assert(!((rTop24 == NoReg))); + value10 = rTop24; nativePopToReg(ssNativeTop(), value10); ssNativePop(1); return 0; case 172: /* begin genLowcodePopInt64 */ - rTop26 = NoReg; + rTop25 = 
NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop26 = nativeRegisterOrNone(ssNativeTop()); + rTop25 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop26 == NoReg) { - rTop26 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop25 == NoReg) { + rTop25 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop26 == NoReg))); - value11 = rTop26; + assert(!((rTop25 == NoReg))); + value11 = rTop25; nativePopToReg(ssNativeTop(), value11); ssNativePop(1); return 0; @@ -37253,15 +37258,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 174: /* begin genLowcodePopPointer */ - rTop27 = NoReg; + rTop26 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop27 = nativeRegisterOrNone(ssNativeTop()); + rTop26 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop27 == NoReg) { - rTop27 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop26 == NoReg) { + rTop26 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop27 == NoReg))); - pointerValue7 = rTop27; + assert(!((rTop26 == NoReg))); + pointerValue7 = rTop26; nativePopToReg(ssNativeTop(), pointerValue7); ssNativePop(1); return 0; @@ -37567,7 +37572,7 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) sqInt rTop10; sqInt rTop14; sqInt rTop15; - sqInt rTop17; + sqInt rTop16; sqInt rTop18; sqInt rTop19; sqInt rTop2; @@ -38270,10 +38275,10 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) case 205: /* begin genLowcodeStoreInt64ToMemory */ /* begin allocateRegistersForLowcodeInteger3: */ - rTop17 = (rNext8 = (rNextNext = NoReg)); + rTop16 = (rNext8 = (rNextNext = NoReg)); nativeValueIndex = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop17 = nativeRegisterOrNone(ssNativeTop()); + rTop16 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext8 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -38292,7 +38297,7 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) 
rNextNext = nativeRegisterOrNone(ssNativeValue(nativeValueIndex)); } } - if (rTop17 == NoReg) { + if (rTop16 == NoReg) { nextRegisterMask = 0; if (rNext8 != NoReg) { /* begin registerMaskFor: */ @@ -38301,11 +38306,11 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) if (rNextNext != NoReg) { nextRegisterMask = nextRegisterMask | (1U << rNextNext); } - rTop17 = allocateRegNotConflictingWith(nextRegisterMask); + rTop16 = allocateRegNotConflictingWith(nextRegisterMask); } if (rNext8 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask = 1U << rTop17; + nextRegisterMask = 1U << rTop16; if (rNextNext != NoReg) { nextRegisterMask = nextRegisterMask | (1U << rNextNext); } @@ -38313,13 +38318,13 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) } if (rNextNext == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask = (1U << rTop17) | (1U << rNext8); + nextRegisterMask = (1U << rTop16) | (1U << rNext8); rNextNext = allocateRegNotConflictingWith(nextRegisterMask); } - assert(!(((rTop17 == NoReg) + assert(!(((rTop16 == NoReg) || ((rNext8 == NoReg) || (rNextNext == NoReg))))); - pointer4 = rTop17; + pointer4 = rTop16; valueLow3 = rNext8; valueHigh3 = rNextNext; nativePopToReg(ssNativeTop(), pointer4); @@ -39952,7 +39957,6 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) AbstractInstruction *anInstruction112; AbstractInstruction *anInstruction113; AbstractInstruction *anInstruction114; - AbstractInstruction *anInstruction115; AbstractInstruction *anInstruction12; AbstractInstruction *anInstruction13; AbstractInstruction *anInstruction14; @@ -39967,6 +39971,7 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) AbstractInstruction *anInstruction22; AbstractInstruction *anInstruction23; AbstractInstruction *anInstruction24; + AbstractInstruction *anInstruction25; AbstractInstruction *anInstruction3; AbstractInstruction *anInstruction4; AbstractInstruction *anInstruction5; @@ -41725,17 +41730,17 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump5 = 
gJumpFPEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction17 = genoperandoperand(MoveCqR, 1, value13); - if (usesOutOfLineLiteral(anInstruction17)) { - (anInstruction17->dependent = locateLiteral(1)); + anInstruction18 = genoperandoperand(MoveCqR, 1, value13); + if (usesOutOfLineLiteral(anInstruction18)) { + (anInstruction18->dependent = locateLiteral(1)); } /* begin Jump: */ contJump5 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump5, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction18 = genoperandoperand(MoveCqR, 0, value13); - if (usesOutOfLineLiteral(anInstruction18)) { - (anInstruction18->dependent = locateLiteral(0)); + anInstruction17 = genoperandoperand(MoveCqR, 0, value13); + if (usesOutOfLineLiteral(anInstruction17)) { + (anInstruction17->dependent = locateLiteral(0)); } jmpTarget(contJump5, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value13); @@ -42000,17 +42005,17 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump6 = gJumpFPNotEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction19 = genoperandoperand(MoveCqR, 1, value19); - if (usesOutOfLineLiteral(anInstruction19)) { - (anInstruction19->dependent = locateLiteral(1)); + anInstruction20 = genoperandoperand(MoveCqR, 1, value19); + if (usesOutOfLineLiteral(anInstruction20)) { + (anInstruction20->dependent = locateLiteral(1)); } /* begin Jump: */ contJump6 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump6, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction110 = genoperandoperand(MoveCqR, 0, value19); - if (usesOutOfLineLiteral(anInstruction110)) { - (anInstruction110->dependent = locateLiteral(0)); + anInstruction19 = genoperandoperand(MoveCqR, 0, value19); + if (usesOutOfLineLiteral(anInstruction19)) { + (anInstruction19->dependent = locateLiteral(0)); } 
jmpTarget(contJump6, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value19); @@ -42052,17 +42057,17 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump7 = gJumpFPLessOrEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction20 = genoperandoperand(MoveCqR, 1, value20); - if (usesOutOfLineLiteral(anInstruction20)) { - (anInstruction20->dependent = locateLiteral(1)); + anInstruction21 = genoperandoperand(MoveCqR, 1, value20); + if (usesOutOfLineLiteral(anInstruction21)) { + (anInstruction21->dependent = locateLiteral(1)); } /* begin Jump: */ contJump7 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump7, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction111 = genoperandoperand(MoveCqR, 0, value20); - if (usesOutOfLineLiteral(anInstruction111)) { - (anInstruction111->dependent = locateLiteral(0)); + anInstruction110 = genoperandoperand(MoveCqR, 0, value20); + if (usesOutOfLineLiteral(anInstruction110)) { + (anInstruction110->dependent = locateLiteral(0)); } jmpTarget(contJump7, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value20); @@ -42104,17 +42109,17 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump8 = gJumpFPLess(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction21 = genoperandoperand(MoveCqR, 1, value21); - if (usesOutOfLineLiteral(anInstruction21)) { - (anInstruction21->dependent = locateLiteral(1)); + anInstruction22 = genoperandoperand(MoveCqR, 1, value21); + if (usesOutOfLineLiteral(anInstruction22)) { + (anInstruction22->dependent = locateLiteral(1)); } /* begin Jump: */ contJump8 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump8, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction112 = genoperandoperand(MoveCqR, 0, value21); - if 
(usesOutOfLineLiteral(anInstruction112)) { - (anInstruction112->dependent = locateLiteral(0)); + anInstruction111 = genoperandoperand(MoveCqR, 0, value21); + if (usesOutOfLineLiteral(anInstruction111)) { + (anInstruction111->dependent = locateLiteral(0)); } jmpTarget(contJump8, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value21); @@ -42156,17 +42161,17 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump9 = gJumpFPGreaterOrEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction22 = genoperandoperand(MoveCqR, 1, value22); - if (usesOutOfLineLiteral(anInstruction22)) { - (anInstruction22->dependent = locateLiteral(1)); + anInstruction23 = genoperandoperand(MoveCqR, 1, value22); + if (usesOutOfLineLiteral(anInstruction23)) { + (anInstruction23->dependent = locateLiteral(1)); } /* begin Jump: */ contJump9 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump9, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction113 = genoperandoperand(MoveCqR, 0, value22); - if (usesOutOfLineLiteral(anInstruction113)) { - (anInstruction113->dependent = locateLiteral(0)); + anInstruction112 = genoperandoperand(MoveCqR, 0, value22); + if (usesOutOfLineLiteral(anInstruction112)) { + (anInstruction112->dependent = locateLiteral(0)); } jmpTarget(contJump9, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value22); @@ -42208,17 +42213,17 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump10 = gJumpFPGreater(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction23 = genoperandoperand(MoveCqR, 1, value23); - if (usesOutOfLineLiteral(anInstruction23)) { - (anInstruction23->dependent = locateLiteral(1)); + anInstruction24 = genoperandoperand(MoveCqR, 1, value23); + if (usesOutOfLineLiteral(anInstruction24)) { + (anInstruction24->dependent = locateLiteral(1)); } /* begin Jump: */ contJump10 
= genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump10, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction114 = genoperandoperand(MoveCqR, 0, value23); - if (usesOutOfLineLiteral(anInstruction114)) { - (anInstruction114->dependent = locateLiteral(0)); + anInstruction113 = genoperandoperand(MoveCqR, 0, value23); + if (usesOutOfLineLiteral(anInstruction113)) { + (anInstruction113->dependent = locateLiteral(0)); } jmpTarget(contJump10, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value23); @@ -42317,17 +42322,17 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump11 = gJumpFPEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction24 = genoperandoperand(MoveCqR, 1, value25); - if (usesOutOfLineLiteral(anInstruction24)) { - (anInstruction24->dependent = locateLiteral(1)); + anInstruction25 = genoperandoperand(MoveCqR, 1, value25); + if (usesOutOfLineLiteral(anInstruction25)) { + (anInstruction25->dependent = locateLiteral(1)); } /* begin Jump: */ contJump11 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump11, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction115 = genoperandoperand(MoveCqR, 0, value25); - if (usesOutOfLineLiteral(anInstruction115)) { - (anInstruction115->dependent = locateLiteral(0)); + anInstruction114 = genoperandoperand(MoveCqR, 0, value25); + if (usesOutOfLineLiteral(anInstruction114)) { + (anInstruction114->dependent = locateLiteral(0)); } jmpTarget(contJump11, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value25); @@ -42726,7 +42731,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + 
ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -42755,7 +42761,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -42885,7 +42892,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -42947,7 +42955,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -43389,16 +43398,20 @@ 
genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -43637,7 +43650,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(gMoveCwR(association, ReceiverResultReg), association); @@ -43654,7 +43668,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -43732,7 +43747,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + 
ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -43782,7 +43798,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -43820,7 +43837,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -44230,11 +44248,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. 
*/ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -44765,45 +44786,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. 
flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredFloatRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredFloatRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -44904,39 +44886,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - 
ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spurlowcodesrc/vm/cogitIA32.c b/spurlowcodesrc/vm/cogitIA32.c index f4b6199cd3..b4112caba6 100644 --- a/spurlowcodesrc/vm/cogitIA32.c +++ b/spurlowcodesrc/vm/cogitIA32.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -682,7 +682,7 @@ static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static CogMethod * NoDbgRegParms fillInMethodHeadersizeselector(CogMethod *method, sqInt size, sqInt selector); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); -static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod *cogMethod); static sqInt NoDbgRegParms findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor *descriptor, 
sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -1322,15 +1322,9 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredFloatRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); static void NoDbgRegParms ssAllocateRequiredFloatReg(sqInt requiredReg); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssNativeFlushTo(sqInt index); @@ -10767,7 +10761,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == 
(fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -11075,7 +11069,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -11355,6 +11349,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -23191,6 +23188,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -23242,6 +23241,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -24907,7 +24908,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == 
(fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -27961,7 +27962,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -28209,7 +28210,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -28493,7 +28494,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -28664,7 +28666,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -28837,7 +28840,8 @@ genExtPushClosureBytecode(void) /* begin 
genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -28874,7 +28878,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29659,7 +29664,7 @@ genLowcodeBinaryInlinePrimitive(sqInt prim) sqInt i114; sqInt i115; sqInt i116; - sqInt i118; + sqInt i117; sqInt i119; sqInt i12; sqInt i120; @@ -30211,7 +30216,7 @@ genLowcodeBinaryInlinePrimitive(sqInt prim) /* Ensure we are not using a duplicated register. */ rOopTop17 = registerOrNone(ssTop()); index117 = ((simSpillBase < 0) ? 
0 : simSpillBase); - for (i118 = index117; i118 <= (simStackPtr); i118 += 1) { + for (i117 = index117; i117 <= (simStackPtr); i117 += 1) { if ((registerOrNone(simStackAt(index117))) == rOopTop17) { goto l59; } @@ -33558,6 +33563,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) sqInt rTop16; sqInt rTop17; sqInt rTop18; + sqInt rTop19; sqInt rTop2; sqInt rTop20; sqInt rTop21; @@ -33572,7 +33578,6 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) sqInt rTop3; sqInt rTop30; sqInt rTop31; - sqInt rTop32; sqInt rTop4; sqInt rTop5; sqInt rTop6; @@ -33841,18 +33846,18 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 129: /* begin genLowcodeMalloc32 */ - rTop28 = NoReg; + rTop27 = NoReg; rResult8 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop28 = nativeRegisterOrNone(ssNativeTop()); + rTop27 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop28 == NoReg) { - rTop28 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop27 == NoReg) { + rTop27 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult8 = allocateRegNotConflictingWith(1U << rTop28); - assert(!(((rTop28 == NoReg) + rResult8 = allocateRegNotConflictingWith(1U << rTop27); + assert(!(((rTop27 == NoReg) || (rResult8 == NoReg)))); - size1 = rTop28; + size1 = rTop27; pointer7 = rResult8; nativePopToReg(ssNativeTop(), size1); ssNativePop(1); @@ -33883,10 +33888,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) /* begin genLowcodeMalloc64 */ /* begin allocateRegistersForLowcodeInteger2ResultInteger: */ topRegistersMask12 = 0; - rTop29 = (rNext10 = NoReg); + rTop28 = (rNext10 = NoReg); rResult9 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop29 = nativeRegisterOrNone(ssNativeTop()); + rTop28 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext10 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -33898,17 +33903,17 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask12 = 1U << reg15; } } 
- if (rTop29 == NoReg) { - rTop29 = allocateRegNotConflictingWith(topRegistersMask12); + if (rTop28 == NoReg) { + rTop28 = allocateRegNotConflictingWith(topRegistersMask12); } if (rNext10 == NoReg) { - rNext10 = allocateRegNotConflictingWith(1U << rTop29); + rNext10 = allocateRegNotConflictingWith(1U << rTop28); } - assert(!(((rTop29 == NoReg) + assert(!(((rTop28 == NoReg) || (rNext10 == NoReg)))); - rResult9 = allocateFloatRegNotConflictingWith((1U << rTop29) | (1U << rNext10)); + rResult9 = allocateFloatRegNotConflictingWith((1U << rTop28) | (1U << rNext10)); assert(!((rResult9 == NoReg))); - sizeLow = rTop29; + sizeLow = rTop28; sizeHigh = rNext10; pointer8 = rResult9; nativePopToRegsecondReg(ssNativeTop(), sizeLow, sizeHigh); @@ -33938,10 +33943,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 131: /* begin genLowcodeMemcpy32 */ - rTop30 = (rNext13 = (rNextNext2 = NoReg)); + rTop29 = (rNext13 = (rNextNext2 = NoReg)); nativeValueIndex2 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop30 = nativeRegisterOrNone(ssNativeTop()); + rTop29 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext13 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -33960,7 +33965,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext2 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex2)); } } - if (rTop30 == NoReg) { + if (rTop29 == NoReg) { nextRegisterMask2 = 0; if (rNext13 != NoReg) { /* begin registerMaskFor: */ @@ -33969,11 +33974,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext2 != NoReg) { nextRegisterMask2 = nextRegisterMask2 | (1U << rNextNext2); } - rTop30 = allocateRegNotConflictingWith(nextRegisterMask2); + rTop29 = allocateRegNotConflictingWith(nextRegisterMask2); } if (rNext13 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask2 = 1U << rTop30; + nextRegisterMask2 = 1U << rTop29; if (rNextNext2 != NoReg) { nextRegisterMask2 = nextRegisterMask2 | (1U << rNextNext2); } @@ 
-33981,13 +33986,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext2 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask2 = (1U << rTop30) | (1U << rNext13); + nextRegisterMask2 = (1U << rTop29) | (1U << rNext13); rNextNext2 = allocateRegNotConflictingWith(nextRegisterMask2); } - assert(!(((rTop30 == NoReg) + assert(!(((rTop29 == NoReg) || ((rNext13 == NoReg) || (rNextNext2 == NoReg))))); - size3 = rTop30; + size3 = rTop29; source = rNext13; dest = rNextNext2; nativePopToReg(ssNativeTop(), size3); @@ -34012,10 +34017,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 132: /* begin genLowcodeMemcpy64 */ - rTop31 = (rNext14 = (rNextNext3 = NoReg)); + rTop30 = (rNext14 = (rNextNext3 = NoReg)); nativeValueIndex3 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop31 = nativeRegisterOrNone(ssNativeTop()); + rTop30 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext14 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -34034,7 +34039,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext3 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex3)); } } - if (rTop31 == NoReg) { + if (rTop30 == NoReg) { nextRegisterMask3 = 0; if (rNext14 != NoReg) { /* begin registerMaskFor: */ @@ -34043,11 +34048,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext3 != NoReg) { nextRegisterMask3 = nextRegisterMask3 | (1U << rNextNext3); } - rTop31 = allocateRegNotConflictingWith(nextRegisterMask3); + rTop30 = allocateRegNotConflictingWith(nextRegisterMask3); } if (rNext14 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask3 = 1U << rTop31; + nextRegisterMask3 = 1U << rTop30; if (rNextNext3 != NoReg) { nextRegisterMask3 = nextRegisterMask3 | (1U << rNextNext3); } @@ -34055,13 +34060,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext3 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask3 = (1U << rTop31) | (1U << rNext14); + nextRegisterMask3 = (1U << 
rTop30) | (1U << rNext14); rNextNext3 = allocateRegNotConflictingWith(nextRegisterMask3); } - assert(!(((rTop31 == NoReg) + assert(!(((rTop30 == NoReg) || ((rNext14 == NoReg) || (rNextNext3 == NoReg))))); - size4 = rTop31; + size4 = rTop30; source1 = rNext14; dest1 = rNextNext3; nativePopToReg(ssNativeTop(), size4); @@ -34089,9 +34094,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) size5 = extA; /* begin allocateRegistersForLowcodeInteger2: */ topRegistersMask13 = 0; - rTop32 = (rNext15 = NoReg); + rTop31 = (rNext15 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop32 = nativeRegisterOrNone(ssNativeTop()); + rTop31 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext15 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -34103,15 +34108,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask13 = 1U << reg16; } } - if (rTop32 == NoReg) { - rTop32 = allocateRegNotConflictingWith(topRegistersMask13); + if (rTop31 == NoReg) { + rTop31 = allocateRegNotConflictingWith(topRegistersMask13); } if (rNext15 == NoReg) { - rNext15 = allocateRegNotConflictingWith(1U << rTop32); + rNext15 = allocateRegNotConflictingWith(1U << rTop31); } - assert(!(((rTop32 == NoReg) + assert(!(((rTop31 == NoReg) || (rNext15 == NoReg)))); - source2 = rTop32; + source2 = rTop31; dest2 = rNext15; nativePopToReg(ssNativeTop(), source2); ssNativePop(1); @@ -34630,10 +34635,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 164: /* begin genLowcodePointerAddOffset64 */ /* begin allocateRegistersForLowcodeInteger3: */ - rTop20 = (rNext7 = (rNextNext1 = NoReg)); + rTop19 = (rNext7 = (rNextNext1 = NoReg)); nativeValueIndex1 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop20 = nativeRegisterOrNone(ssNativeTop()); + rTop19 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext7 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -34652,7 +34657,7 @@ 
genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext1 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex1)); } } - if (rTop20 == NoReg) { + if (rTop19 == NoReg) { nextRegisterMask1 = 0; if (rNext7 != NoReg) { /* begin registerMaskFor: */ @@ -34661,11 +34666,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext1 != NoReg) { nextRegisterMask1 = nextRegisterMask1 | (1U << rNextNext1); } - rTop20 = allocateRegNotConflictingWith(nextRegisterMask1); + rTop19 = allocateRegNotConflictingWith(nextRegisterMask1); } if (rNext7 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask1 = 1U << rTop20; + nextRegisterMask1 = 1U << rTop19; if (rNextNext1 != NoReg) { nextRegisterMask1 = nextRegisterMask1 | (1U << rNextNext1); } @@ -34673,13 +34678,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext1 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask1 = (1U << rTop20) | (1U << rNext7); + nextRegisterMask1 = (1U << rTop19) | (1U << rNext7); rNextNext1 = allocateRegNotConflictingWith(nextRegisterMask1); } - assert(!(((rTop20 == NoReg) + assert(!(((rTop19 == NoReg) || ((rNext7 == NoReg) || (rNextNext1 == NoReg))))); - offsetLow = rTop20; + offsetLow = rTop19; offsetHigh = rNext7; base2 = rNextNext1; nativePopToRegsecondReg(ssNativeTop(), offsetLow, offsetHigh); @@ -34694,9 +34699,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 165: /* begin genLowcodePointerEqual */ topRegistersMask8 = 0; - rTop21 = (rNext8 = NoReg); + rTop20 = (rNext8 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop21 = nativeRegisterOrNone(ssNativeTop()); + rTop20 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext8 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -34708,15 +34713,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask8 = 1U << reg9; } } - if (rTop21 == NoReg) { - rTop21 = allocateRegNotConflictingWith(topRegistersMask8); + if (rTop20 == NoReg) { + rTop20 = 
allocateRegNotConflictingWith(topRegistersMask8); } if (rNext8 == NoReg) { - rNext8 = allocateRegNotConflictingWith(1U << rTop21); + rNext8 = allocateRegNotConflictingWith(1U << rTop20); } - assert(!(((rTop21 == NoReg) + assert(!(((rTop20 == NoReg) || (rNext8 == NoReg)))); - second4 = rTop21; + second4 = rTop20; first4 = rNext8; nativePopToReg(ssNativeTop(), second4); ssNativePop(1); @@ -34740,9 +34745,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 166: /* begin genLowcodePointerNotEqual */ topRegistersMask9 = 0; - rTop22 = (rNext9 = NoReg); + rTop21 = (rNext9 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop22 = nativeRegisterOrNone(ssNativeTop()); + rTop21 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext9 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -34754,15 +34759,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask9 = 1U << reg10; } } - if (rTop22 == NoReg) { - rTop22 = allocateRegNotConflictingWith(topRegistersMask9); + if (rTop21 == NoReg) { + rTop21 = allocateRegNotConflictingWith(topRegistersMask9); } if (rNext9 == NoReg) { - rNext9 = allocateRegNotConflictingWith(1U << rTop22); + rNext9 = allocateRegNotConflictingWith(1U << rTop21); } - assert(!(((rTop22 == NoReg) + assert(!(((rTop21 == NoReg) || (rNext9 == NoReg)))); - second5 = rTop22; + second5 = rTop21; first5 = rNext9; nativePopToReg(ssNativeTop(), second5); ssNativePop(1); @@ -34785,15 +34790,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 167: /* begin genLowcodePointerToInt32 */ - rTop23 = NoReg; + rTop22 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop23 = nativeRegisterOrNone(ssNativeTop()); + rTop22 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop23 == NoReg) { - rTop23 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop22 == NoReg) { + rTop22 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop23 == NoReg))); - 
pointer5 = rTop23; + assert(!((rTop22 == NoReg))); + pointer5 = rTop22; nativePopToReg(ssNativeTop(), pointer5); ssNativePop(1); ssPushNativeRegister(pointer5); @@ -34802,20 +34807,20 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 168: /* begin genLowcodePointerToInt64 */ /* begin allocateRegistersForLowcodeIntegerResultInteger2: */ - rTop24 = NoReg; + rTop23 = NoReg; rResult7 = (rResult7 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop24 = nativeRegisterOrNone(ssNativeTop()); + rTop23 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop24 == NoReg) { - rTop24 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop23 == NoReg) { + rTop23 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult7 = allocateRegNotConflictingWith(1U << rTop24); - rResult22 = allocateRegNotConflictingWith((1U << rTop24) | (1U << rResult7)); - assert(!(((rTop24 == NoReg) + rResult7 = allocateRegNotConflictingWith(1U << rTop23); + rResult22 = allocateRegNotConflictingWith((1U << rTop23) | (1U << rResult7)); + assert(!(((rTop23 == NoReg) || ((rResult7 == NoReg) || (rResult22 == NoReg))))); - pointer6 = rTop24; + pointer6 = rTop23; resultLow1 = rResult7; resultHigh1 = rResult22; nativePopToReg(ssNativeTop(), pointer6); @@ -34861,30 +34866,30 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 171: /* begin genLowcodePopInt32 */ - rTop25 = NoReg; + rTop24 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop25 = nativeRegisterOrNone(ssNativeTop()); + rTop24 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop25 == NoReg) { - rTop25 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop24 == NoReg) { + rTop24 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop25 == NoReg))); - value10 = rTop25; + assert(!((rTop24 == NoReg))); + value10 = rTop24; nativePopToReg(ssNativeTop(), value10); ssNativePop(1); return 0; case 172: /* begin genLowcodePopInt64 */ - rTop26 = NoReg; + rTop25 = 
NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop26 = nativeRegisterOrNone(ssNativeTop()); + rTop25 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop26 == NoReg) { - rTop26 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop25 == NoReg) { + rTop25 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop26 == NoReg))); - value11 = rTop26; + assert(!((rTop25 == NoReg))); + value11 = rTop25; nativePopToReg(ssNativeTop(), value11); ssNativePop(1); return 0; @@ -34897,15 +34902,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 174: /* begin genLowcodePopPointer */ - rTop27 = NoReg; + rTop26 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop27 = nativeRegisterOrNone(ssNativeTop()); + rTop26 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop27 == NoReg) { - rTop27 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop26 == NoReg) { + rTop26 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop27 == NoReg))); - pointerValue7 = rTop27; + assert(!((rTop26 == NoReg))); + pointerValue7 = rTop26; nativePopToReg(ssNativeTop(), pointerValue7); ssNativePop(1); return 0; @@ -35204,7 +35209,7 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) sqInt rTop10; sqInt rTop14; sqInt rTop15; - sqInt rTop17; + sqInt rTop16; sqInt rTop18; sqInt rTop19; sqInt rTop2; @@ -35833,10 +35838,10 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) case 205: /* begin genLowcodeStoreInt64ToMemory */ /* begin allocateRegistersForLowcodeInteger3: */ - rTop17 = (rNext8 = (rNextNext = NoReg)); + rTop16 = (rNext8 = (rNextNext = NoReg)); nativeValueIndex = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop17 = nativeRegisterOrNone(ssNativeTop()); + rTop16 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext8 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -35855,7 +35860,7 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) 
rNextNext = nativeRegisterOrNone(ssNativeValue(nativeValueIndex)); } } - if (rTop17 == NoReg) { + if (rTop16 == NoReg) { nextRegisterMask = 0; if (rNext8 != NoReg) { /* begin registerMaskFor: */ @@ -35864,11 +35869,11 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) if (rNextNext != NoReg) { nextRegisterMask = nextRegisterMask | (1U << rNextNext); } - rTop17 = allocateRegNotConflictingWith(nextRegisterMask); + rTop16 = allocateRegNotConflictingWith(nextRegisterMask); } if (rNext8 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask = 1U << rTop17; + nextRegisterMask = 1U << rTop16; if (rNextNext != NoReg) { nextRegisterMask = nextRegisterMask | (1U << rNextNext); } @@ -35876,13 +35881,13 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) } if (rNextNext == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask = (1U << rTop17) | (1U << rNext8); + nextRegisterMask = (1U << rTop16) | (1U << rNext8); rNextNext = allocateRegNotConflictingWith(nextRegisterMask); } - assert(!(((rTop17 == NoReg) + assert(!(((rTop16 == NoReg) || ((rNext8 == NoReg) || (rNextNext == NoReg))))); - pointer4 = rTop17; + pointer4 = rTop16; valueLow3 = rNext8; valueHigh3 = rNextNext; nativePopToReg(ssNativeTop(), pointer4); @@ -37390,7 +37395,6 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) AbstractInstruction *anInstruction113; AbstractInstruction *anInstruction114; AbstractInstruction *anInstruction115; - AbstractInstruction *anInstruction116; AbstractInstruction *anInstruction12; AbstractInstruction *anInstruction13; AbstractInstruction *anInstruction14; @@ -37406,6 +37410,7 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) AbstractInstruction *anInstruction23; AbstractInstruction *anInstruction24; AbstractInstruction *anInstruction25; + AbstractInstruction *anInstruction26; AbstractInstruction *anInstruction3; AbstractInstruction *anInstruction4; AbstractInstruction *anInstruction5; @@ -39012,12 +39017,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump4 = 
gJumpFPGreater(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction17 = genoperandoperand(MoveCqR, 1, value11); + anInstruction18 = genoperandoperand(MoveCqR, 1, value11); /* begin Jump: */ contJump4 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump4, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction18 = genoperandoperand(MoveCqR, 0, value11); + anInstruction17 = genoperandoperand(MoveCqR, 0, value11); jmpTarget(contJump4, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value11); return 0; @@ -39115,12 +39120,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump5 = gJumpFPEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction19 = genoperandoperand(MoveCqR, 1, value13); + anInstruction20 = genoperandoperand(MoveCqR, 1, value13); /* begin Jump: */ contJump5 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump5, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction110 = genoperandoperand(MoveCqR, 0, value13); + anInstruction19 = genoperandoperand(MoveCqR, 0, value13); jmpTarget(contJump5, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value13); return 0; @@ -39384,12 +39389,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump6 = gJumpFPNotEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction20 = genoperandoperand(MoveCqR, 1, value19); + anInstruction21 = genoperandoperand(MoveCqR, 1, value19); /* begin Jump: */ contJump6 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump6, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction111 = genoperandoperand(MoveCqR, 0, value19); + anInstruction110 = genoperandoperand(MoveCqR, 0, value19); jmpTarget(contJump6, genoperandoperand(Label, (labelCounter += 1), 
bytecodePC)); ssPushNativeRegister(value19); return 0; @@ -39430,12 +39435,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump7 = gJumpFPLessOrEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction21 = genoperandoperand(MoveCqR, 1, value20); + anInstruction22 = genoperandoperand(MoveCqR, 1, value20); /* begin Jump: */ contJump7 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump7, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction112 = genoperandoperand(MoveCqR, 0, value20); + anInstruction111 = genoperandoperand(MoveCqR, 0, value20); jmpTarget(contJump7, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value20); return 0; @@ -39476,12 +39481,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump8 = gJumpFPLess(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction22 = genoperandoperand(MoveCqR, 1, value21); + anInstruction23 = genoperandoperand(MoveCqR, 1, value21); /* begin Jump: */ contJump8 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump8, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction113 = genoperandoperand(MoveCqR, 0, value21); + anInstruction112 = genoperandoperand(MoveCqR, 0, value21); jmpTarget(contJump8, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value21); return 0; @@ -39522,12 +39527,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump9 = gJumpFPGreaterOrEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction23 = genoperandoperand(MoveCqR, 1, value22); + anInstruction24 = genoperandoperand(MoveCqR, 1, value22); /* begin Jump: */ contJump9 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump9, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction114 = 
genoperandoperand(MoveCqR, 0, value22); + anInstruction113 = genoperandoperand(MoveCqR, 0, value22); jmpTarget(contJump9, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value22); return 0; @@ -39568,12 +39573,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump10 = gJumpFPGreater(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction24 = genoperandoperand(MoveCqR, 1, value23); + anInstruction25 = genoperandoperand(MoveCqR, 1, value23); /* begin Jump: */ contJump10 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump10, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction115 = genoperandoperand(MoveCqR, 0, value23); + anInstruction114 = genoperandoperand(MoveCqR, 0, value23); jmpTarget(contJump10, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value23); return 0; @@ -39671,12 +39676,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump11 = gJumpFPEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction25 = genoperandoperand(MoveCqR, 1, value25); + anInstruction26 = genoperandoperand(MoveCqR, 1, value25); /* begin Jump: */ contJump11 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump11, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction116 = genoperandoperand(MoveCqR, 0, value25); + anInstruction115 = genoperandoperand(MoveCqR, 0, value25); jmpTarget(contJump11, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value25); return 0; @@ -40034,7 +40039,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << 
ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -40063,7 +40069,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -40190,7 +40197,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -40246,7 +40254,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -40669,16 +40678,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; 
sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -40911,7 +40924,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -40925,7 +40939,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -41000,7 +41015,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + 
ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -41047,7 +41063,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -41085,7 +41102,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -41482,11 +41500,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. 
*/ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -42017,45 +42038,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. 
flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredFloatRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredFloatRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -42156,39 +42138,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - 
ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spurlowcodesrc/vm/cogitMIPSEL.c b/spurlowcodesrc/vm/cogitMIPSEL.c index fd7bd35fe4..482c68af46 100644 --- a/spurlowcodesrc/vm/cogitMIPSEL.c +++ b/spurlowcodesrc/vm/cogitMIPSEL.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -654,7 +654,7 @@ static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static CogMethod * NoDbgRegParms fillInMethodHeadersizeselector(CogMethod *method, sqInt size, sqInt selector); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); -static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod *cogMethod); static sqInt NoDbgRegParms findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor 
*descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -733,7 +733,7 @@ static BytecodeDescriptor * loadBytesAndGetDescriptor(void); static void NoDbgRegParms loadSubsequentBytesForDescriptorat(BytecodeDescriptor *descriptor, sqInt pc); static AbstractInstruction * NoDbgRegParms gMoveAwR(sqInt address, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveCwR(sqInt wordConstant, sqInt reg); -static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); +static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); static sqInt NoDbgRegParms mapForperformUntilarg(CogMethod *cogMethod, sqInt (*functionSymbol)(sqInt annotation, char *mcpc, sqInt arg), sqInt arg); static sqInt NoDbgRegParms mapObjectReferencesInClosedPIC(CogMethod *cPIC); static void mapObjectReferencesInGeneratedRuntime(void); @@ -1493,15 +1493,9 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredFloatRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); static void NoDbgRegParms ssAllocateRequiredFloatReg(sqInt requiredReg); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms 
ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssNativeFlushTo(sqInt index); @@ -2850,7 +2844,7 @@ static sqInt NoDbgRegParms blockDispatchTargetsForperformarg(CogMethod *cogMethod, usqInt (*binaryFunction)(sqInt mcpc, sqInt arg), sqInt arg) { sqInt blockEntry; - sqInt end; + usqInt end; sqInt pc; sqInt result; usqInt targetpc; @@ -5644,7 +5638,7 @@ configureMNUCPICmethodOperandnumArgsdelta(CogMethod *cPIC, sqInt methodOperand, static sqInt NoDbgRegParms cPICCompactAndIsNowEmpty(CogMethod *cPIC) { - sqInt entryPoint; + usqInt entryPoint; sqInt followingAddress; sqInt i; sqInt methods[MaxCPICCases]; @@ -5745,7 +5739,7 @@ cPICHasForwardedClass(CogMethod *cPIC) static sqInt NoDbgRegParms cPICHasFreedTargets(CogMethod *cPIC) { - sqInt entryPoint; + usqInt entryPoint; sqInt i; sqInt pc; CogMethod *targetMethod; @@ -5900,7 +5894,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? 
(sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -6206,7 +6200,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static sqInt NoDbgRegParms +static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -6486,6 +6480,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -8099,7 +8096,7 @@ gMoveCwR(sqInt wordConstant, sqInt reg) /* Answer the address of the null byte at the end of the method map. */ /* Cogit>>#mapEndFor: */ -static sqInt NoDbgRegParms +static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod) { usqInt end; @@ -9630,7 +9627,7 @@ static void NoDbgRegParms relocateCallsInClosedPIC(CogMethod *cPIC) { sqInt callDelta; - sqInt entryPoint; + usqInt entryPoint; sqInt i; sqInt pc; sqLong refDelta; @@ -22416,6 +22413,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -22467,6 +22466,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -24109,7 
+24110,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -27166,7 +27167,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27418,7 +27419,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27702,7 +27703,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -27873,7 
+27875,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -28052,7 +28055,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -28089,7 +28093,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -28874,7 +28879,7 @@ genLowcodeBinaryInlinePrimitive(sqInt prim) sqInt i114; sqInt i115; sqInt i116; - sqInt i118; + sqInt i117; sqInt i119; sqInt i12; sqInt i120; @@ -29426,7 +29431,7 @@ 
genLowcodeBinaryInlinePrimitive(sqInt prim) /* Ensure we are not using a duplicated register. */ rOopTop17 = registerOrNone(ssTop()); index117 = ((simSpillBase < 0) ? 0 : simSpillBase); - for (i118 = index117; i118 <= (simStackPtr); i118 += 1) { + for (i117 = index117; i117 <= (simStackPtr); i117 += 1) { if ((registerOrNone(simStackAt(index117))) == rOopTop17) { goto l59; } @@ -32845,6 +32850,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) sqInt rTop16; sqInt rTop17; sqInt rTop18; + sqInt rTop19; sqInt rTop2; sqInt rTop20; sqInt rTop21; @@ -32859,7 +32865,6 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) sqInt rTop3; sqInt rTop30; sqInt rTop31; - sqInt rTop32; sqInt rTop4; sqInt rTop5; sqInt rTop6; @@ -33128,18 +33133,18 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 129: /* begin genLowcodeMalloc32 */ - rTop28 = NoReg; + rTop27 = NoReg; rResult8 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop28 = nativeRegisterOrNone(ssNativeTop()); + rTop27 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop28 == NoReg) { - rTop28 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop27 == NoReg) { + rTop27 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult8 = allocateRegNotConflictingWith(1U << rTop28); - assert(!(((rTop28 == NoReg) + rResult8 = allocateRegNotConflictingWith(1U << rTop27); + assert(!(((rTop27 == NoReg) || (rResult8 == NoReg)))); - size1 = rTop28; + size1 = rTop27; pointer7 = rResult8; nativePopToReg(ssNativeTop(), size1); ssNativePop(1); @@ -33170,10 +33175,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) /* begin genLowcodeMalloc64 */ /* begin allocateRegistersForLowcodeInteger2ResultInteger: */ topRegistersMask12 = 0; - rTop29 = (rNext10 = NoReg); + rTop28 = (rNext10 = NoReg); rResult9 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop29 = nativeRegisterOrNone(ssNativeTop()); + rTop28 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != 
NoReg) { rNext10 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -33185,17 +33190,17 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask12 = 1U << reg15; } } - if (rTop29 == NoReg) { - rTop29 = allocateRegNotConflictingWith(topRegistersMask12); + if (rTop28 == NoReg) { + rTop28 = allocateRegNotConflictingWith(topRegistersMask12); } if (rNext10 == NoReg) { - rNext10 = allocateRegNotConflictingWith(1U << rTop29); + rNext10 = allocateRegNotConflictingWith(1U << rTop28); } - assert(!(((rTop29 == NoReg) + assert(!(((rTop28 == NoReg) || (rNext10 == NoReg)))); - rResult9 = allocateFloatRegNotConflictingWith((1U << rTop29) | (1U << rNext10)); + rResult9 = allocateFloatRegNotConflictingWith((1U << rTop28) | (1U << rNext10)); assert(!((rResult9 == NoReg))); - sizeLow = rTop29; + sizeLow = rTop28; sizeHigh = rNext10; pointer8 = rResult9; nativePopToRegsecondReg(ssNativeTop(), sizeLow, sizeHigh); @@ -33225,10 +33230,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 131: /* begin genLowcodeMemcpy32 */ - rTop30 = (rNext13 = (rNextNext2 = NoReg)); + rTop29 = (rNext13 = (rNextNext2 = NoReg)); nativeValueIndex2 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop30 = nativeRegisterOrNone(ssNativeTop()); + rTop29 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext13 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -33247,7 +33252,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext2 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex2)); } } - if (rTop30 == NoReg) { + if (rTop29 == NoReg) { nextRegisterMask2 = 0; if (rNext13 != NoReg) { /* begin registerMaskFor: */ @@ -33256,11 +33261,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext2 != NoReg) { nextRegisterMask2 = nextRegisterMask2 | (1U << rNextNext2); } - rTop30 = allocateRegNotConflictingWith(nextRegisterMask2); + rTop29 = allocateRegNotConflictingWith(nextRegisterMask2); } if (rNext13 == NoReg) { /* begin 
registerMaskFor: */ - nextRegisterMask2 = 1U << rTop30; + nextRegisterMask2 = 1U << rTop29; if (rNextNext2 != NoReg) { nextRegisterMask2 = nextRegisterMask2 | (1U << rNextNext2); } @@ -33268,13 +33273,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext2 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask2 = (1U << rTop30) | (1U << rNext13); + nextRegisterMask2 = (1U << rTop29) | (1U << rNext13); rNextNext2 = allocateRegNotConflictingWith(nextRegisterMask2); } - assert(!(((rTop30 == NoReg) + assert(!(((rTop29 == NoReg) || ((rNext13 == NoReg) || (rNextNext2 == NoReg))))); - size3 = rTop30; + size3 = rTop29; source = rNext13; dest = rNextNext2; nativePopToReg(ssNativeTop(), size3); @@ -33299,10 +33304,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 132: /* begin genLowcodeMemcpy64 */ - rTop31 = (rNext14 = (rNextNext3 = NoReg)); + rTop30 = (rNext14 = (rNextNext3 = NoReg)); nativeValueIndex3 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop31 = nativeRegisterOrNone(ssNativeTop()); + rTop30 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext14 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -33321,7 +33326,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext3 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex3)); } } - if (rTop31 == NoReg) { + if (rTop30 == NoReg) { nextRegisterMask3 = 0; if (rNext14 != NoReg) { /* begin registerMaskFor: */ @@ -33330,11 +33335,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext3 != NoReg) { nextRegisterMask3 = nextRegisterMask3 | (1U << rNextNext3); } - rTop31 = allocateRegNotConflictingWith(nextRegisterMask3); + rTop30 = allocateRegNotConflictingWith(nextRegisterMask3); } if (rNext14 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask3 = 1U << rTop31; + nextRegisterMask3 = 1U << rTop30; if (rNextNext3 != NoReg) { nextRegisterMask3 = nextRegisterMask3 | (1U << rNextNext3); } @@ -33342,13 +33347,13 @@ 
genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext3 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask3 = (1U << rTop31) | (1U << rNext14); + nextRegisterMask3 = (1U << rTop30) | (1U << rNext14); rNextNext3 = allocateRegNotConflictingWith(nextRegisterMask3); } - assert(!(((rTop31 == NoReg) + assert(!(((rTop30 == NoReg) || ((rNext14 == NoReg) || (rNextNext3 == NoReg))))); - size4 = rTop31; + size4 = rTop30; source1 = rNext14; dest1 = rNextNext3; nativePopToReg(ssNativeTop(), size4); @@ -33376,9 +33381,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) size5 = extA; /* begin allocateRegistersForLowcodeInteger2: */ topRegistersMask13 = 0; - rTop32 = (rNext15 = NoReg); + rTop31 = (rNext15 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop32 = nativeRegisterOrNone(ssNativeTop()); + rTop31 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext15 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -33390,15 +33395,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask13 = 1U << reg16; } } - if (rTop32 == NoReg) { - rTop32 = allocateRegNotConflictingWith(topRegistersMask13); + if (rTop31 == NoReg) { + rTop31 = allocateRegNotConflictingWith(topRegistersMask13); } if (rNext15 == NoReg) { - rNext15 = allocateRegNotConflictingWith(1U << rTop32); + rNext15 = allocateRegNotConflictingWith(1U << rTop31); } - assert(!(((rTop32 == NoReg) + assert(!(((rTop31 == NoReg) || (rNext15 == NoReg)))); - source2 = rTop32; + source2 = rTop31; dest2 = rNext15; nativePopToReg(ssNativeTop(), source2); ssNativePop(1); @@ -33917,10 +33922,10 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 164: /* begin genLowcodePointerAddOffset64 */ /* begin allocateRegistersForLowcodeInteger3: */ - rTop20 = (rNext7 = (rNextNext1 = NoReg)); + rTop19 = (rNext7 = (rNextNext1 = NoReg)); nativeValueIndex1 = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop20 = nativeRegisterOrNone(ssNativeTop()); + 
rTop19 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext7 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -33939,7 +33944,7 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) rNextNext1 = nativeRegisterOrNone(ssNativeValue(nativeValueIndex1)); } } - if (rTop20 == NoReg) { + if (rTop19 == NoReg) { nextRegisterMask1 = 0; if (rNext7 != NoReg) { /* begin registerMaskFor: */ @@ -33948,11 +33953,11 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) if (rNextNext1 != NoReg) { nextRegisterMask1 = nextRegisterMask1 | (1U << rNextNext1); } - rTop20 = allocateRegNotConflictingWith(nextRegisterMask1); + rTop19 = allocateRegNotConflictingWith(nextRegisterMask1); } if (rNext7 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask1 = 1U << rTop20; + nextRegisterMask1 = 1U << rTop19; if (rNextNext1 != NoReg) { nextRegisterMask1 = nextRegisterMask1 | (1U << rNextNext1); } @@ -33960,13 +33965,13 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) } if (rNextNext1 == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask1 = (1U << rTop20) | (1U << rNext7); + nextRegisterMask1 = (1U << rTop19) | (1U << rNext7); rNextNext1 = allocateRegNotConflictingWith(nextRegisterMask1); } - assert(!(((rTop20 == NoReg) + assert(!(((rTop19 == NoReg) || ((rNext7 == NoReg) || (rNextNext1 == NoReg))))); - offsetLow = rTop20; + offsetLow = rTop19; offsetHigh = rNext7; base2 = rNextNext1; nativePopToRegsecondReg(ssNativeTop(), offsetLow, offsetHigh); @@ -33981,9 +33986,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 165: /* begin genLowcodePointerEqual */ topRegistersMask8 = 0; - rTop21 = (rNext8 = NoReg); + rTop20 = (rNext8 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop21 = nativeRegisterOrNone(ssNativeTop()); + rTop20 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext8 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -33995,15 +34000,15 @@ 
genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask8 = 1U << reg9; } } - if (rTop21 == NoReg) { - rTop21 = allocateRegNotConflictingWith(topRegistersMask8); + if (rTop20 == NoReg) { + rTop20 = allocateRegNotConflictingWith(topRegistersMask8); } if (rNext8 == NoReg) { - rNext8 = allocateRegNotConflictingWith(1U << rTop21); + rNext8 = allocateRegNotConflictingWith(1U << rTop20); } - assert(!(((rTop21 == NoReg) + assert(!(((rTop20 == NoReg) || (rNext8 == NoReg)))); - second4 = rTop21; + second4 = rTop20; first4 = rNext8; nativePopToReg(ssNativeTop(), second4); ssNativePop(1); @@ -34027,9 +34032,9 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 166: /* begin genLowcodePointerNotEqual */ topRegistersMask9 = 0; - rTop22 = (rNext9 = NoReg); + rTop21 = (rNext9 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop22 = nativeRegisterOrNone(ssNativeTop()); + rTop21 = nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext9 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -34041,15 +34046,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) topRegistersMask9 = 1U << reg10; } } - if (rTop22 == NoReg) { - rTop22 = allocateRegNotConflictingWith(topRegistersMask9); + if (rTop21 == NoReg) { + rTop21 = allocateRegNotConflictingWith(topRegistersMask9); } if (rNext9 == NoReg) { - rNext9 = allocateRegNotConflictingWith(1U << rTop22); + rNext9 = allocateRegNotConflictingWith(1U << rTop21); } - assert(!(((rTop22 == NoReg) + assert(!(((rTop21 == NoReg) || (rNext9 == NoReg)))); - second5 = rTop22; + second5 = rTop21; first5 = rNext9; nativePopToReg(ssNativeTop(), second5); ssNativePop(1); @@ -34072,15 +34077,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 167: /* begin genLowcodePointerToInt32 */ - rTop23 = NoReg; + rTop22 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop23 = nativeRegisterOrNone(ssNativeTop()); + rTop22 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop23 == NoReg) { - 
rTop23 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop22 == NoReg) { + rTop22 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop23 == NoReg))); - pointer5 = rTop23; + assert(!((rTop22 == NoReg))); + pointer5 = rTop22; nativePopToReg(ssNativeTop(), pointer5); ssNativePop(1); ssPushNativeRegister(pointer5); @@ -34089,20 +34094,20 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 168: /* begin genLowcodePointerToInt64 */ /* begin allocateRegistersForLowcodeIntegerResultInteger2: */ - rTop24 = NoReg; + rTop23 = NoReg; rResult7 = (rResult7 = NoReg); if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop24 = nativeRegisterOrNone(ssNativeTop()); + rTop23 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop24 == NoReg) { - rTop24 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop23 == NoReg) { + rTop23 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - rResult7 = allocateRegNotConflictingWith(1U << rTop24); - rResult22 = allocateRegNotConflictingWith((1U << rTop24) | (1U << rResult7)); - assert(!(((rTop24 == NoReg) + rResult7 = allocateRegNotConflictingWith(1U << rTop23); + rResult22 = allocateRegNotConflictingWith((1U << rTop23) | (1U << rResult7)); + assert(!(((rTop23 == NoReg) || ((rResult7 == NoReg) || (rResult22 == NoReg))))); - pointer6 = rTop24; + pointer6 = rTop23; resultLow1 = rResult7; resultHigh1 = rResult22; nativePopToReg(ssNativeTop(), pointer6); @@ -34148,30 +34153,30 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 171: /* begin genLowcodePopInt32 */ - rTop25 = NoReg; + rTop24 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop25 = nativeRegisterOrNone(ssNativeTop()); + rTop24 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop25 == NoReg) { - rTop25 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop24 == NoReg) { + rTop24 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop25 == NoReg))); - value10 = 
rTop25; + assert(!((rTop24 == NoReg))); + value10 = rTop24; nativePopToReg(ssNativeTop(), value10); ssNativePop(1); return 0; case 172: /* begin genLowcodePopInt64 */ - rTop26 = NoReg; + rTop25 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop26 = nativeRegisterOrNone(ssNativeTop()); + rTop25 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop26 == NoReg) { - rTop26 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop25 == NoReg) { + rTop25 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop26 == NoReg))); - value11 = rTop26; + assert(!((rTop25 == NoReg))); + value11 = rTop25; nativePopToReg(ssNativeTop(), value11); ssNativePop(1); return 0; @@ -34184,15 +34189,15 @@ genLowcodeUnaryInlinePrimitive3(sqInt prim) case 174: /* begin genLowcodePopPointer */ - rTop27 = NoReg; + rTop26 = NoReg; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop27 = nativeRegisterOrNone(ssNativeTop()); + rTop26 = nativeRegisterOrNone(ssNativeTop()); } - if (rTop27 == NoReg) { - rTop27 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); + if (rTop26 == NoReg) { + rTop26 = allocateRegNotConflictingWith(0 /* emptyRegisterMask */); } - assert(!((rTop27 == NoReg))); - pointerValue7 = rTop27; + assert(!((rTop26 == NoReg))); + pointerValue7 = rTop26; nativePopToReg(ssNativeTop(), pointerValue7); ssNativePop(1); return 0; @@ -34498,7 +34503,7 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) sqInt rTop10; sqInt rTop14; sqInt rTop15; - sqInt rTop17; + sqInt rTop16; sqInt rTop18; sqInt rTop19; sqInt rTop2; @@ -35171,10 +35176,10 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) case 205: /* begin genLowcodeStoreInt64ToMemory */ /* begin allocateRegistersForLowcodeInteger3: */ - rTop17 = (rNext8 = (rNextNext = NoReg)); + rTop16 = (rNext8 = (rNextNext = NoReg)); nativeValueIndex = 1; if ((nativeRegisterOrNone(ssNativeTop())) != NoReg) { - rTop17 = nativeRegisterOrNone(ssNativeTop()); + rTop16 = 
nativeRegisterOrNone(ssNativeTop()); if ((nativeRegisterSecondOrNone(ssNativeTop())) != NoReg) { rNext8 = nativeRegisterSecondOrNone(ssNativeTop()); } @@ -35193,7 +35198,7 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) rNextNext = nativeRegisterOrNone(ssNativeValue(nativeValueIndex)); } } - if (rTop17 == NoReg) { + if (rTop16 == NoReg) { nextRegisterMask = 0; if (rNext8 != NoReg) { /* begin registerMaskFor: */ @@ -35202,11 +35207,11 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) if (rNextNext != NoReg) { nextRegisterMask = nextRegisterMask | (1U << rNextNext); } - rTop17 = allocateRegNotConflictingWith(nextRegisterMask); + rTop16 = allocateRegNotConflictingWith(nextRegisterMask); } if (rNext8 == NoReg) { /* begin registerMaskFor: */ - nextRegisterMask = 1U << rTop17; + nextRegisterMask = 1U << rTop16; if (rNextNext != NoReg) { nextRegisterMask = nextRegisterMask | (1U << rNextNext); } @@ -35214,13 +35219,13 @@ genLowcodeUnaryInlinePrimitive4(sqInt prim) } if (rNextNext == NoReg) { /* begin registerMaskFor:and: */ - nextRegisterMask = (1U << rTop17) | (1U << rNext8); + nextRegisterMask = (1U << rTop16) | (1U << rNext8); rNextNext = allocateRegNotConflictingWith(nextRegisterMask); } - assert(!(((rTop17 == NoReg) + assert(!(((rTop16 == NoReg) || ((rNext8 == NoReg) || (rNextNext == NoReg))))); - pointer4 = rTop17; + pointer4 = rTop16; valueLow3 = rNext8; valueHigh3 = rNextNext; nativePopToReg(ssNativeTop(), pointer4); @@ -36773,7 +36778,6 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) AbstractInstruction *anInstruction113; AbstractInstruction *anInstruction114; AbstractInstruction *anInstruction115; - AbstractInstruction *anInstruction116; AbstractInstruction *anInstruction12; AbstractInstruction *anInstruction13; AbstractInstruction *anInstruction14; @@ -36789,6 +36793,7 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) AbstractInstruction *anInstruction23; AbstractInstruction *anInstruction24; AbstractInstruction *anInstruction25; + AbstractInstruction *anInstruction26; 
AbstractInstruction *anInstruction3; AbstractInstruction *anInstruction4; AbstractInstruction *anInstruction5; @@ -38395,12 +38400,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump4 = gJumpFPGreater(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction17 = genoperandoperand(MoveCqR, 1, value11); + anInstruction18 = genoperandoperand(MoveCqR, 1, value11); /* begin Jump: */ contJump4 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump4, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction18 = genoperandoperand(MoveCqR, 0, value11); + anInstruction17 = genoperandoperand(MoveCqR, 0, value11); jmpTarget(contJump4, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value11); return 0; @@ -38498,12 +38503,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump5 = gJumpFPEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction19 = genoperandoperand(MoveCqR, 1, value13); + anInstruction20 = genoperandoperand(MoveCqR, 1, value13); /* begin Jump: */ contJump5 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump5, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction110 = genoperandoperand(MoveCqR, 0, value13); + anInstruction19 = genoperandoperand(MoveCqR, 0, value13); jmpTarget(contJump5, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value13); return 0; @@ -38767,12 +38772,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump6 = gJumpFPNotEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction20 = genoperandoperand(MoveCqR, 1, value19); + anInstruction21 = genoperandoperand(MoveCqR, 1, value19); /* begin Jump: */ contJump6 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump6, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin 
checkQuickConstant:forInstruction: */ - anInstruction111 = genoperandoperand(MoveCqR, 0, value19); + anInstruction110 = genoperandoperand(MoveCqR, 0, value19); jmpTarget(contJump6, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value19); return 0; @@ -38813,12 +38818,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump7 = gJumpFPLessOrEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction21 = genoperandoperand(MoveCqR, 1, value20); + anInstruction22 = genoperandoperand(MoveCqR, 1, value20); /* begin Jump: */ contJump7 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump7, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction112 = genoperandoperand(MoveCqR, 0, value20); + anInstruction111 = genoperandoperand(MoveCqR, 0, value20); jmpTarget(contJump7, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value20); return 0; @@ -38859,12 +38864,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump8 = gJumpFPLess(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction22 = genoperandoperand(MoveCqR, 1, value21); + anInstruction23 = genoperandoperand(MoveCqR, 1, value21); /* begin Jump: */ contJump8 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump8, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction113 = genoperandoperand(MoveCqR, 0, value21); + anInstruction112 = genoperandoperand(MoveCqR, 0, value21); jmpTarget(contJump8, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value21); return 0; @@ -38905,12 +38910,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump9 = gJumpFPGreaterOrEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction23 = genoperandoperand(MoveCqR, 1, value22); + anInstruction24 = 
genoperandoperand(MoveCqR, 1, value22); /* begin Jump: */ contJump9 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump9, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction114 = genoperandoperand(MoveCqR, 0, value22); + anInstruction113 = genoperandoperand(MoveCqR, 0, value22); jmpTarget(contJump9, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value22); return 0; @@ -38951,12 +38956,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump10 = gJumpFPGreater(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction24 = genoperandoperand(MoveCqR, 1, value23); + anInstruction25 = genoperandoperand(MoveCqR, 1, value23); /* begin Jump: */ contJump10 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump10, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction115 = genoperandoperand(MoveCqR, 0, value23); + anInstruction114 = genoperandoperand(MoveCqR, 0, value23); jmpTarget(contJump10, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value23); return 0; @@ -39054,12 +39059,12 @@ genLowcodeUnaryInlinePrimitive(sqInt prim) /* True result */ falseJump11 = gJumpFPEqual(0); /* begin checkQuickConstant:forInstruction: */ - anInstruction25 = genoperandoperand(MoveCqR, 1, value25); + anInstruction26 = genoperandoperand(MoveCqR, 1, value25); /* begin Jump: */ contJump11 = genoperand(Jump, ((sqInt)0)); jmpTarget(falseJump11, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); /* begin checkQuickConstant:forInstruction: */ - anInstruction116 = genoperandoperand(MoveCqR, 0, value25); + anInstruction115 = genoperandoperand(MoveCqR, 0, value25); jmpTarget(contJump11, genoperandoperand(Label, (labelCounter += 1), bytecodePC)); ssPushNativeRegister(value25); return 0; @@ -39420,7 +39425,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); 
voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -39449,7 +39455,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -39576,7 +39583,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -39632,7 +39640,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << 
SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -40055,16 +40064,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -40297,7 +40310,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -40311,7 +40325,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -40386,7 +40401,8 @@ 
genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -40433,7 +40449,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -40471,7 +40488,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -40870,11 +40888,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. 
*/ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -41405,45 +41426,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. 
flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredFloatRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredFloatRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -41544,39 +41526,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - 
ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spurlowcodesrc/vm/cointerp.c b/spurlowcodesrc/vm/cointerp.c index 1271b88022..173d02bfff 100644 --- a/spurlowcodesrc/vm/cointerp.c +++ b/spurlowcodesrc/vm/cointerp.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2568,7 +2568,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -5394,18 +5394,18 @@ interpret(void) fp = (thePage->headFP); if (fp == theFP) { frameAbove = 0; - goto l3177; + goto l3176; } while (((callerFP = pointerForOop(longAt(fp + FoxSavedFP)))) != 0) { if (callerFP == theFP) { frameAbove = fp; - goto l3177; + goto l3176; } fp = callerFP; } error("did not find theFP in stack page"); frameAbove = 0; - l3177: /* end findFrameAbove:inPage: */; + l3176: /* end findFrameAbove:inPage: */; /* begin 
newStackPage */ lruOrFree = (GIV(mostRecentlyUsedPage)->nextPage); if (((lruOrFree->baseFP)) == 0) { @@ -34922,12 +34922,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -63421,11 +63429,11 @@ becomewithtwoWaycopyHash(sqInt array1, sqInt array2, sqInt twoWayFlag, sqInt cop sp = longAt((array1 + BaseHeaderSize) + (((int)((usqInt)(StackPointerIndex) << (shiftForWord()))))); if (!((sp & 1))) { contextSize = 0; - goto l27; + goto l28; } assert((ReceiverIndex + ((sp >> 1))) < (lengthOf(array1))); contextSize = (sp >> 1); - l27: /* end fetchStackPointerOf: */; + l28: /* end fetchStackPointerOf: */; fieldOffset = (((CtxtTempFrameStart - 1) + contextSize) * BytesPerOop) + BaseHeaderSize; goto l23; } diff --git a/spurlowcodesrc/vm/cointerp.h b/spurlowcodesrc/vm/cointerp.h index 82de70daf5..0f5fbf1716 100644 --- a/spurlowcodesrc/vm/cointerp.h +++ b/spurlowcodesrc/vm/cointerp.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spurlowcodesrc/vm/gcc3x-cointerp.c b/spurlowcodesrc/vm/gcc3x-cointerp.c index a9ed5a5c0a..72490fea27 100644 --- a/spurlowcodesrc/vm/gcc3x-cointerp.c +++ b/spurlowcodesrc/vm/gcc3x-cointerp.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2571,7 +2571,7 @@ static signed char 
primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -5403,18 +5403,18 @@ interpret(void) fp = (thePage->headFP); if (fp == theFP) { frameAbove = 0; - goto l3177; + goto l3176; } while (((callerFP = pointerForOop(longAt(fp + FoxSavedFP)))) != 0) { if (callerFP == theFP) { frameAbove = fp; - goto l3177; + goto l3176; } fp = callerFP; } error("did not find theFP in stack page"); frameAbove = 0; - l3177: /* end findFrameAbove:inPage: */; + l3176: /* end findFrameAbove:inPage: */; /* begin newStackPage */ lruOrFree = (GIV(mostRecentlyUsedPage)->nextPage); if (((lruOrFree->baseFP)) == 0) { @@ -34931,12 +34931,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. 
We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. */ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -63430,11 +63438,11 @@ becomewithtwoWaycopyHash(sqInt array1, sqInt array2, sqInt twoWayFlag, sqInt cop sp = longAt((array1 + BaseHeaderSize) + (((int)((usqInt)(StackPointerIndex) << (shiftForWord()))))); if (!((sp & 1))) { contextSize = 0; - goto l27; + goto l28; } assert((ReceiverIndex + ((sp >> 1))) < (lengthOf(array1))); contextSize = (sp >> 1); - l27: /* end fetchStackPointerOf: */; + l28: /* end fetchStackPointerOf: */; fieldOffset = (((CtxtTempFrameStart - 1) + contextSize) * BytesPerOop) + BaseHeaderSize; goto l23; } diff --git a/spursista64src/vm/cogit.h b/spursista64src/vm/cogit.h index dcbdb2b508..b137fef6e4 100644 --- a/spursista64src/vm/cogit.h +++ b/spursista64src/vm/cogit.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spursista64src/vm/cogitX64SysV.c b/spursista64src/vm/cogitX64SysV.c index 23198cfc62..42450fe98f 100644 --- a/spursista64src/vm/cogitX64SysV.c +++ b/spursista64src/vm/cogitX64SysV.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - SistaCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + SistaCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "SistaCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "SistaCogit VMMaker.oscog-eem.2424 uuid: 
84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -1308,13 +1308,7 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -5669,7 +5663,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? 
(sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -6241,6 +6235,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -18005,6 +18002,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -18056,6 +18055,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -25086,7 +25087,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -27789,7 +27790,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + 
(sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -28069,7 +28070,8 @@ genBinaryAtConstInlinePrimitive(sqInt primIndex) val = ((ssTop())->constant); if (primIndex == 65) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); rr = ReceiverResultReg; } @@ -28085,7 +28087,8 @@ genBinaryAtConstInlinePrimitive(sqInt primIndex) genLoadSlotsourceRegdestReg(zeroBasedIndex, rr, rr); break; case 65: - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); if (CallerSavedRegisterMask & (1U << ReceiverResultReg)) { @@ -29516,7 +29519,8 @@ genCounterTripOnlyJumpIfto(sqInt boolean, sqInt targetBytecodePC) /* counters are increased / decreased in the inlined branch */ /* We need SendNumArgsReg because of the mustBeBooleanTrampoline */ counterIndex += 1; - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin checkQuickConstant:forInstruction: */ anInstruction1 = genoperandoperand(MoveCqR, 1, SendNumArgsReg); mustBeBooleanTrampoline = genCallMustBeBooleanFor(boolean); @@ -30092,7 +30096,6 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) However, if one of the operand is an unnanotable constant, does not allocate a register for it (machine code will use operations on constants). 
*/ unforwardArg = !(isUnannotatableConstant(ssTop())); - rcvrReg = (argReg = NoReg); /* begin allocateEqualsEqualsRegistersArgNeedsReg:rcvrNeedsReg:into: */ assert(unforwardArg || (unforwardRcvr)); @@ -30145,13 +30148,6 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) && (rcvrReg3 == NoReg)))); rcvrReg = rcvrReg3; argReg = argReg3; - if (argReg != NoReg) { - /* begin registerMaskFor: */ - regMask = 1ULL << argReg; - } - if (rcvrReg != NoReg) { - regMask = regMask | (1ULL << rcvrReg); - } if (!(((branchDescriptor->isBranchTrue)) || ((branchDescriptor->isBranchFalse)))) { return genIdenticalNoBranchArgIsConstantrcvrIsConstantargRegrcvrRegorNotIf(!unforwardArg, !unforwardRcvr, argReg, rcvrReg, orNot); @@ -30171,6 +30167,20 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) if (unforwardRcvr) { genEnsureOopInRegNotForwardedscratchReg(rcvrReg, TempReg); } + if (argReg == NoReg) { + /* begin registerMaskFor: */ + regMask = 1ULL << rcvrReg; + } + else { + if (rcvrReg == NoReg) { + /* begin registerMaskFor: */ + regMask = 1ULL << argReg; + } + else { + /* begin registerMaskFor:and: */ + regMask = (1ULL << rcvrReg) | (1ULL << argReg); + } + } counterReg = allocateRegNotConflictingWith(regMask); /* begin genExecutionCountLogicInto:counterReg: */ counterAddress1 = counters + (CounterBytes * counterIndex); @@ -30790,7 +30800,8 @@ genJumpIfto(sqInt boolean, sqInt targetBytecodePC) } popToReg(desc, TempReg); ssPop(1); - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin Label */ retry = genoperandoperand(Label, (labelCounter += 1), bytecodePC); /* begin genExecutionCountLogicInto:counterReg: */ @@ -31220,8 +31231,10 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex rcvrReg = allocateRegForStackEntryAtnotConflictingWith(2, 1ULL << valReg); } } - ssAllocateRequiredRegupThrough(valReg, simStackPtr - 2); 
- ssAllocateRequiredRegupThrough(rcvrReg, simStackPtr - 3); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << valReg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << rcvrReg, simStackPtr - 3, simNativeStackPtr); popToReg(ssTop(), valReg); ssPop(1); if (((ssTop())->spilled)) { @@ -31263,7 +31276,8 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(0, ClassReg); @@ -31294,7 +31308,8 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(0, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -31588,7 +31603,8 @@ genPointerAtPutStoreCheck(sqInt needsStoreCheck) /* Free ReceiverResultReg if it was not free */ rThird1 = ReceiverResultReg; - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); } else { @@ -34018,7 +34034,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) 
== (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -34276,7 +34292,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -34415,7 +34432,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -34555,7 +34573,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -34592,7 +34611,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - 
ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -35348,7 +35368,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -35377,7 +35398,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -35504,7 +35526,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) 
AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -35560,7 +35583,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -35982,16 +36006,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1ULL << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -36063,7 +36091,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if 
(shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -36077,7 +36106,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -36151,7 +36181,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -36197,7 +36228,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -36234,7 +36266,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); 
voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -36575,11 +36608,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -37099,45 +37135,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1ULL << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1ULL << requiredReg1) | (1ULL << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1ULL << requiredReg1) | (1ULL << requiredReg2)) | (1ULL << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -37176,39 +37173,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1ULL << requiredReg1) 
| (1ULL << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spursista64src/vm/cogitX64WIN64.c b/spursista64src/vm/cogitX64WIN64.c index ed5f46af29..23c4f33751 100644 --- a/spursista64src/vm/cogitX64WIN64.c +++ b/spursista64src/vm/cogitX64WIN64.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - SistaCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + SistaCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "SistaCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "SistaCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -1308,13 +1308,7 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void 
NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -5677,7 +5671,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -6249,6 +6243,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. 
*/ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -18013,6 +18010,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -18064,6 +18063,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -25115,7 +25116,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -27818,7 +27819,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { 
compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -28098,7 +28099,8 @@ genBinaryAtConstInlinePrimitive(sqInt primIndex) val = ((ssTop())->constant); if (primIndex == 65) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); rr = ReceiverResultReg; } @@ -28114,7 +28116,8 @@ genBinaryAtConstInlinePrimitive(sqInt primIndex) genLoadSlotsourceRegdestReg(zeroBasedIndex, rr, rr); break; case 65: - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); if (CallerSavedRegisterMask & (1U << ReceiverResultReg)) { @@ -29545,7 +29548,8 @@ genCounterTripOnlyJumpIfto(sqInt boolean, sqInt targetBytecodePC) /* counters are increased / decreased in the inlined branch */ /* We need SendNumArgsReg because of the mustBeBooleanTrampoline */ counterIndex += 1; - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin checkQuickConstant:forInstruction: */ anInstruction1 = genoperandoperand(MoveCqR, 1, SendNumArgsReg); mustBeBooleanTrampoline = genCallMustBeBooleanFor(boolean); @@ -30121,7 +30125,6 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) However, if one of the operand is an unnanotable constant, does not allocate a register for it (machine code will use operations on constants). 
*/ unforwardArg = !(isUnannotatableConstant(ssTop())); - rcvrReg = (argReg = NoReg); /* begin allocateEqualsEqualsRegistersArgNeedsReg:rcvrNeedsReg:into: */ assert(unforwardArg || (unforwardRcvr)); @@ -30174,13 +30177,6 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) && (rcvrReg3 == NoReg)))); rcvrReg = rcvrReg3; argReg = argReg3; - if (argReg != NoReg) { - /* begin registerMaskFor: */ - regMask = 1ULL << argReg; - } - if (rcvrReg != NoReg) { - regMask = regMask | (1ULL << rcvrReg); - } if (!(((branchDescriptor->isBranchTrue)) || ((branchDescriptor->isBranchFalse)))) { return genIdenticalNoBranchArgIsConstantrcvrIsConstantargRegrcvrRegorNotIf(!unforwardArg, !unforwardRcvr, argReg, rcvrReg, orNot); @@ -30200,6 +30196,20 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) if (unforwardRcvr) { genEnsureOopInRegNotForwardedscratchReg(rcvrReg, TempReg); } + if (argReg == NoReg) { + /* begin registerMaskFor: */ + regMask = 1ULL << rcvrReg; + } + else { + if (rcvrReg == NoReg) { + /* begin registerMaskFor: */ + regMask = 1ULL << argReg; + } + else { + /* begin registerMaskFor:and: */ + regMask = (1ULL << rcvrReg) | (1ULL << argReg); + } + } counterReg = allocateRegNotConflictingWith(regMask); /* begin genExecutionCountLogicInto:counterReg: */ counterAddress1 = counters + (CounterBytes * counterIndex); @@ -30819,7 +30829,8 @@ genJumpIfto(sqInt boolean, sqInt targetBytecodePC) } popToReg(desc, TempReg); ssPop(1); - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin Label */ retry = genoperandoperand(Label, (labelCounter += 1), bytecodePC); /* begin genExecutionCountLogicInto:counterReg: */ @@ -31249,8 +31260,10 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex rcvrReg = allocateRegForStackEntryAtnotConflictingWith(2, 1ULL << valReg); } } - ssAllocateRequiredRegupThrough(valReg, simStackPtr - 2); 
- ssAllocateRequiredRegupThrough(rcvrReg, simStackPtr - 3); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << valReg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << rcvrReg, simStackPtr - 3, simNativeStackPtr); popToReg(ssTop(), valReg); ssPop(1); if (((ssTop())->spilled)) { @@ -31292,7 +31305,8 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(0, ClassReg); @@ -31323,7 +31337,8 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(0, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -31617,7 +31632,8 @@ genPointerAtPutStoreCheck(sqInt needsStoreCheck) /* Free ReceiverResultReg if it was not free */ rThird1 = ReceiverResultReg; - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); } else { @@ -34047,7 +34063,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) 
== (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -34305,7 +34321,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -34444,7 +34461,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1ULL << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -34584,7 +34602,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -34621,7 +34640,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - 
ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -35377,7 +35397,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -35406,7 +35427,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -35533,7 +35555,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) 
AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -35589,7 +35612,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -36011,16 +36035,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1ULL << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -36092,7 +36120,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if 
(shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -36106,7 +36135,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -36180,7 +36210,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -36226,7 +36257,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -36263,7 +36295,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); 
voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -36604,11 +36637,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -37128,45 +37164,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1ULL << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1ULL << requiredReg1) | (1ULL << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1ULL << requiredReg1) | (1ULL << requiredReg2)) | (1ULL << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -37205,39 +37202,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1ULL << requiredReg1) 
| (1ULL << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1ULL << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spursista64src/vm/cointerp.c b/spursista64src/vm/cointerp.c index 113d0e17f0..e0fe497cd1 100644 --- a/spursista64src/vm/cointerp.c +++ b/spursista64src/vm/cointerp.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2579,7 +2579,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -18696,12 
+18696,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void diff --git a/spursista64src/vm/cointerp.h b/spursista64src/vm/cointerp.h index 462d541f02..1c5596a5bf 100644 --- a/spursista64src/vm/cointerp.h +++ b/spursista64src/vm/cointerp.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spursista64src/vm/gcc3x-cointerp.c b/spursista64src/vm/gcc3x-cointerp.c index be2643ee71..9a4ef154dd 100644 --- a/spursista64src/vm/gcc3x-cointerp.c +++ b/spursista64src/vm/gcc3x-cointerp.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2582,7 +2582,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -18705,12 +18705,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For 
RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void diff --git a/spursistasrc/vm/cogit.h b/spursistasrc/vm/cogit.h index dcbdb2b508..b137fef6e4 100644 --- a/spursistasrc/vm/cogit.h +++ b/spursistasrc/vm/cogit.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spursistasrc/vm/cogitARMv5.c b/spursistasrc/vm/cogitARMv5.c index cdc0b07caf..8f8e4aab79 100644 --- a/spursistasrc/vm/cogitARMv5.c +++ b/spursistasrc/vm/cogitARMv5.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - SistaCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + SistaCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "SistaCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "SistaCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -656,7 +656,7 @@ static sqInt extBBytecode(void); static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); -static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod 
*cogMethod); static sqInt NoDbgRegParms findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -726,7 +726,7 @@ static BytecodeDescriptor * loadBytesAndGetDescriptor(void); static void NoDbgRegParms loadSubsequentBytesForDescriptorat(BytecodeDescriptor *descriptor, sqInt pc); static AbstractInstruction * NoDbgRegParms gMoveAwR(sqInt address, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveCwR(sqInt wordConstant, sqInt reg); -static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); static sqInt NoDbgRegParms mapForperformUntilarg(CogMethod *cogMethod, sqInt (*functionSymbol)(sqInt annotation, char *mcpc, sqInt arg), sqInt arg); static sqInt NoDbgRegParms mapObjectReferencesInClosedPIC(CogMethod *cPIC); static void mapObjectReferencesInGeneratedRuntime(void); @@ -1312,13 +1312,7 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms 
ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -7517,7 +7511,7 @@ static sqInt NoDbgRegParms blockDispatchTargetsForperformarg(CogMethod *cogMethod, usqInt (*binaryFunction)(sqInt mcpc, sqInt arg), sqInt arg) { sqInt blockEntry; - usqInt end; + sqInt end; sqInt pc; sqInt result; usqInt targetpc; @@ -10356,7 +10350,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -10649,7 +10643,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -10929,6 +10923,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -12382,7 +12379,7 @@ gMoveCwR(sqInt wordConstant, sqInt reg) /* Answer the address of the null byte at the end of the method map. 
*/ /* Cogit>>#mapEndFor: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod) { usqInt end; @@ -23245,6 +23242,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -23298,6 +23297,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -25475,7 +25476,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -27889,7 +27890,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { 
compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -28174,7 +28175,8 @@ genBinaryAtConstInlinePrimitive(sqInt primIndex) val = ((ssTop())->constant); if (primIndex == 65) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); rr = ReceiverResultReg; } @@ -28190,7 +28192,8 @@ genBinaryAtConstInlinePrimitive(sqInt primIndex) genLoadSlotsourceRegdestReg(zeroBasedIndex, rr, rr); break; case 65: - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); if (CallerSavedRegisterMask & (1U << ReceiverResultReg)) { @@ -29724,7 +29727,8 @@ genCounterTripOnlyJumpIfto(sqInt boolean, sqInt targetBytecodePC) /* counters are increased / decreased in the inlined branch */ /* We need SendNumArgsReg because of the mustBeBooleanTrampoline */ counterIndex += 1; - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin checkQuickConstant:forInstruction: */ anInstruction1 = genoperandoperand(MoveCqR, 1, SendNumArgsReg); if (usesOutOfLineLiteral(anInstruction1)) { @@ -30334,7 +30338,6 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) However, if one of the operand is an unnanotable constant, does not allocate a register for it (machine code will use operations on constants). 
*/ unforwardArg = !(isUnannotatableConstant(ssTop())); - rcvrReg = (argReg = NoReg); /* begin allocateEqualsEqualsRegistersArgNeedsReg:rcvrNeedsReg:into: */ assert(unforwardArg || (unforwardRcvr)); @@ -30389,13 +30392,6 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) && (rcvrReg3 == NoReg)))); rcvrReg = rcvrReg3; argReg = argReg3; - if (argReg != NoReg) { - /* begin registerMaskFor: */ - regMask = 1U << argReg; - } - if (rcvrReg != NoReg) { - regMask = regMask | (1U << rcvrReg); - } if (!(((branchDescriptor->isBranchTrue)) || ((branchDescriptor->isBranchFalse)))) { return genIdenticalNoBranchArgIsConstantrcvrIsConstantargRegrcvrRegorNotIf(!unforwardArg, !unforwardRcvr, argReg, rcvrReg, orNot); @@ -30415,6 +30411,20 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) if (unforwardRcvr) { genEnsureOopInRegNotForwardedscratchReg(rcvrReg, TempReg); } + if (argReg == NoReg) { + /* begin registerMaskFor: */ + regMask = 1U << rcvrReg; + } + else { + if (rcvrReg == NoReg) { + /* begin registerMaskFor: */ + regMask = 1U << argReg; + } + else { + /* begin registerMaskFor:and: */ + regMask = (1U << rcvrReg) | (1U << argReg); + } + } counterReg = allocateRegNotConflictingWith(regMask); /* begin genExecutionCountLogicInto:counterReg: */ counterAddress1 = counters + (CounterBytes * counterIndex); @@ -31069,7 +31079,8 @@ genJumpIfto(sqInt boolean, sqInt targetBytecodePC) } popToReg(desc, TempReg); ssPop(1); - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin Label */ retry = genoperandoperand(Label, (labelCounter += 1), bytecodePC); /* begin genExecutionCountLogicInto:counterReg: */ @@ -31536,8 +31547,10 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex rcvrReg = allocateRegForStackEntryAtnotConflictingWith(2, 1U << valReg); } } - ssAllocateRequiredRegupThrough(valReg, simStackPtr - 2); - 
ssAllocateRequiredRegupThrough(rcvrReg, simStackPtr - 3); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << valReg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << rcvrReg, simStackPtr - 3, simNativeStackPtr); popToReg(ssTop(), valReg); ssPop(1); if (((ssTop())->spilled)) { @@ -31584,7 +31597,8 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(0, ClassReg); @@ -31618,7 +31632,8 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(0, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -31925,7 +31940,8 @@ genPointerAtPutStoreCheck(sqInt needsStoreCheck) /* Free ReceiverResultReg if it was not free */ rThird1 = ReceiverResultReg; - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); } else { @@ -34400,7 +34416,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == 
(fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -34664,7 +34680,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -34803,7 +34820,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -34943,7 +34961,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -34980,7 +34999,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - 
ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -35793,7 +35813,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -35822,7 +35843,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -35952,7 +35974,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) 
AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -36013,7 +36036,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -36454,16 +36478,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -36535,7 +36563,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if 
(shouldAnnotateObjectReference(association)) { annotateobjRef(gMoveCwR(association, ReceiverResultReg), association); @@ -36552,7 +36581,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -36629,7 +36659,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -36678,7 +36709,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -36715,7 +36747,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = 
frameOffsetOfTemporary(remoteTempIndex); @@ -37064,11 +37097,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -37588,45 +37624,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -37665,39 +37662,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << 
requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spursistasrc/vm/cogitIA32.c b/spursistasrc/vm/cogitIA32.c index 781dd09dc1..b2f9b2de73 100644 --- a/spursistasrc/vm/cogitIA32.c +++ b/spursistasrc/vm/cogitIA32.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - SistaCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + SistaCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "SistaCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "SistaCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -643,7 +643,7 @@ static sqInt extBBytecode(void); static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); -static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static usqInt NoDbgRegParms 
findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod *cogMethod); static sqInt NoDbgRegParms findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -712,7 +712,7 @@ static BytecodeDescriptor * loadBytesAndGetDescriptor(void); static void NoDbgRegParms loadSubsequentBytesForDescriptorat(BytecodeDescriptor *descriptor, sqInt pc); static AbstractInstruction * NoDbgRegParms gMoveAwR(sqInt address, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveCwR(sqInt wordConstant, sqInt reg); -static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); static sqInt NoDbgRegParms mapForperformUntilarg(CogMethod *cogMethod, sqInt (*functionSymbol)(sqInt annotation, char *mcpc, sqInt arg), sqInt arg); static sqInt NoDbgRegParms mapObjectReferencesInClosedPIC(CogMethod *cPIC); static void mapObjectReferencesInGeneratedRuntime(void); @@ -1273,13 +1273,7 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt 
requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -7554,7 +7548,7 @@ static sqInt NoDbgRegParms blockDispatchTargetsForperformarg(CogMethod *cogMethod, usqInt (*binaryFunction)(sqInt mcpc, sqInt arg), sqInt arg) { sqInt blockEntry; - usqInt end; + sqInt end; sqInt pc; sqInt result; usqInt targetpc; @@ -10323,7 +10317,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -10616,7 +10610,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static sqInt NoDbgRegParms +static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -10896,6 +10890,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -12281,7 +12278,7 @@ gMoveCwR(sqInt wordConstant, sqInt reg) /* Answer the address of the null byte at the end of the method map. 
*/ /* Cogit>>#mapEndFor: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod) { usqInt end; @@ -22154,6 +22151,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -22205,6 +22204,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -23596,7 +23597,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -25954,7 +25955,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { 
compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -26234,7 +26235,8 @@ genBinaryAtConstInlinePrimitive(sqInt primIndex) val = ((ssTop())->constant); if (primIndex == 65) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); rr = ReceiverResultReg; } @@ -26250,7 +26252,8 @@ genBinaryAtConstInlinePrimitive(sqInt primIndex) genLoadSlotsourceRegdestReg(zeroBasedIndex, rr, rr); break; case 65: - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); if (CallerSavedRegisterMask & (1U << ReceiverResultReg)) { @@ -27680,7 +27683,8 @@ genCounterTripOnlyJumpIfto(sqInt boolean, sqInt targetBytecodePC) /* counters are increased / decreased in the inlined branch */ /* We need SendNumArgsReg because of the mustBeBooleanTrampoline */ counterIndex += 1; - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin checkQuickConstant:forInstruction: */ anInstruction1 = genoperandoperand(MoveCqR, 1, SendNumArgsReg); mustBeBooleanTrampoline = genCallMustBeBooleanFor(boolean); @@ -28256,7 +28260,6 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) However, if one of the operand is an unnanotable constant, does not allocate a register for it (machine code will use operations on constants). 
*/ unforwardArg = !(isUnannotatableConstant(ssTop())); - rcvrReg = (argReg = NoReg); /* begin allocateEqualsEqualsRegistersArgNeedsReg:rcvrNeedsReg:into: */ assert(unforwardArg || (unforwardRcvr)); @@ -28309,13 +28312,6 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) && (rcvrReg3 == NoReg)))); rcvrReg = rcvrReg3; argReg = argReg3; - if (argReg != NoReg) { - /* begin registerMaskFor: */ - regMask = 1U << argReg; - } - if (rcvrReg != NoReg) { - regMask = regMask | (1U << rcvrReg); - } if (!(((branchDescriptor->isBranchTrue)) || ((branchDescriptor->isBranchFalse)))) { return genIdenticalNoBranchArgIsConstantrcvrIsConstantargRegrcvrRegorNotIf(!unforwardArg, !unforwardRcvr, argReg, rcvrReg, orNot); @@ -28335,6 +28331,20 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) if (unforwardRcvr) { genEnsureOopInRegNotForwardedscratchReg(rcvrReg, TempReg); } + if (argReg == NoReg) { + /* begin registerMaskFor: */ + regMask = 1U << rcvrReg; + } + else { + if (rcvrReg == NoReg) { + /* begin registerMaskFor: */ + regMask = 1U << argReg; + } + else { + /* begin registerMaskFor:and: */ + regMask = (1U << rcvrReg) | (1U << argReg); + } + } counterReg = allocateRegNotConflictingWith(regMask); /* begin genExecutionCountLogicInto:counterReg: */ counterAddress1 = counters + (CounterBytes * counterIndex); @@ -28954,7 +28964,8 @@ genJumpIfto(sqInt boolean, sqInt targetBytecodePC) } popToReg(desc, TempReg); ssPop(1); - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin Label */ retry = genoperandoperand(Label, (labelCounter += 1), bytecodePC); /* begin genExecutionCountLogicInto:counterReg: */ @@ -29384,8 +29395,10 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex rcvrReg = allocateRegForStackEntryAtnotConflictingWith(2, 1U << valReg); } } - ssAllocateRequiredRegupThrough(valReg, simStackPtr - 2); - 
ssAllocateRequiredRegupThrough(rcvrReg, simStackPtr - 3); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << valReg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << rcvrReg, simStackPtr - 3, simNativeStackPtr); popToReg(ssTop(), valReg); ssPop(1); if (((ssTop())->spilled)) { @@ -29427,7 +29440,8 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(0, ClassReg); @@ -29458,7 +29472,8 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(0, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -29752,7 +29767,8 @@ genPointerAtPutStoreCheck(sqInt needsStoreCheck) /* Free ReceiverResultReg if it was not free */ rThird1 = ReceiverResultReg; - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); } else { @@ -32199,7 +32215,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == 
(fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -32457,7 +32473,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -32596,7 +32613,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -32730,7 +32748,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -32767,7 +32786,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - 
ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -33523,7 +33543,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -33552,7 +33573,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -33679,7 +33701,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) 
AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -33734,7 +33757,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -34156,16 +34180,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -34237,7 +34265,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if 
(shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -34251,7 +34280,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -34325,7 +34355,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -34371,7 +34402,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -34408,7 +34440,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); 
voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -34749,11 +34782,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -35273,45 +35309,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -35350,39 +35347,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << 
requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spursistasrc/vm/cogitMIPSEL.c b/spursistasrc/vm/cogitMIPSEL.c index 5b15576c14..44e7eea560 100644 --- a/spursistasrc/vm/cogitMIPSEL.c +++ b/spursistasrc/vm/cogitMIPSEL.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - SistaCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + SistaCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "SistaCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "SistaCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -589,7 +589,7 @@ static sqInt extBBytecode(void); static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); -static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static sqInt NoDbgRegParms 
findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod *cogMethod); static sqInt NoDbgRegParms findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -658,7 +658,7 @@ static BytecodeDescriptor * loadBytesAndGetDescriptor(void); static void NoDbgRegParms loadSubsequentBytesForDescriptorat(BytecodeDescriptor *descriptor, sqInt pc); static AbstractInstruction * NoDbgRegParms gMoveAwR(sqInt address, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveCwR(sqInt wordConstant, sqInt reg); -static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); static sqInt NoDbgRegParms mapForperformUntilarg(CogMethod *cogMethod, sqInt (*functionSymbol)(sqInt annotation, char *mcpc, sqInt arg), sqInt arg); static sqInt NoDbgRegParms mapObjectReferencesInClosedPIC(CogMethod *cPIC); static void mapObjectReferencesInGeneratedRuntime(void); @@ -1421,13 +1421,7 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt 
requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -2758,7 +2752,7 @@ static sqInt NoDbgRegParms blockDispatchTargetsForperformarg(CogMethod *cogMethod, usqInt (*binaryFunction)(sqInt mcpc, sqInt arg), sqInt arg) { sqInt blockEntry; - usqInt end; + sqInt end; sqInt pc; sqInt result; usqInt targetpc; @@ -5385,7 +5379,7 @@ cPICHasForwardedClass(CogMethod *cPIC) static sqInt NoDbgRegParms cPICHasFreedTargets(CogMethod *cPIC) { - sqInt entryPoint; + usqInt entryPoint; sqInt i; sqInt pc; CogMethod *targetMethod; @@ -5528,7 +5522,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -5819,7 +5813,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -6099,6 +6093,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. 
*/ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -7490,7 +7487,7 @@ gMoveCwR(sqInt wordConstant, sqInt reg) /* Answer the address of the null byte at the end of the method map. */ /* Cogit>>#mapEndFor: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod) { usqInt end; @@ -9066,7 +9063,7 @@ static void NoDbgRegParms relocateCallsInClosedPIC(CogMethod *cPIC) { sqInt callDelta; - sqInt entryPoint; + usqInt entryPoint; sqInt i; sqInt pc; sqLong refDelta; @@ -21546,6 +21543,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -21597,6 +21596,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -22992,7 +22993,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -25340,7 
+25341,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -25622,7 +25623,8 @@ genBinaryAtConstInlinePrimitive(sqInt primIndex) val = ((ssTop())->constant); if (primIndex == 65) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); rr = ReceiverResultReg; } @@ -25638,7 +25640,8 @@ genBinaryAtConstInlinePrimitive(sqInt primIndex) genLoadSlotsourceRegdestReg(zeroBasedIndex, rr, rr); break; case 65: - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); if (CallerSavedRegisterMask & (1U << ReceiverResultReg)) { @@ -27068,7 +27071,8 @@ genCounterTripOnlyJumpIfto(sqInt boolean, sqInt targetBytecodePC) /* counters are increased / decreased in the inlined branch */ /* We need SendNumArgsReg because of the mustBeBooleanTrampoline */ counterIndex += 1; - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin checkQuickConstant:forInstruction: */ anInstruction1 = genoperandoperand(MoveCqR, 1, SendNumArgsReg); mustBeBooleanTrampoline = genCallMustBeBooleanFor(boolean); @@ -27644,7 +27648,6 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) However, 
if one of the operand is an unnanotable constant, does not allocate a register for it (machine code will use operations on constants). */ unforwardArg = !(isUnannotatableConstant(ssTop())); - rcvrReg = (argReg = NoReg); /* begin allocateEqualsEqualsRegistersArgNeedsReg:rcvrNeedsReg:into: */ assert(unforwardArg || (unforwardRcvr)); @@ -27697,13 +27700,6 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) && (rcvrReg3 == NoReg)))); rcvrReg = rcvrReg3; argReg = argReg3; - if (argReg != NoReg) { - /* begin registerMaskFor: */ - regMask = 1U << argReg; - } - if (rcvrReg != NoReg) { - regMask = regMask | (1U << rcvrReg); - } if (!(((branchDescriptor->isBranchTrue)) || ((branchDescriptor->isBranchFalse)))) { return genIdenticalNoBranchArgIsConstantrcvrIsConstantargRegrcvrRegorNotIf(!unforwardArg, !unforwardRcvr, argReg, rcvrReg, orNot); @@ -27723,6 +27719,20 @@ genForwardersInlinedIdenticalOrNotIf(sqInt orNot) if (unforwardRcvr) { genEnsureOopInRegNotForwardedscratchReg(rcvrReg, TempReg); } + if (argReg == NoReg) { + /* begin registerMaskFor: */ + regMask = 1U << rcvrReg; + } + else { + if (rcvrReg == NoReg) { + /* begin registerMaskFor: */ + regMask = 1U << argReg; + } + else { + /* begin registerMaskFor:and: */ + regMask = (1U << rcvrReg) | (1U << argReg); + } + } counterReg = allocateRegNotConflictingWith(regMask); /* begin genExecutionCountLogicInto:counterReg: */ counterAddress1 = counters + (CounterBytes * counterIndex); @@ -28342,7 +28352,8 @@ genJumpIfto(sqInt boolean, sqInt targetBytecodePC) } popToReg(desc, TempReg); ssPop(1); - ssAllocateRequiredReg(SendNumArgsReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << SendNumArgsReg, simStackPtr, simNativeStackPtr); /* begin Label */ retry = genoperandoperand(Label, (labelCounter += 1), bytecodePC); /* begin genExecutionCountLogicInto:counterReg: */ @@ -28774,8 +28785,10 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex rcvrReg = 
allocateRegForStackEntryAtnotConflictingWith(2, 1U << valReg); } } - ssAllocateRequiredRegupThrough(valReg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(rcvrReg, simStackPtr - 3); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << valReg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << rcvrReg, simStackPtr - 3, simNativeStackPtr); popToReg(ssTop(), valReg); ssPop(1); if (((ssTop())->spilled)) { @@ -28817,7 +28830,8 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(0, ClassReg); @@ -28848,7 +28862,8 @@ genPointerAtPutConstantMaybeContextstoreCheckimmutabilityCheck(sqInt maybeContex # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(0, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -29142,7 +29157,8 @@ genPointerAtPutStoreCheck(sqInt needsStoreCheck) /* Free ReceiverResultReg if it was not free */ rThird1 = ReceiverResultReg; - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); } else { @@ -30856,7 +30872,7 @@ populatewithPICInfoForfirstCacheTag(sqInt tuple, CogMethod 
*cPIC, sqInt firstCac { sqInt cacheTag; sqInt classOop; - sqInt entryPoint; + usqInt entryPoint; sqInt i; sqInt pc; CogMethod *targetMethod; @@ -31564,7 +31580,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -31822,7 +31838,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -31961,7 +31978,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -32101,7 +32119,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, 
methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -32138,7 +32157,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -32897,7 +32917,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -32926,7 +32947,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); 
genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -33053,7 +33075,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -33108,7 +33131,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -33530,16 +33554,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -33611,7 +33639,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ 
association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -33625,7 +33654,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -33699,7 +33729,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -33745,7 +33776,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -33782,7 +33814,8 @@ 
genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -34125,11 +34158,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -34649,45 +34685,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). 
Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -34726,39 +34723,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - 
/* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spursistasrc/vm/cointerp.c b/spursistasrc/vm/cointerp.c index 49a70b3bd4..14d64e6238 100644 --- a/spursistasrc/vm/cointerp.c +++ b/spursistasrc/vm/cointerp.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -918,7 +918,7 @@ static sqInt NoDbgRegParms changeClassOfto(sqInt rcvr, sqInt argClass); static double NoDbgRegParms dbgFloatValueOf(sqInt oop); static sqInt defaultEdenBytes(void); extern sqInt fetchClassTagOf(sqInt oop); -extern usqInt floatObjectOf(double aFloat); +extern sqInt floatObjectOf(double aFloat); extern double floatValueOf(sqInt oop); static sqInt hasSixtyFourBitImmediates(void); extern sqInt 
headerIndicatesAlternateBytecodeSet(sqInt methodHeader); @@ -1521,7 +1521,7 @@ extern sqInt methodReturnValue(sqInt oop); extern sqInt methodUsesAlternateBytecodeSet(sqInt aMethodObj); EXPORT(void) moduleUnloaded(char *aModuleName); static char * NoDbgRegParms nameOfClass(sqInt classOop); -static usqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue); +static sqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue); static sqInt NoDbgRegParms noInlineTemporaryin(sqInt offset, char *theFP); static sqInt NoDbgRegParms noInlineTemporaryinput(sqInt offset, char *theFP, sqInt valueOop); static sqInt NoDbgRegParms noMarkedContextsOnPage(StackPage *thePage); @@ -2558,7 +2558,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -17937,12 +17937,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. 
Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. */ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -42342,7 +42350,7 @@ fetchClassTagOf(sqInt oop) } /* Spur32BitMemoryManager>>#floatObjectOf: */ -usqInt +sqInt floatObjectOf(double aFloat) { DECL_MAYBE_SQ_GLOBAL_STRUCT usqInt newFloatObj; @@ -46471,11 +46479,11 @@ becomewithtwoWaycopyHash(sqInt array1, sqInt array2, sqInt twoWayFlag, sqInt cop sp = longAt((array1 + BaseHeaderSize) + (((int)((usqInt)(StackPointerIndex) << (shiftForWord()))))); if (!((sp & 1))) { contextSize = 0; - goto l27; + goto l28; } assert((ReceiverIndex + ((sp >> 1))) < (lengthOf(array1))); contextSize = (sp >> 1); - l27: /* end fetchStackPointerOf: */; + l28: /* end fetchStackPointerOf: */; fieldOffset = (((CtxtTempFrameStart - 1) + contextSize) * BytesPerOop) + BaseHeaderSize; goto l23; } @@ -69480,7 +69488,7 @@ nameOfClass(sqInt classOop) code for creating a four byte LargeInteger in one place. 
*/ /* StackInterpreter>>#noInlineSigned32BitIntegerGutsFor: */ -static usqInt NoDbgRegParms NeverInline +static sqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue) { DECL_MAYBE_SQ_GLOBAL_STRUCT sqInt largeClass; diff --git a/spursistasrc/vm/cointerp.h b/spursistasrc/vm/cointerp.h index e8dd48345c..f809f3792d 100644 --- a/spursistasrc/vm/cointerp.h +++ b/spursistasrc/vm/cointerp.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ @@ -148,7 +148,7 @@ extern usqInt scavengeThresholdAddress(void); extern sqInt withoutForwardingOnandwithsendToCogit(sqInt obj1, sqInt obj2, sqInt aBool, sqInt (*selector)(sqInt,sqInt,sqInt)); extern sqInt byteSwapped(sqInt w); extern sqInt fetchClassTagOf(sqInt oop); -extern usqInt floatObjectOf(double aFloat); +extern sqInt floatObjectOf(double aFloat); extern sqInt headerIndicatesAlternateBytecodeSet(sqInt methodHeader); extern sqInt instantiateClassindexableSize(sqInt classObj, usqInt nElements); extern sqInt isIntegerValue(sqInt intValue); diff --git a/spursistasrc/vm/gcc3x-cointerp.c b/spursistasrc/vm/gcc3x-cointerp.c index e2c542e759..f72c1267c1 100644 --- a/spursistasrc/vm/gcc3x-cointerp.c +++ b/spursistasrc/vm/gcc3x-cointerp.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter 
VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -921,7 +921,7 @@ static sqInt NoDbgRegParms changeClassOfto(sqInt rcvr, sqInt argClass); static double NoDbgRegParms dbgFloatValueOf(sqInt oop); static sqInt defaultEdenBytes(void); extern sqInt fetchClassTagOf(sqInt oop); -extern usqInt floatObjectOf(double aFloat); +extern sqInt floatObjectOf(double aFloat); extern double floatValueOf(sqInt oop); static sqInt hasSixtyFourBitImmediates(void); extern sqInt headerIndicatesAlternateBytecodeSet(sqInt methodHeader); @@ -1524,7 +1524,7 @@ extern sqInt methodReturnValue(sqInt oop); extern sqInt methodUsesAlternateBytecodeSet(sqInt aMethodObj); EXPORT(void) moduleUnloaded(char *aModuleName); static char * NoDbgRegParms nameOfClass(sqInt classOop); -static usqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue); +static sqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue); static sqInt NoDbgRegParms noInlineTemporaryin(sqInt offset, char *theFP); static sqInt NoDbgRegParms noInlineTemporaryinput(sqInt offset, char *theFP, sqInt valueOop); static sqInt NoDbgRegParms noMarkedContextsOnPage(StackPage *thePage); @@ -2561,7 +2561,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -17946,12 +17946,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. 
Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. */ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -42351,7 +42359,7 @@ fetchClassTagOf(sqInt oop) } /* Spur32BitMemoryManager>>#floatObjectOf: */ -usqInt +sqInt floatObjectOf(double aFloat) { DECL_MAYBE_SQ_GLOBAL_STRUCT usqInt newFloatObj; @@ -46480,11 +46488,11 @@ becomewithtwoWaycopyHash(sqInt array1, sqInt array2, sqInt twoWayFlag, sqInt cop sp = longAt((array1 + BaseHeaderSize) + (((int)((usqInt)(StackPointerIndex) << (shiftForWord()))))); if (!((sp & 1))) { contextSize = 0; - goto l27; + goto l28; } assert((ReceiverIndex + ((sp >> 1))) < (lengthOf(array1))); contextSize = (sp >> 1); - l27: /* end fetchStackPointerOf: */; + l28: /* end fetchStackPointerOf: */; fieldOffset = (((CtxtTempFrameStart - 1) + contextSize) * BytesPerOop) + BaseHeaderSize; goto l23; } @@ -69489,7 +69497,7 @@ nameOfClass(sqInt classOop) code for creating a four byte LargeInteger in one place. 
*/ /* StackInterpreter>>#noInlineSigned32BitIntegerGutsFor: */ -static usqInt NoDbgRegParms NeverInline +static sqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue) { DECL_MAYBE_SQ_GLOBAL_STRUCT sqInt largeClass; diff --git a/spursrc/vm/cogit.h b/spursrc/vm/cogit.h index 08dd7f2e94..e3a3f28dd0 100644 --- a/spursrc/vm/cogit.h +++ b/spursrc/vm/cogit.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spursrc/vm/cogitARMv5.c b/spursrc/vm/cogitARMv5.c index e8ee179328..4f30cfe294 100644 --- a/spursrc/vm/cogitARMv5.c +++ b/spursrc/vm/cogitARMv5.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -1228,13 +1228,7 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); 
-static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -10170,7 +10164,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -10758,6 +10752,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. 
*/ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -22484,6 +22481,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -22537,6 +22536,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -24470,7 +24471,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -27383,7 +27384,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { 
compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27645,7 +27646,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -27909,7 +27910,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -28048,7 +28050,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -28213,7 +28216,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i 
+= 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -28250,7 +28254,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29377,7 +29382,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -29406,7 +29412,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, 
methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -29536,7 +29543,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -29597,7 +29605,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -30038,16 +30047,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -30285,7 +30298,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - 
ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(gMoveCwR(association, ReceiverResultReg), association); @@ -30302,7 +30316,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30379,7 +30394,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -30428,7 +30444,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -30465,7 +30482,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* 
begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -30814,11 +30832,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -31335,45 +31356,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -31412,39 +31394,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << 
requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spursrc/vm/cogitIA32.c b/spursrc/vm/cogitIA32.c index 43227f5bc8..0953a6fdb4 100644 --- a/spursrc/vm/cogitIA32.c +++ b/spursrc/vm/cogitIA32.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -1190,13 +1190,7 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static 
void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -10139,7 +10133,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -10727,6 +10721,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. 
*/ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -21477,6 +21474,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -21528,6 +21527,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -22675,7 +22676,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -25517,7 +25518,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { 
compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -25765,7 +25766,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -26023,7 +26024,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -26162,7 +26164,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -26321,7 +26324,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i 
+= 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -26358,7 +26362,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -27415,7 +27420,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -27444,7 +27450,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, 
methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -27571,7 +27578,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -27626,7 +27634,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -28048,16 +28057,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -28289,7 +28302,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - 
ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -28303,7 +28317,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -28377,7 +28392,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -28423,7 +28439,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -28460,7 +28477,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; 
assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -28801,11 +28819,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -29322,45 +29343,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). 
Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -29399,39 +29381,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - 
/* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/spursrc/vm/cogitMIPSEL.c b/spursrc/vm/cogitMIPSEL.c index 051047139a..0edaefdc22 100644 --- a/spursrc/vm/cogitMIPSEL.c +++ b/spursrc/vm/cogitMIPSEL.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -585,7 +585,7 @@ static sqInt NoDbgRegParms fillInBlockHeadersAt(sqInt startAddress); static CogMethod * NoDbgRegParms fillInMethodHeadersizeselector(CogMethod *method, sqInt size, sqInt selector); static sqInt NoDbgRegParms findBackwardBranchIsBackwardBranchMcpcBcpcMatchingBcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetBcpc); static usqInt 
NoDbgRegParms findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc); -static usqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod); extern CogBlockMethod * findMethodForStartBcpcinHomeMethod(sqInt startbcpc, CogMethod *cogMethod); static sqInt NoDbgRegParms findIsBackwardBranchMcpcBcpcMatchingMcpc(BytecodeDescriptor *descriptor, sqInt isBackwardBranchAndAnnotation, char *mcpc, sqInt bcpc, void *targetMcpc); static sqInt NoDbgRegParms firstMappedPCFor(CogMethod *cogMethod); @@ -1336,13 +1336,7 @@ static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); static sqInt NoDbgRegParms squeakV3orSistaV1PushNilSizenumInitialNils(sqInt aMethodObj, sqInt numInitialNils); static sqInt NoDbgRegParms squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nExts, sqInt aMethodObj); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -5092,7 +5086,7 @@ configureMNUCPICmethodOperandnumArgsdelta(CogMethod *cPIC, sqInt methodOperand, static sqInt NoDbgRegParms 
cPICCompactAndIsNowEmpty(CogMethod *cPIC) { - sqInt entryPoint; + usqInt entryPoint; sqInt followingAddress; sqInt i; sqInt methods[MaxCPICCases]; @@ -5193,7 +5187,7 @@ cPICHasForwardedClass(CogMethod *cPIC) static sqInt NoDbgRegParms cPICHasFreedTargets(CogMethod *cPIC) { - sqInt entryPoint; + usqInt entryPoint; sqInt i; sqInt pc; CogMethod *targetMethod; @@ -5348,7 +5342,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -5654,7 +5648,7 @@ findBlockMethodWithEntrystartBcpc(sqInt blockEntryMcpc, sqInt startBcpc) } /* Cogit>>#findMapLocationForMcpc:inMethod: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms findMapLocationForMcpcinMethod(usqInt targetMcpc, CogMethod *cogMethod) { sqInt annotation; @@ -5934,6 +5928,9 @@ freeUnmarkedMachineCode(void) } } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. 
*/ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -8910,7 +8907,7 @@ static void NoDbgRegParms relocateCallsInClosedPIC(CogMethod *cPIC) { sqInt callDelta; - sqInt entryPoint; + usqInt entryPoint; sqInt i; sqInt pc; sqLong refDelta; @@ -20782,6 +20779,8 @@ genStoreWithImmutabilityAndStoreCheckSourceRegslotIndexdestRegscratchRegneedRest /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -20833,6 +20832,8 @@ genStoreWithImmutabilityButNoStoreCheckSourceRegslotIndexdestRegscratchRegneedRe /* begin annotateBytecode: */ abstractInstruction2 = genoperandoperand(Label, (labelCounter += 1), bytecodePC); (abstractInstruction2->annotation = HasBytecodePC); + /* begin voidReceiverOptStatus */ + ((simSelf())->liveRegister = NoReg); if (needRestoreRcvr) { /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); @@ -21984,7 +21985,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -24810,7 +24811,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), 
methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -25062,7 +25063,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -25320,7 +25321,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -25459,7 +25461,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -25624,7 +25627,8 @@ genExtPushClosureBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); 
genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -25661,7 +25665,8 @@ genExtPushFullClosureBytecode(void) receiverIsOnStack = byte2 & (1U << 7); ignoreContext = byte2 & (1U << 6); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genCreateFullClosurenumArgsnumCopiedignoreContextcontextNumArgslargeinBlock(compiledBlock, argumentCountOf(compiledBlock), numCopied, ignoreContext, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -26721,7 +26726,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -26750,7 +26756,8 @@ genPushClosureCopyCopiedValuesBytecode(void) /* begin genInlineClosure:numArgs:numCopied: */ assert(getActiveContextAllocatesInMachineCode()); voidReceiverResultRegContainsSelf(); - ssAllocateCallRegandand(ReceiverResultReg, SendNumArgsReg, ClassReg); + /* begin ssAllocateCallReg:and:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << 
ReceiverResultReg) | (1U << SendNumArgsReg)) | (1U << ClassReg)), simStackPtr, simNativeStackPtr); genNoPopCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); for (i = 1; i <= numCopied; i += 1) { reg = ssStorePoptoPreferredReg(1, TempReg); @@ -26877,7 +26884,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -26932,7 +26940,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -27354,16 +27363,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -27595,7 +27608,8 @@ 
genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -27609,7 +27623,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -27683,7 +27698,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -27729,7 +27745,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); 
/* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -27766,7 +27783,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -28109,11 +28127,14 @@ marshallSendArguments(sqInt numArgs) Also check for any arg registers in use by other args. */ if (numArgs > 0) { if (numArgs > 1) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 2); - ssAllocateRequiredRegupThrough(Arg1Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 2, simNativeStackPtr); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg1Reg, simStackPtr - 1, simNativeStackPtr); } else { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } } if (numArgs > 1) { @@ -28630,45 +28651,6 @@ squeakV3orSistaV1NumPushNils(BytecodeDescriptor *descriptor, sqInt pc, sqInt nEx : 0))); } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -28707,39 +28689,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the 
top of stack) diff --git a/spursrc/vm/cointerp.c b/spursrc/vm/cointerp.c index 7b2aee5164..900e19d795 100644 --- a/spursrc/vm/cointerp.c +++ b/spursrc/vm/cointerp.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2541,7 +2541,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -15099,12 +15099,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. 
But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. */ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -43449,11 +43457,11 @@ becomewithtwoWaycopyHash(sqInt array1, sqInt array2, sqInt twoWayFlag, sqInt cop sp = longAt((array1 + BaseHeaderSize) + (((int)((usqInt)(StackPointerIndex) << (shiftForWord()))))); if (!((sp & 1))) { contextSize = 0; - goto l27; + goto l28; } assert((ReceiverIndex + ((sp >> 1))) < (lengthOf(array1))); contextSize = (sp >> 1); - l27: /* end fetchStackPointerOf: */; + l28: /* end fetchStackPointerOf: */; fieldOffset = (((CtxtTempFrameStart - 1) + contextSize) * BytesPerOop) + BaseHeaderSize; goto l23; } diff --git a/spursrc/vm/cointerp.h b/spursrc/vm/cointerp.h index 7dd77baa8a..eb52ecf84e 100644 --- a/spursrc/vm/cointerp.h +++ b/spursrc/vm/cointerp.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spursrc/vm/cointerpmt.c b/spursrc/vm/cointerpmt.c index a62f93f046..188445b3d7 100644 --- a/spursrc/vm/cointerpmt.c +++ b/spursrc/vm/cointerpmt.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreterMT VMMaker.oscog-eem.2420 
uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -1586,7 +1586,7 @@ extern sqInt methodReturnValue(sqInt oop); extern sqInt methodUsesAlternateBytecodeSet(sqInt aMethodObj); EXPORT(void) moduleUnloaded(char *aModuleName); static char * NoDbgRegParms nameOfClass(sqInt classOop); -static usqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue); +static sqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue); static sqInt NoDbgRegParms noInlineTemporaryin(sqInt offset, char *theFP); static sqInt NoDbgRegParms noInlineTemporaryinput(sqInt offset, char *theFP, sqInt valueOop); static sqInt NoDbgRegParms noMarkedContextsOnPage(StackPage *thePage); @@ -2640,7 +2640,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -15566,12 +15566,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. 
*/ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. */ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -45899,11 +45907,11 @@ becomewithtwoWaycopyHash(sqInt array1, sqInt array2, sqInt twoWayFlag, sqInt cop sp = longAt((array1 + BaseHeaderSize) + (((int)((usqInt)(StackPointerIndex) << (shiftForWord()))))); if (!((sp & 1))) { contextSize = 0; - goto l27; + goto l28; } assert((ReceiverIndex + ((sp >> 1))) < (lengthOf(array1))); contextSize = (sp >> 1); - l27: /* end fetchStackPointerOf: */; + l28: /* end fetchStackPointerOf: */; fieldOffset = (((CtxtTempFrameStart - 1) + contextSize) * BytesPerOop) + BaseHeaderSize; goto l23; } @@ -68566,7 +68574,7 @@ nameOfClass(sqInt classOop) code for creating a four byte LargeInteger in one place. 
*/ /* StackInterpreter>>#noInlineSigned32BitIntegerGutsFor: */ -static usqInt NoDbgRegParms NeverInline +static sqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue) { DECL_MAYBE_SQ_GLOBAL_STRUCT sqInt largeClass; diff --git a/spursrc/vm/cointerpmt.h b/spursrc/vm/cointerpmt.h index 8bb4e03ba6..d427a13d2b 100644 --- a/spursrc/vm/cointerpmt.h +++ b/spursrc/vm/cointerpmt.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/spursrc/vm/gcc3x-cointerp.c b/spursrc/vm/gcc3x-cointerp.c index 16020e4f60..1881d0ae5f 100644 --- a/spursrc/vm/gcc3x-cointerp.c +++ b/spursrc/vm/gcc3x-cointerp.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2544,7 +2544,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[Spur] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; 
volatile int sendTrace; @@ -15108,12 +15108,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -43458,11 +43466,11 @@ becomewithtwoWaycopyHash(sqInt array1, sqInt array2, sqInt twoWayFlag, sqInt cop sp = longAt((array1 + BaseHeaderSize) + (((int)((usqInt)(StackPointerIndex) << (shiftForWord()))))); if (!((sp & 1))) { contextSize = 0; - goto l27; + goto l28; } assert((ReceiverIndex + ((sp >> 1))) < (lengthOf(array1))); contextSize = (sp >> 1); - l27: /* end fetchStackPointerOf: */; + l28: /* end fetchStackPointerOf: */; fieldOffset = (((CtxtTempFrameStart - 1) + contextSize) * BytesPerOop) + BaseHeaderSize; goto l23; } diff --git a/spursrc/vm/gcc3x-cointerpmt.c b/spursrc/vm/gcc3x-cointerpmt.c index c13a216460..1d19fcf2bf 100644 --- a/spursrc/vm/gcc3x-cointerpmt.c +++ b/spursrc/vm/gcc3x-cointerpmt.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreterMT VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -1589,7 +1589,7 @@ extern sqInt methodReturnValue(sqInt oop); extern sqInt methodUsesAlternateBytecodeSet(sqInt aMethodObj); EXPORT(void) moduleUnloaded(char *aModuleName); static char * NoDbgRegParms nameOfClass(sqInt classOop); -static usqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue); +static sqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue); static sqInt NoDbgRegParms noInlineTemporaryin(sqInt offset, char *theFP); 
static sqInt NoDbgRegParms noInlineTemporaryinput(sqInt offset, char *theFP, sqInt valueOop); static sqInt NoDbgRegParms noMarkedContextsOnPage(StackPage *thePage); @@ -2643,7 +2643,7 @@ static signed char primitiveAccessorDepthTable[MaxPrimitiveIndex + 2 /* 577 */] }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -15575,12 +15575,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. 
*/ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void @@ -45908,11 +45916,11 @@ becomewithtwoWaycopyHash(sqInt array1, sqInt array2, sqInt twoWayFlag, sqInt cop sp = longAt((array1 + BaseHeaderSize) + (((int)((usqInt)(StackPointerIndex) << (shiftForWord()))))); if (!((sp & 1))) { contextSize = 0; - goto l27; + goto l28; } assert((ReceiverIndex + ((sp >> 1))) < (lengthOf(array1))); contextSize = (sp >> 1); - l27: /* end fetchStackPointerOf: */; + l28: /* end fetchStackPointerOf: */; fieldOffset = (((CtxtTempFrameStart - 1) + contextSize) * BytesPerOop) + BaseHeaderSize; goto l23; } @@ -68575,7 +68583,7 @@ nameOfClass(sqInt classOop) code for creating a four byte LargeInteger in one place. */ /* StackInterpreter>>#noInlineSigned32BitIntegerGutsFor: */ -static usqInt NoDbgRegParms NeverInline +static sqInt NoDbgRegParms NeverInline noInlineSigned32BitIntegerGutsFor(sqInt integerValue) { DECL_MAYBE_SQ_GLOBAL_STRUCT sqInt largeClass; diff --git a/src/vm/cogit.h b/src/vm/cogit.h index cdbe50e162..40baddebfe 100644 --- a/src/vm/cogit.h +++ b/src/vm/cogit.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/src/vm/cogitARMv5.c b/src/vm/cogitARMv5.c index 10ccd4b4d1..79a52272eb 100644 --- a/src/vm/cogitARMv5.c +++ b/src/vm/cogitARMv5.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static 
char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -701,7 +701,7 @@ static void NoDbgRegParms loadSubsequentBytesForDescriptorat(BytecodeDescriptor static AbstractInstruction * NoDbgRegParms gMoveAwR(sqInt address, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveCwR(sqInt wordConstant, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveMwrR(sqInt offset, sqInt baseReg, sqInt destReg); -static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); static sqInt NoDbgRegParms mapForperformUntilarg(CogMethod *cogMethod, sqInt (*functionSymbol)(sqInt annotation, char *mcpc, sqInt arg), sqInt arg); static sqInt NoDbgRegParms mapObjectReferencesInClosedPIC(CogMethod *cPIC); static void mapObjectReferencesInGeneratedRuntime(void); @@ -1113,13 +1113,7 @@ static sqInt receiverIsInReceiverResultReg(void); static void NoDbgRegParms reinitializeFixupsFromthrough(sqInt start, sqInt end); static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); 
static void NoDbgRegParms ssPop(sqInt n); @@ -6966,7 +6960,7 @@ static sqInt NoDbgRegParms blockDispatchTargetsForperformarg(CogMethod *cogMethod, usqInt (*binaryFunction)(sqInt mcpc, sqInt arg), sqInt arg) { sqInt blockEntry; - usqInt end; + sqInt end; sqInt pc; sqInt result; usqInt targetpc; @@ -9532,7 +9526,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -9915,6 +9909,9 @@ fixupAt(sqInt fixupPC) return fixupAtIndex(fixupPC - initialPC); } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -11383,7 +11380,7 @@ gMoveMwrR(sqInt offset, sqInt baseReg, sqInt destReg) /* Answer the address of the null byte at the end of the method map. 
*/ /* Cogit>>#mapEndFor: */ -static usqInt NoDbgRegParms +static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod) { usqInt end; @@ -19152,7 +19149,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -21419,7 +21416,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -21558,7 +21555,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -21819,7 +21816,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* 
begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -21958,7 +21956,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -22874,7 +22873,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallReg(ReceiverResultReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << ReceiverResultReg), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -22912,7 +22912,8 @@ genPushClosureCopyCopiedValuesBytecode(void) } } voidReceiverResultRegContainsSelf(); - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); genCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); if (numCopied > 0) { ssPop(numCopied); @@ -22989,7 +22990,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -23050,7 +23052,8 @@ 
genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -23494,16 +23497,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -23741,7 +23748,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(gMoveCwR(association, ReceiverResultReg), association); @@ -23758,7 +23766,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); 
ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -23812,7 +23821,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -23861,7 +23871,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -23898,7 +23909,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -24462,7 +24474,8 @@ marshallSendArguments(sqInt numArgs) so last to first so e.g. previous contents don't get overwritten. Also check for any arg registers in use by other args. 
*/ if (numArgs > 0) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } if (numArgs > 0) { popToReg(simStackAt((simStackPtr - numArgs) + 1), Arg0Reg); @@ -24948,45 +24961,6 @@ scanMethod(void) return numBlocks; } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -25025,39 +24999,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the 
top of stack) diff --git a/src/vm/cogitIA32.c b/src/vm/cogitIA32.c index 6392966483..396ab570bc 100644 --- a/src/vm/cogitIA32.c +++ b/src/vm/cogitIA32.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -1076,13 +1076,7 @@ static sqInt receiverIsInReceiverResultReg(void); static void NoDbgRegParms reinitializeFixupsFromthrough(sqInt start, sqInt end); static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -9524,7 
+9518,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -9907,6 +9901,9 @@ fixupAt(sqInt fixupPC) return fixupAtIndex(fixupPC - initialPC); } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -17999,7 +17996,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -20199,7 +20196,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -20333,7 +20330,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if 
(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -20593,7 +20590,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -20732,7 +20730,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -21585,7 +21584,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallReg(ReceiverResultReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << ReceiverResultReg), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -21623,7 +21623,8 @@ genPushClosureCopyCopiedValuesBytecode(void) } } voidReceiverResultRegContainsSelf(); - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), 
simStackPtr, simNativeStackPtr); genCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); if (numCopied > 0) { ssPop(numCopied); @@ -21697,7 +21698,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -21752,7 +21754,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -22177,16 +22180,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -22418,7 +22425,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = 
getLiteral(litVarIndex); voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -22432,7 +22440,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -22486,7 +22495,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -22532,7 +22542,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -22569,7 +22580,8 @@ 
genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -23118,7 +23130,8 @@ marshallSendArguments(sqInt numArgs) so last to first so e.g. previous contents don't get overwritten. Also check for any arg registers in use by other args. */ if (numArgs > 0) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } if (numArgs > 0) { popToReg(simStackAt((simStackPtr - numArgs) + 1), Arg0Reg); @@ -23604,45 +23617,6 @@ scanMethod(void) return numBlocks; } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -23681,39 +23655,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << 
requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/src/vm/cogitMIPSEL.c b/src/vm/cogitMIPSEL.c index 6f8166dc52..a98b234d34 100644 --- a/src/vm/cogitMIPSEL.c +++ b/src/vm/cogitMIPSEL.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGenerator VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGenerator VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "StackToRegisterMappingCogit VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__cogitBuildInfo = __buildInfo; @@ -633,7 +633,7 @@ static void NoDbgRegParms loadSubsequentBytesForDescriptorat(BytecodeDescriptor static AbstractInstruction * NoDbgRegParms gMoveAwR(sqInt address, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveCwR(sqInt wordConstant, sqInt reg); static AbstractInstruction * NoDbgRegParms gMoveMwrR(sqInt offset, sqInt baseReg, sqInt destReg); -static sqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); +static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod); static sqInt NoDbgRegParms 
mapForperformUntilarg(CogMethod *cogMethod, sqInt (*functionSymbol)(sqInt annotation, char *mcpc, sqInt arg), sqInt arg); static sqInt NoDbgRegParms mapObjectReferencesInClosedPIC(CogMethod *cPIC); static void mapObjectReferencesInGeneratedRuntime(void); @@ -1220,13 +1220,7 @@ static sqInt receiverIsInReceiverResultReg(void); static void NoDbgRegParms reinitializeFixupsFromthrough(sqInt start, sqInt end); static sqInt NoDbgRegParms scanBlock(BlockStart *blockStart); static sqInt scanMethod(void); -static void NoDbgRegParms ssAllocateCallReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3); static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr); -static void NoDbgRegParms ssAllocateRequiredReg(sqInt requiredReg); -static void NoDbgRegParms ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2); -static void NoDbgRegParms ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr); static void NoDbgRegParms ssFlushUpThroughReceiverVariable(sqInt slotIndex); static void NoDbgRegParms ssFlushUpThroughTemporaryVariable(sqInt tempIndex); static void NoDbgRegParms ssPop(sqInt n); @@ -2227,7 +2221,7 @@ static sqInt NoDbgRegParms blockDispatchTargetsForperformarg(CogMethod *cogMethod, usqInt (*binaryFunction)(sqInt mcpc, sqInt arg), sqInt arg) { sqInt blockEntry; - sqInt end; + usqInt end; sqInt pc; sqInt result; usqInt targetpc; @@ -4725,7 +4719,7 @@ static sqInt NoDbgRegParms deltaToSkipPrimAndErrorStoreInheader(sqInt aMethodObj, sqInt aMethodHeader) { return (((primitiveIndexOfMethodheader(aMethodObj, aMethodHeader)) > 0) - && ((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) + && 
((longStoreBytecodeForHeader(aMethodHeader)) == (fetchByteofObject((startPCOfMethod(aMethodObj)) + (sizeOfCallPrimitiveBytecode(aMethodHeader)), aMethodObj))) ? (sizeOfCallPrimitiveBytecode(aMethodHeader)) + (sizeOfLongStoreTempBytecode(aMethodHeader)) : 0); } @@ -5106,6 +5100,9 @@ fixupAt(sqInt fixupPC) return fixupAtIndex(fixupPC - initialPC); } + +/* Call ceSendMustBeBooleanTo: via the relevant trampoline. */ + /* Cogit>>#genCallMustBeBooleanFor: */ static AbstractInstruction * NoDbgRegParms genCallMustBeBooleanFor(sqInt boolean) @@ -6509,7 +6506,7 @@ gMoveMwrR(sqInt offset, sqInt baseReg, sqInt destReg) /* Answer the address of the null byte at the end of the method map. */ /* Cogit>>#mapEndFor: */ -static sqInt NoDbgRegParms +static usqInt NoDbgRegParms mapEndFor(CogMethod *cogMethod) { usqInt end; @@ -17269,7 +17266,7 @@ compilePrimitive(void) } if ((code == CompletePrimitive) && (!(((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))))) { return 0; } if (code == UnimplementedPrimitive) { @@ -19453,7 +19450,7 @@ compileFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -19589,7 +19586,7 @@ compileTwoPathFrameBuild(void) genoperand(PushR, SendNumArgsReg); } if (((primitiveIndexOfMethodheader(methodObj, methodHeader)) > 0) - && 
((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject(initialPC + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { + && ((longStoreBytecodeForHeader(methodHeader)) == (fetchByteofObject((startPCOfMethod(methodObj)) + (sizeOfCallPrimitiveBytecode(methodHeader)), methodObj)))) { compileGetErrorCode(); } /* begin MoveAw:R: */ @@ -19849,7 +19846,8 @@ ensureReceiverResultRegContainsSelf(void) { if (needsFrame) { if (!((((simSelf())->liveRegister)) == ReceiverResultReg)) { - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin putSelfInReceiverResultReg */ storeToReg(simSelf(), ReceiverResultReg); ((simSelf())->liveRegister = ReceiverResultReg); @@ -19988,7 +19986,8 @@ freeAnyRegNotConflictingWith(sqInt regMask) index += 1; } assert(!((reg == NoReg))); - ssAllocateRequiredReg(reg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << reg, simStackPtr, simNativeStackPtr); return reg; } @@ -20850,7 +20849,8 @@ genPushActiveContextBytecode(void) { assert(needsFrame); voidReceiverResultRegContainsSelf(); - ssAllocateCallReg(ReceiverResultReg); + /* begin ssAllocateCallReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << ReceiverResultReg), simStackPtr, simNativeStackPtr); genGetActiveContextNumArgslargeinBlock(methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); return ssPushRegister(ReceiverResultReg); } @@ -20888,7 +20888,8 @@ genPushClosureCopyCopiedValuesBytecode(void) } } voidReceiverResultRegContainsSelf(); - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); 
genCreateClosureAtnumArgsnumCopiedcontextNumArgslargeinBlock(startpc + 1, numArgs, numCopied, methodOrBlockNumArgs, methodNeedsLargeContext(methodObj), inBlock); if (numCopied > 0) { ssPop(numCopied); @@ -20962,7 +20963,8 @@ genPushMaybeContextReceiverVariable(sqInt slotIndex) AbstractInstruction *jmpDone; AbstractInstruction *jmpSingle; - ssAllocateCallRegand(ReceiverResultReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ReceiverResultReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ensureReceiverResultRegContainsSelf(); /* begin genPushMaybeContextSlotIndex: */ assert(needsFrame); @@ -21017,7 +21019,8 @@ genPushNewArrayBytecode(void) } } else { - ssAllocateCallRegand(SendNumArgsReg, ReceiverResultReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << SendNumArgsReg) | (1U << ReceiverResultReg)), simStackPtr, simNativeStackPtr); } size = byte1 & 0x7F; if (!popValues) { @@ -21442,16 +21445,20 @@ genSpecialSelectorArithmetic(void) static sqInt genSpecialSelectorClass(void) { + sqInt requiredReg1; sqInt topReg; topReg = registerOrNone(ssTop()); ssPop(1); if ((topReg == NoReg) || (topReg == ClassReg)) { - ssAllocateRequiredRegand((topReg = SendNumArgsReg), ClassReg); + /* begin ssAllocateRequiredReg:and: */ + requiredReg1 = (topReg = SendNumArgsReg); + ssAllocateRequiredRegMaskupThroughupThroughNative((1U << requiredReg1) | (1U << ClassReg), simStackPtr, simNativeStackPtr); } else { - ssAllocateRequiredReg(ClassReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr, simNativeStackPtr); } ssPush(1); popToReg(ssTop(), topReg); @@ -21683,7 +21690,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean /* begin genLoadLiteralVariable:in: */ association = getLiteral(litVarIndex); 
voidReceiverResultRegContainsSelf(); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); /* begin genMoveConstant:R: */ if (shouldAnnotateObjectReference(association)) { annotateobjRef(checkLiteralforInstruction(association, genoperandoperand(MoveCwR, association, ReceiverResultReg)), association); @@ -21697,7 +21705,8 @@ genStorePopLiteralVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolean # if IMMUTABILITY if (needsImmCheck) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -21751,7 +21760,8 @@ genStorePopMaybeContextReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqI } # endif /* IMMUTABILITY */ ssPop(1); - ssAllocateCallRegand(ClassReg, SendNumArgsReg); + /* begin ssAllocateCallReg:and: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << ClassReg) | (1U << SendNumArgsReg)), simStackPtr, simNativeStackPtr); ssPush(1); genLoadSlotsourceRegdestReg(SenderIndex, ReceiverResultReg, TempReg); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); @@ -21797,7 +21807,8 @@ genStorePopReceiverVariableneedsStoreCheckneedsImmutabilityCheck(sqInt popBoolea # if IMMUTABILITY if (needsImmCheck1) { - ssAllocateRequiredRegupThrough(ClassReg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ClassReg, simStackPtr - 1, simNativeStackPtr); ssStoreAndReplacePoptoReg(popBoolean, ClassReg); /* begin ssFlushTo: */ assert(tempsValidAndVolatileEntriesSpilled()); @@ -21834,7 +21845,8 @@ genStorePopRemoteTempAtneedsStoreCheck(sqInt popBoolean, sqInt 
slotIndex, sqInt sqInt topReg; assert(needsFrame); - ssAllocateRequiredReg(ReceiverResultReg); + /* begin ssAllocateRequiredReg: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << ReceiverResultReg, simStackPtr, simNativeStackPtr); voidReceiverResultRegContainsSelf(); /* begin MoveMw:r:R: */ offset = frameOffsetOfTemporary(remoteTempIndex); @@ -22385,7 +22397,8 @@ marshallSendArguments(sqInt numArgs) so last to first so e.g. previous contents don't get overwritten. Also check for any arg registers in use by other args. */ if (numArgs > 0) { - ssAllocateRequiredRegupThrough(Arg0Reg, simStackPtr - 1); + /* begin ssAllocateRequiredReg:upThrough: */ + ssAllocateRequiredRegMaskupThroughupThroughNative(1U << Arg0Reg, simStackPtr - 1, simNativeStackPtr); } if (numArgs > 0) { popToReg(simStackAt((simStackPtr - numArgs) + 1), Arg0Reg); @@ -22871,45 +22884,6 @@ scanMethod(void) return numBlocks; } - -/* Allocate a register needed in a run-time call (i.e. flush uses of the - register to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg: */ -static void NoDbgRegParms -ssAllocateCallReg(sqInt requiredReg) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (1U << requiredReg), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. 
*/ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and: */ -static void NoDbgRegParms -ssAllocateCallRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | ((1U << requiredReg1) | (1U << requiredReg2)), simStackPtr, simNativeStackPtr); -} - - -/* Allocate registers needed in a run-time call (i.e. flush uses of the - registers to the real stack). Since the run-time can smash any and - all caller-saved registers also flush all caller-saved registers. */ - - /* StackToRegisterMappingCogit>>#ssAllocateCallReg:and:and: */ -static void NoDbgRegParms -ssAllocateCallRegandand(sqInt requiredReg1, sqInt requiredReg2, sqInt requiredReg3) -{ - /* begin ssAllocateRequiredRegMask:upThrough: */ - ssAllocateRequiredRegMaskupThroughupThroughNative(CallerSavedRegisterMask | (((1U << requiredReg1) | (1U << requiredReg2)) | (1U << requiredReg3)), simStackPtr, simNativeStackPtr); -} - /* StackToRegisterMappingCogit>>#ssAllocateRequiredRegMask:upThrough:upThroughNative: */ static void NoDbgRegParms ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt stackPtr, sqInt nativeStackPtr) @@ -22948,39 +22922,6 @@ ssAllocateRequiredRegMaskupThroughupThroughNative(sqInt requiredRegsMask, sqInt } } - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg: */ -static void NoDbgRegParms -ssAllocateRequiredReg(sqInt requiredReg) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:and: */ -static void NoDbgRegParms -ssAllocateRequiredRegand(sqInt requiredReg1, sqInt requiredReg2) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = (1U << requiredReg1) | (1U << 
requiredReg2); - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, simStackPtr, simNativeStackPtr); -} - - /* StackToRegisterMappingCogit>>#ssAllocateRequiredReg:upThrough: */ -static void NoDbgRegParms -ssAllocateRequiredRegupThrough(sqInt requiredReg, sqInt stackPtr) -{ - sqInt requiredRegsMask; - - /* begin ssAllocateRequiredRegMask:upThrough: */ - requiredRegsMask = 1U << requiredReg; - ssAllocateRequiredRegMaskupThroughupThroughNative(requiredRegsMask, stackPtr, simNativeStackPtr); -} - /* Any occurrences on the stack of the value being stored (which is the top of stack) diff --git a/src/vm/cointerp.c b/src/vm/cointerp.c index 73a3f8b78c..76a0cd778a 100644 --- a/src/vm/cointerp.c +++ b/src/vm/cointerp.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2185,7 +2185,7 @@ static void (*primitiveTable[MaxPrimitiveIndex + 2 /* 577 */])(void) = { 0 }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[SqueakV3] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[SqueakV3] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -5095,7 +5095,7 @@ longAt((GIV(method) + BaseHeaderSize) + 
(((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } /* second probe */ @@ -5105,7 +5105,7 @@ longAt((GIV(method) + BaseHeaderSize) + (((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } probe = (((usqInt) hash) >> 2) & MethodCacheMask; if (((GIV(methodCache)[probe + MethodCacheSelector]) == GIV(messageSelector)) @@ -5113,10 +5113,10 @@ longAt((GIV(method) + BaseHeaderSize) + (((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } ok = 0; - l350: /* end inlineLookupInMethodCacheSel:classTag: */; + l352: /* end inlineLookupInMethodCacheSel:classTag: */; if (ok) { /* begin ifAppropriateCompileToNativeCode:selector: */ aMethodObj = GIV(newMethod); @@ -10618,12 +10618,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. 
Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. */ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void diff --git a/src/vm/cointerp.h b/src/vm/cointerp.h index 000eb09967..713aa8fc09 100644 --- a/src/vm/cointerp.h +++ b/src/vm/cointerp.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/src/vm/cointerpmt.c b/src/vm/cointerpmt.c index e01ba37b7f..f2131686f8 100644 --- a/src/vm/cointerpmt.c +++ b/src/vm/cointerpmt.c @@ -1,9 +1,9 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreterMT VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2283,7 +2283,7 @@ static void (*primitiveTable[MaxPrimitiveIndex + 2 /* 577 */])(void) = { 0 }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2420]"; +const 
char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -5194,7 +5194,7 @@ longAt((GIV(method) + BaseHeaderSize) + (((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } /* second probe */ @@ -5204,7 +5204,7 @@ longAt((GIV(method) + BaseHeaderSize) + (((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } probe = (((usqInt) hash) >> 2) & MethodCacheMask; if (((GIV(methodCache)[probe + MethodCacheSelector]) == GIV(messageSelector)) @@ -5212,10 +5212,10 @@ longAt((GIV(method) + BaseHeaderSize) + (((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } ok = 0; - l350: /* end inlineLookupInMethodCacheSel:classTag: */; + l352: /* end inlineLookupInMethodCacheSel:classTag: */; if (ok) { /* begin ifAppropriateCompileToNativeCode:selector: */ aMethodObj = GIV(newMethod); @@ -11084,12 +11084,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. 
*/ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. */ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void diff --git a/src/vm/cointerpmt.h b/src/vm/cointerpmt.h index 5969c92901..a4c6d3e60a 100644 --- a/src/vm/cointerpmt.h +++ b/src/vm/cointerpmt.h @@ -1,5 +1,5 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ diff --git a/src/vm/gcc3x-cointerp.c b/src/vm/gcc3x-cointerp.c index fb4afade46..ddcd7350d8 100644 --- a/src/vm/gcc3x-cointerp.c +++ b/src/vm/gcc3x-cointerp.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreter VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char 
*__interpBuildInfo = __buildInfo; @@ -2188,7 +2188,7 @@ static void (*primitiveTable[MaxPrimitiveIndex + 2 /* 577 */])(void) = { 0 }; sqInt checkedPluginName; char expensiveAsserts = 0; -const char *interpreterVersion = "Croquet Closure Cog[SqueakV3] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog[SqueakV3] VM [CoInterpreterPrimitives VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -5104,7 +5104,7 @@ longAt((GIV(method) + BaseHeaderSize) + (((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } /* second probe */ @@ -5114,7 +5114,7 @@ longAt((GIV(method) + BaseHeaderSize) + (((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } probe = (((usqInt) hash) >> 2) & MethodCacheMask; if (((GIV(methodCache)[probe + MethodCacheSelector]) == GIV(messageSelector)) @@ -5122,10 +5122,10 @@ longAt((GIV(method) + BaseHeaderSize) + (((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } ok = 0; - l350: /* end inlineLookupInMethodCacheSel:classTag: */; + l352: /* end inlineLookupInMethodCacheSel:classTag: */; if (ok) { /* begin ifAppropriateCompileToNativeCode:selector: */ aMethodObj = GIV(newMethod); @@ -10627,12 +10627,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to 
generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. */ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. */ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void diff --git a/src/vm/gcc3x-cointerpmt.c b/src/vm/gcc3x-cointerpmt.c index 3c63d11af6..e850be1207 100644 --- a/src/vm/gcc3x-cointerpmt.c +++ b/src/vm/gcc3x-cointerpmt.c @@ -2,11 +2,11 @@ /* Automatically generated by - CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CCodeGeneratorGlobalStructure VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 from - CoInterpreterMT VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 + CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 */ -static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2420 uuid: f303796f-283f-4d4c-a910-bf205a0b4600 " __DATE__ ; +static char __buildInfo[] = "CoInterpreterMT VMMaker.oscog-eem.2424 uuid: 84c4eaef-c4f2-4d8e-85d8-01b8aebfa554 " __DATE__ ; char *__interpBuildInfo = __buildInfo; @@ -2286,7 +2286,7 @@ static void (*primitiveTable[MaxPrimitiveIndex + 2 /* 577 */])(void) = { 0 }; sqInt checkedPluginName; char expensiveAsserts = 0; 
-const char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2420]"; +const char *interpreterVersion = "Croquet Closure Cog MT VM [CoInterpreterMT VMMaker.oscog-eem.2424]"; sqInt minBackwardJumpCountForCompile = MinBackwardJumpCountForCompile /* 40 */; volatile int sendTrace; @@ -5203,7 +5203,7 @@ longAt((GIV(method) + BaseHeaderSize) + (((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } /* second probe */ @@ -5213,7 +5213,7 @@ longAt((GIV(method) + BaseHeaderSize) + (((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } probe = (((usqInt) hash) >> 2) & MethodCacheMask; if (((GIV(methodCache)[probe + MethodCacheSelector]) == GIV(messageSelector)) @@ -5221,10 +5221,10 @@ longAt((GIV(method) + BaseHeaderSize) + (((sqInt)((usqInt)((variableIndex + Lite GIV(newMethod) = GIV(methodCache)[probe + MethodCacheMethod]; primitiveFunctionPointer = ((void (*)()) (GIV(methodCache)[probe + MethodCachePrimFunction])); ok = 1; - goto l350; + goto l352; } ok = 0; - l350: /* end inlineLookupInMethodCacheSel:classTag: */; + l352: /* end inlineLookupInMethodCacheSel:classTag: */; if (ok) { /* begin ifAppropriateCompileToNativeCode:selector: */ aMethodObj = GIV(newMethod); @@ -11093,12 +11093,20 @@ ceSendFromInLineCacheMiss(CogMethod *cogMethodOrPIC) } -/* For RegisterAllocatingCogit we want the address following a conditional - branch not to be reachable, so we - don't have to generate code to reload registers. Instead simply convert to - an interpreter frame, - backup the pc to the branch, reenter the interpreter and hence retry the - mustBeBoolean send therein. 
*/ +/* For RegisterAllocatingCogit we want the pc following a conditional branch + not to be reachable, so + we don't have to generate code to reload registers. But notionally the pc + following a conditional + branch is reached when continuing from a mustBeBoolean error. Instead of + supporting this in the + JIT, simply convert to an interpreter frame, backup the pc to the branch, + reenter the interpreter + and hence retry the mustBeBoolean send therein. N.B. We could do this for + immutability violations + too, but immutability is used in actual applications and so should be + performant, whereas + mustBeBoolean errors are extremely rare and so we choose brevity over + performance in this case. */ /* CoInterpreter>>#ceSendMustBeBooleanTo:interpretingAtDelta: */ void