This repository has been archived by the owner on Nov 1, 2020. It is now read-only.

Commit b65389a

Typos (#5441)
* crtitical -> critical
* elligible -> eligible
* emory -> memory
* enquing -> enqueuing
* enumarate -> enumerate
* eqivalent -> equivalent
* execeptions -> exceptions
* exectuing -> executing
* forceably -> forcibly
* fragmentatioin -> fragmentation
* fragramented -> fragmented
* geneated -> generated
* generaion -> generation
* handes -> handles
* higest -> highest
github-john-doe authored and jkotas committed Feb 24, 2018
1 parent 0ea2734 commit b65389a
Showing 7 changed files with 23 additions and 23 deletions.
4 changes: 2 additions & 2 deletions src/Native/Runtime/StackFrameIterator.cpp
@@ -1870,7 +1870,7 @@ COOP_PINVOKE_HELPER(Boolean, RhpSfiInit, (StackFrameIterator* pThis, PAL_LIMITED
// The stackwalker is intolerant to hijacked threads, as it is largely expecting to be called from C++
// where the hijack state of the thread is invariant. Because we've exposed the iterator out to C#, we
// need to unhijack every time we callback into C++ because the thread could have been hijacked during our
-// time exectuing C#.
+// time executing C#.
pCurThread->Unhijack();

// Passing NULL is a special-case to request a standard managed stack trace for the current thread.
@@ -1890,7 +1890,7 @@ COOP_PINVOKE_HELPER(Boolean, RhpSfiNext, (StackFrameIterator* pThis, UInt32* puE
// The stackwalker is intolerant to hijacked threads, as it is largely expecting to be called from C++
// where the hijack state of the thread is invariant. Because we've exposed the iterator out to C#, we
// need to unhijack every time we callback into C++ because the thread could have been hijacked during our
-// time exectuing C#.
+// time executing C#.
ThreadStore::GetCurrentThread()->Unhijack();

const UInt32 MaxTryRegionIdx = 0xFFFFFFFF;
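
The comment fixed above describes a real invariant: every native entry point that C# can call back into must unhijack the current thread before walking the stack, because the thread may have been hijacked while executing managed code. A minimal sketch of that pattern, using hypothetical stand-in types rather than the real CoreRT Thread and iterator signatures:

    #include <cstdio>

    // Hypothetical stand-in for the runtime's thread object.
    struct Thread {
        bool hijacked = false;
        void Unhijack() { hijacked = false; }  // restore the true return address
    };

    static Thread g_thread;
    static Thread* GetCurrentThread() { return &g_thread; }

    // Every C++ entry point reachable from C# re-establishes the
    // "not hijacked" invariant before touching the stack.
    bool StackFrameIteratorNext() {
        GetCurrentThread()->Unhijack();  // thread may have been hijacked in C#
        // ... advance the iterator to the next frame ...
        return true;
    }

    int main() {
        g_thread.hijacked = true;  // simulate a hijack during managed code
        StackFrameIteratorNext();
        std::printf("hijacked after callback: %d\n", g_thread.hijacked);  // prints 0
    }
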
16 changes: 8 additions & 8 deletions src/Native/gc/gc.cpp
@@ -3065,7 +3065,7 @@ gc_heap::dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number)
}

// DTREVIEW: Right now we only estimate gen2 fragmentation.
-// on 64-bit though we should consider gen1 or even gen0 fragmentatioin as
+// on 64-bit though we should consider gen1 or even gen0 fragmentation as
// well
inline BOOL
gc_heap::dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem)
@@ -7065,7 +7065,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start,
if ((la != saved_g_lowest_address ) || (ha != saved_g_highest_address))
{
{
-//modify the higest address so the span covered
+//modify the highest address so the span covered
//is twice the previous one.
uint8_t* top = (uint8_t*)0 + Align (GCToOSInterface::GetVirtualMemoryLimit());
// On non-Windows systems, we get only an approximate value that can possibly be
@@ -14703,7 +14703,7 @@ int gc_heap::generation_to_condemn (int n_initial,
}
}

-//figure out which ephemeral generation is too fragramented
+//figure out which ephemeral generation is too fragmented
temp_gen = n;
for (i = n+1; i < max_generation; i++)
{
@@ -17404,7 +17404,7 @@ void gc_heap::enque_pinned_plug (uint8_t* plug,
}
}

dprintf (3, ("enquing P #%Id(%Ix): %Ix. oldest: %Id, LO: %Ix, pre: %d",
dprintf (3, ("enqueuing P #%Id(%Ix): %Ix. oldest: %Id, LO: %Ix, pre: %d",
mark_stack_tos, &mark_stack_array[mark_stack_tos], plug, mark_stack_bos, last_object_in_last_plug, (save_pre_plug_info_p ? 1 : 0)));
mark& m = mark_stack_array[mark_stack_tos];
m.first = plug;
@@ -29865,7 +29865,7 @@ size_t gc_heap::joined_youngest_desired (size_t new_allocation)
{
uint32_t memory_load = 0;
get_memory_info (&memory_load);
dprintf (2, ("Current emory load: %d", memory_load));
dprintf (2, ("Current memory load: %d", memory_load));

size_t final_total =
trim_youngest_desired (memory_load, total_new_allocation, total_min_allocation);
@@ -35262,7 +35262,7 @@ GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason)
size_t GCHeap::GetTotalBytesInUse ()
{
#ifdef MULTIPLE_HEAPS
-//enumarate all the heaps and get their size.
+//enumerate all the heaps and get their size.
size_t tot_size = 0;
for (int i = 0; i < gc_heap::n_heaps; i++)
{
@@ -35581,7 +35581,7 @@ HRESULT GCHeap::GetGcCounters(int gen, gc_counters* counters)
counters->promoted_size = 0;
counters->collection_count = 0;

-//enumarate all the heaps and get their counters.
+//enumerate all the heaps and get their counters.
for (int i = 0; i < gc_heap::n_heaps; i++)
{
dynamic_data* dd = gc_heap::g_heaps [i]->dynamic_data_of (gen);
@@ -35674,7 +35674,7 @@ Object* GCHeap::GetNextFinalizableObject()
if (O)
return O;
}
-//return the first non crtitical/critical one in the first queue.
+//return the first non critical/critical one in the first queue.
for (int hn = 0; hn < gc_heap::n_heaps; hn++)
{
gc_heap* hp = gc_heap::g_heaps [hn];
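
Several of the gc.cpp fixes land in MULTIPLE_HEAPS paths that all follow the shape the corrected comments describe: enumerate every heap and accumulate a per-heap value. A minimal sketch of that loop, with a hypothetical per-heap type standing in for the real gc_heap:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-in for one server-GC heap.
    struct gc_heap {
        size_t committed_bytes;
        size_t get_total_committed_size() const { return committed_bytes; }
    };

    // Enumerate all the heaps and sum their sizes, as in GetTotalBytesInUse.
    size_t total_bytes_in_use(const gc_heap* heaps, int n_heaps) {
        size_t tot_size = 0;
        for (int i = 0; i < n_heaps; i++)
            tot_size += heaps[i].get_total_committed_size();
        return tot_size;
    }

    int main() {
        gc_heap heaps[] = { {1024}, {2048}, {4096} };
        std::printf("%zu\n", total_bytes_in_use(heaps, 3));  // prints 7168
    }
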
2 changes: 1 addition & 1 deletion src/Native/gc/handletable.cpp
@@ -1056,7 +1056,7 @@ void HndScanHandlesForGC(HHANDLETABLE hTable, HANDLESCANPROC scanProc, uintptr_t
/*
* HndResetAgeMap
*
-* Service to forceably reset the age map for a set of handles.
+* Service to forcibly reset the age map for a set of handles.
*
* Provided for GC-time resetting the handle table's write barrier. This is not
* normally advisable, as it increases the amount of work that will be done in
14 changes: 7 additions & 7 deletions src/Native/gc/handletablecore.cpp
@@ -129,7 +129,7 @@ void QuickSort(uintptr_t *pData, int left, int right, PFNCOMPARE pfnCompare)
*
* Returns:
* <0 - handle P should be freed before handle Q
-* =0 - handles are eqivalent for free order purposes
+* =0 - handles are equivalent for free order purposes
* >0 - handle Q should be freed before handle P
*
*/
@@ -1869,7 +1869,7 @@ void SegmentTrimExcessPages(TableSegment *pSegment)
/*
* BlockAllocHandlesInMask
*
-* Attempts to allocate the requested number of handes of the specified type,
+* Attempts to allocate the requested number of handles of the specified type,
* from the specified mask of the specified handle block.
*
* Returns the number of available handles actually allocated.
@@ -2029,7 +2029,7 @@ uint32_t BlockAllocHandlesInitial(TableSegment *pSegment, uint32_t uType, uint32
/*
* BlockAllocHandles
*
-* Attempts to allocate the requested number of handes of the specified type,
+* Attempts to allocate the requested number of handles of the specified type,
* from the specified handle block.
*
* Returns the number of available handles actually allocated.
@@ -2087,7 +2087,7 @@ uint32_t BlockAllocHandles(TableSegment *pSegment, uint32_t uBlock, OBJECTHANDLE
/*
* SegmentAllocHandlesFromTypeChain
*
-* Attempts to allocate the requested number of handes of the specified type,
+* Attempts to allocate the requested number of handles of the specified type,
* from the specified segment's block chain for the specified type. This routine
* ONLY scavenges existing blocks in the type chain. No new blocks are committed.
*
@@ -2171,7 +2171,7 @@ uint32_t SegmentAllocHandlesFromTypeChain(TableSegment *pSegment, uint32_t uType
/*
* SegmentAllocHandlesFromFreeList
*
-* Attempts to allocate the requested number of handes of the specified type,
+* Attempts to allocate the requested number of handles of the specified type,
* by committing blocks from the free list to that type's type chain.
*
* Returns the number of available handles actually allocated.
@@ -2230,7 +2230,7 @@ uint32_t SegmentAllocHandlesFromFreeList(TableSegment *pSegment, uint32_t uType,
/*
* SegmentAllocHandles
*
-* Attempts to allocate the requested number of handes of the specified type,
+* Attempts to allocate the requested number of handles of the specified type,
* from the specified segment.
*
* Returns the number of available handles actually allocated.
@@ -2268,7 +2268,7 @@ uint32_t SegmentAllocHandles(TableSegment *pSegment, uint32_t uType, OBJECTHANDL
/*
* TableAllocBulkHandles
*
-* Attempts to allocate the requested number of handes of the specified type.
+* Attempts to allocate the requested number of handles of the specified type.
*
* Returns the number of handles that were actually allocated. This is always
* the same as the number of handles requested except in out-of-memory conditions,
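
The QuickSort comment fixed at the top of this file documents a standard three-way comparator contract. An illustrative comparator obeying that contract (the lowest-address-first policy here is an assumption for the example, not the handle table's actual free-order rule):

    #include <cstdint>
    #include <cstdio>

    // Returns <0 if P should be freed before Q, 0 if the handles are
    // equivalent for free-order purposes, >0 if Q should be freed first.
    // Policy (assumed for illustration): free lower-addressed handles first.
    int CompareHandlesByFreeOrder(uintptr_t p, uintptr_t q) {
        if (p < q) return -1;
        if (p > q) return 1;
        return 0;
    }

    int main() {
        std::printf("%d\n", CompareHandlesByFreeOrder(0x1000, 0x2000));  // prints -1
    }
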
6 changes: 3 additions & 3 deletions src/Native/gc/handletablescan.cpp
@@ -118,7 +118,7 @@ If you change any of those algorithm, please verify it by this program:
assert (mask == 0);
return;
}
-//any generaion bigger than 2 is actually 2
+//any generation bigger than 2 is actually 2
if (gen > 2)
gen = 2;
@@ -716,10 +716,10 @@ void CALLBACK BlockScanBlocksEphemeral(PTR_TableSegment pSegment, uint32_t uBloc
uint32_t *pdwGen = (uint32_t *)pSegment->rgGeneration + uBlock;
uint32_t *pdwGenLast = pdwGen + uCount;

-// loop over all the blocks, checking for elligible clumps as we go
+// loop over all the blocks, checking for eligible clumps as we go
do
{
-// determine if any clumps in this block are elligible
+// determine if any clumps in this block are eligible
uint32_t dwClumpMask = COMPUTE_CLUMP_MASK(*pdwGen, dwAgeMask);

// if there are any clumps to scan then scan them now
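
The ephemeral scan fixed above checks, one 32-bit word at a time, which clumps of handles are eligible for the current scan. A rough sketch of the generation clamp and the per-clump check, assuming one age byte per clump with four clumps packed per word (the real COMPUTE_CLUMP_MASK bit layout may differ):

    #include <cstdint>
    #include <cstdio>

    // Any generation bigger than 2 is treated as 2, as the fixed comment says.
    inline uint32_t clamp_generation(uint32_t gen) { return gen > 2 ? 2 : gen; }

    // Assumed layout: four clump-age bytes packed into one 32-bit word.
    // Returns 0xFF in each byte position whose clump age matches the target.
    uint32_t compute_clump_mask(uint32_t packed_ages, uint32_t target_age) {
        uint32_t mask = 0;
        for (int i = 0; i < 4; i++) {
            uint32_t age = (packed_ages >> (i * 8)) & 0xFFu;
            if (clamp_generation(age) == clamp_generation(target_age))
                mask |= 0xFFu << (i * 8);
        }
        return mask;
    }

    int main() {
        // clumps aged 0,1,2,1; scanning for age 1 -> bytes 1 and 3 eligible
        std::printf("%08x\n", compute_clump_mask(0x01020100u, 1));  // prints ff00ff00
    }
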
2 changes: 1 addition & 1 deletion src/Native/libunwind/src/Unwind-EHABI.cpp
@@ -695,7 +695,7 @@ _LIBUNWIND_EXPORT void _Unwind_Complete(_Unwind_Exception* exception_object) {
/// may force a jump to a landing pad in that function, the landing
/// pad code may then call _Unwind_Resume() to continue with the
/// unwinding. Note: the call to _Unwind_Resume() is from compiler
-/// geneated user code. All other _Unwind_* routines are called
+/// generated user code. All other _Unwind_* routines are called
/// by the C++ runtime __cxa_* routines.
///
/// Note: re-throwing an exception (as opposed to continuing the unwind)
2 changes: 1 addition & 1 deletion src/Native/libunwind/src/Unwind_AppleExtras.cpp
@@ -191,7 +191,7 @@ bool checkKeyMgrRegisteredFDEs(uintptr_t pc, void *&fde) {
_Unwind_FunctionContext *fc_ = nullptr;
#endif

-// Accessors to get get/set linked list of frames for sjlj based execeptions.
+// Accessors to get get/set linked list of frames for sjlj based exceptions.
_LIBUNWIND_HIDDEN
struct _Unwind_FunctionContext *__Unwind_SjLj_GetTopOfFunctionStack() {
#ifndef _LIBUNWIND_HAS_NO_THREADS

