diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..1680c89 --- /dev/null +++ b/.clang-format @@ -0,0 +1,32 @@ +#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#- +Language : Cpp +BasedOnStyle : Google +Standard : Auto +#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#- +AccessModifierOffset : -4 +AlignTrailingComments : true +AllowAllParametersOfDeclarationOnNextLine : false +AllowShortBlocksOnASingleLine : true +AllowShortFunctionsOnASingleLine : true +AllowShortIfStatementsOnASingleLine : false +AllowShortLoopsOnASingleLine : false +BinPackParameters : false +BreakBeforeBraces : Allman +BreakBeforeTernaryOperators : false +BreakConstructorInitializersBeforeComma : true +ColumnLimit : 120 +Cpp11BracedListStyle : true +DerivePointerAlignment : true +DerivePointerBinding : false +IndentWidth : 4 +KeepEmptyLinesAtTheStartOfBlocks : true +MaxEmptyLinesToKeep : 2 +NamespaceIndentation : All +PointerBindsToType : true +SpacesBeforeTrailingComments : 1 +SpacesInAngles : false +SpacesInSquareBrackets : false +TabWidth : 4 +UseTab : ForIndentation +#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#- +#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#- diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..5e47b8d --- /dev/null +++ b/.gitattributes @@ -0,0 +1,6 @@ +# Auto detect text files and perform LF normalization +# http://git-scm.com/docs/gitattributes +* text=auto +.appveyor.yml -text eol=crlf +.appveyor-mingw.yml -text eol=crlf +ci-*.cmd -text eol=crlf \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..8d148cd --- /dev/null +++ b/.gitignore @@ -0,0 +1,49 @@ +tags +cscope.out +**/*.swp +**/*.swo +.swp +*.swp +.swo +.TMP +-.d +eastl_build_out +build_bench +bench.bat +build.bat +.p4config + +## CMake generated files +CMakeCache.txt +cmake_install.cmake + +## Patch files +*.patch + +## For Visual Studio Generated 
projects +*.sln +**/*.vcxproj +**/*.vcxproj.filters +*.VC.opendb +*.sdf +**/*.suo +**/*.user +.vs/* +**/Debug/* +CMakeFiles/* +EASTL.dir/** +RelWithDebInfo/* +Release/* +Win32/* +x64/* +MinSizeRel/* +build*/* +Testing/* +%ALLUSERSPROFILE%/* + +# Buck +/buck-out/ +/.buckd/ +/buckaroo/ +.buckconfig.local +BUCKAROO_DEPS diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..0b6afcc --- /dev/null +++ b/.gitmodules @@ -0,0 +1,18 @@ +[submodule "test/packages/EABase"] + path = test/packages/EABase + url = ../EABase.git +[submodule "test/packages/EAAssert"] + path = test/packages/EAAssert + url = ../EAAssert.git +[submodule "test/packages/EAMain"] + path = test/packages/EAMain + url = ../EAMain.git +[submodule "test/packages/EAStdC"] + path = test/packages/EAStdC + url = ../EAStdC.git +[submodule "test/packages/EATest"] + path = test/packages/EATest + url = ../EATest.git +[submodule "test/packages/EAThread"] + path = test/packages/EAThread + url = ../EAThread.git diff --git a/.p4ignore b/.p4ignore new file mode 100644 index 0000000..4bddd61 --- /dev/null +++ b/.p4ignore @@ -0,0 +1,4 @@ +/.git/ +tags +.gitignore +cscope.out diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..c451af6 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,83 @@ +language: cpp + +cache: + - ccache: true + +os: + - linux + - osx + - windows + +compiler: + - gcc + - clang + - msvc + +env: + - EASTL_CONFIG=Debug + - EASTL_CONFIG=Release + +addons: + apt: + sources: + - ubuntu-toolchain-r-test + - george-edison55-precise-backports + - llvm-toolchain-trusty-7 + packages: + - cmake + - cmake-data + - g++-7 + - clang-7 + +matrix: + include: + - compiler: clang "release build with clang to trigger MOJI check" + env: EASTL_CONFIG=Release USE_MOJI_CHECK=yes + os: linux + - compiler: msvc + env: EASTL_CONFIG=Release CXXFLAGS="/std:c++latest /Zc:char8_t" + os: windows + + exclude: + - os: osx + compiler: gcc + - os: osx + compiler: msvc + - os: linux + compiler: msvc + - 
os: windows + compiler: clang + - os: windows + compiler: gcc + +# Handle git submodules yourself +git: + submodules: false + +before_install: + - git submodule update --init + - if [[ "$CXX" == "g++" ]]; then export CC="gcc-7" ;fi + - if [[ "$CXX" == "g++" ]]; then export CXX="g++-7" ;fi + - if [[ "$CXX" == "clang++" && "${TRAVIS_OS_NAME}" != "osx" ]]; then export CC="clang-7" ;fi + - if [[ "$CXX" == "clang++" && "${TRAVIS_OS_NAME}" != "osx" ]]; then export CXX="clang++-7" ;fi + +install: +# MOJI check; exit 1 if non-ascii characters detected in C++ + - if [[ -n "$USE_MOJI_CHECK" && -n `git grep -P "[^[:ascii:]]" source test` ]]; then echo "Moji Detected" && exit 1 ;fi + - if [[ -n "$USE_MOJI_CHECK" ]]; then exit 0 ;fi + +before_script: + - mkdir build_$EASTL_CONFIG + - cd build_$EASTL_CONFIG + - cmake .. -DEASTL_BUILD_BENCHMARK:BOOL=ON -DEASTL_BUILD_TESTS:BOOL=ON + - cmake --build . --config $EASTL_CONFIG + +script: + # Run Tests + - cd $TRAVIS_BUILD_DIR/build_$EASTL_CONFIG/test + - ctest -C $EASTL_CONFIG -V || exit 1 + + # Run Benchmarks + - cd $TRAVIS_BUILD_DIR/build_$EASTL_CONFIG/benchmark + - ctest -C $EASTL_CONFIG -V || exit 1 + diff --git a/3RDPARTYLICENSES.TXT b/3RDPARTYLICENSES.TXT new file mode 100644 index 0000000..41fe473 --- /dev/null +++ b/3RDPARTYLICENSES.TXT @@ -0,0 +1,110 @@ +Additional licenses also apply to this software package as detailed below. + + + +HP STL comes with the following license: + +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 1994 +// Hewlett-Packard Company +// +// Permission to use, copy, modify, distribute and sell this software +// and its documentation for any purpose is hereby granted without fee, +// provided that the above copyright notice appear in all copies and +// that both that copyright notice and this permission notice appear +// in supporting documentation. 
Hewlett-Packard Company makes no +// representations about the suitability of this software for any +// purpose. It is provided "as is" without express or implied warranty. +/////////////////////////////////////////////////////////////////////////////// + + + +libc++ comes with the following license: + +============================================================================== +libc++ License +============================================================================== + +The libc++ library is dual licensed under both the University of Illinois +"BSD-Like" license and the MIT license. As a user of this code you may choose +to use it under either license. As a contributor, you agree to allow your code +to be used under both. + +Full text of the relevant licenses is included below. + +============================================================================== + +University of Illinois/NCSA +Open Source License + +Copyright (c) 2009-2015 by the contributors listed at +http://llvm.org/svn/llvm-project/libcxx/trunk/CREDITS.TXT + +All rights reserved. + +Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. 
+ + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. + +============================================================================== + +Copyright (c) 2009-2014 by the contributors listed at +http://llvm.org/svn/llvm-project/libcxx/trunk/CREDITS.TXT + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +============================================================================== + +*No express or implied license to use PlayStation®4 libraries included. +PlayStation®4 development tools and libraries are subject to separate license +with Sony Interactive Entertainment LLC. + +============================================================================== + diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..b8171cd --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,50 @@ +#------------------------------------------------------------------------------------------- +# Copyright (C) Electronic Arts Inc. All rights reserved. +#------------------------------------------------------------------------------------------- +cmake_minimum_required(VERSION 3.1) +project(EASTL CXX) + +#------------------------------------------------------------------------------------------- +# Options +#------------------------------------------------------------------------------------------- +option(EASTL_BUILD_BENCHMARK "Enable generation of build files for benchmark" OFF) +option(EASTL_BUILD_TESTS "Enable generation of build files for tests" OFF) + +#------------------------------------------------------------------------------------------- +# Compiler Flags +#------------------------------------------------------------------------------------------- +set (CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/scripts/CMake") +include(CommonCppFlags) + +#------------------------------------------------------------------------------------------- +# Library definition +#------------------------------------------------------------------------------------------- +file(GLOB EASTL_SOURCES "source/*.cpp") +add_library(EASTL ${EASTL_SOURCES}) + +if(EASTL_BUILD_BENCHMARK) + add_subdirectory(benchmark) +endif() + +if(EASTL_BUILD_TESTS) + add_subdirectory(test) +endif() + 
+#------------------------------------------------------------------------------------------- +# Defines +#------------------------------------------------------------------------------------------- +add_definitions(-D_CHAR16T) +add_definitions(-D_CRT_SECURE_NO_WARNINGS) +add_definitions(-D_SCL_SECURE_NO_WARNINGS) +add_definitions(-DEASTL_OPENSOURCE=1) + +#------------------------------------------------------------------------------------------- +# Include dirs +#------------------------------------------------------------------------------------------- +target_include_directories(EASTL PUBLIC include) + +#------------------------------------------------------------------------------------------- +# Dependencies +#------------------------------------------------------------------------------------------- +target_link_libraries(EASTL EABase) + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..2ec4df4 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,83 @@ +## Contributing + +Before you can contribute, EA must have a Contributor License Agreement (CLA) on file that has been signed by each contributor. +You can sign here: [Go to CLA](https://electronicarts.na1.echosign.com/public/esignWidget?wid=CBFCIBAA3AAABLblqZhByHRvZqmltGtliuExmuV-WNzlaJGPhbSRg2ufuPsM3P0QmILZjLpkGslg24-UJtek*) + +### Pull Request Policy + +All code contributions to EASTL are submitted as [Github pull requests](https://help.github.com/articles/using-pull-requests/). All pull requests will be reviewed by an EASTL maintainer according to the guidelines found in the next section. 
+ +Your pull request should: + +* merge cleanly +* come with tests + * tests should be minimal and stable + * fail before your fix is applied +* pass the test suite +* code formatting is encoded in clang format + * limit using clang format on new code + * do not deviate from style already established in the files + +### Getting the Repository + +EASTL uses git submodules for its dependencies as they are seperate git repositories. Recursive clones will continue until HD space is exhausted unless they are manually limited. +It is recommended to use the following to get the source: + +```bash +git clone https://github.com/electronicarts/EASTL +cd EASTL +git submodule update --init +``` + +### Running the Unit Tests + +EASTL uses CMake as its build system. + +* Create and navigate to "your_build_folder": + * mkdir your_build_folder && cd your_build_folder +* Generate build scripts: + * cmake eastl_source_folder -DEASTL_BUILD_TESTS:BOOL=ON +* Build unit tests for "your_config": + * cmake --build . --config your_config +* Run the unit tests for "your_config" from the test folder: + * cd test && ctest -C your_config + +Here is an example batch file. +```batch +set build_folder=out +mkdir %build_folder% +pushd %build_folder% +call cmake .. -DEASTL_BUILD_TESTS:BOOL=ON -DEASTL_BUILD_BENCHMARK:BOOL=OFF +call cmake --build . --config Release +call cmake --build . --config Debug +call cmake --build . --config RelWithDebInfo +call cmake --build . --config MinSizeRel +pushd test +call ctest -C Release +call ctest -C Debug +call ctest -C RelWithDebInfo +call ctest -C MinSizeRel +popd +popd +``` + +Here is an example bash file +```bash +build_folder=out +mkdir $build_folder +pushd $build_folder +cmake .. -DEASTL_BUILD_TESTS:BOOL=ON -DEASTL_BUILD_BENCHMARK:BOOL=OFF +cmake --build . --config Release +cmake --build . --config Debug +cmake --build . --config RelWithDebInfo +cmake --build . 
--config MinSizeRel +pushd test +ctest -C Release +ctest -C Debug +ctest -C RelWithDebInfo +ctest -C MinSizeRel +popd +popd +``` + +The value of EASTL_BUILD_BENCHMARK can be toggled to `ON` in order to build projects that include the benchmark program. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..1b112db --- /dev/null +++ b/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, Electronic Arts +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..da9ea48 --- /dev/null +++ b/README.md @@ -0,0 +1,64 @@ +# EA Standard Template Library + +[![Build Status](https://travis-ci.org/electronicarts/EASTL.svg?branch=master)](https://travis-ci.org/electronicarts/EASTL) + +EASTL stands for Electronic Arts Standard Template Library. It is a C++ template library of containers, algorithms, and iterators useful for runtime and tool development across multiple platforms. It is a fairly extensive and robust implementation of such a library and has an emphasis on high performance above all other considerations. + + +## Usage + +If you are familiar with the C++ STL or have worked with other templated container/algorithm libraries, you probably don't need to read this. If you have no familiarity with C++ templates at all, then you probably will need more than this document to get you up to speed. In this case, you need to understand that templates, when used properly, are powerful vehicles for the ease of creation of optimized C++ code. A description of C++ templates is outside the scope of this documentation, but there is plenty of such documentation on the Internet. + +EASTL is suitable for any tools and shipping applications where the functionality of EASTL is useful. Modern compilers are capable of producing good code with templates and many people are using them in both current generation and future generation applications on multiple platforms from embedded systems to servers and mainframes. + +## Package Managers + +You can download and install EASTL using the [Conan](https://github.com/conan-io/conan) package manager: + + conan install eastl/3.15.00@ + +The EASTL package in conan is kept up to date by Conan team members and community contributors. If the version is out-of-date, please [create an issue or pull request](https://github.com/conan-io/conan-center-index) on the Conan Center Index repository. 
+ + +You can download and install EASTL using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager: + + git clone https://github.com/Microsoft/vcpkg.git + cd vcpkg + ./bootstrap-vcpkg.sh + ./vcpkg integrate install + vcpkg install eastl + +The EASTL port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository. + + +## Documentation + +Please see [EASTL Introduction](doc/Introduction.md). + + +## Compiling sources + +Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on compiling and testing the source. + +## Credits + +EASTL was created by Paul Pedriana and he maintained the project for roughly 10 years. + +Roberto Parolin is the current EASTL owner within EA and is responsible for the open source repository. + +Significant EASTL contributions were made by (in alphabetical order): + +* Avery Lee +* Colin Andrews +* JP Flouret +* Liam Mitchell +* Matt Newport +* Max Winkler +* Paul Pedriana +* Roberto Parolin +* Simon Everett + + +## License + +Modified BSD License (3-Clause BSD license) see the file LICENSE in the project root. diff --git a/_config.yml b/_config.yml new file mode 100644 index 0000000..2f7efbe --- /dev/null +++ b/_config.yml @@ -0,0 +1 @@ +theme: jekyll-theme-minimal \ No newline at end of file diff --git a/benchmark/CMakeLists.txt b/benchmark/CMakeLists.txt new file mode 100644 index 0000000..94bc971 --- /dev/null +++ b/benchmark/CMakeLists.txt @@ -0,0 +1,93 @@ +#------------------------------------------------------------------------------------------- +# Copyright (C) Electronic Arts Inc. All rights reserved. 
+#------------------------------------------------------------------------------------------- + +#------------------------------------------------------------------------------------------- +# CMake info +#------------------------------------------------------------------------------------------- +cmake_minimum_required(VERSION 3.1) +project(EASTLBenchmarks CXX) +include(CTest) + +#------------------------------------------------------------------------------------------- +# Defines +#------------------------------------------------------------------------------------------- +add_definitions(-D_CHAR16T) + +#------------------------------------------------------------------------------------------- +# Include directories +#------------------------------------------------------------------------------------------- +include_directories(source) +include_directories(../test/source) + +#------------------------------------------------------------------------------------------- +# Compiler Flags +#------------------------------------------------------------------------------------------- +set (CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/../scripts/CMake") +include(CommonCppFlags) + +# Libstdc++ calls new internally, since DLLs have no weak symbols, runtime symbol resolution fails and EASTL's new is not called. +# Linking against static libstdc++ fixes this. +# See https://github.com/electronicarts/EASTL/issues/40 for more info. 
+if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND MINGW) + set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} -static-libstdc++") + set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -static-libstdc++") + set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} -static-libstdc++") +endif() + +if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND CMAKE_BUILD_TYPE MATCHES "MinSizeRel" AND MINGW) + message(FATAL_ERROR "FIXME: MinSizeRel on MingW-w64's Clang fails to link.") +endif() + +# The benchmark suite fails to compile if char8_t is enabled, so disable it. +if (EASTL_NO_CHAR8T_FLAG) + add_compile_options(${EASTL_NO_CHAR8T_FLAG}) +endif() + +#------------------------------------------------------------------------------------------- +# Source files +#------------------------------------------------------------------------------------------- +file(GLOB EASTLBENCHMARK_SOURCES "source/*.cpp" "../test/source/EASTLTestAllocator.cpp" "../test/source/EASTLTest.cpp") +set(SOURCES ${EASTLBENCHMARK_SOURCES}) + +#------------------------------------------------------------------------------------------- +# Defines +#------------------------------------------------------------------------------------------- +add_definitions(-D_CRT_SECURE_NO_WARNINGS) +add_definitions(-D_SCL_SECURE_NO_WARNINGS) +add_definitions(-DEASTL_THREAD_SUPPORT_AVAILABLE=0) +add_definitions(-DEASTL_OPENSOURCE=1) +add_definitions(-D_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS) # silence std::hash_map deprecation warnings + +if(NOT EASTL_BUILD_TESTS) + add_subdirectory(../test/packages/EAStdC ../test/EAStdC) + add_subdirectory(../test/packages/EAAssert ../test/EAAssert) + add_subdirectory(../test/packages/EAThread ../test/EAThread) + add_subdirectory(../test/packages/EATest ../test/EATest) + add_subdirectory(../test/packages/EAMain ../test/EAMain) +endif() + 
+#------------------------------------------------------------------------------------------- +# Executable definition +#------------------------------------------------------------------------------------------- +add_executable(EASTLBenchmarks ${EASTLBENCHMARK_SOURCES}) + +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) + +set(EASTLBenchmark_Libraries + EABase + EAAssert + EAMain + EAThread + EAStdC + EASTL + EATest) +target_link_libraries(EASTLBenchmarks ${EASTLBenchmark_Libraries} Threads::Threads) + +#------------------------------------------------------------------------------------------- +# Run Unit tests and verify the results. +#------------------------------------------------------------------------------------------- +add_test(EASTLBenchmarkRuns EASTLBenchmarks) +set_tests_properties (EASTLBenchmarkRuns PROPERTIES PASS_REGULAR_EXPRESSION "RETURNCODE=0") + diff --git a/benchmark/meson.build b/benchmark/meson.build new file mode 100644 index 0000000..e9b67df --- /dev/null +++ b/benchmark/meson.build @@ -0,0 +1,2 @@ + +subdir('source') \ No newline at end of file diff --git a/benchmark/source/BenchmarkAlgorithm.cpp b/benchmark/source/BenchmarkAlgorithm.cpp new file mode 100644 index 0000000..57e155e --- /dev/null +++ b/benchmark/source/BenchmarkAlgorithm.cpp @@ -0,0 +1,1241 @@ +///////////////////////////////////////////////////////////////////////////// +// BenchmarkAlgorithm.cpp +// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS() +#include +#include +#include +#include +#include +#include +EA_RESTORE_ALL_VC_WARNINGS() + +#ifdef _MSC_VER + #pragma warning(disable: 4996) // Function call with parameters that may be unsafe +#endif + + +using namespace EA; + + +typedef std::vector StdVectorUChar; +typedef eastl::vector EaVectorUChar; + +typedef std::vector StdVectorSChar; +typedef eastl::vector EaVectorSChar; + +typedef std::vector StdVectorUint32; +typedef eastl::vector EaVectorUint32; + +typedef std::vector StdVectorUint64; +typedef eastl::vector EaVectorUint64; + +typedef std::vector StdVectorTO; +typedef eastl::vector EaVectorTO; + + +// We make a fake version of C++11 std::next, as some C++ compilers don't currently +// provide the C++11 next algorithm in their standard libraries. 
+namespace std__ +{ + template + inline InputIterator + next(InputIterator it, typename std::iterator_traits::difference_type n = 1) + { + std::advance(it, n); + return it; + } +} + + +namespace +{ + void TestFindEndStd(EA::StdC::Stopwatch& stopwatch, const std::string& sTest, const char* pSearchStringBegin, const char* pSearchStringEnd) + { + stopwatch.Restart(); + std::string::const_iterator it = std::find_end(sTest.begin(), sTest.end(), pSearchStringBegin, pSearchStringEnd); + stopwatch.Stop(); + if(it != sTest.end()) + sprintf(Benchmark::gScratchBuffer, "%c", *it); + } + + void TestFindEndEa(EA::StdC::Stopwatch& stopwatch, const eastl::string& sTest, const char* pSearchStringBegin, const char* pSearchStringEnd) + { + stopwatch.Restart(); + eastl::string::const_iterator it = eastl::find_end(sTest.begin(), sTest.end(), pSearchStringBegin, pSearchStringEnd); + stopwatch.Stop(); + if(it != sTest.end()) + sprintf(Benchmark::gScratchBuffer, "%c", *it); + } + + + + void TestSearchStd(EA::StdC::Stopwatch& stopwatch, const std::string& sTest, const char* pSearchStringBegin, const char* pSearchStringEnd) + { + stopwatch.Restart(); + std::string::const_iterator it = std::search(sTest.begin(), sTest.end(), pSearchStringBegin, pSearchStringEnd); + stopwatch.Stop(); + if(it != sTest.end()) + sprintf(Benchmark::gScratchBuffer, "%c", *it); + } + + void TestSearchEa(EA::StdC::Stopwatch& stopwatch, const eastl::string& sTest, const char* pSearchStringBegin, const char* pSearchStringEnd) + { + stopwatch.Restart(); + eastl::string::const_iterator it = eastl::search(sTest.begin(), sTest.end(), pSearchStringBegin, pSearchStringEnd); + stopwatch.Stop(); + if(it != sTest.end()) + sprintf(Benchmark::gScratchBuffer, "%c", *it); + } + + + + void TestSearchNStd(EA::StdC::Stopwatch& stopwatch, const std::string& sTest, int n, char c) + { + stopwatch.Restart(); + std::string::const_iterator it = std::search_n(sTest.begin(), sTest.end(), n, c); + stopwatch.Stop(); + if(it != sTest.end()) + 
sprintf(Benchmark::gScratchBuffer, "%c", *it); + } + + void TestSearchNEa(EA::StdC::Stopwatch& stopwatch, const eastl::string& sTest, int n, char c) + { + stopwatch.Restart(); + eastl::string::const_iterator it = eastl::search_n(sTest.begin(), sTest.end(), n, c); + stopwatch.Stop(); + if(it != sTest.end()) + sprintf(Benchmark::gScratchBuffer, "%c", *it); + } + + + + template + void TestUniqueStd(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + typename Container::iterator it = std::unique(c.begin(), c.end()); + stopwatch.Stop(); + c.erase(it, c.end()); + } + + template + void TestUniqueEa(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + typename Container::iterator it = eastl::unique(c.begin(), c.end()); + stopwatch.Stop(); + c.erase(it, c.end()); + } + + + + template + void TestMinElementStd(EA::StdC::Stopwatch& stopwatch, const Container& c) + { + stopwatch.Restart(); + const typename Container::const_iterator it = std::min_element(c.begin(), c.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &it); + } + + template + void TestMinElementEa(EA::StdC::Stopwatch& stopwatch, const Container& c) + { + stopwatch.Restart(); + const typename Container::const_iterator it = eastl::min_element(c.begin(), c.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &it); + } + + + + template + void TestCountStd(EA::StdC::Stopwatch& stopwatch, const Container& c) + { + stopwatch.Restart(); + const typename Container::difference_type n = std::count(c.begin(), c.end(), (typename Container::value_type)999999); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%d", (int)n); + } + + template + void TestCountEa(EA::StdC::Stopwatch& stopwatch, const Container& c) + { + stopwatch.Restart(); + const typename Container::difference_type n = eastl::count(c.begin(), c.end(), (typename Container::value_type)999999); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%d", (int)n); + } + + + + 
template + void TestAdjacentFindStd(EA::StdC::Stopwatch& stopwatch, const Container& c) + { + stopwatch.Restart(); + const typename Container::const_iterator it = std::adjacent_find(c.begin(), c.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &it); + } + + template + void TestAdjacentFindEa(EA::StdC::Stopwatch& stopwatch, const Container& c) + { + stopwatch.Restart(); + const typename Container::const_iterator it = eastl::adjacent_find(c.begin(), c.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &it); + } + + + + template + void TestLowerBoundStd(EA::StdC::Stopwatch& stopwatch, const Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd) + { + + stopwatch.Restart(); + while(pBegin != pEnd) + { + typename Container::const_iterator it = std::lower_bound(c.begin(), c.end(), *pBegin++); + Benchmark::DoNothing(&it); + } + stopwatch.Stop(); + } + + template + void TestLowerBoundEa(EA::StdC::Stopwatch& stopwatch, Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd) + { + stopwatch.Restart(); + while(pBegin != pEnd) + { + typename Container::const_iterator it = eastl::lower_bound(c.begin(), c.end(), *pBegin++); + Benchmark::DoNothing(&it); + } + stopwatch.Stop(); + } + + + + template + void TestUpperBoundStd(EA::StdC::Stopwatch& stopwatch, const Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd) + { + stopwatch.Restart(); + while(pBegin != pEnd) + { + typename Container::const_iterator it = std::upper_bound(c.begin(), c.end(), *pBegin++); + Benchmark::DoNothing(&it); + } + stopwatch.Stop(); + } + + template + void TestUpperBoundEa(EA::StdC::Stopwatch& stopwatch, Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd) + { + stopwatch.Restart(); + while(pBegin != pEnd) + { + typename Container::const_iterator it = 
eastl::upper_bound(c.begin(), c.end(), *pBegin++); + Benchmark::DoNothing(&it); + } + stopwatch.Stop(); + } + + + + template + void TestEqualRangeStd(EA::StdC::Stopwatch& stopwatch, const Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd) + { + stopwatch.Restart(); + while(pBegin != pEnd) + { + std::pair itPair = std::equal_range(c.begin(), c.end(), *pBegin++); + + Benchmark::DoNothing(&itPair); + } + stopwatch.Stop(); + } + + template + void TestEqualRangeEa(EA::StdC::Stopwatch& stopwatch, Container& c, const typename Container::value_type* pBegin, const typename Container::value_type* pEnd) + { + stopwatch.Restart(); + while(pBegin != pEnd) + { + eastl::pair itPair = eastl::equal_range(c.begin(), c.end(), *pBegin++); + Benchmark::DoNothing(&itPair); + } + stopwatch.Stop(); + } + + + + template + void TestLexicographicalCompareStd(EA::StdC::Stopwatch& stopwatch, Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2) + { + stopwatch.Restart(); + const bool bResult = std::lexicographical_compare(first1, last1, first2, last2); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%d", bResult ? (int)1 : (int)0); + } + + template + void TestLexicographicalCompareEa(EA::StdC::Stopwatch& stopwatch, Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2) + { + stopwatch.Restart(); + const bool bResult = eastl::lexicographical_compare(first1, last1, first2, last2); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%d", bResult ? 
(int)1 : (int)0); + } + + + + template + void TestCopyStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, OutputIterator result) + { + stopwatch.Restart(); + std::copy(first, last, result); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%d", (int)*first); + } + + template + void TestCopyEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, OutputIterator result) + { + stopwatch.Restart(); + eastl::copy(first, last, result); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%d", (int)*first); + } + + + + template + void TestCopyBackwardStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, OutputIterator result) + { + stopwatch.Restart(); + std::copy_backward(first, last, result); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%d", (int)*first); + } + + template + void TestCopyBackwardEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, OutputIterator result) + { + stopwatch.Restart(); + eastl::copy_backward(first, last, result); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%d", (int)*first); + } + + + + template + void TestFillStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, const Value& v) + { + stopwatch.Restart(); + std::fill(first, last, v); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &*first); + } + + template + void TestFillEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, const Value& v) + { + stopwatch.Restart(); + eastl::fill(first, last, v); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &*first); + } + + + + template + void TestFillNStd(EA::StdC::Stopwatch& stopwatch, Iterator first, int n, const Value& v) + { + stopwatch.Restart(); + std::fill_n(first, n, v); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &*first); + } + + template + void TestFillNEa(EA::StdC::Stopwatch& stopwatch, Iterator first, int n, const Value& v) + { + stopwatch.Restart(); + eastl::fill_n(first, n, v); 
+ stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &*first); + } + + + + template + void TestReverseStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last) + { + stopwatch.Restart(); + std::reverse(first, last); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &*first); + } + + template + void TestReverseEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last) + { + stopwatch.Restart(); + eastl::reverse(first, last); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &*first); + } + + + + template + void TestRotateStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator middle, Iterator last) + { + stopwatch.Restart(); + std::rotate(first, middle, last); // C++11 specifies that rotate has a return value, but not all std implementations return it. + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &*first); + } + + template + void TestRotateEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator middle, Iterator last) + { + stopwatch.Restart(); + eastl::rotate(first, middle, last); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &*first); + } + + template + void TestMergeStd(EA::StdC::Stopwatch& stopwatch, Iterator firstIn1, Iterator lastIn1, Iterator firstIn2, Iterator lastIn2, Iterator out) + { + stopwatch.Restart(); + std::merge(firstIn1, lastIn1, firstIn2, lastIn2, out); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &*out); + } + + template + void TestMergeEa(EA::StdC::Stopwatch& stopwatch, Iterator firstIn1, Iterator lastIn1, Iterator firstIn2, Iterator lastIn2, Iterator out) + { + stopwatch.Restart(); + eastl::merge(firstIn1, lastIn1, firstIn2, lastIn2, out); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p", &*out); + } +} // namespace + + + + +void BenchmarkAlgorithm1(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2) +{ + { + std::string sTestStd; + eastl::string sTestEa; + const char* 
pSearchString1Begin = "AAA"; + const char* pSearchString1End = pSearchString1Begin + strlen(pSearchString1Begin); + const char* pSearchString2Begin = "BBB"; // This is something that doesn't exist searched string. + const char* pSearchString2End = pSearchString2Begin + strlen(pSearchString2Begin); + const char* pSearchString3Begin = "CCC"; + const char* pSearchString3End = pSearchString3Begin + strlen(pSearchString3Begin); + + for(int i = 0; i < 10000; i++) + sTestStd += "This is a test of the find_end algorithm. "; + sTestEa.assign(sTestStd.data(), (eastl_size_t)sTestStd.length()); + + for(int i = 0; i < 2; i++) + { + /////////////////////////////// + // Test find_end + /////////////////////////////// + + sTestStd.insert(sTestStd.size() * 15 / 16, pSearchString1Begin); + sTestEa.insert (sTestEa.size() * 15 / 16, pSearchString1Begin); + TestFindEndStd(stopwatch1, sTestStd, pSearchString1Begin, pSearchString1End); + TestFindEndEa (stopwatch2, sTestEa, pSearchString1Begin, pSearchString1End); + + if(i == 1) + Benchmark::AddResult("algorithm/find_end/string/end", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + sTestStd.insert(sTestStd.size() / 2, pSearchString2Begin); + sTestEa.insert (sTestEa.size() / 2, pSearchString2Begin); + TestFindEndStd(stopwatch1, sTestStd, pSearchString2Begin, pSearchString2End); + TestFindEndEa (stopwatch2, sTestEa, pSearchString2Begin, pSearchString2End); + + if(i == 1) + Benchmark::AddResult("algorithm/find_end/string/middle", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFindEndStd(stopwatch1, sTestStd, pSearchString3Begin, pSearchString3End); + TestFindEndEa (stopwatch2, sTestEa, pSearchString3Begin, pSearchString3End); + + if(i == 1) + Benchmark::AddResult("algorithm/find_end/string/none", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test search + 
/////////////////////////////// + TestSearchStd(stopwatch1, sTestStd, pSearchString1Begin, pSearchString1End); + TestSearchEa (stopwatch2, sTestEa, pSearchString1Begin, pSearchString1End); + + if(i == 1) + Benchmark::AddResult("algorithm/search/string", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test search_n + /////////////////////////////// + TestSearchNStd(stopwatch1, sTestStd, 3, 'A'); + TestSearchNEa (stopwatch2, sTestEa, 3, 'A'); + + if(i == 1) + Benchmark::AddResult("algorithm/search_n/string", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test adjacent_find + /////////////////////////////// + + } + } +} + + +void BenchmarkAlgorithm2(EASTLTest_Rand& rng, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2) +{ + { + StdVectorUint32 stdVectorUint32; + EaVectorUint32 eaVectorUint32; + + StdVectorUint64 stdVectorUint64; + EaVectorUint64 eaVectorUint64; + + StdVectorTO stdVectorTO; + EaVectorTO eaVectorTO; + + for(int i = 0; i < 2; i++) + { + stdVectorUint32.clear(); + eaVectorUint32.clear(); + + for(int j = 0; j < 100000; j++) + { + stdVectorUint32.push_back(j); + eaVectorUint32.push_back(j); + stdVectorUint64.push_back(j); + eaVectorUint64.push_back(j); + stdVectorTO.push_back(TestObject(j)); + eaVectorTO.push_back(TestObject(j)); + + if((rng() % 16) == 0) + { + stdVectorUint32.push_back(i); + eaVectorUint32.push_back(i); + stdVectorUint64.push_back(j); + eaVectorUint64.push_back(j); + stdVectorTO.push_back(TestObject(j)); + eaVectorTO.push_back(TestObject(j)); + + if((rng() % 16) == 0) + { + stdVectorUint32.push_back(i); + eaVectorUint32.push_back(i); + stdVectorUint64.push_back(j); + eaVectorUint64.push_back(j); + stdVectorTO.push_back(TestObject(j)); + eaVectorTO.push_back(TestObject(j)); + } + } + } + + + /////////////////////////////// + // Test unique + /////////////////////////////// 
+ + TestUniqueStd(stopwatch1, stdVectorUint32); + TestUniqueEa (stopwatch2, eaVectorUint32); + + if(i == 1) + Benchmark::AddResult("algorithm/unique/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestUniqueStd(stopwatch1, stdVectorUint64); + TestUniqueEa (stopwatch2, eaVectorUint64); + + if(i == 1) + Benchmark::AddResult("algorithm/unique/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestUniqueStd(stopwatch1, stdVectorTO); + TestUniqueEa (stopwatch2, eaVectorTO); + + if(i == 1) + Benchmark::AddResult("algorithm/unique/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test min_element + /////////////////////////////// + + TestMinElementStd(stopwatch1, stdVectorTO); + TestMinElementEa (stopwatch2, eaVectorTO); + + if(i == 1) + Benchmark::AddResult("algorithm/min_element/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test count + /////////////////////////////// + + TestCountStd(stopwatch1, stdVectorUint64); + TestCountEa (stopwatch2, eaVectorUint64); + + if(i == 1) + Benchmark::AddResult("algorithm/count/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test adjacent_find + /////////////////////////////// + + // Due to the above unique testing, the container should container unique elements. Let's change that. 
+ stdVectorTO[stdVectorTO.size() - 2] = stdVectorTO[stdVectorTO.size() - 1]; + eaVectorTO[eaVectorTO.size() - 2] = eaVectorTO[eaVectorTO.size() - 1]; + TestAdjacentFindStd(stopwatch1, stdVectorTO); + TestAdjacentFindEa (stopwatch2, eaVectorTO); + + if(i == 1) + Benchmark::AddResult("algorithm/adj_find/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test lower_bound + /////////////////////////////// + + // Sort the containers for the following tests. + std::sort(stdVectorTO.begin(), stdVectorTO.end()); + eaVectorTO.assign(&stdVectorTO[0], &stdVectorTO[0] + stdVectorTO.size()); + + TestLowerBoundStd(stopwatch1, stdVectorTO, &stdVectorTO[0], &stdVectorTO[0] + stdVectorTO.size()); + TestLowerBoundEa (stopwatch2, eaVectorTO, &eaVectorTO[0], &eaVectorTO[0] + eaVectorTO.size()); + + if(i == 1) + Benchmark::AddResult("algorithm/lower_bound/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test upper_bound + /////////////////////////////// + + std::sort(stdVectorUint32.begin(), stdVectorUint32.end()); + eaVectorUint32.assign(&stdVectorUint32[0], &stdVectorUint32[0] + stdVectorUint32.size()); + + TestUpperBoundStd(stopwatch1, stdVectorUint32, &stdVectorUint32[0], &stdVectorUint32[0] + stdVectorUint32.size()); + TestUpperBoundEa (stopwatch2, eaVectorUint32, &eaVectorUint32[0], &eaVectorUint32[0] + eaVectorUint32.size()); + + if(i == 1) + Benchmark::AddResult("algorithm/upper_bound/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test equal_range + /////////////////////////////// + + // VS2010 (and later versions?) is extremely slow executing this in debug builds. It can take minutes for a + // single TestEqualRangeStd call to complete. It's so slow that it's nearly pointless to execute. 
+ #if !defined(_MSC_VER) || (_MSC_VER < 1600) || !defined(_ITERATOR_DEBUG_LEVEL) || (_ITERATOR_DEBUG_LEVEL < 2) + std::sort(stdVectorUint64.begin(), stdVectorUint64.end()); + eaVectorUint64.assign(&stdVectorUint64[0], &stdVectorUint64[0] + stdVectorUint64.size()); + + TestEqualRangeStd(stopwatch1, stdVectorUint64, &stdVectorUint64[0], &stdVectorUint64[0] + stdVectorUint64.size()); + TestEqualRangeEa (stopwatch2, eaVectorUint64, &eaVectorUint64[0], &eaVectorUint64[0] + eaVectorUint64.size()); + + if(i == 1) + Benchmark::AddResult("algorithm/equal_range/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + #endif + } + } +} + + +void BenchmarkAlgorithm3(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2) +{ + { + StdVectorUChar stdVectorUChar1(100000); + StdVectorUChar stdVectorUChar2(100000); + EaVectorUChar eaVectorUChar1(100000); + EaVectorUChar eaVectorUChar2(100000); + + StdVectorSChar stdVectorSChar1(100000); + StdVectorSChar stdVectorSChar2(100000); + EaVectorSChar eaVectorSChar1(100000); + EaVectorSChar eaVectorSChar2(100000); + + StdVectorTO stdVectorTO1(100000); + StdVectorTO stdVectorTO2(100000); + EaVectorTO eaVectorTO1(100000); + EaVectorTO eaVectorTO2(100000); + + // All these containers should have values of zero in them. 
+ + for(int i = 0; i < 2; i++) + { + /////////////////////////////// + // Test lexicographical_compare + /////////////////////////////// + + TestLexicographicalCompareStd(stopwatch1, stdVectorUChar1.begin(), stdVectorUChar1.end(), stdVectorUChar2.begin(), stdVectorUChar2.end()); + TestLexicographicalCompareEa (stopwatch2, eaVectorUChar1.begin(), eaVectorUChar2.end(), eaVectorUChar2.begin(), eaVectorUChar2.end()); + + if(i == 1) + Benchmark::AddResult("algorithm/lex_cmp/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestLexicographicalCompareStd(stopwatch1, &stdVectorSChar1[0], &stdVectorSChar1[0] + stdVectorSChar1.size(), &stdVectorSChar2[0], &stdVectorSChar2[0] + stdVectorSChar2.size()); + TestLexicographicalCompareEa (stopwatch2, &eaVectorSChar1[0], &eaVectorSChar1[0] + eaVectorSChar1.size(), &eaVectorSChar2[0], &eaVectorSChar2[0] + eaVectorSChar2.size()); + + if(i == 1) + Benchmark::AddResult("algorithm/lex_cmp/schar[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestLexicographicalCompareStd(stopwatch1, stdVectorTO1.begin(), stdVectorTO1.end(), stdVectorTO2.begin(), stdVectorTO2.end()); + TestLexicographicalCompareEa (stopwatch2, eaVectorTO1.begin(), eaVectorTO1.end(), eaVectorTO2.begin(), eaVectorTO2.end()); + + if(i == 1) + Benchmark::AddResult("algorithm/lex_cmp/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + } + +} + + +void BenchmarkAlgorithm4(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2) +{ + { + std::vector stdVectorUint321(10000); + std::vector stdVectorUint322(10000); + eastl::vector eaVectorUint321(10000); + eastl::vector eaVectorUint322(10000); + + std::vector stdVectorUint64(100000); + eastl::vector eaVectorUint64(100000); + + + for(int i = 0; i < 2; i++) + { + /////////////////////////////// + // Test copy + /////////////////////////////// + + TestCopyStd(stopwatch1, 
stdVectorUint321.begin(), stdVectorUint321.end(), stdVectorUint322.begin()); + TestCopyEa (stopwatch2, eaVectorUint321.begin(), eaVectorUint321.end(), eaVectorUint322.begin()); + + if(i == 1) + Benchmark::AddResult("algorithm/copy/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test copy_backward + /////////////////////////////// + + TestCopyBackwardStd(stopwatch1, stdVectorUint321.begin(), stdVectorUint321.end(), stdVectorUint322.end()); + TestCopyBackwardEa (stopwatch2, eaVectorUint321.begin(), eaVectorUint321.end(), eaVectorUint322.end()); + + if(i == 1) + Benchmark::AddResult("algorithm/copy_backward/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test fill + /////////////////////////////// + + TestFillStd(stopwatch1, stdVectorUint64.begin(), stdVectorUint64.end(), UINT64_C(37)); + TestFillEa (stopwatch2, eaVectorUint64.begin(), eaVectorUint64.end(), UINT64_C(37)); + TestFillStd(stopwatch1, stdVectorUint64.begin(), stdVectorUint64.end(), UINT64_C(37)); // Intentionally do this a second time, as we are finding + TestFillEa (stopwatch2, eaVectorUint64.begin(), eaVectorUint64.end(), UINT64_C(37)); // the results are inconsistent otherwise. 
+ if(EA::StdC::Memcheck64(&eaVectorUint64[0], UINT64_C(37), eaVectorUint64.size())) + EA::UnitTest::Report("eastl algorithm 64 bit fill failure."); + + if(i == 1) + Benchmark::AddResult("algorithm/fill/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test fill_n + /////////////////////////////// + + TestFillNStd(stopwatch1, stdVectorUint64.begin(), (int)stdVectorUint64.size(), UINT64_C(37)); + TestFillNEa (stopwatch2, eaVectorUint64.begin(), (int) eaVectorUint64.size(), UINT64_C(37)); + TestFillNStd(stopwatch1, stdVectorUint64.begin(), (int)stdVectorUint64.size(), UINT64_C(37)); // Intentionally do this a second time, as we are finding + TestFillNEa (stopwatch2, eaVectorUint64.begin(), (int) eaVectorUint64.size(), UINT64_C(37)); // the results are inconsistent otherwise. + + if(i == 1) + Benchmark::AddResult("algorithm/fill_n/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + } +} + + +void BenchmarkAlgorithm5(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2) +{ + { + std::vector stdVectorVoid(100000); + eastl::vector eaVectorVoid(100000); + + std::vector stdVectorChar(100000); + eastl::vector eaVectorChar(100000); + + std::vector stdVectorBool(100000); + eastl::vector eaVectorBool(100000); + + for(int i = 0; i < 2; i++) + { + TestFillStd(stopwatch1, stdVectorVoid.begin(), stdVectorVoid.end(), (void*)NULL); + TestFillEa (stopwatch2, eaVectorVoid.begin(), eaVectorVoid.end(), (void*)NULL); + + if(i == 1) + Benchmark::AddResult("algorithm/fill/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFillStd(stopwatch1, &stdVectorChar[0], &stdVectorChar[0] + stdVectorChar.size(), 'd'); // Intentionally use ' ' and not casted to any type. 
+ TestFillEa (stopwatch2, eaVectorChar.data(), eaVectorChar.data() + eaVectorChar.size(), 'd'); + TestFillStd(stopwatch1, &stdVectorChar[0], &stdVectorChar[0] + stdVectorChar.size(), 'd'); // Intentionally do this a second time, as we are finding + TestFillEa (stopwatch2, eaVectorChar.data(), eaVectorChar.data() + eaVectorChar.size(), 'd'); // the results are inconsistent otherwise. + + if(i == 1) + Benchmark::AddResult("algorithm/fill/char[]/'d'", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFillStd(stopwatch1, stdVectorChar.begin(), stdVectorChar.end(), (char)'d'); + TestFillEa (stopwatch2, eaVectorChar.begin(), eaVectorChar.end(), (char)'d'); + TestFillStd(stopwatch1, stdVectorChar.begin(), stdVectorChar.end(), (char)'d'); // Intentionally do this a second time, as we are finding + TestFillEa (stopwatch2, eaVectorChar.begin(), eaVectorChar.end(), (char)'d'); // the results are inconsistent otherwise. + + if(i == 1) + Benchmark::AddResult("algorithm/fill/vector/'d'", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFillStd(stopwatch1, stdVectorChar.begin(), stdVectorChar.end(), (char)0); + TestFillEa (stopwatch2, eaVectorChar.begin(), eaVectorChar.end(), (char)0); + TestFillStd(stopwatch1, stdVectorChar.begin(), stdVectorChar.end(), (char)0); // Intentionally do this a second time, as we are finding + TestFillEa (stopwatch2, eaVectorChar.begin(), eaVectorChar.end(), (char)0); // the results are inconsistent otherwise. + + if(i == 1) + Benchmark::AddResult("algorithm/fill/vector/0", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFillStd(stopwatch1, eaVectorBool.data(), eaVectorBool.data() + eaVectorBool.size(), false); // Intentionally use eaVectorBool for the array. 
+ TestFillEa (stopwatch2, eaVectorBool.data(), eaVectorBool.data() + eaVectorBool.size(), false); + TestFillStd(stopwatch1, eaVectorBool.data(), eaVectorBool.data() + eaVectorBool.size(), false); + TestFillEa (stopwatch2, eaVectorBool.data(), eaVectorBool.data() + eaVectorBool.size(), false); + + if(i == 1) + Benchmark::AddResult("algorithm/fill/bool[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test fill_n + /////////////////////////////// + + TestFillNStd(stopwatch1, eaVectorChar.data(), (int) eaVectorChar.size(), 'd'); // Intentionally use eaVectorBool for the array. + TestFillNEa (stopwatch2, eaVectorChar.data(), (int) eaVectorChar.size(), 'd'); + TestFillNStd(stopwatch1, eaVectorChar.data(), (int) eaVectorChar.size(), 'd'); // Intentionally do this a second time, as we are finding + TestFillNEa (stopwatch2, eaVectorChar.data(), (int) eaVectorChar.size(), 'd'); // the results are inconsistent otherwise. + + if(i == 1) + Benchmark::AddResult("algorithm/fill_n/char[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFillNStd(stopwatch1, eaVectorBool.data(), (int) eaVectorBool.size(), false); // Intentionally use eaVectorBool for the array. + TestFillNEa (stopwatch2, eaVectorBool.data(), (int) eaVectorBool.size(), false); + TestFillNStd(stopwatch1, eaVectorBool.data(), (int) eaVectorBool.size(), false); // Intentionally do this a second time, as we are finding + TestFillNEa (stopwatch2, eaVectorBool.data(), (int) eaVectorBool.size(), false); // the results are inconsistent otherwise. 
+ + if(i == 1) + Benchmark::AddResult("algorithm/fill_n/bool[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + } +} + + +void BenchmarkAlgorithm6(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2) +{ + // We allocate this on the heap because some platforms don't always have enough stack space for this. + std::vector* pstdVectorLP1 = new std::vector(100); + std::vector* pstdVectorLP2 = new std::vector(100); + eastl::vector* peaVectorLP1 = new eastl::vector(100); + eastl::vector* peaVectorLP2 = new eastl::vector(100); + + // Aliases. + std::vector& stdVectorLP1 = *pstdVectorLP1; + std::vector& stdVectorLP2 = *pstdVectorLP2; + eastl::vector& eaVectorLP1 = *peaVectorLP1; + eastl::vector& eaVectorLP2 = *peaVectorLP2; + + for(int i = 0; i < 2; i++) + { + /////////////////////////////// + // Test copy + /////////////////////////////// + + TestCopyStd(stopwatch1, stdVectorLP1.begin(), stdVectorLP1.end(), stdVectorLP2.begin()); + TestCopyEa (stopwatch2, eaVectorLP1.begin(), eaVectorLP1.end(), eaVectorLP2.begin()); + + if(i == 1) + Benchmark::AddResult("algorithm/copy/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test copy_backward + /////////////////////////////// + + TestCopyBackwardStd(stopwatch1, stdVectorLP1.begin(), stdVectorLP1.end(), stdVectorLP2.end()); + TestCopyBackwardEa (stopwatch2, eaVectorLP1.begin(), eaVectorLP1.end(), eaVectorLP2.end()); + + if(i == 1) + Benchmark::AddResult("algorithm/copy_backward/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + + delete pstdVectorLP1; + delete pstdVectorLP2; + delete peaVectorLP1; + delete peaVectorLP2; +} + + +void BenchmarkAlgorithm7(EASTLTest_Rand& /*rng*/, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2) +{ + { + std::list stdListTO(10000); + eastl::list eaListTO(10000); + + std::vector 
stdVectorTO(10000); + eastl::vector eaVectorTO(10000); + + for(int i = 0; i < 2; i++) + { + /////////////////////////////// + // Test reverse + /////////////////////////////// + + TestReverseStd(stopwatch1, stdListTO.begin(), stdListTO.end()); + TestReverseEa (stopwatch2, eaListTO.begin(), eaListTO.end()); + + if(i == 1) + Benchmark::AddResult("algorithm/reverse/list", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestReverseStd(stopwatch1, stdVectorTO.begin(), stdVectorTO.end()); + TestReverseEa (stopwatch2, eaVectorTO.begin(), eaVectorTO.end()); + + if(i == 1) + Benchmark::AddResult("algorithm/reverse/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + } + + { + // Create some containers and seed them with incremental values (i.e. 0, 1, 2, 3...). + eastl::slist eaSlistIntLarge(10000); + eastl::generate(eaSlistIntLarge.begin(), eaSlistIntLarge.end(), GenerateIncrementalIntegers()); + + + std::vector< SizedPOD<32> > stdVectorLargePod32(10000); + for(std::vector< SizedPOD<32> >::iterator it = stdVectorLargePod32.begin(); it != stdVectorLargePod32.end(); ++it) + memset(&*it, 0, sizeof(SizedPOD<32>)); + eastl::vector< SizedPOD<32> > eaVectorLargePod32(10000); + for(eastl::vector< SizedPOD<32> >::iterator it = eaVectorLargePod32.begin(); it != eaVectorLargePod32.end(); ++it) + memset(&*it, 0, sizeof(SizedPOD<32>)); + + std::list stdListIntLarge(10000); + eastl::generate(stdListIntLarge.begin(), stdListIntLarge.end(), GenerateIncrementalIntegers()); + + eastl::list eaListIntLarge(10000); + eastl::generate(eaListIntLarge.begin(), eaListIntLarge.end(), GenerateIncrementalIntegers()); + + + std::vector stdVectorIntLarge(10000); + eastl::generate(stdVectorIntLarge.begin(), stdVectorIntLarge.end(), GenerateIncrementalIntegers()); + + eastl::vector eaVectorIntLarge(10000); + eastl::generate(eaVectorIntLarge.begin(), eaVectorIntLarge.end(), GenerateIncrementalIntegers()); + + + std::list 
stdListIntSmall(10); + eastl::generate(stdListIntLarge.begin(), stdListIntLarge.end(), GenerateIncrementalIntegers()); + + eastl::list eaListIntSmall(10); + eastl::generate(eaListIntLarge.begin(), eaListIntLarge.end(), GenerateIncrementalIntegers()); + + + std::vector stdVectorIntSmall(10); + eastl::generate(stdVectorIntLarge.begin(), stdVectorIntLarge.end(), GenerateIncrementalIntegers()); + + eastl::vector eaVectorIntSmall(10); + eastl::generate(eaVectorIntLarge.begin(), eaVectorIntLarge.end(), GenerateIncrementalIntegers()); + + + + std::list stdListTOLarge(10000); + eastl::generate(stdListTOLarge.begin(), stdListTOLarge.end(), GenerateIncrementalIntegers()); + + eastl::list eaListTOLarge(10000); + eastl::generate(eaListTOLarge.begin(), eaListTOLarge.end(), GenerateIncrementalIntegers()); + + + std::vector stdVectorTOLarge(10000); + eastl::generate(stdVectorTOLarge.begin(), stdVectorTOLarge.end(), GenerateIncrementalIntegers()); + + eastl::vector eaVectorTOLarge(10000); + eastl::generate(eaVectorTOLarge.begin(), eaVectorTOLarge.end(), GenerateIncrementalIntegers()); + + + std::list stdListTOSmall(10); + eastl::generate(stdListTOSmall.begin(), stdListTOSmall.end(), GenerateIncrementalIntegers()); + + eastl::list eaListTOSmall(10); + eastl::generate(eaListTOSmall.begin(), eaListTOSmall.end(), GenerateIncrementalIntegers()); + + + std::vector stdVectorTOSmall(10); + eastl::generate(stdVectorTOSmall.begin(), stdVectorTOSmall.end(), GenerateIncrementalIntegers()); + + eastl::vector eaVectorTOSmall(10); + eastl::generate(eaVectorTOSmall.begin(), eaVectorTOSmall.end(), GenerateIncrementalIntegers()); + + + for(int i = 0; i < 2; i++) + { + /////////////////////////////// + // Test reverse + /////////////////////////////// + + // There is no guaranteed Standard Library forward_list or slist. 
+ TestRotateEa (stopwatch2, eaSlistIntLarge.begin(), eastl::next( eaSlistIntLarge.begin(), (eaSlistIntLarge.size() / 2) - 1), eaSlistIntLarge.end()); + if(i == 1) + Benchmark::AddResult("algorithm/rotate/slist large", stopwatch1.GetUnits(), 0 /* untested */, stopwatch2.GetElapsedTime()); + + + + TestRotateStd(stopwatch1, stdVectorLargePod32.begin(), std__::next(stdVectorLargePod32.begin(), (stdVectorLargePod32.size() / 2) - 1), stdVectorLargePod32.end()); + TestRotateEa (stopwatch2, eaVectorLargePod32.begin(), eastl::next( eaVectorLargePod32.begin(), (eaVectorLargePod32.size() / 2) - 1), eaVectorLargePod32.end()); + if(i == 1) + Benchmark::AddResult("algorithm/rotate/vector> large", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + + TestRotateStd(stopwatch1, stdListIntLarge.begin(), std__::next(stdListIntLarge.begin(), (stdListIntLarge.size() / 2) - 1), stdListIntLarge.end()); + TestRotateEa (stopwatch2, eaListIntLarge.begin(), eastl::next( eaListIntLarge.begin(), (eaListIntLarge.size() / 2) - 1), eaListIntLarge.end()); + if(i == 1) + Benchmark::AddResult("algorithm/rotate/list large", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestRotateStd(stopwatch1, stdVectorIntLarge.begin(), std__::next(stdVectorIntLarge.begin(), (stdVectorIntLarge.size() / 2) - 1), stdVectorIntLarge.end()); + TestRotateEa (stopwatch2, eaVectorIntLarge.begin(), eastl::next( eaVectorIntLarge.begin(), (eaVectorIntLarge.size() / 2) - 1), eaVectorIntLarge.end()); + if(i == 1) + Benchmark::AddResult("algorithm/rotate/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + TestRotateStd(stopwatch1, stdListIntSmall.begin(), std__::next(stdListIntSmall.begin(), (stdListIntSmall.size() / 2) - 1), stdListIntSmall.end()); + TestRotateEa (stopwatch2, eaListIntSmall.begin(), eastl::next( eaListIntSmall.begin(), (eaListIntSmall.size() / 2) - 1), eaListIntSmall.end()); + if(i == 1) + 
Benchmark::AddResult("algorithm/rotate/list small", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestRotateStd(stopwatch1, stdVectorIntSmall.begin(), std__::next(stdVectorIntSmall.begin(), (stdVectorIntSmall.size() / 2) - 1), stdVectorIntSmall.end()); + TestRotateEa (stopwatch2, eaVectorIntSmall.begin(), eastl::next( eaVectorIntSmall.begin(), (eaVectorIntSmall.size() / 2) - 1), eaVectorIntSmall.end()); + if(i == 1) + Benchmark::AddResult("algorithm/rotate/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + TestRotateStd(stopwatch1, stdListTOLarge.begin(), std__::next(stdListTOLarge.begin(), (stdListTOLarge.size() / 2) - 1), stdListTOLarge.end()); + TestRotateEa (stopwatch2, eaListTOLarge.begin(), eastl::next( eaListTOLarge.begin(), (eaListTOLarge.size() / 2) - 1), eaListTOLarge.end()); + if(i == 1) + Benchmark::AddResult("algorithm/rotate/list", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestRotateStd(stopwatch1, stdVectorTOLarge.begin(), std__::next(stdVectorTOLarge.begin(), (stdVectorTOLarge.size() / 2) - 1), stdVectorTOLarge.end()); + TestRotateEa (stopwatch2, eaVectorTOLarge.begin(), eastl::next( eaVectorTOLarge.begin(), (eaVectorTOLarge.size() / 2) - 1), eaVectorTOLarge.end()); + if(i == 1) + Benchmark::AddResult("algorithm/rotate/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + TestRotateStd(stopwatch1, stdListTOSmall.begin(), std__::next(stdListTOSmall.begin(), (stdListTOSmall.size() / 2) - 1), stdListTOSmall.end()); + TestRotateEa (stopwatch2, eaListTOSmall.begin(), eastl::next( eaListTOSmall.begin(), (eaListTOSmall.size() / 2) - 1), eaListTOSmall.end()); + if(i == 1) + Benchmark::AddResult("algorithm/rotate/list", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestRotateStd(stopwatch1, stdVectorTOSmall.begin(), 
std__::next(stdVectorTOSmall.begin(), (stdVectorTOSmall.size() / 2) - 1), stdVectorTOSmall.end()); + TestRotateEa (stopwatch2, eaVectorTOSmall.begin(), eastl::next( eaVectorTOSmall.begin(), (eaVectorTOSmall.size() / 2) - 1), eaVectorTOSmall.end()); + if(i == 1) + Benchmark::AddResult("algorithm/rotate/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + } +} + +void BenchmarkAlgorithm8(EASTLTest_Rand& rng, EA::StdC::Stopwatch& stopwatch1, EA::StdC::Stopwatch& stopwatch2) +{ + const uint32_t ElementCount = 10000; + + eastl::vector srcVecA(ElementCount); + eastl::vector srcVecB(ElementCount); + + std::vector stdVecAInt(ElementCount); + std::vector stdVecBInt(ElementCount); + std::vector stdVecOutInt(2 * ElementCount); + std::vector stdVecATestObject(ElementCount); + std::vector stdVecBTestObject(ElementCount); + std::vector stdVecOutTestObject(2 * ElementCount); + + eastl::vector eaVecAInt(ElementCount); + eastl::vector eaVecBInt(ElementCount); + eastl::vector eaVecOutInt(2 * ElementCount); + eastl::vector eaVecATestObject(ElementCount); + eastl::vector eaVecBTestObject(ElementCount); + eastl::vector eaVecOutTestObject(2 * ElementCount); + + // Note: + // In some cases the compiler may generate branch free code for the loop body of merge. + // In this situation the performance of merging data that has a random merge selection (i.e. the chance that the smallest + // element is taken from the first or second list is essentially random) is the same as merging data where the choice of + // which list has the smallest element is predictable. + // However, if the compiler doesn't generate branch free code, then the performance of merge will suffer from branch + // misprediction when merging random data and will benefit greatly when misprediction is rare. + // This benchmark is aimed at highlighting what sort of code is being generated, and also showing the impact of + // predictability of the comparisons performed during merge. 
The branch predictablity /can/ have a large impact + // on merge sort performance. + + // 'unpred' is the case where the comparison is unpredictable + // 'pred' is the case where the comparison is mostly predictable + const char* patternDescriptions[][2] = + { + { + "algorithm/merge/vector (unpred)", + "algorithm/merge/vector (pred)", + }, + { + "algorithm/merge/vector (unpred)", + "algorithm/merge/vector (pred)", + }, + }; + + enum Pattern + { + P_Random, + P_Predictable, + P_Count + }; + + for (int pattern = 0; pattern < P_Count; pattern++) + { + if (pattern == P_Random) + { + eastl::generate(srcVecA.begin(), srcVecA.end(), [&]{ return int(rng()); }); + eastl::sort(srcVecA.begin(), srcVecA.end()); + eastl::generate(srcVecB.begin(), srcVecB.end(), [&] { return int(rng()); }); + eastl::sort(srcVecB.begin(), srcVecB.end()); + } + else if (pattern == P_Predictable) + { + // The data pattern means that a simple/naive algorithm will select 'runLen' values + // from one list, and then 'runLen' values from the other list (alternating back and forth). + // Of course, a merge algorithm that is more complicated might have a different order of + // comparison. 
+ const int runLen = 32; + for (int i = 0; i < ElementCount; i++) + { + int baseValue = ((i / runLen) * 2 * runLen) + (i % (runLen)); + srcVecA[i] = baseValue; + srcVecB[i] = baseValue + runLen; + } + } + + /////////////////////////////// + // Test merge + /////////////////////////////// + for (int i = 0; i < 2; i++) + { + eastl::copy(srcVecA.begin(), srcVecA.end(), stdVecAInt.begin()); + eastl::copy(srcVecB.begin(), srcVecB.end(), stdVecBInt.begin()); + eastl::copy(srcVecA.begin(), srcVecA.end(), eaVecAInt.begin()); + eastl::copy(srcVecB.begin(), srcVecB.end(), eaVecBInt.begin()); + TestMergeStd(stopwatch1, stdVecAInt.begin(), stdVecAInt.end(), stdVecBInt.begin(), stdVecBInt.end(), stdVecOutInt.begin()); + TestMergeEa(stopwatch2, eaVecAInt.begin(), eaVecAInt.end(), eaVecBInt.begin(), eaVecBInt.end(), eaVecOutInt.begin()); + + if (i == 1) + { + Benchmark::AddResult(patternDescriptions[0][pattern], stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + + for (int j = 0; j < ElementCount; j++) + { + stdVecATestObject[j] = TestObject(srcVecA[j]); + stdVecBTestObject[j] = TestObject(srcVecB[j]); + eaVecATestObject[j] = TestObject(srcVecA[j]); + eaVecBTestObject[j] = TestObject(srcVecB[j]); + } + TestMergeStd(stopwatch1, stdVecATestObject.begin(), stdVecATestObject.end(), stdVecBTestObject.begin(), stdVecBTestObject.end(), stdVecOutTestObject.begin()); + TestMergeEa(stopwatch2, eaVecATestObject.begin(), eaVecATestObject.end(), eaVecBTestObject.begin(), eaVecBTestObject.end(), eaVecOutTestObject.begin()); + + if (i == 1) + { + Benchmark::AddResult(patternDescriptions[1][pattern], stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + } + } + +} + + + +void BenchmarkAlgorithm() +{ + EASTLTest_Printf("Algorithm\n"); + + EASTLTest_Rand rng(EA::UnitTest::GetRandSeed()); + EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch 
stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + BenchmarkAlgorithm1(rng, stopwatch1, stopwatch2); + BenchmarkAlgorithm2(rng, stopwatch1, stopwatch2); + BenchmarkAlgorithm3(rng, stopwatch1, stopwatch2); + BenchmarkAlgorithm4(rng, stopwatch1, stopwatch2); + BenchmarkAlgorithm5(rng, stopwatch1, stopwatch2); + BenchmarkAlgorithm6(rng, stopwatch1, stopwatch2); + BenchmarkAlgorithm7(rng, stopwatch1, stopwatch2); + BenchmarkAlgorithm8(rng, stopwatch1, stopwatch2); +} + + + + + + + + + + + + + + + + diff --git a/benchmark/source/BenchmarkBitset.cpp b/benchmark/source/BenchmarkBitset.cpp new file mode 100644 index 0000000..680622b --- /dev/null +++ b/benchmark/source/BenchmarkBitset.cpp @@ -0,0 +1,366 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifdef _MSC_VER + // Microsoft STL generates warnings. + #pragma warning(disable: 4267) // 'initializing' : conversion from 'size_t' to 'const int', possible loss of data +#endif + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include + + +EA_DISABLE_ALL_VC_WARNINGS() +#include +EA_RESTORE_ALL_VC_WARNINGS() + + +using namespace EA; + + +namespace +{ + template + void TestSet(EA::StdC::Stopwatch& stopwatch, Bitset& b) + { + stopwatch.Restart(); + for(int i = 0; i < 100000; i++) + { + b.set(); + Benchmark::DoNothing(&b); + } + stopwatch.Stop(); + } + + + template + void TestSetIndex(EA::StdC::Stopwatch& stopwatch, Bitset& b, size_t index) + { + stopwatch.Restart(); + for(int i = 0; i < 100000; i++) + { + b.set(index); + Benchmark::DoNothing(&b); + } + stopwatch.Stop(); + } + + + template + void TestReset(EA::StdC::Stopwatch& stopwatch, Bitset& b) + { + stopwatch.Restart(); + for(int i = 0; i < 100000; i++) + { + b.reset(); + Benchmark::DoNothing(&b); + } + stopwatch.Stop(); + } + + + template + void TestFlip(EA::StdC::Stopwatch& 
stopwatch, Bitset& b) + { + stopwatch.Restart(); + for(int i = 0; i < 100000; i++) + { + b.flip(); + Benchmark::DoNothing(&b); + } + stopwatch.Stop(); + } + + + template + void TestTest(EA::StdC::Stopwatch& stopwatch, Bitset& b, unsigned nANDValue) + { + stopwatch.Restart(); + for(unsigned i = 0; i < 100000; i++) + Benchmark::DoNothing(b.test(i & nANDValue)); // We use & instead of % because the former is always fast due to forced power of 2. + stopwatch.Stop(); + } + + + template + void TestCount(EA::StdC::Stopwatch& stopwatch, Bitset& b) + { + size_t temp = 0; + stopwatch.Restart(); + for(int i = 0; i < 100000; i++) + { + temp += b.count(); + Benchmark::DoNothing(&temp); + } + stopwatch.Stop(); + } + + + template + void TestRightShift(EA::StdC::Stopwatch& stopwatch, Bitset& b, size_t n) + { + size_t temp = 0; + stopwatch.Restart(); + for(int i = 0; i < 100000; i++) + { + b >>= n; + Benchmark::DoNothing(&temp); + } + stopwatch.Stop(); + } + +} // namespace + + + +void BenchmarkBitset() +{ + EASTLTest_Printf("Bitset\n"); + + EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + { + std::bitset<15> stdBitset15; + eastl::bitset<15> eaBitset15; + + std::bitset<35> stdBitset35; + eastl::bitset<35> eaBitset35; + + std::bitset<75> stdBitset75; + eastl::bitset<75> eaBitset75; + + std::bitset<1500> stdBitset1500; + eastl::bitset<1500> eaBitset1500; + + + for(int i = 0; i < 2; i++) + { + /////////////////////////////// + // Test set() + /////////////////////////////// + + TestSet(stopwatch1, stdBitset15); + TestSet(stopwatch2, eaBitset15); + + if(i == 1) + Benchmark::AddResult("bitset<15>/set()", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestSet(stopwatch1, stdBitset35); + TestSet(stopwatch2, eaBitset35); + + if(i == 1) + Benchmark::AddResult("bitset<35>/set()", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), 
stopwatch2.GetElapsedTime()); + + TestSet(stopwatch1, stdBitset75); + TestSet(stopwatch2, eaBitset75); + + if(i == 1) + Benchmark::AddResult("bitset<75>/set()", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestSet(stopwatch1, stdBitset1500); + TestSet(stopwatch2, eaBitset1500); + + if(i == 1) + Benchmark::AddResult("bitset<1500>/set()", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test set(index) + /////////////////////////////// + + TestSetIndex(stopwatch1, stdBitset15, 13); + TestSetIndex(stopwatch2, eaBitset15, 13); + + if(i == 1) + Benchmark::AddResult("bitset<15>/set(i)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestSetIndex(stopwatch1, stdBitset35, 33); + TestSetIndex(stopwatch2, eaBitset35, 33); + + if(i == 1) + Benchmark::AddResult("bitset<35>/set(i)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestSetIndex(stopwatch1, stdBitset75, 73); + TestSetIndex(stopwatch2, eaBitset75, 73); + + if(i == 1) + Benchmark::AddResult("bitset<75>/set(i)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestSetIndex(stopwatch1, stdBitset1500, 730); + TestSetIndex(stopwatch2, eaBitset1500, 730); + + if(i == 1) + Benchmark::AddResult("bitset<1500>/set(i)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test reset() + /////////////////////////////// + + TestReset(stopwatch1, stdBitset15); + TestReset(stopwatch2, eaBitset15); + + if(i == 1) + Benchmark::AddResult("bitset<15>/reset", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestReset(stopwatch1, stdBitset35); + TestReset(stopwatch2, eaBitset35); + + if(i == 1) + Benchmark::AddResult("bitset<35>/reset", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), 
stopwatch2.GetElapsedTime()); + + TestReset(stopwatch1, stdBitset75); + TestReset(stopwatch2, eaBitset75); + + if(i == 1) + Benchmark::AddResult("bitset<75>/reset", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestReset(stopwatch1, stdBitset1500); + TestReset(stopwatch2, eaBitset1500); + + if(i == 1) + Benchmark::AddResult("bitset<1500>/reset", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test flip + /////////////////////////////// + + TestFlip(stopwatch1, stdBitset15); + TestFlip(stopwatch2, eaBitset15); + + if(i == 1) + Benchmark::AddResult("bitset<15>/flip", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFlip(stopwatch1, stdBitset35); + TestFlip(stopwatch2, eaBitset35); + + if(i == 1) + Benchmark::AddResult("bitset<35>/flip", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFlip(stopwatch1, stdBitset75); + TestFlip(stopwatch2, eaBitset75); + + if(i == 1) + Benchmark::AddResult("bitset<75>/flip", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFlip(stopwatch1, stdBitset1500); + TestFlip(stopwatch2, eaBitset1500); + + if(i == 1) + Benchmark::AddResult("bitset<1500>/flip", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test test + /////////////////////////////// + + TestTest(stopwatch1, stdBitset15, 7); + TestTest(stopwatch2, eaBitset15, 7); + + if(i == 1) + Benchmark::AddResult("bitset<15>/test", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestTest(stopwatch1, stdBitset35, 31); + TestTest(stopwatch2, eaBitset35, 31); + + if(i == 1) + Benchmark::AddResult("bitset<35>/test", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestTest(stopwatch1, stdBitset75, 63); + 
TestTest(stopwatch2, eaBitset75, 63); + + if(i == 1) + Benchmark::AddResult("bitset<75>/test", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestTest(stopwatch1, stdBitset1500, 1023); + TestTest(stopwatch2, eaBitset1500, 1023); + + if(i == 1) + Benchmark::AddResult("bitset<1500>/test", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test count + /////////////////////////////// + + TestCount(stopwatch1, stdBitset15); + TestCount(stopwatch2, eaBitset15); + + if(i == 1) + Benchmark::AddResult("bitset<15>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestCount(stopwatch1, stdBitset35); + TestCount(stopwatch2, eaBitset35); + + if(i == 1) + Benchmark::AddResult("bitset<35>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestCount(stopwatch1, stdBitset75); + TestCount(stopwatch2, eaBitset75); + + if(i == 1) + Benchmark::AddResult("bitset<75>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestCount(stopwatch1, stdBitset1500); + TestCount(stopwatch2, eaBitset1500); + + if(i == 1) + Benchmark::AddResult("bitset<1500>/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test >>= + /////////////////////////////// + + TestRightShift(stopwatch1, stdBitset15, 1); + TestRightShift(stopwatch2, eaBitset15, 1); + + if(i == 1) + Benchmark::AddResult("bitset<15>/>>=/1", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(), + GetStdSTLType() == kSTLPort ? "STLPort is broken, neglects wraparound check." 
: NULL); + + TestRightShift(stopwatch1, stdBitset35, 1); + TestRightShift(stopwatch2, eaBitset35, 1); + + if(i == 1) + Benchmark::AddResult("bitset<35>/>>=/1", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(), + GetStdSTLType() == kSTLPort ? "STLPort is broken, neglects wraparound check." : NULL); + + TestRightShift(stopwatch1, stdBitset75, 1); + TestRightShift(stopwatch2, eaBitset75, 1); + + if(i == 1) + Benchmark::AddResult("bitset<75>/>>=/1", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(), + GetStdSTLType() == kSTLPort ? "STLPort is broken, neglects wraparound check." : NULL); + + TestRightShift(stopwatch1, stdBitset1500, 1); + TestRightShift(stopwatch2, eaBitset1500, 1); + + if(i == 1) + Benchmark::AddResult("bitset<1500>/>>=/1", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(), + GetStdSTLType() == kSTLPort ? "STLPort is broken, neglects wraparound check." : NULL); + } + } +} + + + + + + + + + + + + + + + + diff --git a/benchmark/source/BenchmarkDeque.cpp b/benchmark/source/BenchmarkDeque.cpp new file mode 100644 index 0000000..d3c69de --- /dev/null +++ b/benchmark/source/BenchmarkDeque.cpp @@ -0,0 +1,342 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(push, 0) + #pragma warning(disable: 4350) // behavior change: X called instead of Y +#endif +#include +#include +#include +#include +#include +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +using namespace EA; + + +namespace +{ + struct ValuePair + { + uint32_t key; + uint32_t v; + }; + + struct VPCompare + { + bool operator()(const ValuePair& vp1, const ValuePair& vp2) const + { + return (vp1.key == vp2.key) ? (vp1.v < vp2.v) : (vp1.key < vp2.key); + } + }; + + bool operator<(const ValuePair& vp1, const ValuePair& vp2) + { + return (vp1.key == vp2.key) ? (vp1.v < vp2.v) : (vp1.key < vp2.key); + } + + bool operator==(const ValuePair& vp1, const ValuePair& vp2) + { + return (vp1.key == vp2.key) && (vp1.v == vp2.v); + } +} + + +EASTL_DECLARE_POD(ValuePair) +EASTL_DECLARE_TRIVIAL_CONSTRUCTOR(ValuePair) +EASTL_DECLARE_TRIVIAL_COPY(ValuePair) +EASTL_DECLARE_TRIVIAL_ASSIGN(ValuePair) +EASTL_DECLARE_TRIVIAL_DESTRUCTOR(ValuePair) +EASTL_DECLARE_TRIVIAL_RELOCATE(ValuePair) + + + +typedef std::deque StdDeque; +typedef eastl::deque EaDeque; // What value do we pick for the subarray size to make the comparison fair? Using the default isn't ideal because it results in this test measuring speed efficiency and ignoring memory efficiency. 
+ + + +namespace +{ + template + void TestPushBack(EA::StdC::Stopwatch& stopwatch, Container& c, eastl::vector& intVector) + { + stopwatch.Restart(); + for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++) + { + const ValuePair vp = { intVector[j], intVector[j] }; + c.push_back(vp); + } + stopwatch.Stop(); + } + + + template + void TestPushFront(EA::StdC::Stopwatch& stopwatch, Container& c, eastl::vector& intVector) + { + stopwatch.Restart(); + for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++) + { + const ValuePair vp = { intVector[j], intVector[j] }; + c.push_front(vp); + } + stopwatch.Stop(); + } + + + template + void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c) + { + uint64_t temp = 0; + stopwatch.Restart(); + for(typename Container::size_type j = 0, jEnd = c.size(); j < jEnd; j++) + temp += c[j].key; + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(temp & 0xffffffff)); + } + + + template + void TestIteration(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::iterator it = c.begin(), itEnd = c.end(); + stopwatch.Restart(); + while(it != itEnd) + ++it; + stopwatch.Stop(); + if(it != c.end()) + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(*it).key); + + /* Alternative way to measure: + const eastl_size_t n = c.size(); + stopwatch.Restart(); + for(eastl_size_t i = 0; i < n; ++i) + ++it; + stopwatch.Stop(); + if(it != c.end()) + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(*it).key); + */ + } + + + template + void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c) + { + // Intentionally use eastl find in order to measure just + // vector access speed and not be polluted by sort speed. 
+ const ValuePair vp = { 0xffffffff, 0 }; + stopwatch.Restart(); + typename Container::iterator it = eastl::find(c.begin(), c.end(), vp); + stopwatch.Stop(); + if(it != c.end()) + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(*it).key); + } + + + template + void TestSort(EA::StdC::Stopwatch& stopwatch, Container& c) + { + // Intentionally use eastl sort in order to measure just + // vector access speed and not be polluted by sort speed. + VPCompare vpCompare; + stopwatch.Restart(); + eastl::quick_sort(c.begin(), c.end(), vpCompare); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c[0].key); + } + + + template + void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c) + { + const ValuePair vp = { 0xffffffff, 0 }; + typename Container::size_type j, jEnd; + typename Container::iterator it; + + stopwatch.Restart(); + for(j = 0, jEnd = 2000, it = c.begin(); j < jEnd; ++j) + { + it = c.insert(it, vp); + + if(it == c.end()) // Try to safely increment the iterator three times. + it = c.begin(); + if(++it == c.end()) + it = c.begin(); + if(++it == c.end()) + it = c.begin(); + } + stopwatch.Stop(); + } + + + template + void TestErase(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it; + + stopwatch.Restart(); + for(j = 0, jEnd = 2000, it = c.begin(); j < jEnd; ++j) + { + it = c.erase(it); + + if(it == c.end()) // Try to safely increment the iterator three times. 
+ it = c.begin(); + if(++it == c.end()) + it = c.begin(); + if(++it == c.end()) + it = c.begin(); + } + stopwatch.Stop(); + } + +} // namespace + + + +void BenchmarkDeque() +{ + EASTLTest_Printf("Deque\n"); + + EA::UnitTest::RandGenT rng(EA::UnitTest::GetRandSeed()); + EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + { // Exercise some declarations + int nErrorCount = 0; + ValuePair vp1 = { 0, 0 }, vp2 = { 0, 0 }; + VPCompare c1, c2; + + VERIFY(c1.operator()(vp1, vp2) == c2.operator()(vp1, vp2)); + VERIFY((vp1 < vp2) || (vp1 == vp2) || !(vp1 == vp2)); + } + + { + eastl::vector intVector(100000); + eastl::generate(intVector.begin(), intVector.end(), rng); + + for(int i = 0; i < 2; i++) + { + StdDeque stdDeque; + EaDeque eaDeque; + + + /////////////////////////////// + // Test push_back + /////////////////////////////// + + TestPushBack(stopwatch1, stdDeque, intVector); + TestPushBack(stopwatch2, eaDeque, intVector); + + if(i == 1) + Benchmark::AddResult("deque/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test push_front + /////////////////////////////// + + TestPushFront(stopwatch1, stdDeque, intVector); + TestPushFront(stopwatch2, eaDeque, intVector); + + if(i == 1) + Benchmark::AddResult("deque/push_front", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test operator[] + /////////////////////////////// + + TestBracket(stopwatch1, stdDeque); + TestBracket(stopwatch2, eaDeque); + + if(i == 1) + Benchmark::AddResult("deque/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test iteration + /////////////////////////////// + + TestIteration(stopwatch1, stdDeque); + TestIteration(stopwatch2, eaDeque); + + if(i == 1) + 
Benchmark::AddResult("deque/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test find() + /////////////////////////////// + + TestFind(stopwatch1, stdDeque); + TestFind(stopwatch2, eaDeque); + + if(i == 1) + Benchmark::AddResult("deque/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test sort + /////////////////////////////// + + // Currently VC++ complains about our sort function decrementing std::iterator that is already at begin(). In the strictest sense, + // that's a valid complaint, but we aren't testing std STL here. We will want to revise our sort function eventually. + #if !defined(_MSC_VER) || !defined(_ITERATOR_DEBUG_LEVEL) || (_ITERATOR_DEBUG_LEVEL < 2) + TestSort(stopwatch1, stdDeque); + TestSort(stopwatch2, eaDeque); + + if(i == 1) + Benchmark::AddResult("deque/sort", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + #endif + + + /////////////////////////////// + // Test insert + /////////////////////////////// + + TestInsert(stopwatch1, stdDeque); + TestInsert(stopwatch2, eaDeque); + + if(i == 1) + Benchmark::AddResult("deque/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase + /////////////////////////////// + + TestErase(stopwatch1, stdDeque); + TestErase(stopwatch2, eaDeque); + + if(i == 1) + Benchmark::AddResult("deque/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + } +} + + + + + + + + + + + + diff --git a/benchmark/source/BenchmarkHash.cpp b/benchmark/source/BenchmarkHash.cpp new file mode 100644 index 0000000..35470e7 --- /dev/null +++ b/benchmark/source/BenchmarkHash.cpp @@ -0,0 +1,469 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts 
Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include +#include +#include +#include + + + +EA_DISABLE_ALL_VC_WARNINGS() +#include +#include +#include +#include +EA_RESTORE_ALL_VC_WARNINGS() + + + +using namespace EA; + + +// HashString8 +// +// We define a string +// +template +struct HashString8 +{ + // Defined for EASTL, STLPort, SGI, etc. and Metrowerks-related hash tables: + size_t operator()(const String& s) const + { + const uint8_t* p = (const uint8_t*) s.c_str(); + uint32_t c, stringHash = UINT32_C(2166136261); + while((c = *p++) != 0) + stringHash = (stringHash * 16777619) ^ c; + return stringHash; + } + + // Defined for Dinkumware-related (e.g. MS STL) hash tables: + bool operator()(const String& s1, const String& s2) const + { + return s1 < s2; + } + + // Defined for Dinkumware-related (e.g. MS STL) hash tables: + enum { + bucket_size = 7, + min_buckets = 8 + }; +}; + + +using StdMapUint32TO = std::unordered_map; +using StdMapStrUint32 = std::unordered_map>; + +using EaMapUint32TO = eastl::hash_map; +using EaMapStrUint32 = eastl::hash_map>; + + +namespace +{ + template + void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + c.insert(pArrayBegin, pArrayEnd); + stopwatch.Stop(); + } + + + template + void TestIteration(EA::StdC::Stopwatch& stopwatch, const Container& c, const Value& findValue) + { + stopwatch.Restart(); + typename Container::const_iterator it = eastl::find(c.begin(), c.end(), findValue); // It shouldn't matter what find implementation we use here, as it merely iterates values. 
+ stopwatch.Stop(); + if(it != c.end()) + sprintf(Benchmark::gScratchBuffer, "%p", &*it); + } + + + template + void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + Benchmark::DoNothing(&c[pArrayBegin->first]); + ++pArrayBegin; + } + stopwatch.Stop(); + } + + + template + void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + typename Container::iterator it = c.find(pArrayBegin->first); + Benchmark::DoNothing(&it); + ++pArrayBegin; + } + stopwatch.Stop(); + } + + + template + void TestFindAsStd(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + typename Container::iterator it = c.find(pArrayBegin->first.c_str()); + Benchmark::DoNothing(&it); + ++pArrayBegin; + } + stopwatch.Stop(); + } + + + template + void TestFindAsEa(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + typename Container::iterator it = c.find_as(pArrayBegin->first.c_str()); + Benchmark::DoNothing(&it); + ++pArrayBegin; + } + stopwatch.Stop(); + } + + + template + void TestCount(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + typename Container::size_type temp = 0; + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + temp += c.count(pArrayBegin->first); + ++pArrayBegin; + } + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp); + } + + + template + void TestEraseValue(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + 
 c.erase(pArrayBegin->first); + ++pArrayBegin; + } + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size()); + } + + + template + void TestErasePosition(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it; + + stopwatch.Restart(); + for(j = 0, jEnd = c.size() / 3, it = c.begin(); j < jEnd; ++j) + { + // The erase function is supposed to return an iterator, but the C++ standard was + // not initially clear about it and some STL implementations don't do it correctly. + #if (defined(_MSC_VER) || defined(_CPPLIB_VER)) // _CPPLIB_VER is something defined by Dinkumware STL. + it = c.erase(it); + #else + // This pathway may execute at a slightly different speed than the + // standard behaviour, but that's fine for the benchmark because the + // benchmark is measuring the speed of erasing while iterating, and + // however it needs to get done by the given STL is how it is measured. + const typename Container::iterator itErase(it++); + c.erase(itErase); + #endif + + ++it; + ++it; + } + + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p %p", &c, &it); + } + + + template + void TestEraseRange(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it1 = c.begin(); + typename Container::iterator it2 = c.begin(); + + for(j = 0, jEnd = c.size() / 3; j < jEnd; ++j) + ++it2; + + stopwatch.Restart(); + c.erase(it1, it2); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p %p %p", &c, &it1, &it2); + } + + + template + void TestClear(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + c.clear(); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size()); + } + + +} // namespace + + + +void BenchmarkHash() +{ + EASTLTest_Printf("HashMap\n"); + + EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed()); + EA::StdC::Stopwatch 
stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + { + eastl::vector< std::pair > stdVectorUT(10000); + eastl::vector< eastl::pair > eaVectorUT(10000); + + eastl::vector< std::pair< std::string, uint32_t> > stdVectorSU(10000); + eastl::vector< eastl::pair > eaVectorSU(10000); + + for(eastl_size_t i = 0, iEnd = stdVectorUT.size(); i < iEnd; i++) + { + const uint32_t n1 = rng.RandLimit((uint32_t)(iEnd / 2)); + const uint32_t n2 = rng.RandValue(); + + stdVectorUT[i] = std::pair(n1, TestObject(n2)); + eaVectorUT[i] = eastl::pair(n1, TestObject(n2)); + + char str_n1[32]; + sprintf(str_n1, "%u", (unsigned)n1); + + stdVectorSU[i] = std::pair< std::string, uint32_t>( std::string(str_n1), n2); + eaVectorSU[i] = eastl::pair(eastl::string(str_n1), n2); + } + + for(int i = 0; i < 2; i++) + { + StdMapUint32TO stdMapUint32TO; + EaMapUint32TO eaMapUint32TO; + + StdMapStrUint32 stdMapStrUint32; + EaMapStrUint32 eaMapStrUint32; + + + /////////////////////////////// + // Test insert(const value_type&) + /////////////////////////////// + + TestInsert(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + stdVectorUT.size()); + TestInsert(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + eaVectorUT.size()); + + if(i == 1) + Benchmark::AddResult("hash_map/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestInsert(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size()); + TestInsert(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + eaVectorSU.size()); + + if(i == 1) + Benchmark::AddResult("hash_map/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test iteration + /////////////////////////////// + + TestIteration(stopwatch1, stdMapUint32TO, StdMapUint32TO::value_type(9999999, TestObject(9999999))); + 
TestIteration(stopwatch2, eaMapUint32TO, EaMapUint32TO::value_type(9999999, TestObject(9999999))); + + if(i == 1) + Benchmark::AddResult("hash_map/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestIteration(stopwatch1, stdMapStrUint32, StdMapStrUint32::value_type( std::string("9999999"), 9999999)); + TestIteration(stopwatch2, eaMapStrUint32, EaMapStrUint32::value_type(eastl::string("9999999"), 9999999)); + + if(i == 1) + Benchmark::AddResult("hash_map/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test operator[] + /////////////////////////////// + + TestBracket(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + stdVectorUT.size()); + TestBracket(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + eaVectorUT.size()); + + if(i == 1) + Benchmark::AddResult("hash_map/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestBracket(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size()); + TestBracket(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + eaVectorSU.size()); + + if(i == 1) + Benchmark::AddResult("hash_map/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test find + /////////////////////////////// + + TestFind(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + stdVectorUT.size()); + TestFind(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + eaVectorUT.size()); + + if(i == 1) + Benchmark::AddResult("hash_map/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFind(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size()); + TestFind(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() 
+ eaVectorSU.size()); + + if(i == 1) + Benchmark::AddResult("hash_map/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test find_as + /////////////////////////////// + + TestFindAsStd(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size()); + TestFindAsEa(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + eaVectorSU.size()); + + if(i == 1) + Benchmark::AddResult("hash_map/find_as/char*", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test count + /////////////////////////////// + + TestCount(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + stdVectorUT.size()); + TestCount(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + eaVectorUT.size()); + + if(i == 1) + Benchmark::AddResult("hash_map/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestCount(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size()); + TestCount(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + eaVectorSU.size()); + + if(i == 1) + Benchmark::AddResult("hash_map/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase(const key_type& key) + /////////////////////////////// + + TestEraseValue(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + (stdVectorUT.size() / 2)); + TestEraseValue(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + (eaVectorUT.size() / 2)); + + if(i == 1) + Benchmark::AddResult("hash_map/erase val", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestEraseValue(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + (stdVectorSU.size() / 2)); + TestEraseValue(stopwatch2, 
eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + (eaVectorSU.size() / 2)); + + if(i == 1) + Benchmark::AddResult("hash_map/erase val", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase(iterator position) + /////////////////////////////// + + TestErasePosition(stopwatch1, stdMapUint32TO); + TestErasePosition(stopwatch2, eaMapUint32TO); + + if(i == 1) + Benchmark::AddResult("hash_map/erase pos", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestErasePosition(stopwatch1, stdMapStrUint32); + TestErasePosition(stopwatch2, eaMapStrUint32); + + if(i == 1) + Benchmark::AddResult("hash_map/erase pos", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase(iterator first, iterator last) + /////////////////////////////// + + TestEraseRange(stopwatch1, stdMapUint32TO); + TestEraseRange(stopwatch2, eaMapUint32TO); + + if(i == 1) + Benchmark::AddResult("hash_map/erase range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestEraseRange(stopwatch1, stdMapStrUint32); + TestEraseRange(stopwatch2, eaMapStrUint32); + + if(i == 1) + Benchmark::AddResult("hash_map/erase range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test clear() + /////////////////////////////// + + // Clear the containers of whatever they happen to have. We want the containers to have full data. + TestClear(stopwatch1, stdMapUint32TO); + TestClear(stopwatch2, eaMapUint32TO); + TestClear(stopwatch1, stdMapStrUint32); + TestClear(stopwatch2, eaMapStrUint32); + + // Re-set the containers with full data. 
+ TestInsert(stopwatch1, stdMapUint32TO, stdVectorUT.data(), stdVectorUT.data() + stdVectorUT.size()); + TestInsert(stopwatch2, eaMapUint32TO, eaVectorUT.data(), eaVectorUT.data() + eaVectorUT.size()); + TestInsert(stopwatch1, stdMapStrUint32, stdVectorSU.data(), stdVectorSU.data() + stdVectorSU.size()); + TestInsert(stopwatch2, eaMapStrUint32, eaVectorSU.data(), eaVectorSU.data() + eaVectorSU.size()); + + // Now clear the data again, this time measuring it. + TestClear(stopwatch1, stdMapUint32TO); + TestClear(stopwatch2, eaMapUint32TO); + + if(i == 1) + Benchmark::AddResult("hash_map/clear", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestClear(stopwatch1, stdMapStrUint32); + TestClear(stopwatch2, eaMapStrUint32); + + if(i == 1) + Benchmark::AddResult("hash_map/clear", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + } + } +} + + + + + + + + + + + + + + + + + + diff --git a/benchmark/source/BenchmarkHeap.cpp b/benchmark/source/BenchmarkHeap.cpp new file mode 100644 index 0000000..635cf31 --- /dev/null +++ b/benchmark/source/BenchmarkHeap.cpp @@ -0,0 +1,238 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(push, 0) + #pragma warning(disable: 4350) // behavior change: X called instead of Y +#endif +#include +#include +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +using namespace EA; + + +namespace +{ + template + void TestMakeHeapStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last) + { + stopwatch.Restart(); + std::make_heap(first, last); + stopwatch.Stop(); + } + + template + void TestMakeHeapEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last) + { + stopwatch.Restart(); + eastl::make_heap(first, last); + stopwatch.Stop(); + } + + + + template + void TestPushHeapStd(EA::StdC::Stopwatch& stopwatch, Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2) + { + stopwatch.Restart(); + while(first2 != last2) + { + *last1++ = *first2++; + std::push_heap(first1, last1); + } + stopwatch.Stop(); + } + + template + void TestPushHeapEa(EA::StdC::Stopwatch& stopwatch, Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2) + { + stopwatch.Restart(); + while(first2 != last2) + { + *last1++ = *first2++; + eastl::push_heap(first1, last1); + } + stopwatch.Stop(); + } + + + + template + void TestPopHeapStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, Iterator popEnd) + { + stopwatch.Restart(); + while(last != popEnd) + std::pop_heap(first, last--); + stopwatch.Stop(); + } + + template + void TestPopHeapEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last, Iterator popEnd) + { + stopwatch.Restart(); + while(last != popEnd) + eastl::pop_heap(first, last--); + stopwatch.Stop(); + } + + + + template + void TestSortHeapStd(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last) + { + stopwatch.Restart(); + std::sort_heap(first, last); + stopwatch.Stop(); + } + + template + void 
TestSortHeapEa(EA::StdC::Stopwatch& stopwatch, Iterator first, Iterator last) + { + stopwatch.Restart(); + eastl::sort_heap(first, last); + stopwatch.Stop(); + } + +} // namespace + + + +void BenchmarkHeap() +{ + EASTLTest_Printf("Heap (Priority Queue)\n"); + + EA::UnitTest::RandGenT rng(EA::UnitTest::GetRandSeed()); + EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + { + const int kArraySize = 100000; + + // uint32[] + uint32_t* const pIntArrayS = new uint32_t[kArraySize * 2]; // * 2 because we will be adding more items via push_heap. + uint32_t* const pIntArrayE = new uint32_t[kArraySize * 2]; // S means Std; E means EA. + uint32_t* const pIntArray2 = new uint32_t[kArraySize]; // This will be used for pop_heap. + + eastl::generate(pIntArrayS, pIntArrayS + kArraySize, rng); + eastl::copy(pIntArrayS, pIntArrayS + kArraySize, pIntArrayE); + eastl::copy(pIntArrayS, pIntArrayS + kArraySize, pIntArray2); + + + // vector + std::vector stdVectorTO(kArraySize * 2); + std::vector stdVectorTO2(kArraySize); + eastl::vector eaVectorTO(kArraySize * 2); + eastl::vector eaVectorTO2(kArraySize); + + for(int k = 0; k < kArraySize; k++) + { + stdVectorTO[k] = TestObject(pIntArrayS[k]); + stdVectorTO2[k] = TestObject(pIntArrayS[k]); + eaVectorTO[k] = TestObject(pIntArrayS[k]); + eaVectorTO2[k] = TestObject(pIntArrayS[k]); + } + + + for(int i = 0; i < 2; i++) + { + /////////////////////////////// + // Test make_heap + /////////////////////////////// + + TestMakeHeapStd(stopwatch1, pIntArrayS, pIntArrayS + kArraySize); + TestMakeHeapEa (stopwatch2, pIntArrayE, pIntArrayE + kArraySize); + + if(i == 1) + Benchmark::AddResult("heap (uint32_t[])/make_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestMakeHeapStd(stopwatch1, stdVectorTO.begin(), stdVectorTO.begin() + kArraySize); + TestMakeHeapEa (stopwatch2, eaVectorTO.begin(), eaVectorTO.begin() + 
kArraySize); + + if(i == 1) + Benchmark::AddResult("heap (vector)/make_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test push_heap + /////////////////////////////// + + TestPushHeapStd(stopwatch1, pIntArrayS, pIntArrayS + kArraySize, pIntArray2, pIntArray2 + kArraySize); + TestPushHeapEa (stopwatch2, pIntArrayE, pIntArrayE + kArraySize, pIntArray2, pIntArray2 + kArraySize); + + if(i == 1) + Benchmark::AddResult("heap (uint32_t[])/push_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestPushHeapStd(stopwatch1, stdVectorTO.begin(), stdVectorTO.begin() + kArraySize, stdVectorTO2.begin(), stdVectorTO2.begin() + kArraySize); + TestPushHeapEa (stopwatch2, eaVectorTO.begin(), eaVectorTO.begin() + kArraySize, eaVectorTO2.begin(), eaVectorTO2.begin() + kArraySize); + + if(i == 1) + Benchmark::AddResult("heap (vector)/push_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test pop_heap + /////////////////////////////// + + TestPopHeapStd(stopwatch1, pIntArrayS, pIntArrayS + (kArraySize * 2), pIntArrayS + kArraySize); // * 2 because we used push_heap above to add more items. + TestPopHeapEa (stopwatch2, pIntArrayE, pIntArrayE + (kArraySize * 2), pIntArrayE + kArraySize); + + if(i == 1) + Benchmark::AddResult("heap (uint32_t[])/pop_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestPopHeapStd(stopwatch1, stdVectorTO.begin(), stdVectorTO.begin() + (kArraySize * 2), stdVectorTO.begin() + kArraySize); // * 2 because we used push_heap above to add more items. 
+ TestPopHeapEa (stopwatch2, eaVectorTO.begin(), eaVectorTO.begin() + (kArraySize * 2), eaVectorTO.begin() + kArraySize); + + if(i == 1) + Benchmark::AddResult("heap (vector)/pop_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test sort_heap + /////////////////////////////// + + TestSortHeapStd(stopwatch1, pIntArrayS, pIntArrayS + kArraySize); + TestSortHeapEa (stopwatch2, pIntArrayE, pIntArrayE + kArraySize); + + if(i == 1) + Benchmark::AddResult("heap (uint32_t[])/sort_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestSortHeapStd(stopwatch1, stdVectorTO.begin(), stdVectorTO.begin() + kArraySize); + TestSortHeapEa (stopwatch2, eaVectorTO.begin(), eaVectorTO.begin() + kArraySize); + + if(i == 1) + Benchmark::AddResult("heap (vector)/sort_heap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + + delete[] pIntArrayS; + delete[] pIntArrayE; + delete[] pIntArray2; + } +} + + + + + + + + + + + + diff --git a/benchmark/source/BenchmarkList.cpp b/benchmark/source/BenchmarkList.cpp new file mode 100644 index 0000000..1d22ad8 --- /dev/null +++ b/benchmark/source/BenchmarkList.cpp @@ -0,0 +1,382 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(push, 0) + #pragma warning(disable: 4555) // expression has no effect; expected expression with side-effect + #pragma warning(disable: 4350) // behavior change: X called instead of Y +#endif +#include +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +using namespace EA; +using namespace eastl; + + + +typedef std::list StdListTO; +typedef eastl::list EaListTO; + + + +namespace +{ + void DoNothing(void*) + { + // Empty + } + + + template + void TestCtorIterator(EA::StdC::Stopwatch& stopwatch, const ContainerSource& cs, Container*) // Dummy Container argument because of GCC 2.X limitations. + { + stopwatch.Restart(); + Container c(cs.begin(), cs.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX); + } + + + template + void TestCtorN(EA::StdC::Stopwatch& stopwatch, Container*) // Dummy Container argument because of GCC 2.X limitations. 
+ { + stopwatch.Restart(); + Container c(10000); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX); + } + + + template + void TestPushBack(EA::StdC::Stopwatch& stopwatch, Container& c, const TestObject* pTOBegin, const TestObject* const pTOEnd) + { + stopwatch.Restart(); + while(pTOBegin != pTOEnd) + c.push_back(*pTOBegin++); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX); + } + + + template + void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c, const TestObject* pTOBegin, const TestObject* const pTOEnd) + { + typename Container::iterator it = c.begin(); + stopwatch.Restart(); + while(pTOBegin != pTOEnd) + { + it = c.insert(it, *pTOBegin++); + + if(++it == c.end()) // Try to safely increment the iterator a couple times + it = c.begin(); + if(++it == c.end()) + it = c.begin(); + } + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX); + } + + + template + void TestSize(EA::StdC::Stopwatch& stopwatch, Container& c, void (*pFunction)(...)) + { + stopwatch.Restart(); + for(int i = 0; (i < 10000) && c.size(); i++) + (*pFunction)(&c); + stopwatch.Stop(); + } + + + template + void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c, const TestObject& to) + { + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size()); + stopwatch.Restart(); + typename Container::iterator it = eastl::find(c.begin(), c.end(), to); + stopwatch.Stop(); + if(it != c.end()) + sprintf(Benchmark::gScratchBuffer, "%d", (*it).mX); + } + + + template + void TestReverse(EA::StdC::Stopwatch& stopwatch, Container& c) + { + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size()); + stopwatch.Restart(); + c.reverse(); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX); + } + + + template + void TestRemove(EA::StdC::Stopwatch& stopwatch, Container& c, const TestObject* pTOBegin, const TestObject* const pTOEnd) + { + 
sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size()); + stopwatch.Restart(); + while(pTOBegin != pTOEnd) + c.remove(*pTOBegin++); + stopwatch.Stop(); + if(!c.empty()) + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX); + } + + + template + void TestSplice(EA::StdC::Stopwatch& stopwatch, Container& c, Container& cSource) + { + typename Container::iterator it = c.begin(); + int i = 0, iEnd = (int)cSource.size() - 5; + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size()); + stopwatch.Restart(); + while(i++ != iEnd) + c.splice(it, cSource, cSource.begin()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX); + } + + + template + void TestErase(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::iterator it = c.begin(); + int i = 0, iEnd = (int)c.size() - 5; + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size()); + stopwatch.Restart(); + while(i++ != iEnd) + { + it = c.erase(it); + + if(it == c.end()) // Try to safely increment the iterator a couple times + it = c.begin(); + if(++it == c.end()) + it = c.begin(); + } + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.back().mX); + } + +} // namespace + + + + +void BenchmarkList() +{ + EASTLTest_Printf("List\n"); + + EASTLTest_Rand rng(EA::UnitTest::GetRandSeed()); + EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + EaListTO eaListTO_1(1); + EaListTO eaListTO_10(10); + EaListTO eaListTO_100(100); + StdListTO stdListTO_1(1); + StdListTO stdListTO_10(10); + StdListTO stdListTO_100(100); + + { + char buffer[32]; + sprintf(buffer, "%p", &DoNothing); + } + + { + eastl::vector toVector(100000); + for(eastl_size_t i = 0, iEnd = toVector.size(); i < iEnd; ++i) + toVector[i] = TestObject((int)i); + random_shuffle(toVector.begin(), toVector.end(), rng); + + + for(int i = 0; i < 2; i++) + { + StdListTO stdListTO; + 
EaListTO eaListTO; + + + /////////////////////////////// + // Test list(InputIterator first, InputIterator last) + /////////////////////////////// + + TestCtorIterator(stopwatch1, toVector, &stdListTO); + TestCtorIterator(stopwatch2, toVector, &eaListTO); + + if(i == 1) + Benchmark::AddResult("list/ctor(it)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test list(size_type n) + /////////////////////////////// + + TestCtorN(stopwatch1, &stdListTO); + TestCtorN(stopwatch2, &eaListTO); + + if(i == 1) + Benchmark::AddResult("list/ctor(n)", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + + /////////////////////////////// + // Test push_back() + /////////////////////////////// + + TestPushBack(stopwatch1, stdListTO, toVector.data(), toVector.data() + toVector.size()); + TestPushBack(stopwatch2, eaListTO, toVector.data(), toVector.data() + toVector.size()); + + if(i == 1) + Benchmark::AddResult("list/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + + /////////////////////////////// + // Test insert() + /////////////////////////////// + + TestInsert(stopwatch1, stdListTO, toVector.data(), toVector.data() + toVector.size()); + TestInsert(stopwatch2, eaListTO, toVector.data(), toVector.data() + toVector.size()); + + if(i == 1) + Benchmark::AddResult("list/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + + /////////////////////////////// + // Test size() + /////////////////////////////// + + TestSize(stopwatch1, stdListTO_1, Benchmark::DoNothing); + TestSize(stopwatch2, eaListTO_1, Benchmark::DoNothing); + + if(i == 1) + Benchmark::AddResult("list/size/1", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestSize(stopwatch1, stdListTO_10, Benchmark::DoNothing); + TestSize(stopwatch2, eaListTO_10, 
Benchmark::DoNothing); + + if(i == 1) + Benchmark::AddResult("list/size/10", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime() + #if !EASTL_LIST_SIZE_CACHE + , "EASTL is configured to not cache the list size." + #endif + ); + + TestSize(stopwatch1, stdListTO_100, Benchmark::DoNothing); + TestSize(stopwatch2, eaListTO_100, Benchmark::DoNothing); + + if(i == 1) + Benchmark::AddResult("list/size/100", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime() + #if !EASTL_LIST_SIZE_CACHE + , "EASTL is configured to not cache the list size." + #endif + ); + + + + /////////////////////////////// + // Test find() + /////////////////////////////// + + TestFind(stopwatch1, stdListTO, TestObject(99999999)); + TestFind(stopwatch2, eaListTO, TestObject(99999999)); + + if(i == 1) + Benchmark::AddResult("list/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + + /////////////////////////////// + // Test reverse() + /////////////////////////////// + + TestReverse(stopwatch1, stdListTO); + TestReverse(stopwatch2, eaListTO); + + if(i == 1) + Benchmark::AddResult("list/reverse", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + + /////////////////////////////// + // Test remove() + /////////////////////////////// + + random_shuffle(toVector.begin(), toVector.end(), rng); + TestRemove(stopwatch1, stdListTO, &toVector[0], &toVector[20]); + TestRemove(stopwatch2, eaListTO, &toVector[0], &toVector[20]); + + if(i == 1) + Benchmark::AddResult("list/remove", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + + /////////////////////////////// + // Test splice() + /////////////////////////////// + StdListTO listCopyStd(stdListTO); + EaListTO listCopyEa(eaListTO); + + TestSplice(stopwatch1, stdListTO, listCopyStd); + TestSplice(stopwatch2, eaListTO, listCopyEa); + + if(i == 1) + Benchmark::AddResult("list/splice", 
stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + + /////////////////////////////// + // Test erase() + /////////////////////////////// + + TestErase(stopwatch1, stdListTO); + TestErase(stopwatch2, eaListTO); + + if(i == 1) + Benchmark::AddResult("list/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + } +} + + + + + + + + + + + + + + + + + + + + + + diff --git a/benchmark/source/BenchmarkMap.cpp b/benchmark/source/BenchmarkMap.cpp new file mode 100644 index 0000000..d2fc35e --- /dev/null +++ b/benchmark/source/BenchmarkMap.cpp @@ -0,0 +1,382 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS() +#include +#include +EA_RESTORE_ALL_VC_WARNINGS() + + +using namespace EA; + + +typedef std::map StdMapTOUint32; +typedef eastl::map EaMapTOUint32; + + +namespace +{ + template + void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd, const Value& highValue) + { + stopwatch.Restart(); + c.insert(pArrayBegin, pArrayEnd); + stopwatch.Stop(); + c.insert(highValue); + } + + + template + void TestIteration(EA::StdC::Stopwatch& stopwatch, const Container& c, const Value& findValue) + { + stopwatch.Restart(); + typename Container::const_iterator it = eastl::find(c.begin(), c.end(), findValue); // It shouldn't matter what find implementation we use here, as it merely iterates values. 
+ stopwatch.Stop(); + if(it != c.end()) + sprintf(Benchmark::gScratchBuffer, "%p", &*it); + } + + + template + void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + Benchmark::DoNothing(c[pArrayBegin->first]); + ++pArrayBegin; + } + stopwatch.Stop(); + } + + + template + void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + Benchmark::DoNothing(c.find(pArrayBegin->first)->second); + ++pArrayBegin; + } + stopwatch.Stop(); + } + + + template + void TestCount(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + typename Container::size_type temp = 0; + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + temp += c.count(pArrayBegin->first); + ++pArrayBegin; + } + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp); + } + + + template + void TestLowerBound(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + Benchmark::DoNothing(c.lower_bound(pArrayBegin->first)->second); + ++pArrayBegin; + } + stopwatch.Stop(); + } + + + template + void TestUpperBound(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + Benchmark::DoNothing(c.upper_bound(pArrayBegin->first)->second); + ++pArrayBegin; + } + stopwatch.Stop(); + } + + + template + void TestEqualRange(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + Benchmark::DoNothing(c.equal_range(pArrayBegin->first).second->second); + ++pArrayBegin; + } + stopwatch.Stop(); + } + + + 
template + void TestEraseValue(EA::StdC::Stopwatch& stopwatch, Container& c, const Value* pArrayBegin, const Value* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + c.erase(pArrayBegin->first); + ++pArrayBegin; + } + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size()); + } + + + template + void TestErasePosition(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it; + + stopwatch.Restart(); + for(j = 0, jEnd = c.size() / 3, it = c.begin(); j < jEnd; ++j) + { + // The erase function is supposed to return an iterator, but the C++ standard was + // not initially clear about it and some STL implementations don't do it correctly. + #if (((defined(_MSC_VER) || defined(_CPPLIB_VER)) && !defined(_HAS_STRICT_CONFORMANCE))) // _CPPLIB_VER is something defined by Dinkumware STL. + it = c.erase(it); // Standard behavior. + #else + // This pathway may execute at a slightly different speed than the + // standard behaviour, but that's fine for the benchmark because the + // benchmark is measuring the speed of erasing while iterating, and + // however it needs to get done by the given STL is how it is measured. 
+ const typename Container::iterator itErase(it++); + c.erase(itErase); + #endif + + ++it; + ++it; + } + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p %p", &c, &it); + } + + + template + void TestEraseRange(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it1 = c.begin(); + typename Container::iterator it2 = c.begin(); + + for(j = 0, jEnd = c.size() / 3; j < jEnd; ++j) + ++it2; + + stopwatch.Restart(); + c.erase(it1, it2); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%p %p %p", &c, &it1, &it2); + } + + + template + void TestClear(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + c.clear(); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size()); + } + + +} // namespace + + + +void BenchmarkMap() +{ + EASTLTest_Printf("Map\n"); + + EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed()); + EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + { + eastl::vector< std::pair > stdVector(10000); + eastl::vector< eastl::pair > eaVector(10000); + + for(eastl_size_t i = 0, iEnd = stdVector.size(); i < iEnd; i++) + { + const uint32_t n1 = rng.RandLimit(((uint32_t)iEnd / 2)); + const uint32_t n2 = rng.RandValue(); + + stdVector[i] = std::pair(TestObject(n1), n2); + eaVector[i] = eastl::pair(TestObject(n1), n2); + } + + for(int i = 0; i < 2; i++) + { + StdMapTOUint32 stdMapTOUint32; + EaMapTOUint32 eaMapTOUint32; + + + /////////////////////////////// + // Test insert(const value_type&) + /////////////////////////////// + const std::pair stdHighValue(TestObject(0x7fffffff), 0x7fffffff); + const eastl::pair eaHighValue(TestObject(0x7fffffff), 0x7fffffff); + + TestInsert(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size(), stdHighValue); + TestInsert(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + 
eaVector.size(), eaHighValue); + + if(i == 1) + Benchmark::AddResult("map/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test iteration + /////////////////////////////// + + TestIteration(stopwatch1, stdMapTOUint32, StdMapTOUint32::value_type(TestObject(9999999), 9999999)); + TestIteration(stopwatch2, eaMapTOUint32, EaMapTOUint32::value_type(TestObject(9999999), 9999999)); + + if(i == 1) + Benchmark::AddResult("map/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test operator[] + /////////////////////////////// + + TestBracket(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size()); + TestBracket(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size()); + + if(i == 1) + Benchmark::AddResult("map/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test find + /////////////////////////////// + + TestFind(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size()); + TestFind(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size()); + + if(i == 1) + Benchmark::AddResult("map/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test count + /////////////////////////////// + + TestCount(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size()); + TestCount(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size()); + + if(i == 1) + Benchmark::AddResult("map/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test lower_bound + /////////////////////////////// + + TestLowerBound(stopwatch1, stdMapTOUint32, 
stdVector.data(), stdVector.data() + stdVector.size()); + TestLowerBound(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size()); + + if(i == 1) + Benchmark::AddResult("map/lower_bound", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test upper_bound + /////////////////////////////// + + TestUpperBound(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size()); + TestUpperBound(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size()); + + if(i == 1) + Benchmark::AddResult("map/upper_bound", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test equal_range + /////////////////////////////// + + TestEqualRange(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + stdVector.size()); + TestEqualRange(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + eaVector.size()); + + if(i == 1) + Benchmark::AddResult("map/equal_range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase(const key_type& key) + /////////////////////////////// + + TestEraseValue(stopwatch1, stdMapTOUint32, stdVector.data(), stdVector.data() + (stdVector.size() / 2)); + TestEraseValue(stopwatch2, eaMapTOUint32, eaVector.data(), eaVector.data() + (eaVector.size() / 2)); + + if(i == 1) + Benchmark::AddResult("map/erase/key", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase(iterator position) + /////////////////////////////// + + TestErasePosition(stopwatch1, stdMapTOUint32); + TestErasePosition(stopwatch2, eaMapTOUint32); + + if(i == 1) + Benchmark::AddResult("map/erase/pos", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(), + GetStdSTLType() == kSTLMS ? 
"MS uses a code bloating implementation of erase." : NULL); + + + /////////////////////////////// + // Test erase(iterator first, iterator last) + /////////////////////////////// + + TestEraseRange(stopwatch1, stdMapTOUint32); + TestEraseRange(stopwatch2, eaMapTOUint32); + + if(i == 1) + Benchmark::AddResult("map/erase/range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test clear() + /////////////////////////////// + + TestClear(stopwatch1, stdMapTOUint32); + TestClear(stopwatch2, eaMapTOUint32); + + if(i == 1) + Benchmark::AddResult("map/clear", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + } + } +} + + + + + + + + + + + + + + + diff --git a/benchmark/source/BenchmarkSet.cpp b/benchmark/source/BenchmarkSet.cpp new file mode 100644 index 0000000..4a58b1a --- /dev/null +++ b/benchmark/source/BenchmarkSet.cpp @@ -0,0 +1,353 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS() +#include +#include +EA_RESTORE_ALL_VC_WARNINGS() + + +using namespace EA; + + +typedef std::set StdSetUint32; +typedef eastl::set EaSetUint32; + + +namespace +{ + template + void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd) + { + stopwatch.Restart(); + c.insert(pArrayBegin, pArrayEnd); + stopwatch.Stop(); + + // Intentionally push back a high uint32_t value. We do this so that + // later upper_bound, lower_bound and equal_range never return end(). 
+ c.insert(0xffffffff); + } + + + template + void TestIteration(EA::StdC::Stopwatch& stopwatch, const Container& c) + { + stopwatch.Restart(); + typename Container::const_iterator it = eastl::find(c.begin(), c.end(), uint32_t(9999999)); + stopwatch.Stop(); + if(it != c.end()) + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)*it); + } + + + template + void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd) + { + uint32_t temp = 0; + typename Container::iterator it; + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + it = c.find(*pArrayBegin++); + temp += *it; + } + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp); + } + + + template + void TestCount(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd) + { + typename Container::size_type temp = 0; + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + temp += c.count(*pArrayBegin++); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp); + } + + + template + void TestLowerBound(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd) + { + uint32_t temp = 0; + typename Container::iterator it; + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + it = c.lower_bound(*pArrayBegin++); + temp += *it; // We know that it != end because earlier we inserted 0xffffffff. + } + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp); + } + + + template + void TestUpperBound(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd) + { + uint32_t temp = 0; + typename Container::iterator it; + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + it = c.upper_bound(*pArrayBegin++); + temp += *it; // We know that it != end because earlier we inserted 0xffffffff. 
+ } + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp); + } + + + template + void TestEqualRange(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd) + { + uint32_t temp = 0; + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + { + temp += *(c.equal_range(*pArrayBegin++).first); // We know that it != end because earlier we inserted 0xffffffff. + } + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp); + } + + + template + void TestEraseValue(EA::StdC::Stopwatch& stopwatch, Container& c, const uint32_t* pArrayBegin, const uint32_t* pArrayEnd) + { + stopwatch.Restart(); + while(pArrayBegin != pArrayEnd) + c.erase(*pArrayBegin++); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size()); + } + + + template + void TestErasePosition(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it; + + stopwatch.Restart(); + for(j = 0, jEnd = c.size() / 3, it = c.begin(); j < jEnd; ++j) + { + // The erase function is supposed to return an iterator, but the C++ standard was + // not initially clear about it and some STL implementations don't do it correctly. + #if (((defined(_MSC_VER) || defined(_CPPLIB_VER)) && !defined(_HAS_STRICT_CONFORMANCE))) // _CPPLIB_VER is something defined by Dinkumware STL. + it = c.erase(it); + #else + // This pathway may execute at a slightly different speed than the + // standard behaviour, but that's fine for the benchmark because the + // benchmark is measuring the speed of erasing while iterating, and + // however it needs to get done by the given STL is how it is measured. 
+ const typename Container::iterator itErase(it++); + c.erase(itErase); + #endif + + ++it; + ++it; + } + stopwatch.Stop(); + } + + + template + void TestEraseRange(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it1 = c.begin(); + typename Container::iterator it2 = c.begin(); + + for(j = 0, jEnd = c.size() / 3; j < jEnd; ++j) + ++it2; + + stopwatch.Restart(); + c.erase(it1, it2); + stopwatch.Stop(); + } + + + template + void TestClear(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + c.clear(); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)c.size()); + } + + +} // namespace + + + +void BenchmarkSet() +{ + EASTLTest_Printf("Set\n"); + + EA::UnitTest::Rand rng(EA::UnitTest::GetRandSeed()); + EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + { + eastl::vector intVector(10000); + for(eastl_size_t i = 0, iEnd = intVector.size(); i < iEnd; i++) + intVector[i] = (uint32_t)rng.RandLimit(((uint32_t)iEnd / 2)); // This will result in duplicates and even a few triplicates. 
+ + for(int i = 0; i < 2; i++) + { + StdSetUint32 stdSetUint32; + EaSetUint32 eaSetUint32; + + + /////////////////////////////// + // Test insert(const value_type&) + /////////////////////////////// + + TestInsert(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size()); + TestInsert(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size()); + + if(i == 1) + Benchmark::AddResult("set/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test iteration + /////////////////////////////// + + TestIteration(stopwatch1, stdSetUint32); + TestIteration(stopwatch2, eaSetUint32); + + if(i == 1) + Benchmark::AddResult("set/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test find + /////////////////////////////// + + TestFind(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size()); + TestFind(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size()); + + if(i == 1) + Benchmark::AddResult("set/find", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test count + /////////////////////////////// + + TestCount(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size()); + TestCount(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size()); + + if(i == 1) + Benchmark::AddResult("set/count", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test lower_bound + /////////////////////////////// + + TestLowerBound(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size()); + TestLowerBound(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size()); + + if(i == 1) + 
Benchmark::AddResult("set/lower_bound", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test upper_bound + /////////////////////////////// + + TestUpperBound(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size()); + TestUpperBound(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size()); + + if(i == 1) + Benchmark::AddResult("set/upper_bound", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test equal_range + /////////////////////////////// + + TestEqualRange(stopwatch1, stdSetUint32, intVector.data(), intVector.data() + intVector.size()); + TestEqualRange(stopwatch2, eaSetUint32, intVector.data(), intVector.data() + intVector.size()); + + if(i == 1) + Benchmark::AddResult("set/equal_range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase(const key_type& key) + /////////////////////////////// + + TestEraseValue(stopwatch1, stdSetUint32, &intVector[0], &intVector[intVector.size() / 2]); + TestEraseValue(stopwatch2, eaSetUint32, &intVector[0], &intVector[intVector.size() / 2]); + + if(i == 1) + Benchmark::AddResult("set/erase/val", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase(iterator position) + /////////////////////////////// + + TestErasePosition(stopwatch1, stdSetUint32); + TestErasePosition(stopwatch2, eaSetUint32); + + if(i == 1) + Benchmark::AddResult("set/erase/pos", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime(), + GetStdSTLType() == kSTLMS ? "MS uses a code bloating implementation of erase." 
: NULL); + + + /////////////////////////////// + // Test erase(iterator first, iterator last) + /////////////////////////////// + + TestEraseRange(stopwatch1, stdSetUint32); + TestEraseRange(stopwatch2, eaSetUint32); + + if(i == 1) + Benchmark::AddResult("set/erase range", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test clear() + /////////////////////////////// + + TestClear(stopwatch1, stdSetUint32); + TestClear(stopwatch2, eaSetUint32); + + if(i == 1) + Benchmark::AddResult("set/clear", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + } + } +} + + + + + + + + + + + + + + + diff --git a/benchmark/source/BenchmarkSort.cpp b/benchmark/source/BenchmarkSort.cpp new file mode 100644 index 0000000..ccd2f43 --- /dev/null +++ b/benchmark/source/BenchmarkSort.cpp @@ -0,0 +1,1399 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#include +#include +#include +#include +#include "EASTLBenchmark.h" +#include "EASTLTest.h" + +EA_DISABLE_ALL_VC_WARNINGS() +#include +#include +#include +#include +EA_RESTORE_ALL_VC_WARNINGS() + + +using namespace EA; + + +namespace +{ + struct ValuePair + { + uint32_t key; + uint32_t v; + }; + + struct VPCompare + { + bool operator()(const ValuePair& vp1, const ValuePair& vp2) const + { + // return *(const uint64_t*)&vp1 < *(const uint64_t*)&vp2; + return (vp1.key == vp2.key) ? (vp1.v < vp2.v) : (vp1.key < vp2.key); + } + }; + + bool operator<(const ValuePair& vp1, const ValuePair& vp2) + { + // return *(const uint64_t*)&vp1 < *(const uint64_t*)&vp2; + return (vp1.key == vp2.key) ? 
(vp1.v < vp2.v) : (vp1.key < vp2.key); + } + + bool operator==(const ValuePair& vp1, const ValuePair& vp2) + { + // return *(const uint64_t*)&vp1 == *(const uint64_t*)&vp2; + return (vp1.key == vp2.key) && (vp1.v == vp2.v); + } +} + +// VPCompareC +// Useful for testing the C qsort function. +int VPCompareC(const void* elem1, const void* elem2) +{ + return (int)(*(const uint64_t*)elem1 - *(const uint64_t*)elem2); +} + + +typedef std::vector StdVectorVP; +typedef eastl::vector EaVectorVP; + +typedef std::vector StdVectorInt; +typedef eastl::vector EaVectorInt; + +typedef std::vector StdVectorTO; +typedef eastl::vector EaVectorTO; + + +namespace +{ + #ifndef EA_PREFIX_NO_INLINE + #ifdef _MSC_VER + #define EA_PREFIX_NO_INLINE EA_NO_INLINE + #define EA_POSTFIX_NO_INLINE + #else + #define EA_PREFIX_NO_INLINE + #define EA_POSTFIX_NO_INLINE EA_NO_INLINE + #endif + #endif + + EA_PREFIX_NO_INLINE void TestQuickSortStdVP (EA::StdC::Stopwatch& stopwatch, StdVectorVP& stdVectorVP) EA_POSTFIX_NO_INLINE; + EA_PREFIX_NO_INLINE void TestQuickSortEaVP (EA::StdC::Stopwatch& stopwatch, EaVectorVP& eaVectorVP) EA_POSTFIX_NO_INLINE; + EA_PREFIX_NO_INLINE void TestQuickSortStdInt(EA::StdC::Stopwatch& stopwatch, StdVectorInt& stdVectorInt) EA_POSTFIX_NO_INLINE; + EA_PREFIX_NO_INLINE void TestQuickSortEaInt (EA::StdC::Stopwatch& stopwatch, EaVectorInt& eaVectorInt) EA_POSTFIX_NO_INLINE; + EA_PREFIX_NO_INLINE void TestQuickSortStdTO (EA::StdC::Stopwatch& stopwatch, StdVectorTO& stdVectorTO) EA_POSTFIX_NO_INLINE; + EA_PREFIX_NO_INLINE void TestQuickSortEaTO (EA::StdC::Stopwatch& stopwatch, EaVectorTO& eaVectorTO) EA_POSTFIX_NO_INLINE; + + + + void TestQuickSortStdVP(EA::StdC::Stopwatch& stopwatch, StdVectorVP& stdVectorVP) + { + stopwatch.Restart(); + std::sort(stdVectorVP.begin(), stdVectorVP.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)stdVectorVP[0].key); + } + + + void TestQuickSortEaVP(EA::StdC::Stopwatch& stopwatch, EaVectorVP& eaVectorVP) + { + 
stopwatch.Restart(); + eastl::quick_sort(eaVectorVP.begin(), eaVectorVP.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)eaVectorVP[0].key); + } + + + void TestQuickSortStdInt(EA::StdC::Stopwatch& stopwatch, StdVectorInt& stdVectorInt) + { + stopwatch.Restart(); + std::sort(stdVectorInt.begin(), stdVectorInt.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)stdVectorInt[0]); + } + + + void TestQuickSortEaInt(EA::StdC::Stopwatch& stopwatch, EaVectorInt& eaVectorInt) + { + stopwatch.Restart(); + eastl::quick_sort(eaVectorInt.begin(), eaVectorInt.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)eaVectorInt[0]); + } + + + void TestQuickSortStdTO(EA::StdC::Stopwatch& stopwatch, StdVectorTO& stdVectorTO) + { + stopwatch.Restart(); + std::sort(stdVectorTO.begin(), stdVectorTO.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)stdVectorTO[0].mX); + } + + + void TestQuickSortEaTO(EA::StdC::Stopwatch& stopwatch, EaVectorTO& eaVectorTO) + { + stopwatch.Restart(); + eastl::quick_sort(eaVectorTO.begin(), eaVectorTO.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)eaVectorTO[0].mX); + } + +} // namespace + + +namespace +{ + enum SortFunctionType + { + sf_qsort, // C qsort + sf_shell_sort, // eastl::shell_sort. 
+ sf_heap_sort, // eastl::heap_sort + sf_merge_sort, // eastl::merge_sort + sf_merge_sort_buffer, // eastl::merge_sort_buffer + sf_comb_sort, // eastl::comb_sort + sf_bubble_sort, // eastl::bubble_sort + sf_selection_sort, // eastl::selection_sort + sf_shaker_sort, // eastl::shaker_sort + sf_quick_sort, // eastl::quick_sort + sf_tim_sort, // eastl::tim_sort + sf_insertion_sort, // eastl::insertion_sort + sf_std_sort, // std::sort + sf_std_stable_sort, // std::stable_sort + sf_radix_sort, // eastl::radix_sort (unconventional sort) + sf_count // + }; + + const char* GetSortFunctionName(int sortFunctionType) + { + switch (sortFunctionType) + { + case sf_quick_sort: + return "eastl::sort"; + + case sf_tim_sort: + return "eastl::tim_sort"; + + case sf_insertion_sort: + return "eastl::insertion_sort"; + + case sf_shell_sort: + return "eastl::shell_sort"; + + case sf_heap_sort: + return "eastl::heap_sort"; + + case sf_merge_sort: + return "eastl::merge_sort"; + + case sf_merge_sort_buffer: + return "eastl::merge_sort_buffer"; + + case sf_comb_sort: + return "eastl::comb_sort"; + + case sf_bubble_sort: + return "eastl::bubble_sort"; + + case sf_selection_sort: + return "eastl::selection_sort"; + + case sf_shaker_sort: + return "eastl::shaker_sort"; + + case sf_radix_sort: + return "eastl::radix_sort"; + + case sf_qsort: + return "qsort"; + + case sf_std_sort: + return "std::sort"; + + case sf_std_stable_sort: + return "std::stable_sort"; + + default: + return "unknown"; + } + } + + + enum RandomizationType + { + kRandom, // Completely random data. + kRandomSorted, // Random values already sorted. + kOrdered, // Already sorted. + kMostlyOrdered, // Partly sorted already. 
+ kRandomizationTypeCount + }; + + const char* GetRandomizationTypeName(int randomizationType) + { + switch (randomizationType) + { + case kRandom: + return "random"; + + case kRandomSorted: + return "random sorted"; + + case kOrdered: + return "ordered"; + + case kMostlyOrdered: + return "mostly ordered"; + + default: + return "unknown"; + } + } + + template + void Randomize(eastl::vector& v, EA::UnitTest::RandGenT& rng, RandomizationType type) + { + typedef RandomType value_type; + + switch (type) + { + default: + case kRandomizationTypeCount: // We specify this only to avoid a compiler warning about not testing for it. + case kRandom: + { + eastl::generate(v.begin(), v.end(), rng); + break; + } + + case kRandomSorted: + { + // This randomization type differs from kOrdered because the set of values is random (but sorted), in the kOrdered + // case the set of values is contiguous (i.e. 0, 1, ..., n) which can have different performance characteristics. + // For example, radix_sort performs poorly for kOrdered. + eastl::generate(v.begin(), v.end(), rng); + eastl::sort(v.begin(), v.end()); + break; + } + + case kOrdered: + { + for(eastl_size_t i = 0; i < v.size(); ++i) + v[i] = value_type((value_type)i); // Note that value_type may be a struct and not an integer. Thus the casting and construction here. + break; + } + + case kMostlyOrdered: + { + for(eastl_size_t i = 0; i < v.size(); ++i) + v[i] = value_type((value_type)i); // Note that value_type may be a struct and not an integer. Thus the casting and construction here. + + // We order random segments. + // The algorithm below in practice will make slightly more than kPercentOrdered be ordered. + const eastl_size_t kPercentOrdered = 80; // In actuality, due to statistics, the actual ordered percent will be about 82-85%. + + for(eastl_size_t n = 0, s = v.size(), nEnd = ((s < (100 - kPercentOrdered)) ? 
1 : (s / (100 - kPercentOrdered))); n < nEnd; n++) + { + eastl_size_t i = rng.mRand.RandLimit((uint32_t)s); + eastl_size_t j = rng.mRand.RandLimit((uint32_t)s); + + eastl::swap(v[i], v[j]); + } + + break; + } + } + } + + + char gSlowAssignBuffer1[256] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* ... */}; + char gSlowAssignBuffer2[256] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* ... */}; + + + // SlowAssign + // Implements an object which has slow assign performance. + template + struct SlowAssign + { + typedef T key_type; + T x; + + static int nAssignCount; + + SlowAssign() + { x = 0; memcpy(gSlowAssignBuffer1, gSlowAssignBuffer2, sizeof(gSlowAssignBuffer1)); } + + SlowAssign(const SlowAssign& sa) + { ++nAssignCount; x = sa.x; memcpy(gSlowAssignBuffer1, gSlowAssignBuffer2, sizeof(gSlowAssignBuffer1)); } + + SlowAssign& operator=(const SlowAssign& sa) + { ++nAssignCount; x = sa.x; memcpy(gSlowAssignBuffer1, gSlowAssignBuffer2, sizeof(gSlowAssignBuffer1)); return *this; } + + SlowAssign& operator=(int a) + { x = (T)a; return *this; } + + static void Reset() + { nAssignCount = 0; } + }; + + template<> int SlowAssign::nAssignCount = 0; + + template + bool operator <(const SlowAssign& a, const SlowAssign& b) + { return a.x < b.x; } + + + // SlowCompare + // Implements a compare which is N time slower than a simple integer compare. + template + struct SlowCompare + { + static int nCompareCount; + + bool operator()(T a, T b) + { + ++nCompareCount; + + return (a < b) && // It happens that gSlowAssignBuffer1 is always zeroed. + (gSlowAssignBuffer1[0] == 0) && (gSlowAssignBuffer1[1] == 0) && (gSlowAssignBuffer1[1] == 0) && + (gSlowAssignBuffer1[2] == 0) && (gSlowAssignBuffer1[4] == 0) && (gSlowAssignBuffer1[5] == 0); + } + + static void Reset() { nCompareCount = 0; } + }; + + template <> + int SlowCompare::nCompareCount = 0; + + + // qsort callback functions + // qsort compare function returns negative if b > a and positive if a > b. 
+ template + int CompareInteger(const void* a, const void* b) + { + // Even though you see the following in Internet example code, it doesn't work! + // The reason is that it works only if a and b are both >= 0, otherwise large + // values can cause integer register wraparound. A similar kind of problem happens + // if you try to do the same thing with floating point value compares. + // See http://www.akalin.cx/2006/06/23/on-the-qsort-comparison-function/ + // Internet example code: + // return *(const int32_t*)a - *(const int32_t*)b; + + // This double comparison might seem like it's crippling qsort against the + // STL-based sorts which do a single compare. But consider that the returning + // of -1, 0, +1 gives qsort more information, and its logic takes advantage + // of that. + if (*(const T*)a < *(const T*)b) + return -1; + if (*(const T*)a > *(const T*)b) + return +1; + return 0; + } + + + int SlowCompareInt32(const void* a, const void* b) + { + ++SlowCompare::nCompareCount; + + // This code is similar in performance to the C++ SlowCompare template functor above. 
+ if((gSlowAssignBuffer1[0] == 0) && (gSlowAssignBuffer1[1] == 0) && + (gSlowAssignBuffer1[1] == 0) && (gSlowAssignBuffer1[2] == 0) && + (gSlowAssignBuffer1[4] == 0) && (gSlowAssignBuffer1[5] == 0)) + { + if (*(const int32_t*)a < *(const int32_t*)b) + return -1; + if (*(const int32_t*)a > *(const int32_t*)b) + return +1; + } + + return 0; + } + + template + struct slow_assign_extract_radix_key + { + typedef typename slow_assign_type::key_type radix_type; + + const radix_type operator()(const slow_assign_type& obj) const + { + return obj.x; + } + }; + + template + struct identity_extract_radix_key + { + typedef integer_type radix_type; + + const radix_type operator()(const integer_type& x) const + { + return x; + } + }; +} // namespace + + +struct BenchmarkResult +{ + uint64_t mTime; + uint64_t mCompareCount; + uint64_t mAssignCount; + + BenchmarkResult() : mTime(0), mCompareCount(0), mAssignCount(0) {} +}; + + +int CompareSortPerformance() +{ + // Sizes of arrays to be sorted. + const eastl_size_t kSizes[] = { 10, 100, 1000, 10000 }; + const eastl_size_t kSizesCount = EAArrayCount(kSizes); + static BenchmarkResult sResults[kRandomizationTypeCount][kSizesCount][sf_count]; + int nErrorCount = 0; + + EA::UnitTest::ReportVerbosity(2, "Sort comparison\n"); + EA::UnitTest::ReportVerbosity(2, "Random seed = %u\n", (unsigned)EA::UnitTest::GetRandSeed()); + + EA::UnitTest::RandGenT rng(EA::UnitTest::GetRandSeed()); + EA::StdC::Stopwatch stopwatch(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatchGlobal(EA::StdC::Stopwatch::kUnitsSeconds); + const eastl_size_t kArraySizeMax = *eastl::max_element(eastl::begin(kSizes), eastl::end(kSizes)); + const int kRunCount = 4; + + #if !defined(EA_DEBUG) + EA::UnitTest::SetHighThreadPriority(); + #endif + + eastl::vector allSortFunctions; + for (int i = 0; i < sf_count; i++) + { + allSortFunctions.push_back(SortFunctionType(i)); + } + + { + auto& sortFunctions = allSortFunctions; + + // Regular speed test. 
+ // In this case we test the sorting of integral values. + // This is probably the most common type of comparison. + EA::UnitTest::ReportVerbosity(2, "Sort comparison: Regular speed test\n"); + + typedef uint32_t ElementType; + typedef eastl::less CompareFunction; + + eastl::string sOutput; + sOutput.set_capacity(100000); + ElementType* pBuffer = new ElementType[kArraySizeMax]; + + memset(sResults, 0, sizeof(sResults)); + + stopwatchGlobal.Restart(); + + for (int c = 0; c < kRunCount; c++) + { + for (int i = 0; i < kRandomizationTypeCount; i++) + { + for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++) + { + const eastl_size_t size = kSizes[sizeType]; + + for (SortFunctionType sortFunction : sortFunctions) + { + eastl::vector v(size); + + rng.SetSeed(EA::UnitTest::GetRandSeed()); + Randomize(v, rng, (RandomizationType)i); + + switch (sortFunction) + { + case sf_quick_sort: + stopwatch.Restart(); + eastl::quick_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_tim_sort: + stopwatch.Restart(); + eastl::tim_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction()); + stopwatch.Stop(); + break; + + case sf_insertion_sort: + stopwatch.Restart(); + eastl::insertion_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_shell_sort: + stopwatch.Restart(); + eastl::shell_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_heap_sort: + stopwatch.Restart(); + eastl::heap_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_merge_sort: + stopwatch.Restart(); + eastl::merge_sort(v.begin(), v.end(), *get_default_allocator((EASTLAllocatorType*)NULL), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_merge_sort_buffer: + stopwatch.Restart(); + eastl::merge_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction()); + stopwatch.Stop(); + break; + + case sf_comb_sort: + stopwatch.Restart(); + 
eastl::comb_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_bubble_sort: + stopwatch.Restart(); + eastl::bubble_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_selection_sort: + stopwatch.Restart(); + eastl::selection_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_shaker_sort: + stopwatch.Restart(); + eastl::shaker_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_radix_sort: + stopwatch.Restart(); + eastl::radix_sort>(v.begin(), v.end(), pBuffer); + stopwatch.Stop(); + break; + + case sf_qsort: + stopwatch.Restart(); + qsort(&v[0], (size_t)v.size(), sizeof(ElementType), CompareInteger); + stopwatch.Stop(); + break; + + case sf_std_sort: + stopwatch.Restart(); + std::sort(v.data(), v.data() + v.size(), std::less()); + stopwatch.Stop(); + break; + + case sf_std_stable_sort: + stopwatch.Restart(); + std::stable_sort(v.data(), v.data() + v.size(), std::less()); + stopwatch.Stop(); + break; + + case sf_count: + default: + // unsupported + break; + } + + const uint64_t elapsedTime = (uint64_t)stopwatch.GetElapsedTime(); + + // If this result was faster than a previously fastest result, record this one instead. + if ((c == 0) || (elapsedTime < sResults[i][sizeType][sortFunction].mTime)) + sResults[i][sizeType][sortFunction].mTime = elapsedTime; + + VERIFY(eastl::is_sorted(v.begin(), v.end())); + + } // for each sort function... + + } // for each size type... + + } // for each randomization type... + + } // for each run + + EA::UnitTest::ReportVerbosity(2, "Total time: %.2f s\n", stopwatchGlobal.GetElapsedTimeFloat()); + + delete[] pBuffer; + + // Now print the results. 
+ for (int i = 0; i < kRandomizationTypeCount; i++) + { + for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++) + { + const eastl_size_t size = kSizes[sizeType]; + + for (SortFunctionType sortFunction : sortFunctions) + { + sOutput.append_sprintf("%25s, %14s, Size: %8u, Time: %14" PRIu64 " ticks %0.2f ticks/elem\n", + GetSortFunctionName(sortFunction), GetRandomizationTypeName(i), + (unsigned)size, sResults[i][sizeType][sortFunction].mTime, + float(sResults[i][sizeType][sortFunction].mTime)/float(size)); + } + sOutput.append("\n"); + } + } + + EA::UnitTest::ReportVerbosity(2, "%s\n\n", sOutput.c_str()); + } + + { + // Do a speed test for the case of slow compares. + // By this we mean to compare sorting speeds when the comparison of elements is slow. + // Sort functions use element comparison to tell where elements go and use element + // movement to get them there. But some sorting functions accomplish sorting performance by + // minimizing the amount of movement, some minimize the amount of comparisons, and the + // best do a good job of minimizing both. + auto sortFunctions = allSortFunctions; + // We can't test this radix_sort because what we need isn't exposed. 
+ sortFunctions.erase(eastl::remove(sortFunctions.begin(), sortFunctions.end(), sf_radix_sort), sortFunctions.end()); + EA::UnitTest::ReportVerbosity(2, "Sort comparison: Slow compare speed test\n"); + + typedef int32_t ElementType; + typedef SlowCompare CompareFunction; + + eastl::string sOutput; + sOutput.set_capacity(100000); + ElementType* pBuffer = new ElementType[kArraySizeMax]; + + memset(sResults, 0, sizeof(sResults)); + + stopwatchGlobal.Restart(); + + for (int c = 0; c < kRunCount; c++) + { + for (int i = 0; i < kRandomizationTypeCount; i++) + { + for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++) + { + const eastl_size_t size = kSizes[sizeType]; + + for (SortFunctionType sortFunction : sortFunctions) + { + eastl::vector v(size); + + rng.SetSeed(EA::UnitTest::GetRandSeed()); + Randomize(v, rng, (RandomizationType)i); + CompareFunction::Reset(); + + switch (sortFunction) + { + case sf_quick_sort: + stopwatch.Restart(); + eastl::quick_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_tim_sort: + stopwatch.Restart(); + eastl::tim_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction()); + stopwatch.Stop(); + break; + + case sf_insertion_sort: + stopwatch.Restart(); + eastl::insertion_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_shell_sort: + stopwatch.Restart(); + eastl::shell_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_heap_sort: + stopwatch.Restart(); + eastl::heap_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_merge_sort: + stopwatch.Restart(); + eastl::merge_sort(v.begin(), v.end(), *get_default_allocator((EASTLAllocatorType*)NULL), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_merge_sort_buffer: + stopwatch.Restart(); + eastl::merge_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction()); + stopwatch.Stop(); + break; + + case sf_comb_sort: + 
stopwatch.Restart(); + eastl::comb_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_bubble_sort: + stopwatch.Restart(); + eastl::bubble_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_selection_sort: + stopwatch.Restart(); + eastl::selection_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_shaker_sort: + stopwatch.Restart(); + eastl::shaker_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_qsort: + stopwatch.Restart(); + qsort(&v[0], (size_t)v.size(), sizeof(ElementType), SlowCompareInt32); + stopwatch.Stop(); + break; + + case sf_std_sort: + stopwatch.Restart(); + std::sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_std_stable_sort: + stopwatch.Restart(); + std::stable_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_radix_sort: + case sf_count: + default: + // unsupported + break; + } + + const uint64_t elapsedTime = (uint64_t)stopwatch.GetElapsedTime(); + + // If this result was faster than a previously fastest result, record this one instead. + if ((c == 0) || (elapsedTime < sResults[i][sizeType][sortFunction].mTime)) + { + sResults[i][sizeType][sortFunction].mTime = elapsedTime; + sResults[i][sizeType][sortFunction].mCompareCount = (uint64_t)CompareFunction::nCompareCount; + } + + VERIFY(eastl::is_sorted(v.begin(), v.end())); + } // for each sort function... + + } // for each size type... + + } // for each randomization type... + + } // for each run + + EA::UnitTest::ReportVerbosity(2, "Total time: %.2f s\n", stopwatchGlobal.GetElapsedTimeFloat()); + + delete[] pBuffer; + + // Now print the results. 
+ for (int i = 0; i < kRandomizationTypeCount; i++) + { + for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++) + { + const eastl_size_t size = kSizes[sizeType]; + + for (SortFunctionType sortFunction : sortFunctions) + { + sOutput.append_sprintf("%25s, %14s, Size: %6u, Time: %11" PRIu64 " ticks, Compares: %11" PRIu64 "\n", + GetSortFunctionName(sortFunction), GetRandomizationTypeName(i), + (unsigned)size, sResults[i][sizeType][sortFunction].mTime, + sResults[i][sizeType][sortFunction].mCompareCount); + } + + sOutput.append("\n"); + } + } + + EA::UnitTest::ReportVerbosity(2, "%s\n\n", sOutput.c_str()); + } + + { + // Do a speed test for the case of slow assignment. + // By this we mean to compare sorting speeds when the movement of elements is slow. + // Sort functions use element comparison to tell where elements go and use element + // movement to get them there. But some sorting functions accomplish sorting performance by + // minimizing the amount of movement, some minimize the amount of comparisons, and the + // best do a good job of minimizing both. + auto sortFunctions = allSortFunctions; + // Can't implement this for qsort because the C standard library doesn't expose it. + // We could implement it by copying and modifying the source code. 
+ sortFunctions.erase(eastl::remove(sortFunctions.begin(), sortFunctions.end(), sf_qsort), sortFunctions.end()); + + EA::UnitTest::ReportVerbosity(2, "Sort comparison: Slow assignment speed test\n"); + + typedef SlowAssign ElementType; + typedef eastl::less CompareFunction; + + eastl::string sOutput; + sOutput.set_capacity(100000); + ElementType* pBuffer = new ElementType[kArraySizeMax]; + + memset(sResults, 0, sizeof(sResults)); + + stopwatchGlobal.Restart(); + + for (int c = 0; c < kRunCount; c++) + { + for (int i = 0; i < kRandomizationTypeCount; i++) + { + for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++) + { + const eastl_size_t size = kSizes[sizeType]; + + for (SortFunctionType sortFunction : sortFunctions) + { + eastl::vector v(size); + + Randomize(v, rng, (RandomizationType)i); + ElementType::Reset(); + + switch (sortFunction) + { + case sf_quick_sort: + stopwatch.Restart(); + eastl::quick_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_tim_sort: + stopwatch.Restart(); + eastl::tim_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction()); + stopwatch.Stop(); + break; + + case sf_insertion_sort: + stopwatch.Restart(); + eastl::insertion_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_shell_sort: + stopwatch.Restart(); + eastl::shell_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_heap_sort: + stopwatch.Restart(); + eastl::heap_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_merge_sort: + stopwatch.Restart(); + eastl::merge_sort(v.begin(), v.end(), *get_default_allocator((EASTLAllocatorType*)NULL), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_merge_sort_buffer: + stopwatch.Restart(); + eastl::merge_sort_buffer(v.begin(), v.end(), pBuffer, CompareFunction()); + stopwatch.Stop(); + break; + + case sf_comb_sort: + stopwatch.Restart(); + eastl::comb_sort(v.begin(), v.end(), 
CompareFunction()); + stopwatch.Stop(); + break; + + case sf_bubble_sort: + stopwatch.Restart(); + eastl::bubble_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_selection_sort: + stopwatch.Restart(); + eastl::selection_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_shaker_sort: + stopwatch.Restart(); + eastl::shaker_sort(v.begin(), v.end(), CompareFunction()); + stopwatch.Stop(); + break; + + case sf_radix_sort: + stopwatch.Restart(); + eastl::radix_sort>(v.begin(), v.end(), pBuffer); + stopwatch.Stop(); + break; + + case sf_std_sort: + stopwatch.Restart(); + std::sort(v.begin(), v.end(), std::less()); + stopwatch.Stop(); + break; + + case sf_std_stable_sort: + stopwatch.Restart(); + std::stable_sort(v.begin(), v.end(), std::less()); + stopwatch.Stop(); + break; + + case sf_qsort: + case sf_count: + default: + // unsupported + break; + } + + const uint64_t elapsedTime = (uint64_t)stopwatch.GetElapsedTime(); + + // If this result was faster than a previously fastest result, record this one instead. + if ((c == 0) || (elapsedTime < sResults[i][sizeType][sortFunction].mTime)) + { + sResults[i][sizeType][sortFunction].mTime = elapsedTime; + sResults[i][sizeType][sortFunction].mAssignCount = (uint64_t)ElementType::nAssignCount; + } + + VERIFY(eastl::is_sorted(v.begin(), v.end())); + + } // for each sort function... + + } // for each size type... + + } // for each randomization type... + + } // for each run + + EA::UnitTest::ReportVerbosity(2, "Total time: %.2f s\n", stopwatchGlobal.GetElapsedTimeFloat()); + + delete[] pBuffer; + + // Now print the results. 
+ for (int i = 0; i < kRandomizationTypeCount; i++) + { + for (size_t sizeType = 0; sizeType < EAArrayCount(kSizes); sizeType++) + { + const eastl_size_t size = kSizes[sizeType]; + + for (SortFunctionType sortFunction : sortFunctions) + { + sOutput.append_sprintf("%25s, %14s, Size: %6u, Time: %11" PRIu64 " ticks, Assignments: %11" PRIu64 "\n", + GetSortFunctionName(sortFunction), GetRandomizationTypeName(i), + (unsigned)size, sResults[i][sizeType][sortFunction].mTime, + sResults[i][sizeType][sortFunction].mAssignCount); + } + + sOutput.append("\n"); + } + } + EA::UnitTest::ReportVerbosity(2, "%s\n", sOutput.c_str()); + } + + #if !defined(EA_DEBUG) + EA::UnitTest::SetNormalThreadPriority(); + #endif + + return nErrorCount; +} + +typedef eastl::function OutputResultCallback; +typedef eastl::function PostExecuteCallback; +typedef eastl::function PreExecuteCallback; + + +template +static int CompareSmallInputSortPerformanceHelper(eastl::vector &arraySizes, eastl::vector &sortFunctions, const PreExecuteCallback &preExecuteCallback, const PostExecuteCallback &postExecuteCallback, const OutputResultCallback &outputResultCallback) +{ + int nErrorCount = 0; + + EA::UnitTest::RandGenT rng(EA::UnitTest::GetRandSeed()); + EA::StdC::Stopwatch stopwatch(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatchGlobal(EA::StdC::Stopwatch::kUnitsSeconds); + const eastl_size_t kArraySizeMax = *eastl::max_element(eastl::begin(arraySizes), eastl::end(arraySizes)); + const int kRunCount = 4; + const int numSubArrays = 128; + + eastl::string sOutput; + sOutput.set_capacity(100000); + ElementType* pBuffer = new ElementType[kArraySizeMax]; + + stopwatchGlobal.Restart(); + + for (int i = 0; i < kRandomizationTypeCount; i++) + { + for (size_t size : arraySizes) + { + for (SortFunctionType sortFunction : sortFunctions) + { + BenchmarkResult bestResult{}; + + for (int c = 0; c < kRunCount; c++) + { + eastl::vector v(size * numSubArrays); + + 
rng.SetSeed(EA::UnitTest::GetRandSeed()); + Randomize(v, rng, (RandomizationType)i); + preExecuteCallback(); + + switch (sortFunction) + { + case sf_quick_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + eastl::quick_sort(begin, begin + size, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_tim_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + eastl::tim_sort_buffer(begin, begin + size, pBuffer, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_insertion_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + eastl::insertion_sort(begin, begin + size, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_shell_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + eastl::shell_sort(begin, begin + size, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_heap_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + eastl::heap_sort(begin, begin + size, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_merge_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + eastl::merge_sort(begin, begin + size, *get_default_allocator((EASTLAllocatorType*)NULL), CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_merge_sort_buffer: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + eastl::merge_sort_buffer(begin, begin + size, pBuffer, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_comb_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + eastl::comb_sort(begin, begin + size, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_bubble_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); 
begin += size) + { + eastl::bubble_sort(begin, begin + size, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_selection_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + eastl::selection_sort(begin, begin + size, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_shaker_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + eastl::shaker_sort(begin, begin + size, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_std_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + std::sort(begin, begin + size, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_std_stable_sort: + stopwatch.Restart(); + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + std::stable_sort(begin, begin + size, CompareFunction()); + } + stopwatch.Stop(); + break; + + case sf_qsort: + case sf_radix_sort: + case sf_count: + default: + EATEST_VERIFY_F(false, "Missing case statement for sort function %s.", GetSortFunctionName(sortFunction)); + break; + } + + BenchmarkResult result {}; + result.mTime = (uint64_t)stopwatch.GetElapsedTime(); + postExecuteCallback(result); + + // If this result was faster than a previously fastest result, record this one instead. + if ((c == 0) || (result.mTime < bestResult.mTime)) + bestResult = result; + + for (auto begin = v.begin(); begin != v.end(); begin += size) + { + VERIFY(eastl::is_sorted(begin, begin + size)); + } + } // for each run + + outputResultCallback(sOutput, GetSortFunctionName(sortFunction), GetRandomizationTypeName(i), size, numSubArrays, bestResult); + + } // for each sort function... + sOutput.append("\n"); + + } // for each size type... + + } // for each randomization type... 
+ + EA::UnitTest::ReportVerbosity(2, "Total time: %.2f s\n", stopwatchGlobal.GetElapsedTimeFloat()); + EA::UnitTest::ReportVerbosity(2, "%s\n", sOutput.c_str()); + + delete[] pBuffer; + return nErrorCount; +} + +static int CompareSmallInputSortPerformance() +{ + int nErrorCount = 0; + eastl::vector arraySizes{1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 64, 128, 256}; + // Test quick sort and merge sort to provide a "base line" for performance. The other sort algorithms are mostly + // O(n^2) and they are benchmarked to determine what sorts are ideal for sorting small arrays or sub-arrays. (i.e. + // this is useful to determine good algorithms to choose as a base case for some of the recursive sorts). + eastl::vector sortFunctions{sf_quick_sort, sf_merge_sort_buffer, sf_bubble_sort, sf_comb_sort, + sf_insertion_sort, sf_selection_sort, sf_shell_sort, sf_shaker_sort}; + + EA::UnitTest::ReportVerbosity(2, "Small Sub-array Sort comparison: Regular speed test\n"); + nErrorCount += CompareSmallInputSortPerformanceHelper>( + arraySizes, sortFunctions, PreExecuteCallback([]() {}), PostExecuteCallback([](BenchmarkResult&) {}), + OutputResultCallback([](eastl::string& output, const char* sortFunction, const char* randomizationType, + size_t size, size_t numSubArrays, const BenchmarkResult& result) { + output.append_sprintf("%25s, %14s, Size: %8u, Time: %0.1f ticks %0.2f ticks/elem\n", sortFunction, + randomizationType, (unsigned)size, float(result.mTime) / float(numSubArrays), + float(result.mTime) / float(size * numSubArrays)); + })); + + EA::UnitTest::ReportVerbosity(2, "Small Sub-array Sort comparison: Slow compare speed test\n"); + nErrorCount += CompareSmallInputSortPerformanceHelper>( + arraySizes, sortFunctions, PreExecuteCallback([]() { SlowCompare::Reset(); }), + PostExecuteCallback( + [](BenchmarkResult& result) { result.mCompareCount = (uint64_t)SlowCompare::nCompareCount; }), + OutputResultCallback([](eastl::string& output, const char* sortFunction, const char* 
randomizationType, + size_t size, size_t numSubArrays, const BenchmarkResult& result) { + output.append_sprintf("%25s, %14s, Size: %6u, Time: %0.2f ticks, Compares: %0.2f\n", sortFunction, + randomizationType, (unsigned)size, float(result.mTime) / float(numSubArrays), + float(result.mCompareCount) / float(numSubArrays)); + })); + + EA::UnitTest::ReportVerbosity(2, "Small Sub-array Sort comparison: Slow assignment speed test\n"); + nErrorCount += CompareSmallInputSortPerformanceHelper, eastl::less>>( + arraySizes, sortFunctions, PreExecuteCallback([]() { SlowAssign::Reset(); }), + PostExecuteCallback([](BenchmarkResult& result) { + result.mCompareCount = (uint64_t)SlowCompare::nCompareCount; + result.mAssignCount = (uint64_t)SlowAssign::nAssignCount; + }), + OutputResultCallback([](eastl::string& output, const char* sortFunction, const char* randomizationType, + size_t size, size_t numSubArrays, const BenchmarkResult& result) { + output.append_sprintf("%25s, %14s, Size: %6u, Time: %0.2f ticks, Assignments: %0.2f\n", sortFunction, + randomizationType, (unsigned)size, float(result.mTime) / float(numSubArrays), + float(result.mAssignCount) / float(numSubArrays)); + })); + + return nErrorCount; +} + + +void BenchmarkSort() +{ + EASTLTest_Printf("Sort\n"); + + EA::UnitTest::RandGenT rng(12345678); // For debugging sort code we should use 12345678, for normal testing use EA::UnitTest::GetRandSeed(). 
+ EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + if (EA::UnitTest::GetVerbosity() >= 3) + { + CompareSortPerformance(); + CompareSmallInputSortPerformance(); + } + + { // Exercise some declarations + int nErrorCount = 0; + + ValuePair vp1 = {0, 0}, vp2 = {0, 0}; + VPCompare c1, c2; + + VERIFY(c1.operator()(vp1, vp2) == c2.operator()(vp1, vp2)); + VERIFY((vp1 < vp2) || (vp1 == vp2) || !(vp1 == vp2)); + } + + { + eastl::vector intVector(10000); + eastl::generate(intVector.begin(), intVector.end(), rng); + + for (int i = 0; i < 2; i++) + { + /////////////////////////////// + // Test quick_sort/vector/ValuePair + /////////////////////////////// + + StdVectorVP stdVectorVP(intVector.size()); + EaVectorVP eaVectorVP(intVector.size()); + + for (eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++) + { + const ValuePair vp = {intVector[j], intVector[j]}; + stdVectorVP[j] = vp; + eaVectorVP[j] = vp; + } + + TestQuickSortStdVP(stopwatch1, stdVectorVP); + TestQuickSortEaVP (stopwatch2, eaVectorVP); + + if(i == 1) + Benchmark::AddResult("sort/q_sort/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + // Benchmark the sorting of something that is already sorted. 
+ TestQuickSortStdVP(stopwatch1, stdVectorVP); + TestQuickSortEaVP (stopwatch2, eaVectorVP); + + if(i == 1) + Benchmark::AddResult("sort/q_sort/vector/sorted", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test quick_sort/vector/Int + /////////////////////////////// + + StdVectorInt stdVectorInt(intVector.size()); + EaVectorInt eaVectorInt (intVector.size()); + + for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++) + { + stdVectorInt[j] = intVector[j]; + eaVectorInt[j] = intVector[j]; + } + + TestQuickSortStdInt(stopwatch1, stdVectorInt); + TestQuickSortEaInt (stopwatch2, eaVectorInt); + + if(i == 1) + Benchmark::AddResult("sort/q_sort/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + // Benchmark the sorting of something that is already sorted. + TestQuickSortStdInt(stopwatch1, stdVectorInt); + TestQuickSortEaInt (stopwatch2, eaVectorInt); + + if(i == 1) + Benchmark::AddResult("sort/q_sort/vector/sorted", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test quick_sort/vector/TestObject + /////////////////////////////// + + StdVectorTO stdVectorTO(intVector.size()); + EaVectorTO eaVectorTO(intVector.size()); + + for (eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++) + { + stdVectorTO[j] = TestObject(intVector[j]); + eaVectorTO[j] = TestObject(intVector[j]); + } + + TestQuickSortStdTO(stopwatch1, stdVectorTO); + TestQuickSortEaTO(stopwatch2, eaVectorTO); + + if (i == 1) + Benchmark::AddResult("sort/q_sort/vector", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + // Benchmark the sorting of something that is already sorted. 
+ TestQuickSortStdTO(stopwatch1, stdVectorTO); + TestQuickSortEaTO(stopwatch2, eaVectorTO); + + if (i == 1) + Benchmark::AddResult("sort/q_sort/vector/sorted", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test quick_sort/TestObject[] + /////////////////////////////// + + // Reset the values back to the unsorted state. + for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++) + { + stdVectorTO[j] = TestObject(intVector[j]); + eaVectorTO[j] = TestObject(intVector[j]); + } + + TestQuickSortStdTO(stopwatch1, stdVectorTO); + TestQuickSortEaTO (stopwatch2, eaVectorTO); + + if(i == 1) + Benchmark::AddResult("sort/q_sort/TestObject[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + // Benchmark the sorting of something that is already sorted. + TestQuickSortStdTO(stopwatch1, stdVectorTO); + TestQuickSortEaTO (stopwatch2, eaVectorTO); + + if(i == 1) + Benchmark::AddResult("sort/q_sort/TestObject[]/sorted", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + } +} + + + + + diff --git a/benchmark/source/BenchmarkString.cpp b/benchmark/source/BenchmarkString.cpp new file mode 100644 index 0000000..5dfefbc --- /dev/null +++ b/benchmark/source/BenchmarkString.cpp @@ -0,0 +1,531 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS() +#include +#include +#include +#include +EA_RESTORE_ALL_VC_WARNINGS() + + +using namespace EA; + + +namespace +{ + template + void TestPushBack(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + for(int i = 0; i < 100000; i++) + c.push_back((typename Container::value_type)(i & ((typename Container::value_type)~0))); + stopwatch.Stop(); + } + + + template + void TestInsert1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p) + { + const typename Container::size_type s = c.size(); + stopwatch.Restart(); + for(int i = 0; i < 100; i++) + c.insert(s - (typename Container::size_type)(i * 317), p); + stopwatch.Stop(); + } + + + template + void TestErase1(EA::StdC::Stopwatch& stopwatch, Container& c) + { + const typename Container::size_type s = c.size(); + stopwatch.Restart(); + for(int i = 0; i < 100; i++) + c.erase(s - (typename Container::size_type)(i * 339), 7); + stopwatch.Stop(); + } + + + template + void TestReplace1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int n) + { + const typename Container::size_type s = c.size(); + stopwatch.Restart(); + for(int i = 0; i < 1000; i++) + c.replace(s - (typename Container::size_type)(i * 5), ((n - 2) + (i & 3)), p, n); // The second argument rotates through n-2, n-1, n, n+1, n-2, etc. + stopwatch.Stop(); + } + + + template + void TestReserve(EA::StdC::Stopwatch& stopwatch, Container& c) + { + const typename Container::size_type s = c.capacity(); + stopwatch.Restart(); + for(int i = 0; i < 1000; i++) + c.reserve((s - 2) + (i & 3)); // The second argument rotates through n-2, n-1, n, n+1, n-2, etc. 
+ stopwatch.Stop(); + } + + + template + void TestSize(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + for(int i = 0; i < 1000; i++) + Benchmark::DoNothing(&c, c.size()); + stopwatch.Stop(); + } + + + template + void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c) + { + int32_t temp = 0; + stopwatch.Restart(); + for(typename Container::size_type j = 0, jEnd = c.size(); j < jEnd; j++) + temp += c[j]; + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)temp); + } + + + template + void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + for(int i = 0; i < 1000; i++) + Benchmark::DoNothing(&c, *eastl::find(c.begin(), c.end(), (typename Container::value_type)~0)); + stopwatch.Stop(); + } + + + template + void TestFind1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n) + { + stopwatch.Restart(); + for(int i = 0; i < 1000; i++) + Benchmark::DoNothing(&c, c.find(p, (typename Container::size_type)pos, (typename Container::size_type)n)); + stopwatch.Stop(); + } + + + template + void TestRfind1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n) + { + stopwatch.Restart(); + for(int i = 0; i < 1000; i++) + Benchmark::DoNothing(&c, c.rfind(p, (typename Container::size_type)pos, (typename Container::size_type)n)); + stopwatch.Stop(); + } + + template + void TestFirstOf1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n) + { + stopwatch.Restart(); + for(int i = 0; i < 1000; i++) + Benchmark::DoNothing(&c, c.find_first_of(p, (typename Container::size_type)pos, (typename Container::size_type)n)); + stopwatch.Stop(); + } + + template + void TestLastOf1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n) + { + stopwatch.Restart(); + for(int i = 0; i < 1000; i++) + Benchmark::DoNothing(&c, c.find_last_of(p, (typename Container::size_type)pos, (typename Container::size_type)n)); + stopwatch.Stop(); + } + + template + void 
TestFirstNotOf1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n) + { + stopwatch.Restart(); + for(int i = 0; i < 1000; i++) + Benchmark::DoNothing(&c, c.find_first_not_of(p, (typename Container::size_type)pos, (typename Container::size_type)n)); + stopwatch.Stop(); + } + + template + void TestLastNotOf1(EA::StdC::Stopwatch& stopwatch, Container& c, T* p, int pos, int n) + { + stopwatch.Restart(); + for(int i = 0; i < 1000; i++) + Benchmark::DoNothing(&c, c.find_last_not_of(p, (typename Container::size_type)pos, (typename Container::size_type)n)); + stopwatch.Stop(); + } + + + template + void TestCompare(EA::StdC::Stopwatch& stopwatch, Container& c1, Container& c2) // size() + { + stopwatch.Restart(); + for(int i = 0; i < 500; i++) + Benchmark::DoNothing(&c1, c1.compare(c2)); + stopwatch.Stop(); + } + + + template + void TestSwap(EA::StdC::Stopwatch& stopwatch, Container& c1, Container& c2) // size() + { + stopwatch.Restart(); + for(int i = 0; i < 10000; i++) // Make sure this is an even count so that when done things haven't changed. + { + c1.swap(c2); + Benchmark::DoNothing(&c1); + } + stopwatch.Stop(); + } + +} // namespace + + + + +void BenchmarkString() +{ + EASTLTest_Printf("String\n"); + + EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + { + for(int i = 0; i < 2; i++) + { + std::basic_string ss8(16, 0); // We initialize to size of 16 because different implementations may make + eastl::basic_string es8(16, 0); // different tradeoffs related to startup size. Initial operations are faster + // when strings start with a higher reserve, but they use more memory. + std::basic_string ss16(16, 0); // We try to nullify this tradeoff for the tests below by starting all at + eastl::basic_string es16(16, 0); // the same baseline allocation. 
+ + + /////////////////////////////// + // Test push_back + /////////////////////////////// + + TestPushBack(stopwatch1, ss8); + TestPushBack(stopwatch2, es8); + + if(i == 1) + Benchmark::AddResult("string/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestPushBack(stopwatch1, ss16); + TestPushBack(stopwatch2, es16); + + if(i == 1) + Benchmark::AddResult("string/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test insert(size_type position, const value_type* p) + /////////////////////////////// + + const char8_t pInsert1_8[] = { 'a', 0 }; + TestInsert1(stopwatch1, ss8, pInsert1_8); + TestInsert1(stopwatch2, es8, pInsert1_8); + + if(i == 1) + Benchmark::AddResult("string/insert/pos,p", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + const char16_t pInsert1_16[] = { 'a', 0 }; + TestInsert1(stopwatch1, ss16, pInsert1_16); + TestInsert1(stopwatch2, es16, pInsert1_16); + + if(i == 1) + Benchmark::AddResult("string/insert/pos,p", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase(size_type position, size_type n) + /////////////////////////////// + + TestErase1(stopwatch1, ss8); + TestErase1(stopwatch2, es8); + + if(i == 1) + Benchmark::AddResult("string/erase/pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestErase1(stopwatch1, ss16); + TestErase1(stopwatch2, es16); + + if(i == 1) + Benchmark::AddResult("string/erase/pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test replace(size_type position, size_type n1, const value_type* p, size_type n2) + /////////////////////////////// + + const int kReplace1Size = 8; + const char8_t pReplace1_8[kReplace1Size] = { 'a', 'b', 'c', 'd', 'e', 
'f', 'g', 'h' }; + + TestReplace1(stopwatch1, ss8, pReplace1_8, kReplace1Size); + TestReplace1(stopwatch2, es8, pReplace1_8, kReplace1Size); + + if(i == 1) + Benchmark::AddResult("string/replace/pos,n,p,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + const char16_t pReplace1_16[kReplace1Size] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' }; + + TestReplace1(stopwatch1, ss16, pReplace1_16, kReplace1Size); + TestReplace1(stopwatch2, es16, pReplace1_16, kReplace1Size); + + if(i == 1) + Benchmark::AddResult("string/replace/pos,n,p,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test reserve(size_type) + /////////////////////////////// + + TestReserve(stopwatch1, ss8); + TestReserve(stopwatch2, es8); + + if(i == 1) + Benchmark::AddResult("string/reserve", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestReserve(stopwatch1, ss16); + TestReserve(stopwatch2, es16); + + if(i == 1) + Benchmark::AddResult("string/reserve", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test size() + /////////////////////////////// + + TestSize(stopwatch1, ss8); + TestSize(stopwatch2, es8); + + if(i == 1) + Benchmark::AddResult("string/size", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestSize(stopwatch1, ss16); + TestSize(stopwatch2, es16); + + if(i == 1) + Benchmark::AddResult("string/size", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test operator[]. 
+ /////////////////////////////// + + TestBracket(stopwatch1, ss8); + TestBracket(stopwatch2, es8); + + if(i == 1) + Benchmark::AddResult("string/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestBracket(stopwatch1, ss16); + TestBracket(stopwatch2, es16); + + if(i == 1) + Benchmark::AddResult("string/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test iteration via find(). + /////////////////////////////// + + TestFind(stopwatch1, ss8); + TestFind(stopwatch2, es8); + + if(i == 1) + Benchmark::AddResult("string/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFind(stopwatch1, ss16); + TestFind(stopwatch2, es16); + + if(i == 1) + Benchmark::AddResult("string/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test find(const value_type* p, size_type position, size_type n) + /////////////////////////////// + + const int kFind1Size = 7; + const char8_t pFind1_8[kFind1Size] = { 'p', 'a', 't', 't', 'e', 'r', 'n' }; + + ss8.insert(ss8.size() / 2, pFind1_8); + es8.insert(es8.size() / 2, pFind1_8); + + TestFind1(stopwatch1, ss8, pFind1_8, 15, kFind1Size); + TestFind1(stopwatch2, es8, pFind1_8, 15, kFind1Size); + + if(i == 1) + Benchmark::AddResult("string/find/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + const char16_t pFind1_16[kFind1Size] = { 'p', 'a', 't', 't', 'e', 'r', 'n' }; + + #if !defined(EA_PLATFORM_IPHONE) && (!defined(EA_COMPILER_CLANG) && defined(EA_PLATFORM_MINGW)) // Crashes on iPhone. 
+ ss16.insert(ss8.size() / 2, pFind1_16); + #endif + es16.insert(es8.size() / 2, pFind1_16); + + TestFind1(stopwatch1, ss16, pFind1_16, 15, kFind1Size); + TestFind1(stopwatch2, es16, pFind1_16, 15, kFind1Size); + + if(i == 1) + Benchmark::AddResult("string/find/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test rfind(const value_type* p, size_type position, size_type n) + /////////////////////////////// + + TestRfind1(stopwatch1, ss8, pFind1_8, 15, kFind1Size); + TestRfind1(stopwatch2, es8, pFind1_8, 15, kFind1Size); + + if(i == 1) + Benchmark::AddResult("string/rfind/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestRfind1(stopwatch1, ss16, pFind1_16, 15, kFind1Size); + TestRfind1(stopwatch2, es16, pFind1_16, 15, kFind1Size); + + if(i == 1) + Benchmark::AddResult("string/rfind/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + //NOTICE (RASHIN): + //FindFirstOf variants are incredibly slow on palm pixi debug builds. + //Disabling for now... 
+ #if !defined(EA_DEBUG) + /////////////////////////////// + // Test find_first_of(const value_type* p, size_type position, size_type n + /////////////////////////////// + + const int kFindOf1Size = 7; + const char8_t pFindOf1_8[kFindOf1Size] = { '~', '~', '~', '~', '~', '~', '~' }; + + TestFirstOf1(stopwatch1, ss8, pFindOf1_8, 15, kFindOf1Size); + TestFirstOf1(stopwatch2, es8, pFindOf1_8, 15, kFindOf1Size); + + if(i == 1) + Benchmark::AddResult("string/find_first_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + const char16_t pFindOf1_16[kFindOf1Size] = { '~', '~', '~', '~', '~', '~', '~' }; + + TestFirstOf1(stopwatch1, ss16, pFindOf1_16, 15, kFindOf1Size); + TestFirstOf1(stopwatch2, es16, pFindOf1_16, 15, kFindOf1Size); + + if(i == 1) + Benchmark::AddResult("string/find_first_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test find_last_of(const value_type* p, size_type position, size_type n + /////////////////////////////// + + TestLastOf1(stopwatch1, ss8, pFindOf1_8, 15, kFindOf1Size); + TestLastOf1(stopwatch2, es8, pFindOf1_8, 15, kFindOf1Size); + + if(i == 1) + Benchmark::AddResult("string/find_last_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestLastOf1(stopwatch1, ss16, pFindOf1_16, 15, kFindOf1Size); + TestLastOf1(stopwatch2, es16, pFindOf1_16, 15, kFindOf1Size); + + if(i == 1) + Benchmark::AddResult("string/find_last_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test find_first_not_of(const value_type* p, size_type position, size_type n + /////////////////////////////// + + TestFirstNotOf1(stopwatch1, ss8, pFind1_8, 15, kFind1Size); + TestFirstNotOf1(stopwatch2, es8, pFind1_8, 15, kFind1Size); + + if(i == 1) + Benchmark::AddResult("string/find_first_not_of/p,pos,n", 
stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestFirstNotOf1(stopwatch1, ss16, pFind1_16, 15, kFind1Size); + TestFirstNotOf1(stopwatch2, es16, pFind1_16, 15, kFind1Size); + + if(i == 1) + Benchmark::AddResult("string/find_first_not_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test find_last_of(const value_type* p, size_type position, size_type n + /////////////////////////////// + + TestLastNotOf1(stopwatch1, ss8, pFind1_8, 15, kFind1Size); + TestLastNotOf1(stopwatch2, es8, pFind1_8, 15, kFind1Size); + + if(i == 1) + Benchmark::AddResult("string/find_last_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestLastNotOf1(stopwatch1, ss16, pFind1_16, 15, kFind1Size); + TestLastNotOf1(stopwatch2, es16, pFind1_16, 15, kFind1Size); + + if(i == 1) + Benchmark::AddResult("string/find_last_of/p,pos,n", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + #endif + + /////////////////////////////// + // Test compare() + /////////////////////////////// + + std::basic_string ss8X(ss8); + eastl::basic_string es8X(es8); + std::basic_string ss16X(ss16); + eastl::basic_string es16X(es16); + + TestCompare(stopwatch1, ss8, ss8X); + TestCompare(stopwatch2, es8, es8X); + + if(i == 1) + Benchmark::AddResult("string/compare", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestCompare(stopwatch1, ss16, ss16X); + TestCompare(stopwatch2, es16, es16X); + + if(i == 1) + Benchmark::AddResult("string/compare", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + + /////////////////////////////// + // Test swap() + /////////////////////////////// + + TestSwap(stopwatch1, ss8, ss8X); + TestSwap(stopwatch2, es8, es8X); + + if(i == 1) + Benchmark::AddResult("string/swap", stopwatch1.GetUnits(), 
stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + TestSwap(stopwatch1, ss16, ss16X); + TestSwap(stopwatch2, es16, es16X); + + if(i == 1) + Benchmark::AddResult("string/swap", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + } + } + +} + + + + + + + + + diff --git a/benchmark/source/BenchmarkTupleVector.cpp b/benchmark/source/BenchmarkTupleVector.cpp new file mode 100644 index 0000000..3a8e79d --- /dev/null +++ b/benchmark/source/BenchmarkTupleVector.cpp @@ -0,0 +1,667 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(push, 0) + #pragma warning(disable: 4350) +#endif +#include +#include +#include +#include +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +using namespace EA; + + +typedef std::vector StdVectorUint64; +typedef eastl::tuple_vector EaTupleVectorUint64; + + struct PaddingStruct +{ + char padding[56] = { 0 }; +}; +static const PaddingStruct DefaultPadding; +typedef eastl::tuple PaddedTuple; +typedef std::vector StdVectorUint64Padded; +typedef eastl::tuple_vector EaTupleVectorUint64Padded; + +namespace +{ + + + ////////////////////////////////////////////////////////////////////////////// + // MovableType + // + struct MovableType + { + int8_t* mpData; + enum { kDataSize = 128 }; + + MovableType() : mpData(new int8_t[kDataSize]) + { memset(mpData, 0, kDataSize); } + + MovableType(const MovableType& x) : mpData(new int8_t[kDataSize]) + { memcpy(mpData, x.mpData, kDataSize); } + + MovableType& operator=(const MovableType& x) + { + if(!mpData) + mpData = new int8_t[kDataSize]; + memcpy(mpData, x.mpData, kDataSize); + return *this; + } + + #if EASTL_MOVE_SEMANTICS_ENABLED + MovableType(MovableType&& x) 
EA_NOEXCEPT : mpData(x.mpData) + { x.mpData = NULL; } + + MovableType& operator=(MovableType&& x) + { + eastl::swap(mpData, x.mpData); // In practice it may not be right to do a swap, depending on the case. + return *this; + } + #endif + + ~MovableType() + { delete[] mpData; } + }; + + + ////////////////////////////////////////////////////////////////////////////// + // AutoRefCount + // + // Basic ref-counted object. + // + template + class AutoRefCount + { + public: + T* mpObject; + + public: + AutoRefCount() EA_NOEXCEPT : mpObject(NULL) + {} + + AutoRefCount(T* pObject) EA_NOEXCEPT : mpObject(pObject) + { + if(mpObject) + mpObject->AddRef(); + } + + AutoRefCount(T* pObject, int) EA_NOEXCEPT : mpObject(pObject) + { + // Inherit the existing refcount. + } + + AutoRefCount(const AutoRefCount& x) EA_NOEXCEPT : mpObject(x.mpObject) + { + if(mpObject) + mpObject->AddRef(); + } + + AutoRefCount& operator=(const AutoRefCount& x) + { + return operator=(x.mpObject); + } + + AutoRefCount& operator=(T* pObject) + { + if(pObject != mpObject) + { + T* const pTemp = mpObject; // Create temporary to prevent possible problems with re-entrancy. 
+ if(pObject) + pObject->AddRef(); + mpObject = pObject; + if(pTemp) + pTemp->Release(); + } + return *this; + } + + #if EASTL_MOVE_SEMANTICS_ENABLED + AutoRefCount(AutoRefCount&& x) EA_NOEXCEPT : mpObject(x.mpObject) + { + x.mpObject = NULL; + } + + AutoRefCount& operator=(AutoRefCount&& x) + { + if(mpObject) + mpObject->Release(); + mpObject = x.mpObject; + x.mpObject = NULL; + return *this; + } + #endif + + ~AutoRefCount() + { + if(mpObject) + mpObject->Release(); + } + + T& operator *() const EA_NOEXCEPT + { return *mpObject; } + + T* operator ->() const EA_NOEXCEPT + { return mpObject; } + + operator T*() const EA_NOEXCEPT + { return mpObject; } + + }; // class AutoRefCount + + + struct RefCounted + { + int mRefCount; + static int msAddRefCount; + static int msReleaseCount; + + RefCounted() : mRefCount(1) {} + + int AddRef() + { ++msAddRefCount; return ++mRefCount; } + + int Release() + { + ++msReleaseCount; + if(mRefCount > 1) + return --mRefCount; + delete this; + return 0; + } + }; + + int RefCounted::msAddRefCount = 0; + int RefCounted::msReleaseCount = 0; + +} // namespace + + +namespace +{ + template + void TestPushBack(EA::StdC::Stopwatch& stopwatch, Container& c, eastl::vector& intVector) + { + stopwatch.Restart(); + for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++) + c.push_back((uint64_t)intVector[j]); + stopwatch.Stop(); + } + + + template + void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c) + { + uint64_t temp = 0; + stopwatch.Restart(); + for(typename Container::size_type j = 0, jEnd = c.size(); j < jEnd; j++) + temp += c[j]; + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(temp & 0xffffffff)); + } + + void TestBracket(EA::StdC::Stopwatch& stopwatch, EaTupleVectorUint64& c) + { + uint64_t temp = 0; + stopwatch.Restart(); + for (typename EaTupleVectorUint64::size_type j = 0, jEnd = c.size(); j < jEnd; j++) + temp += eastl::get<0>(c[j]); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", 
(unsigned)(temp & 0xffffffff)); + } + + template + void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + typedef typename Container::iterator iterator_t; // This typedef is required to get this code to compile on RVCT + iterator_t it = eastl::find(c.begin(), c.end(), UINT64_C(0xffffffffffff)); + stopwatch.Stop(); + if(it != c.end()) + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)*it); + } + + void TestFind(EA::StdC::Stopwatch& stopwatch, EaTupleVectorUint64& c) + { + eastl::tuple val(0xffffffffffff); + stopwatch.Restart(); + EaTupleVectorUint64::iterator it = eastl::find(c.begin(), c.end(), val); + stopwatch.Stop(); + if (it != c.end()) + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)eastl::get<0>(*it)); + } + + template + void TestSort(EA::StdC::Stopwatch& stopwatch, Container& c) + { + // Intentionally use eastl sort in order to measure just + // vector access speed and not be polluted by sort speed. + stopwatch.Restart(); + eastl::quick_sort(c.begin(), c.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(c[0] & 0xffffffff)); + } + + void TestSort(EA::StdC::Stopwatch& stopwatch, EaTupleVectorUint64& c) + { + // Intentionally use eastl sort in order to measure just + // vector access speed and not be polluted by sort speed. + stopwatch.Restart(); + eastl::quick_sort(c.begin(), c.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(eastl::get<0>(c[0]) & 0xffffffff)); + } + + + template + void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it; + + stopwatch.Restart(); + for(j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j) + { + it = c.insert(it, UINT64_C(0xffffffffffff)); + + if(it == c.end()) // Try to safely increment the iterator three times. 
+ it = c.begin(); + if(++it == c.end()) + it = c.begin(); + if(++it == c.end()) + it = c.begin(); + } + stopwatch.Stop(); + } + + + template + void TestErase(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it; + + stopwatch.Restart(); + for(j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j) + { + it = c.erase(it); + + if(it == c.end()) // Try to safely increment the iterator three times. + it = c.begin(); + if(++it == c.end()) + it = c.begin(); + if(++it == c.end()) + it = c.begin(); + } + stopwatch.Stop(); + } + + + template + void TestMoveReallocate(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + while(c.size() < 8192) + c.resize(c.capacity() + 1); + stopwatch.Stop(); + } + + + template + void TestMoveErase(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + while(!c.empty()) + c.erase(c.begin()); + stopwatch.Stop(); + } + + ////////////////////////////////////////////////////////////////////////// + // Variations of test functions for the Padded structures + template + void TestTuplePushBack(EA::StdC::Stopwatch& stopwatch, Container& c, eastl::vector& intVector) + { + stopwatch.Restart(); + for (eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++) + { + PaddedTuple tup((uint64_t)intVector[j], DefaultPadding); + c.push_back(tup); + } + stopwatch.Stop(); + } + + + template + void TestTupleBracket(EA::StdC::Stopwatch& stopwatch, Container& c) + { + uint64_t temp = 0; + stopwatch.Restart(); + for (typename Container::size_type j = 0, jEnd = c.size(); j < jEnd; j++) + temp += eastl::get<0>(c[j]); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(temp & 0xffffffff)); + } + + + template + void TestTupleFind(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + typedef typename Container::iterator iterator_t; // This typedef is required to get this code to compile on RVCT + iterator_t it = 
eastl::find_if(c.begin(), c.end(), [](auto tup) { return eastl::get<0>(tup) == 0xFFFFFFFF; }); + stopwatch.Stop(); + if (it != c.end()) + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)eastl::get<0>(*it)); + } + + template + void TestTupleSort(EA::StdC::Stopwatch& stopwatch, Container& c) + { + // Intentionally use eastl sort in order to measure just + // vector access speed and not be polluted by sort speed. + stopwatch.Restart(); + eastl::quick_sort(c.begin(), c.end(), [](auto a, auto b) { return eastl::get<0>(a) < eastl::get<0>(b); }); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(eastl::get<0>(c[0]) & 0xffffffff)); + } + + template + void TestTupleInsert(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it; + PaddedTuple tup(0xFFFFFFFF, DefaultPadding); + + stopwatch.Restart(); + for (j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j) + { + it = c.insert(it, tup); + + if (it == c.end()) // Try to safely increment the iterator three times. + it = c.begin(); + if (++it == c.end()) + it = c.begin(); + if (++it == c.end()) + it = c.begin(); + } + stopwatch.Stop(); + } + + template + void TestTupleErase(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it; + + stopwatch.Restart(); + for (j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j) + { + it = c.erase(it); + + if (it == c.end()) // Try to safely increment the iterator three times. 
+ it = c.begin(); + if (++it == c.end()) + it = c.begin(); + if (++it == c.end()) + it = c.begin(); + } + stopwatch.Stop(); + } + +} // namespace + + + + + +void BenchmarkTupleVector() +{ + EASTLTest_Printf("TupleVector\n"); + + EA::UnitTest::RandGenT rng(EA::UnitTest::GetRandSeed()); + EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + { + eastl::vector intVector(100000); + eastl::generate(intVector.begin(), intVector.end(), rng); + + for(int i = 0; i < 2; i++) + { + StdVectorUint64 stdVectorUint64; + EaTupleVectorUint64 eaTupleVectorUint64; + + + /////////////////////////////// + // Test push_back + /////////////////////////////// + + TestPushBack(stopwatch1, stdVectorUint64, intVector); + TestPushBack(stopwatch2, eaTupleVectorUint64, intVector); + + if(i == 1) + Benchmark::AddResult("tuple_vector/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test operator[]. + /////////////////////////////// + + TestBracket(stopwatch1, stdVectorUint64); + TestBracket(stopwatch2, eaTupleVectorUint64); + + if(i == 1) + Benchmark::AddResult("tuple_vector/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test iteration via find(). + /////////////////////////////// + + TestFind(stopwatch1, stdVectorUint64); + TestFind(stopwatch2, eaTupleVectorUint64); + TestFind(stopwatch1, stdVectorUint64); + TestFind(stopwatch2, eaTupleVectorUint64); + + if(i == 1) + Benchmark::AddResult("tuple_vector/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test sort + /////////////////////////////// + + // Currently VC++ complains about our sort function decrementing std::iterator that is already at begin(). 
In the strictest sense, + // that's a valid complaint, but we aren't testing std STL here. We will want to revise our sort function eventually. + #if !defined(_MSC_VER) || !defined(_ITERATOR_DEBUG_LEVEL) || (_ITERATOR_DEBUG_LEVEL < 2) + TestSort(stopwatch1, stdVectorUint64); + TestSort(stopwatch2, eaTupleVectorUint64); + + if(i == 1) + Benchmark::AddResult("tuple_vector/sort", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + #endif + + /////////////////////////////// + // Test insert + /////////////////////////////// + + TestInsert(stopwatch1, stdVectorUint64); + TestInsert(stopwatch2, eaTupleVectorUint64); + + if(i == 1) + Benchmark::AddResult("tuple_vector/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase + /////////////////////////////// + + TestErase(stopwatch1, stdVectorUint64); + TestErase(stopwatch2, eaTupleVectorUint64); + + if(i == 1) + Benchmark::AddResult("tuple_vector/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////////////////// + // Test move of MovableType + // Should be much faster with C++11 move. 
+ /////////////////////////////////////////// + + std::vector stdVectorMovableType; + eastl::tuple_vector eaTupleVectorMovableType; + + TestMoveReallocate(stopwatch1, stdVectorMovableType); + TestMoveReallocate(stopwatch2, eaTupleVectorMovableType); + + if(i == 1) + Benchmark::AddResult("tuple_vector/reallocate", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + TestMoveErase(stopwatch1, stdVectorMovableType); + TestMoveErase(stopwatch2, eaTupleVectorMovableType); + + if(i == 1) + Benchmark::AddResult("tuple_vector/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////////////////// + // Test move of AutoRefCount + // Should be much faster with C++11 move. + /////////////////////////////////////////// + + std::vector > stdVectorAutoRefCount; + eastl::tuple_vector > eaTupleVectorAutoRefCount; + + for(size_t a = 0; a < 2048; a++) + { + stdVectorAutoRefCount.push_back(AutoRefCount(new RefCounted)); + eaTupleVectorAutoRefCount.push_back(AutoRefCount(new RefCounted)); + } + + RefCounted::msAddRefCount = 0; + RefCounted::msReleaseCount = 0; + TestMoveErase(stopwatch1, stdVectorAutoRefCount); + //EASTLTest_Printf("tuple_vector/erase std counts: %d %d\n", RefCounted::msAddRefCount, RefCounted::msReleaseCount); + + RefCounted::msAddRefCount = 0; + RefCounted::msReleaseCount = 0; + TestMoveErase(stopwatch2, eaTupleVectorAutoRefCount); + //EASTLTest_Printf("tuple_vector/erase EA counts: %d %d\n", RefCounted::msAddRefCount, RefCounted::msReleaseCount); + + if(i == 1) + Benchmark::AddResult("tuple_vector/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + ////////////////////////////////////////////////////////////////////////// + // Test various operations with "padded" data, to demonstrate access/modification of sparse data + + StdVectorUint64Padded stdVectorUint64Padded; + EaTupleVectorUint64Padded eaTupleVectorUint64Padded; 
+ + /////////////////////////////// + // Test push_back + /////////////////////////////// + + TestTuplePushBack(stopwatch1, stdVectorUint64Padded, intVector); + TestTuplePushBack(stopwatch2, eaTupleVectorUint64Padded, intVector); + + if(i == 1) + Benchmark::AddResult("tuple_vector/push_back", stopwatch1.GetUnits(), + stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test operator[]. + /////////////////////////////// + + TestTupleBracket(stopwatch1, stdVectorUint64Padded); + TestTupleBracket(stopwatch2, eaTupleVectorUint64Padded); + + if(i == 1) + Benchmark::AddResult("tuple_vector/operator[]", stopwatch1.GetUnits(), + stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test iteration via find(). + /////////////////////////////// + + TestTupleFind(stopwatch1, stdVectorUint64Padded); + TestTupleFind(stopwatch2, eaTupleVectorUint64Padded); + TestTupleFind(stopwatch1, stdVectorUint64Padded); + TestTupleFind(stopwatch2, eaTupleVectorUint64Padded); + + if(i == 1) + Benchmark::AddResult("tuple_vector/iteration", stopwatch1.GetUnits(), + stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test sort + /////////////////////////////// + + // Currently VC++ complains about our sort function decrementing std::iterator that is already at + // begin(). In the strictest sense, that's a valid complaint, but we aren't testing std STL here. We + // will want to revise our sort function eventually. 
+ #if !defined(_MSC_VER) || !defined(_ITERATOR_DEBUG_LEVEL) || (_ITERATOR_DEBUG_LEVEL < 2) + TestTupleSort(stopwatch1, stdVectorUint64Padded); + TestTupleSort(stopwatch2, eaTupleVectorUint64Padded); + + if(i == 1) + Benchmark::AddResult("tuple_vector/sort", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), + stopwatch2.GetElapsedTime()); + #endif + + /////////////////////////////// + // Test insert + /////////////////////////////// + + TestTupleInsert(stopwatch1, stdVectorUint64Padded); + TestTupleInsert(stopwatch2, eaTupleVectorUint64Padded); + + if(i == 1) + Benchmark::AddResult("tuple_vector/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), + stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase + /////////////////////////////// + + TestTupleErase(stopwatch1, stdVectorUint64Padded); + TestTupleErase(stopwatch2, eaTupleVectorUint64Padded); + + if(i == 1) + Benchmark::AddResult("tuple_vector/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), + stopwatch2.GetElapsedTime()); + } + } +} + + + + + + + + + + + + diff --git a/benchmark/source/BenchmarkVector.cpp b/benchmark/source/BenchmarkVector.cpp new file mode 100644 index 0000000..9331530 --- /dev/null +++ b/benchmark/source/BenchmarkVector.cpp @@ -0,0 +1,452 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(push, 0) + #pragma warning(disable: 4350) +#endif +#include +#include +#include +#include +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +using namespace EA; + + +typedef std::vector StdVectorUint64; +typedef eastl::vector EaVectorUint64; + + +namespace +{ + + + ////////////////////////////////////////////////////////////////////////////// + // MovableType + // + struct MovableType + { + int8_t* mpData; + enum { kDataSize = 128 }; + + MovableType() : mpData(new int8_t[kDataSize]) + { memset(mpData, 0, kDataSize); } + + MovableType(const MovableType& x) : mpData(new int8_t[kDataSize]) + { memcpy(mpData, x.mpData, kDataSize); } + + MovableType& operator=(const MovableType& x) + { + if(!mpData) + mpData = new int8_t[kDataSize]; + memcpy(mpData, x.mpData, kDataSize); + return *this; + } + + MovableType(MovableType&& x) EA_NOEXCEPT : mpData(x.mpData) + { x.mpData = NULL; } + + MovableType& operator=(MovableType&& x) + { + eastl::swap(mpData, x.mpData); // In practice it may not be right to do a swap, depending on the case. + return *this; + } + + ~MovableType() + { delete[] mpData; } + }; + + + ////////////////////////////////////////////////////////////////////////////// + // AutoRefCount + // + // Basic ref-counted object. + // + template + class AutoRefCount + { + public: + T* mpObject; + + public: + AutoRefCount() EA_NOEXCEPT : mpObject(NULL) + {} + + AutoRefCount(T* pObject) EA_NOEXCEPT : mpObject(pObject) + { + if(mpObject) + mpObject->AddRef(); + } + + AutoRefCount(T* pObject, int) EA_NOEXCEPT : mpObject(pObject) + { + // Inherit the existing refcount. 
+ } + + AutoRefCount(const AutoRefCount& x) EA_NOEXCEPT : mpObject(x.mpObject) + { + if(mpObject) + mpObject->AddRef(); + } + + AutoRefCount& operator=(const AutoRefCount& x) + { + return operator=(x.mpObject); + } + + AutoRefCount& operator=(T* pObject) + { + if(pObject != mpObject) + { + T* const pTemp = mpObject; // Create temporary to prevent possible problems with re-entrancy. + if(pObject) + pObject->AddRef(); + mpObject = pObject; + if(pTemp) + pTemp->Release(); + } + return *this; + } + + AutoRefCount(AutoRefCount&& x) EA_NOEXCEPT : mpObject(x.mpObject) + { + x.mpObject = NULL; + } + + AutoRefCount& operator=(AutoRefCount&& x) + { + if(mpObject) + mpObject->Release(); + mpObject = x.mpObject; + x.mpObject = NULL; + return *this; + } + + ~AutoRefCount() + { + if(mpObject) + mpObject->Release(); + } + + T& operator *() const EA_NOEXCEPT + { return *mpObject; } + + T* operator ->() const EA_NOEXCEPT + { return mpObject; } + + operator T*() const EA_NOEXCEPT + { return mpObject; } + + }; // class AutoRefCount + + + struct RefCounted + { + int mRefCount; + static int msAddRefCount; + static int msReleaseCount; + + RefCounted() : mRefCount(1) {} + + int AddRef() + { ++msAddRefCount; return ++mRefCount; } + + int Release() + { + ++msReleaseCount; + if(mRefCount > 1) + return --mRefCount; + delete this; + return 0; + } + }; + + int RefCounted::msAddRefCount = 0; + int RefCounted::msReleaseCount = 0; + +} // namespace + + +namespace +{ + template + void TestPushBack(EA::StdC::Stopwatch& stopwatch, Container& c, eastl::vector& intVector) + { + stopwatch.Restart(); + for(eastl_size_t j = 0, jEnd = intVector.size(); j < jEnd; j++) + c.push_back((uint64_t)intVector[j]); + stopwatch.Stop(); + } + + + template + void TestBracket(EA::StdC::Stopwatch& stopwatch, Container& c) + { + uint64_t temp = 0; + stopwatch.Restart(); + for(typename Container::size_type j = 0, jEnd = c.size(); j < jEnd; j++) + temp += c[j]; + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", 
(unsigned)(temp & 0xffffffff)); + } + + + template + void TestFind(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + typedef typename Container::iterator iterator_t; // This typedef is required to get this code to compile on RVCT + iterator_t it = eastl::find(c.begin(), c.end(), UINT64_C(0xffffffffffff)); + stopwatch.Stop(); + if(it != c.end()) + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)*it); + } + + + template + void TestSort(EA::StdC::Stopwatch& stopwatch, Container& c) + { + // Intentionally use eastl sort in order to measure just + // vector access speed and not be polluted by sort speed. + stopwatch.Restart(); + eastl::quick_sort(c.begin(), c.end()); + stopwatch.Stop(); + sprintf(Benchmark::gScratchBuffer, "%u", (unsigned)(c[0] & 0xffffffff)); + } + + + template + void TestInsert(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it; + + stopwatch.Restart(); + for(j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j) + { + it = c.insert(it, UINT64_C(0xffffffffffff)); + + if(it == c.end()) // Try to safely increment the iterator three times. + it = c.begin(); + if(++it == c.end()) + it = c.begin(); + if(++it == c.end()) + it = c.begin(); + } + stopwatch.Stop(); + } + + + template + void TestErase(EA::StdC::Stopwatch& stopwatch, Container& c) + { + typename Container::size_type j, jEnd; + typename Container::iterator it; + + stopwatch.Restart(); + for(j = 0, jEnd = 100, it = c.begin(); j < jEnd; ++j) + { + it = c.erase(it); + + if(it == c.end()) // Try to safely increment the iterator three times. 
+ it = c.begin(); + if(++it == c.end()) + it = c.begin(); + if(++it == c.end()) + it = c.begin(); + } + stopwatch.Stop(); + } + + + template + void TestMoveReallocate(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + while(c.size() < 8192) + c.resize(c.capacity() + 1); + stopwatch.Stop(); + } + + + template + void TestMoveErase(EA::StdC::Stopwatch& stopwatch, Container& c) + { + stopwatch.Restart(); + while(!c.empty()) + c.erase(c.begin()); + stopwatch.Stop(); + } + + +} // namespace + + + + + +void BenchmarkVector() +{ + EASTLTest_Printf("Vector\n"); + + EA::UnitTest::RandGenT rng(EA::UnitTest::GetRandSeed()); + EA::StdC::Stopwatch stopwatch1(EA::StdC::Stopwatch::kUnitsCPUCycles); + EA::StdC::Stopwatch stopwatch2(EA::StdC::Stopwatch::kUnitsCPUCycles); + + { + eastl::vector intVector(100000); + eastl::generate(intVector.begin(), intVector.end(), rng); + + for(int i = 0; i < 2; i++) + { + StdVectorUint64 stdVectorUint64; + EaVectorUint64 eaVectorUint64; + + + /////////////////////////////// + // Test push_back + /////////////////////////////// + + TestPushBack(stopwatch1, stdVectorUint64, intVector); + TestPushBack(stopwatch2, eaVectorUint64, intVector); + + if(i == 1) + Benchmark::AddResult("vector/push_back", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test operator[]. + /////////////////////////////// + + TestBracket(stopwatch1, stdVectorUint64); + TestBracket(stopwatch2, eaVectorUint64); + + if(i == 1) + Benchmark::AddResult("vector/operator[]", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test iteration via find(). 
+ /////////////////////////////// + + TestFind(stopwatch1, stdVectorUint64); + TestFind(stopwatch2, eaVectorUint64); + TestFind(stopwatch1, stdVectorUint64); + TestFind(stopwatch2, eaVectorUint64); + + if(i == 1) + Benchmark::AddResult("vector/iteration", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test sort + /////////////////////////////// + + // Currently VC++ complains about our sort function decrementing std::iterator that is already at begin(). In the strictest sense, + // that's a valid complaint, but we aren't testing std STL here. We will want to revise our sort function eventually. + #if !defined(_MSC_VER) || !defined(_ITERATOR_DEBUG_LEVEL) || (_ITERATOR_DEBUG_LEVEL < 2) + TestSort(stopwatch1, stdVectorUint64); + TestSort(stopwatch2, eaVectorUint64); + + if(i == 1) + Benchmark::AddResult("vector/sort", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + #endif + + /////////////////////////////// + // Test insert + /////////////////////////////// + + TestInsert(stopwatch1, stdVectorUint64); + TestInsert(stopwatch2, eaVectorUint64); + + if(i == 1) + Benchmark::AddResult("vector/insert", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////// + // Test erase + /////////////////////////////// + + TestErase(stopwatch1, stdVectorUint64); + TestErase(stopwatch2, eaVectorUint64); + + if(i == 1) + Benchmark::AddResult("vector/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////////////////// + // Test move of MovableType + // Should be much faster with C++11 move. 
+ /////////////////////////////////////////// + + std::vector stdVectorMovableType; + eastl::vector eaVectorMovableType; + + TestMoveReallocate(stopwatch1, stdVectorMovableType); + TestMoveReallocate(stopwatch2, eaVectorMovableType); + + if(i == 1) + Benchmark::AddResult("vector/reallocate", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + TestMoveErase(stopwatch1, stdVectorMovableType); + TestMoveErase(stopwatch2, eaVectorMovableType); + + if(i == 1) + Benchmark::AddResult("vector/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + + + /////////////////////////////////////////// + // Test move of AutoRefCount + // Should be much faster with C++11 move. + /////////////////////////////////////////// + + std::vector > stdVectorAutoRefCount; + eastl::vector > eaVectorAutoRefCount; + + for(size_t a = 0; a < 2048; a++) + { + stdVectorAutoRefCount.push_back(AutoRefCount(new RefCounted)); + eaVectorAutoRefCount.push_back(AutoRefCount(new RefCounted)); + } + + RefCounted::msAddRefCount = 0; + RefCounted::msReleaseCount = 0; + TestMoveErase(stopwatch1, stdVectorAutoRefCount); + EASTLTest_Printf("vector/erase std counts: %d %d\n", RefCounted::msAddRefCount, RefCounted::msReleaseCount); + + RefCounted::msAddRefCount = 0; + RefCounted::msReleaseCount = 0; + TestMoveErase(stopwatch2, eaVectorAutoRefCount); + EASTLTest_Printf("vector/erase EA counts: %d %d\n", RefCounted::msAddRefCount, RefCounted::msReleaseCount); + + if(i == 1) + Benchmark::AddResult("vector/erase", stopwatch1.GetUnits(), stopwatch1.GetElapsedTime(), stopwatch2.GetElapsedTime()); + } + } +} + + + + + + + + + + + + diff --git a/benchmark/source/EASTLBenchmark.cpp b/benchmark/source/EASTLBenchmark.cpp new file mode 100644 index 0000000..8e4d3ae --- /dev/null +++ b/benchmark/source/EASTLBenchmark.cpp @@ -0,0 +1,291 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. 
All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#include +#include + +#ifdef _MSC_VER + #pragma warning(push, 0) +#endif +#include +#include +#include +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + + +namespace Benchmark +{ + static int64_t ConvertStopwatchUnits(EA::StdC::Stopwatch::Units unitsSource, int64_t valueSource, EA::StdC::Stopwatch::Units unitsDest) + { + using namespace EA::StdC; + + int64_t valueDest = valueSource; + + if(unitsSource != unitsDest) + { + double sourceMultiplier; + + switch (unitsSource) + { + case Stopwatch::kUnitsCPUCycles: + sourceMultiplier = Stopwatch::GetUnitsPerCPUCycle(unitsDest); // This will typically be a number less than 1. + valueDest = (int64_t)(valueSource * sourceMultiplier); + break; + + case Stopwatch::kUnitsCycles: + sourceMultiplier = Stopwatch::GetUnitsPerStopwatchCycle(unitsDest); // This will typically be a number less than 1. + valueDest = (int64_t)(valueSource * sourceMultiplier); + break; + + case Stopwatch::kUnitsNanoseconds: + case Stopwatch::kUnitsMicroseconds: + case Stopwatch::kUnitsMilliseconds: + case Stopwatch::kUnitsSeconds: + case Stopwatch::kUnitsMinutes: + case Stopwatch::kUnitsUserDefined: + // To do. Also, handle the case of unitsDest being Cycles or CPUCycles and unitsSource being a time. 
+ break; + } + } + + return valueDest; + } + + void WriteTime(int64_t timeNS, eastl::string& sTime) + { + if(timeNS > 1000000000) + sTime.sprintf(" %6.2f s", (double)timeNS / 1000000000); + else if(timeNS > 1000000) + sTime.sprintf("%6.1f ms", (double)timeNS / 1000000); + else if(timeNS > 1000) + sTime.sprintf("%6.1f us", (double)timeNS / 1000); + else + sTime.sprintf("%6.1f ns", (double)timeNS / 1); + } + + + + Environment gEnvironment; + + Environment& GetEnvironment() + { + return gEnvironment; + } + + + + ResultSet gResultSet; + + ResultSet& GetResultSet() + { + return gResultSet; + } + + + + // Scratch sprintf buffer + char gScratchBuffer[1024]; + + + void DoNothing(...) + { + // Intentionally nothing. + } + + + void AddResult(const char* pName, int units, int64_t nTime1, int64_t nTime2, const char* pNotes) + { + Result result; + + result.msName = pName; + result.mUnits = units; + result.mTime1 = nTime1; + result.mTime1NS = ConvertStopwatchUnits((EA::StdC::Stopwatch::Units)units, nTime1, EA::StdC::Stopwatch::kUnitsNanoseconds); + result.mTime2 = nTime2; + result.mTime2NS = ConvertStopwatchUnits((EA::StdC::Stopwatch::Units)units, nTime2, EA::StdC::Stopwatch::kUnitsNanoseconds); + + if(pNotes) + result.msNotes = pNotes; + + gResultSet.insert(result); + } + + + void PrintResultLine(const Result& result) + { + const double fRatio = (double)result.mTime1 / (double)result.mTime2; + const double fRatioPrinted = (fRatio > 100) ? 100 : fRatio; + const double fPercentChange = fabs(((double)result.mTime1 - (double)result.mTime2) / (((double)result.mTime1 + (double)result.mTime2) / 2)); + const bool bDifference = (result.mTime1 > 10) && (result.mTime2 > 10) && (fPercentChange > 0.25); + const char* pDifference = (bDifference ? (result.mTime1 < result.mTime2 ? "-" : "+") : ""); + + eastl::string sClockTime1, sClockTime2; + + WriteTime(result.mTime1NS, sClockTime1); // This converts an integer in nanoseconds (e.g. 23400000) to a string (e.g. 
"23.4 ms") + WriteTime(result.mTime2NS, sClockTime2); + + EA::UnitTest::Report("%-43s | %13" PRIu64 " %s | %13" PRIu64 " %s | %10.2f%10s", result.msName.c_str(), result.mTime1, sClockTime1.c_str(), result.mTime2, sClockTime2.c_str(), fRatioPrinted, pDifference); + + if(result.msNotes.length()) // If there are any notes... + EA::UnitTest::Report(" %s", result.msNotes.c_str()); + EA::UnitTest::Report("\n"); + } + + + #if defined(EASTL_BENCHMARK_WRITE_FILE) && EASTL_BENCHMARK_WRITE_FILE + + #if !defined(EASTL_BENCHMARK_WRITE_FILE_PATH) + #define EASTL_BENCHMARK_WRITE_FILE_PATH "BenchmarkResults.txt" + #endif + + struct FileWriter + { + FILE* mpReportFile; + EA::EAMain::ReportFunction mpSavedReportFunction; + static FileWriter* gpFileWriter; + + static void StaticPrintfReportFunction(const char8_t* pText) + { + if(gpFileWriter) + gpFileWriter->PrintfReportFunction(pText); + } + + void PrintfReportFunction(const char8_t* pText) + { + fwrite(pText, strlen(pText), 1, mpReportFile); + EA::EAMain::ReportFunction gpReportFunction = EA::EAMain::GetDefaultReportFunction(); + gpReportFunction(pText); + } + + FileWriter() : mpReportFile(NULL), mpSavedReportFunction(NULL) + { + mpReportFile = fopen(EASTL_BENCHMARK_WRITE_FILE_PATH, "w+"); + + if(mpReportFile) + { + gpFileWriter = this; + mpSavedReportFunction = EA::EAMain::GetDefaultReportFunction(); + EA::EAMain::SetReportFunction(StaticPrintfReportFunction); + } + } + + ~FileWriter() + { + if(mpReportFile) + { + gpFileWriter = NULL; + EA::EAMain::SetReportFunction(mpSavedReportFunction); + fclose(mpReportFile); + } + } + }; + + FileWriter* FileWriter::gpFileWriter = NULL; + #endif + + + void PrintResults() + { + #if defined(EASTL_BENCHMARK_WRITE_FILE) && EASTL_BENCHMARK_WRITE_FILE + FileWriter fileWriter; // This will auto-execute. 
+ #endif + + // Print the results + EA::UnitTest::Report("\n"); + EA::UnitTest::Report("****************************************************************************************\n"); + EA::UnitTest::Report("EASTL Benchmark test results\n"); + EA::UnitTest::Report("****************************************************************************************\n"); + EA::UnitTest::Report("\n"); + EA::UnitTest::Report("EASTL version: %s\n", EASTL_VERSION); + EA::UnitTest::Report("Platform: %s\n", gEnvironment.msPlatform.c_str()); + EA::UnitTest::Report("Compiler: %s\n", EA_COMPILER_STRING); + #if defined(EA_DEBUG) || defined(_DEBUG) + EA::UnitTest::Report("Allocator: PPMalloc::GeneralAllocatorDebug. Thread safety enabled.\n"); + EA::UnitTest::Report("Build: Debug. Inlining disabled. STL debug features disabled.\n"); + #else + EA::UnitTest::Report("Allocator: PPMalloc::GeneralAllocator. Thread safety enabled.\n"); + EA::UnitTest::Report("Build: Full optimization. Inlining enabled.\n"); + #endif + EA::UnitTest::Report("\n"); + EA::UnitTest::Report("Values are ticks and time to complete tests; smaller values are better.\n"); + EA::UnitTest::Report("\n"); + EA::UnitTest::Report("%-43s%26s%26s%13s%13s\n", "Test", gEnvironment.msSTLName1.c_str(), gEnvironment.msSTLName2.c_str(), "Ratio", "Difference?"); + EA::UnitTest::Report("---------------------------------------------------------------------------------------------------------------------\n"); + + eastl::string sTestTypeLast; + eastl::string sTestTypeTemp; + + for(ResultSet::iterator it = gResultSet.begin(); it != gResultSet.end(); ++it) + { + const Result& result = *it; + + eastl_size_t n = result.msName.find('/'); + if(n == eastl::string::npos) + n = result.msName.length(); + sTestTypeTemp.assign(result.msName, 0, n); + + if(sTestTypeTemp != sTestTypeLast) // If it looks like we are changing to a new test type... add an empty line to help readability. 
+ { + if(it != gResultSet.begin()) + EA::UnitTest::Report("\n"); + sTestTypeLast = sTestTypeTemp; + } + + PrintResultLine(result); + } + + // We will print out a final line that has the sum of the rows printed above. + Result resultSum; + resultSum.msName = "sum"; + + for(ResultSet::iterator its = gResultSet.begin(); its != gResultSet.end(); ++its) + { + const Result& resultTemp = *its; + + EASTL_ASSERT(resultTemp.mUnits == EA::StdC::Stopwatch::kUnitsCPUCycles); // Our ConvertStopwatchUnits call below assumes that every measured time is CPUCycles. + resultSum.mTime1 += resultTemp.mTime1; + resultSum.mTime2 += resultTemp.mTime2; + } + + // We do this convert as a final step instead of the loop in order to avoid loss of precision. + resultSum.mTime1NS = ConvertStopwatchUnits(EA::StdC::Stopwatch::kUnitsCPUCycles, resultSum.mTime1, EA::StdC::Stopwatch::kUnitsNanoseconds); + resultSum.mTime2NS = ConvertStopwatchUnits(EA::StdC::Stopwatch::kUnitsCPUCycles, resultSum.mTime2, EA::StdC::Stopwatch::kUnitsNanoseconds); + EA::UnitTest::Report("\n"); + PrintResultLine(resultSum); + + EA::UnitTest::Report("\n"); + EA::UnitTest::Report("****************************************************************************************\n"); + EA::UnitTest::Report("\n"); + + // Clear the results + gResultSet.clear(); + gEnvironment.clear(); + } + +} // namespace Benchmark + + + + + + + + + + + + + + + + + + + diff --git a/benchmark/source/EASTLBenchmark.h b/benchmark/source/EASTLBenchmark.h new file mode 100644 index 0000000..a0833e6 --- /dev/null +++ b/benchmark/source/EASTLBenchmark.h @@ -0,0 +1,228 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTLBENCHMARK_H +#define EASTLBENCHMARK_H + + +// Intrinsic control +// +// Our benchmark results are being skewed by inconsistent decisions by the +// VC++ compiler to use intrinsic functions. Additionally, many of our +// benchmarks work on large blocks of elements, whereas intrinsics often +// are an improvement only over small blocks of elements. As a result, +// enabling of intrinsics is often resulting in poor benchmark results for +// code that gets an intrinsic enabled for it, even though it will often +// happen in real code to be the opposite case. The disabling of intrinsics +// here often results in EASTL performance being lower than it would be in +// real-world situations. +// +#include +#ifdef _MSC_VER + #pragma function(strlen, strcmp, strcpy, strcat, memcpy, memcmp, memset) +#endif + + +#include +#include +#include +#include +#include + + +void BenchmarkSort(); +void BenchmarkList(); +void BenchmarkString(); +void BenchmarkVector(); +void BenchmarkDeque(); +void BenchmarkSet(); +void BenchmarkMap(); +void BenchmarkHash(); +void BenchmarkAlgorithm(); +void BenchmarkHeap(); +void BenchmarkBitset(); +void BenchmarkTupleVector(); + + +namespace Benchmark +{ + + // Environment + // + // The environment for this benchmark test. + // + struct Environment + { + eastl::string8 msPlatform; // Name of test platform (e.g. "Windows") + eastl::string8 msSTLName1; // Name of competitor #1 (e.g. "EASTL"). + eastl::string8 msSTLName2; // Name of competitor #2 (e.g. "MS STL"). + + void clear() { msPlatform.set_capacity(0); msSTLName1.set_capacity(0); msSTLName2.set_capacity(0); } + }; + + Environment& GetEnvironment(); + + + // Result + // + // An individual benchmark result. + // + struct Result + { + eastl::string8 msName; // Test name (e.g. "vector/insert"). + int mUnits; // Timing units (e.g. EA::StdC::Stopwatch::kUnitsSeconds). 
+ int64_t mTime1; // Time of competitor #1. + uint64_t mTime1NS; // Nanoseconds. + int64_t mTime2; // Time of competitor #2. + int64_t mTime2NS; // Nanoseconds. + eastl::string8 msNotes; // Any comments to attach to this result. + + Result() : msName(), mUnits(EA::StdC::Stopwatch::kUnitsCPUCycles), + mTime1(0), mTime1NS(0), mTime2(0), mTime2NS(0), msNotes() { } + }; + + inline bool operator<(const Result& r1, const Result& r2) + { return r1.msName < r2.msName; } + + typedef eastl::set ResultSet; + + ResultSet& GetResultSet(); + + + // Scratch sprintf buffer + extern char gScratchBuffer[1024]; + + + + // Utility functions + // + void DoNothing(...); + void AddResult(const char* pName, int units, int64_t nTime1, int64_t nTime2, const char* pNotes = NULL); + void PrintResults(); + void WriteTime(int64_t timeNS, eastl::string& sTime); + + +} // namespace Benchmark + + + + +/////////////////////////////////////////////////////////////////////////////// +/// LargePOD +/// +/// Implements a structure which is essentially a largish POD. Useful for testing +/// containers and algorithms for their ability to efficiently work with PODs. +/// This class isn't strictly a POD by the definition of the C++ standard, +/// but it suffices for our interests. +/// +struct LargeObject +{ + int32_t mData[2048]; +}; + +struct LargePOD +{ + LargeObject mLargeObject1; + LargeObject mLargeObject2; + const char* mpName1; + const char* mpName2; + + explicit LargePOD(int32_t x = 0) // A true POD doesn't have a non-trivial constructor. + { + memset(mLargeObject1.mData, 0, sizeof(mLargeObject1.mData)); + memset(mLargeObject2.mData, 0, sizeof(mLargeObject2.mData)); + mLargeObject1.mData[0] = x; + + mpName1 = "LargePOD1"; + mpName2 = "LargePOD2"; + } + + LargePOD(const LargePOD& largePOD) // A true POD doesn't have a non-trivial copy-constructor. 
+ : mLargeObject1(largePOD.mLargeObject1), + mLargeObject2(largePOD.mLargeObject2), + mpName1(largePOD.mpName1), + mpName2(largePOD.mpName2) + { + } + + virtual ~LargePOD() { } + + LargePOD& operator=(const LargePOD& largePOD) // A true POD doesn't have a non-trivial assignment operator. + { + if(&largePOD != this) + { + mLargeObject1 = largePOD.mLargeObject1; + mLargeObject2 = largePOD.mLargeObject2; + mpName1 = largePOD.mpName1; + mpName2 = largePOD.mpName2; + } + return *this; + } + + virtual void DoSomething() // Note that by declaring this virtual, this class is not truly a POD. + { // But it acts like a POD for the purposes of EASTL algorithms. + mLargeObject1.mData[1]++; + } + + operator int() + { + return (int)mLargeObject1.mData[0]; + } +}; + +//EASTL_DECLARE_POD(LargePOD); +//EASTL_DECLARE_TRIVIAL_CONSTRUCTOR(LargePOD); +//EASTL_DECLARE_TRIVIAL_COPY(LargePOD); +//EASTL_DECLARE_TRIVIAL_ASSIGN(LargePOD); +//EASTL_DECLARE_TRIVIAL_DESTRUCTOR(LargePOD); +//EASTL_DECLARE_TRIVIAL_RELOCATE(LargePOD); + +// Operators +// We specifically define only == and <, in order to verify that +// our containers and algorithms are not mistakenly expecting other +// operators for the contained and manipulated classes. 
+inline bool operator==(const LargePOD& t1, const LargePOD& t2) +{ + return (memcmp(&t1.mLargeObject1, &t2.mLargeObject1, sizeof(t1.mLargeObject1)) == 0) && + (memcmp(&t1.mLargeObject2, &t2.mLargeObject2, sizeof(t1.mLargeObject2)) == 0) && + (strcmp(t1.mpName1, t2.mpName1) == 0) && + (strcmp(t1.mpName2, t2.mpName2) == 0); +} + +inline bool operator<(const LargePOD& t1, const LargePOD& t2) +{ + return (memcmp(&t1.mLargeObject1, &t2.mLargeObject1, sizeof(t1.mLargeObject1)) < 0) && + (memcmp(&t1.mLargeObject2, &t2.mLargeObject2, sizeof(t1.mLargeObject2)) < 0) && + (strcmp(t1.mpName1, t2.mpName1) < 0) && + (strcmp(t1.mpName2, t2.mpName2) < 0); +} + + + + + +#endif // Header sentry + + + + + + + + + + + + + + + + + + + + + + diff --git a/benchmark/source/main.cpp b/benchmark/source/main.cpp new file mode 100644 index 0000000..59ff5a9 --- /dev/null +++ b/benchmark/source/main.cpp @@ -0,0 +1,194 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + + +#include "EASTLBenchmark.h" +#include "EASTLTest.h" +#if !EASTL_OPENSOURCE + #include +#endif +#include +#include +#include +#include +#include +#include +EA_DISABLE_VC_WARNING(4946) +#include "EAMain/EAEntryPointMain.inl" +#include "EASTLTestAllocator.h" + + +/////////////////////////////////////////////////////////////////////////////// +// gpEAGeneralAllocator / gpEAGeneralAllocatorDebug +// +#if !EASTL_OPENSOURCE +namespace EA +{ + namespace Allocator + { + #ifdef EA_DEBUG + extern GeneralAllocatorDebug gGeneralAllocator; + extern PPM_API GeneralAllocatorDebug* gpEAGeneralAllocatorDebug; + #else + extern GeneralAllocator gGeneralAllocator; + extern PPM_API GeneralAllocator* gpEAGeneralAllocator; + #endif + } +} +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// Required by EASTL. 
+// +#if !defined(EASTL_EASTDC_VSNPRINTF) || !EASTL_EASTDC_VSNPRINTF + int Vsnprintf8(char8_t* pDestination, size_t n, const char8_t* pFormat, va_list arguments) + { + return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments); + } + + int Vsnprintf16(char16_t* pDestination, size_t n, const char16_t* pFormat, va_list arguments) + { + return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments); + } + + #if (EASTDC_VERSION_N >= 10600) + int Vsnprintf32(char32_t* pDestination, size_t n, const char32_t* pFormat, va_list arguments) + { + return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments); + } + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// main +// +int EAMain(int argc, char* argv[]) +{ + bool bWaitAtEnd = false; + bool bPrintHelp = false; + int nOptionCount = 0; + int nErrorCount = 0; + + EA::EAMain::PlatformStartup(); + EA::EAMain::SetVerbosity(2); // Default value. + + // Set up debug parameters. + #ifdef EA_DEBUG + // Only enable this temporarily to help find any problems you might find. 
+ // EA::Allocator::gpEAGeneralAllocatorDebug->SetAutoHeapValidation(EA::Allocator::GeneralAllocator::kHeapValidationLevelBasic, 16); + #endif + + // Parse command line arguments + for(int i = 1; i < argc; i++) + { + if(strstr(argv[i], "-w") == argv[i]) + { + bWaitAtEnd = true; + nOptionCount++; + } + else if(strstr(argv[i], "-v") == argv[i]) + { + uint32_t verbosity = EA::StdC::AtoU32(argv[i] + 3); + EA::EAMain::SetVerbosity(verbosity); + nOptionCount++; + } + else if(strstr(argv[i], "-l:") == argv[i]) + { + gEASTL_TestLevel = atoi(argv[i] + 3); + if(gEASTL_TestLevel < kEASTL_TestLevelLow) + gEASTL_TestLevel = kEASTL_TestLevelLow; + else if(gEASTL_TestLevel > kEASTL_TestLevelHigh) + gEASTL_TestLevel = kEASTL_TestLevelHigh; + nOptionCount++; + } + else if(strstr(argv[i], "-s:") == argv[i]) + { + uint32_t seed = (eastl_size_t)atoi(argv[i] + 3); + EA::UnitTest::SetRandSeed(seed); + nOptionCount++; + } + else if((strstr(argv[i], "-?") == argv[i]) || (strstr(argv[i], "-h") == argv[i])) + { + bPrintHelp = true; + nOptionCount++; + } + } + + // Print user help. + if(!bPrintHelp) + bPrintHelp = (nOptionCount == 0); + + if(bPrintHelp) + { + EASTLTest_Printf("Options\n"); + EASTLTest_Printf(" -w Wait at end.\n"); + EASTLTest_Printf(" -l:N Test level in range of [1, 10]. 10 means maximum testing.\n"); + EASTLTest_Printf(" -s:N Specify a randomization seed. 0 is default and means use clock.\n"); + EASTLTest_Printf(" -? Show help.\n"); + } + + + // Set up test information + Benchmark::Environment& environment = Benchmark::GetEnvironment(); + environment.msPlatform = EA_PLATFORM_DESCRIPTION; + environment.msSTLName1 = GetStdSTLName(); + environment.msSTLName2 = "EASTL"; + + + // Run tests + #ifndef EA_DEBUG + EA::UnitTest::SetHighThreadPriority(); + #endif + + EA::StdC::Stopwatch stopwatch(EA::StdC::Stopwatch::kUnitsSeconds, true); // Measure seconds, start the counting immediately. 
+ + BenchmarkAlgorithm(); + BenchmarkList(); + BenchmarkString(); + BenchmarkVector(); + BenchmarkDeque(); + BenchmarkSet(); + BenchmarkMap(); + BenchmarkHash(); + BenchmarkHeap(); + BenchmarkBitset(); + BenchmarkSort(); + BenchmarkTupleVector(); + + stopwatch.Stop(); + + #ifndef EA_DEBUG + EA::UnitTest::SetNormalThreadPriority(); + #endif + + Benchmark::PrintResults(); + + eastl::string sClockTime; + Benchmark::WriteTime(stopwatch.GetElapsedTime(), sClockTime); + + EASTLTest_Printf("Time to complete all tests: %s.\n", sClockTime.c_str()); + + // Done + if(bWaitAtEnd) + { + EASTLTest_Printf("\nPress any key to exit.\n"); + getchar(); // Wait for the user and shutdown + } + + EA::EAMain::PlatformShutdown(nErrorCount); + + return 0; +} + + + + + + + + + + diff --git a/benchmark/source/meson.build b/benchmark/source/meson.build new file mode 100644 index 0000000..f41b2b7 --- /dev/null +++ b/benchmark/source/meson.build @@ -0,0 +1,26 @@ + +includes = [include_directories('.'), + include_directories('../../test/packages/EABase/include/Common'), + include_directories('../../test/source')] + +sources = ['BenchmarkAlgorithm.cpp', 'BenchmarkBitset.cpp', 'BenchmarkDeque.cpp', + 'BenchmarkHash.cpp','BenchmarkHeap.cpp','BenchmarkList.cpp', + 'BenchmarkMap.cpp','BenchmarkSet.cpp','BenchmarkSort.cpp', + 'BenchmarkString.cpp','BenchmarkTupleVector.cpp','BenchmarkVector.cpp', + 'EASTLBenchmark.cpp', '../../test/source/EASTLTest.cpp', '../../test/source/EASTLTestAllocator.cpp', + 'main.cpp'] + +cpp_args +=['-D_CRT_SECURE_NO_WARNINGS', + '-D_SCL_SECURE_NO_WARNINGS', + '-DEASTL_OPENSOURCE=1', + '-DEASTL_THREAD_SUPPORT_AVAILABLE=0'] + + + + +EASTLBenchmarks = executable('EASTLBenchmarks', + include_directories : includes, + sources : sources, + cpp_args : cpp_args + ['-std:c++17'], + dependencies : [eastl_dep, _internal_static_EAAssert_dep, _internal_EAStdC_dep, _internal_EAThread_dep, _internal_EAMain_dep, _internal_EATest_dep], + install : true) \ No newline at end of file diff 
--git a/doc/Benchmarks.md b/doc/Benchmarks.md new file mode 100644 index 0000000..c41cdb6 --- /dev/null +++ b/doc/Benchmarks.md @@ -0,0 +1,851 @@ +# EASTL Benchmarks + +## Introduction + +This document provides a number of benchmark results of EASTL. Where possible, these benchmarks are implemented as comparisons with equivalent functionality found in other libraries such as compiler STL libraries or other well-known libraries. These comparison benchmarks concentrate on highlighting the differences between implementations rather than the similarities. In many mundane cases -- such as accessing a vector element via operator [] -- virtually all vector/array implementations you are likely to run into will have identical performance. + +It's also important to note that the platform you run on can make a significant difference in the results. On a modern 3+GHz Windows PC many operations are fast due to large memory caches, intelligent branch prediction, and parallel instruction execution. However, on embedded or console systems none of these may be the case. + +While EASTL generally outperforms std STL, there are some benchmarks here in which EASTL is slower than std STL. There are three primary explanations of this: + +1. EASTL is making some kind of speed, memory, or design tradeoff that results in the given speed difference. In many such cases, EASTL goes slower on one benchmark in order to go faster on another benchmark deemed more important. This explanation constitutes about 60% of the cases. +2. Compiler optimizations and resulting code generation are coincidentally favoring one kind of implementation over another, often when they are visually virtually identical. This explanation constitutes about 30% of the cases. +3. EASTL is simply not yet as optimized as it could be. This explanation constitutes about 10% of the cases (as of this writing there are about three such functions throughout EASTL).
+ +## Benchmarks + +Below is a table of links to detailed benchmark results derived from the Benchmark test present in the EASTL package. The detailed results are present below the table. Additional platforms will be added as results become available for those platforms. Debug benchmarks are present because (lack of) debug performance can be significant for highly templated libraries. EASTL has specific optimizations to enhance debug performance relative to other standard libraries; in some cases it is 10x or more faster than alternatives (though there are exceptions where EASTL is slower). Feel free to submit results for additional compilers/platforms. + +| Platform | Compiler | STL type | Build | Results | +|------|------|------|------|------| +| Win32 | VC++ 7.1 | Microsoft (Dinkumware) | Debug | [Detail]() | +| Win32 | VC++ 7.1 | Microsoft (Dinkumware) | Release | [Detail]() | +| Win32 | VC++ 7.1 | STLPort | Debug | [Detail]() | +| Win32 | VC++ 7.1 | STLPort | Release | [Detail]() | + +### Win32.VC71.MS.Debug + +``` +EASTL version: 0.96.00 +Platform: Windows on X86 +Compiler: Microsoft Visual C++ compiler, version 1310 +Allocator: PPMalloc::GeneralAllocatorDebug. Thread safety enabled. +Build: Debug. Inlining disabled. STL debug features disabled. + +Values are times to complete tests; smaller values are better. +Alarm indicates a greater than 10% difference. 
+ +Test VC++ EASTL Ratio Alarm +---------------------------------------------------------------------------------------- +algorithm/adj_find/vector 33061345 6497757 5.09 * +algorithm/copy/vector 5844906 4876076 1.20 * +algorithm/copy/vector 1634346 166065 9.84 * +algorithm/copy_backward/vector 4515974 4638892 0.97 +algorithm/copy_backward/vector 1821168 121746 14.96 * +algorithm/count/vector 17048884 2720766 6.27 * +algorithm/equal_range/vector 1111147812 448756888 2.48 * +algorithm/fill/bool[] 1728722 91936 18.80 * +algorithm/fill/char[]/'d' 1299200 33745 38.50 * +algorithm/fill/vector/'d' 10205092 33796 100.00 * +algorithm/fill/vector/0 10200748 33805 100.00 * +algorithm/fill/vector 10416538 1399687 7.44 * +algorithm/fill/vector 10221837 1307700 7.82 * +algorithm/fill_n/bool[] 1399033 34196 40.91 * +algorithm/fill_n/char[] 1299225 33754 38.49 * +algorithm/fill_n/vector 5961637 1371900 4.35 * +algorithm/find_end/string/end 16569373 2657372 6.24 * +algorithm/find_end/string/middle 16558638 20242410 0.82 * +algorithm/find_end/string/none 16811207 40480468 0.42 * +algorithm/lex_cmp/schar[] 1749674 194429 9.00 * +algorithm/lex_cmp/vector 32824195 5253587 6.25 * +algorithm/lex_cmp/vector 29852034 202658 100.00 * +algorithm/lower_bound/vector 798624462 350027935 2.28 * +algorithm/min_element/vector 21675298 5314676 4.08 * +algorithm/rand_shuffle/vector 84236190 43677506 1.93 * +algorithm/reverse/list 3007292 2105799 1.43 * +algorithm/reverse/vector 2974618 2124796 1.40 * +algorithm/search/string 16228158 3594268 4.52 * +algorithm/search_n/string 16926985 1522096 11.12 * +algorithm/unique/vector 54206243 9988002 5.43 * +algorithm/unique/vector 26940079 1741991 15.47 * +algorithm/unique/vector 47621344 5213127 9.13 * +algorithm/upper_bound/vector 372381295 137901552 2.70 * + +bitset<1500>/>>=/1 90196544 92539832 0.97 +bitset<1500>/count 50753832 53742117 0.94 +bitset<1500>/flip 86935875 85121117 1.02 +bitset<1500>/reset 78153837 79922611 0.98 +bitset<1500>/set() 79214968 
79360658 1.00 +bitset<1500>/set(i) 11300589 12199651 0.93 +bitset<1500>/test 11282679 13186450 0.86 * + +bitset<15>/>>=/1 10500577 6000559 1.75 * +bitset<15>/count 4000356 6399753 0.63 * +bitset<15>/flip 7268877 5647944 1.29 * +bitset<15>/reset 8564235 5800163 1.48 * +bitset<15>/set() 9935523 5914012 1.68 * +bitset<15>/set(i) 11199703 12503637 0.90 * +bitset<15>/test 10600623 12899592 0.82 * + +bitset<35>/>>=/1 13076052 6599834 1.98 * +bitset<35>/count 4800384 11500330 0.42 * +bitset<35>/flip 7915439 5816313 1.36 * +bitset<35>/reset 9400049 5803180 1.62 * +bitset<35>/set() 10701152 5840316 1.83 * +bitset<35>/set(i) 11342936 12271128 0.92 +bitset<35>/test 10670799 13099682 0.81 * + +bitset<75>/>>=/1 14198834 17151088 0.83 * +bitset<75>/count 5795530 8576373 0.68 * +bitset<75>/flip 8516703 8922995 0.95 +bitset<75>/reset 9999970 8526095 1.17 * +bitset<75>/set() 11124877 9009686 1.23 * +bitset<75>/set(i) 11300563 12531618 0.90 * +bitset<75>/test 11031913 13100523 0.84 * + +deque/erase 743801706 335646802 2.22 * +deque/insert 742331809 341912866 2.17 * +deque/iteration 29097030 16315827 1.78 * +deque/operator[] 49859598 24026313 2.08 * +deque/push_back 424807033 34497608 12.31 * +deque/push_front 402313373 38006322 10.59 * +deque/sort 725101017 581796551 1.25 * + +hash_map/clear 559462 961019 0.58 * +hash_map/count 53377807 8091448 6.60 * +hash_map/erase pos 613573 858084 0.72 * +hash_map/erase range 5488748 461134 11.90 * +hash_map/erase val 35760096 16379858 2.18 * +hash_map/find 43490335 10324823 4.21 * +hash_map/find_as/char* 49343818 8617139 5.73 * +hash_map/insert 107420281 168690439 0.64 * +hash_map/iteration 2456356 1255153 1.96 * +hash_map/operator[] 47209502 12581624 3.75 * + +hash_map/clear 533172 546449 0.98 +hash_map/count 28667432 2899997 9.89 * +hash_map/erase pos 683239 538289 1.27 * +hash_map/erase range 9632676 253037 38.07 * +hash_map/erase val 25466026 7752188 3.29 * +hash_map/find 20048253 4678502 4.29 * +hash_map/insert 71085798 37686187 1.89 * 
+hash_map/iteration 1460318 1338317 1.09 +hash_map/operator[] 23226692 7888748 2.94 * + +heap (uint32_t[])/make_heap 5399966 6961305 0.78 * +heap (uint32_t[])/pop_heap 108060534 103511318 1.04 +heap (uint32_t[])/push_heap 22595661 16640688 1.36 * +heap (uint32_t[])/sort_heap 93559424 83076731 1.13 * + +heap (vector)/make_heap 91770743 21724870 4.22 * +heap (vector)/pop_heap 1175599317 284007398 4.14 * +heap (vector)/push_heap 207804541 45918046 4.53 * +heap (vector)/sort_heap 970394145 208321477 4.66 * + +list/ctor(it) 805539509 760938607 1.06 +list/ctor(n) 80959236 75106995 1.08 +list/erase 1052543704 1044976137 1.01 +list/find 97785267 75970884 1.29 * +list/insert 873895175 807051107 1.08 +list/push_back 812797710 780742425 1.04 +list/remove 1850600714 1436980599 1.29 * +list/reverse 180270465 80466636 2.24 * +list/size/1 440148 599642 0.73 * +list/size/10 439433 1329817 0.33 * EASTL intentionally implements list::size as O(n). +list/size/100 439595 11030060 0.04 * EASTL intentionally implements list::size as O(n). +list/splice 177106094 69383027 2.55 * + +map/clear 508283 470807 1.08 +map/count 43145354 14280357 3.02 * +map/equal_range 38594004 16520447 2.34 * +map/erase/key 33948082 16123175 2.11 * +map/erase/pos 578332 455201 1.27 * MS uses a code bloating implementation of erase. +map/erase/range 387345 284538 1.36 * +map/find 22897224 12766100 1.79 * +map/insert 61665800 47286928 1.30 * +map/iteration 1977202 745391 2.65 * +map/lower_bound 19892941 12260928 1.62 * +map/operator[] 24199084 15429634 1.57 * +map/upper_bound 19842409 12064441 1.64 * + +set/clear 1027625 1000901 1.03 +set/count 39730182 13329565 2.98 * +set/equal_range 34681649 14768827 2.35 * +set/erase range 841458 602030 1.40 * +set/erase/pos 1380485 1084303 1.27 * MS uses a code bloating implementation of erase. 
+set/erase/val 31617425 13344023 2.37 * +set/find 19582428 10788864 1.82 * +set/insert 61434014 48232086 1.27 * +set/iteration 1512057 667820 2.26 * +set/lower_bound 18394885 10402785 1.77 * +set/upper_bound 17189083 10554425 1.63 * + +sort/q_sort/TestObject[] 87088799 15037988 5.79 * +sort/q_sort/TestObject[]/sorted 21502892 3284299 6.55 * +sort/q_sort/vector 87962047 15004677 5.86 * +sort/q_sort/vector/sorted 21396523 3341163 6.40 * +sort/q_sort/vector 80334589 10429161 7.70 * +sort/q_sort/vector/sorted 22133295 3230553 6.85 * +sort/q_sort/vector 72195388 5940302 12.15 * +sort/q_sort/vector/sorted 19635171 995495 19.72 * + +string/compare 523013373 534722089 0.98 +string/erase/pos,n 3446597 3439492 1.00 +string/find/p,pos,n 383873158 441902786 0.87 * +string/find_first_not_of/p,pos,n 174157 134131 1.30 * +string/find_first_of/p,pos,n 11715423 8520944 1.37 * +string/find_last_of/p,pos,n 1871556 1226457 1.53 * +string/insert/pos,p 3624877 3357058 1.08 +string/iteration 6766787933 581916665 11.63 * +string/operator[] 4820827 2335579 2.06 * +string/push_back 59812962 6757466 8.85 * +string/replace/pos,n,p,n 4371279 4459713 0.98 +string/reserve 2307530 1919386 1.20 * +string/rfind/p,pos,n 734826 372615 1.97 * +string/size 41608 28866 1.44 * +string/swap 1033932 1490994 0.69 * + +string/compare 63086797 64194771 0.98 +string/erase/pos,n 2045687 1960270 1.04 +string/find/p,pos,n 123872549 471364764 0.26 * +string/find_first_not_of/p,pos,n 140013 130271 1.07 +string/find_first_of/p,pos,n 8051906 8749994 0.92 +string/find_last_of/p,pos,n 1318835 1230715 1.07 +string/insert/pos,p 1770610 1724234 1.03 +string/iteration 28112136 2544475 11.05 * +string/operator[] 4810525 2255841 2.13 * +string/push_back 54869634 6127447 8.95 * +string/replace/pos,n,p,n 2737578 2847900 0.96 +string/reserve 1123395 394902 2.84 * +string/rfind/p,pos,n 737299 368518 2.00 * +string/size 42245 26801 1.58 * +string/swap 1036142 1491028 0.69 * + +vector/erase 56417135 55770251 1.01 +vector/insert 
56617761 56100468 1.01 +vector/iteration 10413895 1291269 8.06 * +vector/operator[] 23507193 3479390 6.76 * +vector/push_back 34687939 13806627 2.51 * +vector/sort 256886550 84669657 3.03 * +``` + +### Win32.VC71.MS.Release + +``` +EASTL version: 0.96.00 +Platform: Windows on X86 +Compiler: Microsoft Visual C++ compiler, version 1310 +Allocator: PPMalloc::GeneralAllocator. Thread safety enabled. +Build: Full optimization. Inlining enabled. + +Values are times to complete tests; smaller values are better. +Alarm indicates a greater than 10% difference. + +Test VC++ EASTL Ratio Alarm +---------------------------------------------------------------------------------------- +algorithm/adj_find/vector 2783546 2750660 1.01 +algorithm/copy/vector 6474025 4972738 1.30 * +algorithm/copy/vector 157267 173162 0.91 +algorithm/copy_backward/vector 4836406 4374780 1.11 * +algorithm/copy_backward/vector 104780 120912 0.87 * +algorithm/count/vector 1368440 1368696 1.00 +algorithm/equal_range/vector 114199387 102783938 1.11 * +algorithm/fill/bool[] 253215 27353 9.26 * +algorithm/fill/char[]/'d' 253164 27404 9.24 * +algorithm/fill/vector/'d' 253105 27362 9.25 * +algorithm/fill/vector/0 253275 27353 9.26 * +algorithm/fill/vector 397001 394323 1.01 +algorithm/fill/vector 547196 642362 0.85 * +algorithm/fill_n/bool[] 229177 27361 8.38 * +algorithm/fill_n/char[] 228845 27404 8.35 * +algorithm/fill_n/vector 565233 1376822 0.41 * +algorithm/find_end/string/end 2107116 82356 25.59 * +algorithm/find_end/string/middle 2111672 664283 3.18 * +algorithm/find_end/string/none 2110423 1519596 1.39 * +algorithm/lex_cmp/schar[] 741021 176162 4.21 * +algorithm/lex_cmp/vector 2610494 2642183 0.99 +algorithm/lex_cmp/vector 697595 167866 4.16 * +algorithm/lower_bound/vector 62462233 58146664 1.07 +algorithm/min_element/vector 4350385 2671227 1.63 * +algorithm/rand_shuffle/vector 10868261 11300818 0.96 +algorithm/reverse/list 483718 470024 1.03 +algorithm/reverse/vector 476739 484322 0.98 
+algorithm/search/string 2560387 1259496 2.03 * +algorithm/search_n/string 2770991 458524 6.04 * +algorithm/unique/vector 4194520 4658910 0.90 * +algorithm/unique/vector 538730 787924 0.68 * +algorithm/unique/vector 3169829 2575636 1.23 * +algorithm/upper_bound/vector 27495562 25321593 1.09 + +bitset<1500>/>>=/1 33464228 33469719 1.00 +bitset<1500>/count 18736116 18814903 1.00 +bitset<1500>/flip 19299309 18605438 1.04 +bitset<1500>/reset 22200487 15262847 1.45 * +bitset<1500>/set() 14418193 17557319 0.82 * +bitset<1500>/set(i) 1599250 1599199 1.00 +bitset<1500>/test 1599241 1599233 1.00 + +bitset<15>/>>=/1 2199222 2264442 0.97 +bitset<15>/count 1399406 1399193 1.00 +bitset<15>/flip 1266712 1199197 1.06 +bitset<15>/reset 1399364 1399109 1.00 +bitset<15>/set() 1199197 999201 1.20 * +bitset<15>/set(i) 1599258 1462952 1.09 +bitset<15>/test 1599275 1599224 1.00 + +bitset<35>/>>=/1 2599266 1933376 1.34 * +bitset<35>/count 2599240 2592559 1.00 +bitset<35>/flip 1693124 1199188 1.41 * +bitset<35>/reset 1399406 999201 1.40 * +bitset<35>/set() 1599403 1199205 1.33 * +bitset<35>/set(i) 1599241 1599190 1.00 +bitset<35>/test 1599250 1599232 1.00 + +bitset<75>/>>=/1 4199332 4199213 1.00 +bitset<75>/count 2999497 2199341 1.36 * +bitset<75>/flip 2399499 1830178 1.31 * +bitset<75>/reset 2199468 1199197 1.83 * +bitset<75>/set() 1999387 1199851 1.67 * +bitset<75>/set(i) 1599266 1599198 1.00 +bitset<75>/test 1599241 1662651 0.96 + +deque/erase 90444165 37113253 2.44 * +deque/insert 93299349 36175167 2.58 * +deque/iteration 2756414 2122076 1.30 * +deque/operator[] 5117969 4632075 1.10 +deque/push_back 30300757 3060357 9.90 * +deque/push_front 25498529 2808392 9.08 * +deque/sort 142283047 111292464 1.28 * + +hash_map/clear 146769 389699 0.38 * +hash_map/count 13059434 3460324 3.77 * +hash_map/erase pos 184246 331925 0.56 * +hash_map/erase range 382432 167237 2.29 * +hash_map/erase val 6187898 3302114 1.87 * +hash_map/find 11289369 3459024 3.26 * +hash_map/find_as/char* 13559192 3662387 
3.70 * +hash_map/insert 17514012 14095176 1.24 * +hash_map/iteration 801014 218450 3.67 * +hash_map/operator[] 11457065 3690385 3.10 * + +hash_map/clear 141865 265379 0.53 * +hash_map/count 1766045 703613 2.51 * +hash_map/erase pos 172337 218458 0.79 * +hash_map/erase range 537846 102340 5.26 * +hash_map/erase val 2220132 1441787 1.54 * +hash_map/find 1612994 1043953 1.55 * +hash_map/insert 7141547 4348056 1.64 * +hash_map/iteration 199512 169328 1.18 * +hash_map/operator[] 1831733 1519707 1.21 * + +heap (uint32_t[])/make_heap 3366247 1949093 1.73 * +heap (uint32_t[])/pop_heap 57280514 53779440 1.07 +heap (uint32_t[])/push_heap 9700217 7582935 1.28 * +heap (uint32_t[])/sort_heap 47227751 46131948 1.02 + +heap (vector)/make_heap 11458442 11510819 1.00 +heap (vector)/pop_heap 122897267 119061132 1.03 +heap (vector)/push_heap 21688481 21176220 1.02 +heap (vector)/sort_heap 90867380 88869523 1.02 + +list/ctor(it) 74591104 69845817 1.07 +list/ctor(n) 6243998 5838582 1.07 +list/erase 299509298 206013676 1.45 * +list/find 40927185 14514243 2.82 * +list/insert 71277251 47234534 1.51 * +list/push_back 73780527 44116725 1.67 * +list/remove 786197776 326434612 2.41 * +list/reverse 49283128 25029678 1.97 * +list/size/1 159741 139400 1.15 * +list/size/10 159324 346579 0.46 * EASTL intentionally implements list::size as O(n). +list/size/100 159188 97235419 0.00 * EASTL intentionally implements list::size as O(n). +list/splice 63548584 19322931 3.29 * + +map/clear 167408 170501 0.98 +map/count 10213685 4748346 2.15 * +map/equal_range 9515053 5677558 1.68 * +map/erase/key 6646260 4302300 1.54 * +map/erase/pos 297135 327938 0.91 MS uses a code bloating implementation of erase. 
+map/erase/range 148614 163702 0.91 +map/find 5637531 4767055 1.18 * +map/insert 9591128 9030349 1.06 +map/iteration 323595 325261 0.99 +map/lower_bound 5398239 4784089 1.13 * +map/operator[] 5631250 5141166 1.10 +map/upper_bound 5436336 4762431 1.14 * + +set/clear 155983 156026 1.00 +set/count 9635965 4392146 2.19 * +set/equal_range 8504157 5247832 1.62 * +set/erase range 140488 119408 1.18 * +set/erase/pos 260678 286697 0.91 MS uses a code bloating implementation of erase. +set/erase/val 6008225 4012825 1.50 * +set/find 5145432 4381945 1.17 * +set/insert 8087129 8697251 0.93 +set/iteration 271507 304538 0.89 * +set/lower_bound 4666228 4404250 1.06 +set/upper_bound 4623600 4402974 1.05 + +sort/q_sort/TestObject[] 9596169 5578652 1.72 * +sort/q_sort/TestObject[]/sorted 602463 1016132 0.59 * +sort/q_sort/vector 9674828 5430199 1.78 * +sort/q_sort/vector/sorted 606908 1111647 0.55 * +sort/q_sort/vector 6284194 3423452 1.84 * +sort/q_sort/vector/sorted 711629 569364 1.25 * +sort/q_sort/vector 5453379 2916146 1.87 * +sort/q_sort/vector/sorted 537047 419144 1.28 * + +string/compare 435083295 251985824 1.73 * +string/erase/pos,n 3454842 3451858 1.00 +string/find/p,pos,n 401954723 165298157 2.43 * +string/find_first_not_of/p,pos,n 131452 65374 2.01 * +string/find_first_of/p,pos,n 11657444 4144515 2.81 * +string/find_last_of/p,pos,n 1604248 567571 2.83 * +string/insert/pos,p 3398734 3355460 1.01 +string/iteration 218856504 218771844 1.00 +string/operator[] 714161 240023 2.98 * +string/push_back 34968235 2444897 14.30 * +string/replace/pos,n,p,n 4226693 4198498 1.01 +string/reserve 1901765 390805 4.87 * +string/rfind/p,pos,n 195483 150985 1.29 * +string/size 11169 11245 0.99 +string/swap 1459280 419807 3.48 * + +string/compare 63071275 77209580 0.82 * +string/erase/pos,n 2008652 1944494 1.03 +string/find/p,pos,n 123201023 167536164 0.74 * +string/find_first_not_of/p,pos,n 93372 67864 1.38 * +string/find_first_of/p,pos,n 7542492 3375758 2.23 * +string/find_last_of/p,pos,n 
933972 583576 1.60 * +string/insert/pos,p 1737213 1750847 0.99 +string/iteration 893834 899130 0.99 +string/operator[] 817879 313437 2.61 * +string/push_back 20857734 2004410 10.41 * +string/replace/pos,n,p,n 2578696 2607655 0.99 +string/reserve 915127 85289 10.73 * +string/rfind/p,pos,n 196103 148894 1.32 * +string/size 11619 11220 1.04 +string/swap 1461056 419874 3.48 * + +vector/erase 55235116 55284587 1.00 +vector/insert 55166046 55142755 1.00 +vector/iteration 553954 509719 1.09 +vector/operator[] 1284239 798516 1.61 * +vector/push_back 5399549 3867959 1.40 * +vector/sort 43636314 42619952 1.02 +``` + +### Win32.VC71.STLPort.Debug + +``` +EASTL version: 0.96.00 +Platform: Windows on X86 +Compiler: Microsoft Visual C++ compiler, version 1310 +Allocator: PPMalloc::GeneralAllocatorDebug. Thread safety enabled. +Build: Debug. Inlining disabled. STL debug features disabled. + +Values are times to complete tests; smaller values are better. +Alarm indicates a greater than 10% difference. + +Test STLPort EASTL Ratio Alarm +---------------------------------------------------------------------------------------- +algorithm/adj_find/vector 5661170 5689517 1.00 +algorithm/copy/vector 5573815 5124428 1.09 +algorithm/copy/vector 148273 125782 1.18 * +algorithm/copy_backward/vector 5429791 4834510 1.12 * +algorithm/copy_backward/vector 156765 163038 0.96 +algorithm/count/vector 2730922 2730072 1.00 +algorithm/equal_range/vector 639366489 452896251 1.41 * +algorithm/fill/bool[] 1299326 27361 47.49 * +algorithm/fill/char[]/'d' 27378 27361 1.00 +algorithm/fill/vector/'d' 34459 27361 1.26 * +algorithm/fill/vector/0 1299224 27361 47.48 * +algorithm/fill/vector 1400647 1400145 1.00 +algorithm/fill/vector 1308779 1309085 1.00 +algorithm/fill_n/bool[] 1299156 27352 47.50 * +algorithm/fill_n/char[] 1299258 27369 47.47 * +algorithm/fill_n/vector 1451162 1313632 1.10 +algorithm/find_end/string/end 13089999 2526412 5.18 * +algorithm/find_end/string/middle 12627412 20190101 0.63 * 
+algorithm/find_end/string/none 12704185 40728803 0.31 * +algorithm/lex_cmp/schar[] 1749844 195806 8.94 * +algorithm/lex_cmp/vector 5060968 4799882 1.05 +algorithm/lex_cmp/vector 1668354 189490 8.80 * +algorithm/lower_bound/vector 450240945 353437573 1.27 * +algorithm/min_element/vector 5861744 5326371 1.10 +algorithm/rand_shuffle/vector 40780449 45780090 0.89 * +algorithm/reverse/list 2657678 2130627 1.25 * +algorithm/reverse/vector 2666424 2124889 1.25 * +algorithm/search/string 3110379 3613460 0.86 * +algorithm/search_n/string 3061665 1521261 2.01 * +algorithm/unique/vector 12423684 9485439 1.31 * +algorithm/unique/vector 3718699 1726596 2.15 * +algorithm/unique/vector 6205110 4591631 1.35 * +algorithm/upper_bound/vector 185391094 139336317 1.33 * + +bitset<1500>/>>=/1 120666960 92449816 1.31 * STLPort is broken, neglects wraparound check. +bitset<1500>/count 201709793 52874726 3.81 * +bitset<1500>/flip 87360297 81737071 1.07 +bitset<1500>/reset 23950178 77390323 0.31 * +bitset<1500>/set() 84608107 76912011 1.10 +bitset<1500>/set(i) 18023620 12229604 1.47 * +bitset<1500>/test 18006553 13276396 1.36 * + +bitset<15>/>>=/1 11935904 6012695 1.99 * STLPort is broken, neglects wraparound check. +bitset<15>/count 9368581 6022742 1.56 * +bitset<15>/flip 11600706 6533635 1.78 * +bitset<15>/reset 5830957 5874690 0.99 +bitset<15>/set() 11695328 5701621 2.05 * +bitset<15>/set(i) 16363205 12570216 1.30 * +bitset<15>/test 16743172 13201452 1.27 * + +bitset<35>/>>=/1 22950918 6774457 3.39 * STLPort is broken, neglects wraparound check. +bitset<35>/count 12655309 11736256 1.08 +bitset<35>/flip 13738575 5800042 2.37 * +bitset<35>/reset 15561434 5800510 2.68 * +bitset<35>/set() 13564283 5600709 2.42 * +bitset<35>/set(i) 18519689 12199973 1.52 * +bitset<35>/test 18000569 13103566 1.37 * + +bitset<75>/>>=/1 25579525 16669664 1.53 * STLPort is broken, neglects wraparound check. 
+bitset<75>/count 18740698 8480492 2.21 * +bitset<75>/flip 13555630 8300335 1.63 * +bitset<75>/reset 15200133 8200000 1.85 * +bitset<75>/set() 14408112 8001959 1.80 * +bitset<75>/set(i) 18137741 12374257 1.47 * +bitset<75>/test 18422135 13100038 1.41 * + +deque/erase 651933790 326443043 2.00 * +deque/insert 659786183 333304660 1.98 * +deque/iteration 23734592 16173706 1.47 * +deque/operator[] 59126816 23911774 2.47 * +deque/push_back 58056988 31859266 1.82 * +deque/push_front 57780891 31743199 1.82 * +deque/sort 818414195 596568113 1.37 * + +hash_map/clear 3422133 2204517 1.55 * +hash_map/count 9869545 8624924 1.14 * +hash_map/erase pos 3256350 2069299 1.57 * +hash_map/erase range 3230203 1151392 2.81 * +hash_map/erase val 16860362 15939778 1.06 +hash_map/find 10286971 9920910 1.04 +hash_map/find_as/char* 118136025 9458468 12.49 * +hash_map/insert 188948336 174490082 1.08 +hash_map/iteration 4037049 2021036 2.00 * +hash_map/operator[] 11472127 12887699 0.89 * + +hash_map/clear 2522264 1331848 1.89 * +hash_map/count 3210739 2897063 1.11 * +hash_map/erase pos 1862281 1304783 1.43 * +hash_map/erase range 698079 579606 1.20 * +hash_map/erase val 8806722 7041298 1.25 * +hash_map/find 3604875 4709645 0.77 * +hash_map/insert 40785711 40376342 1.01 +hash_map/iteration 3064088 1508834 2.03 * +hash_map/operator[] 6053742 8176906 0.74 * + +heap (uint32_t[])/make_heap 5799813 5738596 1.01 +heap (uint32_t[])/pop_heap 113775168 102076134 1.11 * +heap (uint32_t[])/push_heap 21649151 16854845 1.28 * +heap (uint32_t[])/sort_heap 97535213 83290735 1.17 * + +heap (vector)/make_heap 22215557 22277063 1.00 +heap (vector)/pop_heap 275392171 277340039 0.99 +heap (vector)/push_heap 51479442 47342577 1.09 +heap (vector)/sort_heap 214474736 218497540 0.98 + +list/ctor(it) 767753795 753421427 1.02 +list/ctor(n) 74185322 73386245 1.01 +list/erase 1021003824 1033873589 0.99 +list/find 77666072 74917622 1.04 +list/insert 788071150 774188737 1.02 +list/push_back 760490154 737327348 1.03 
+list/remove 1682511938 1434771006 1.17 * +list/reverse 87237327 80394623 1.09 +list/size/1 3828111 599530 6.39 * +list/size/10 9600605 1329535 7.22 * EASTL intentionally implements list::size as O(n). +list/size/100 62952334 15022551 4.19 * EASTL intentionally implements list::size as O(n). +list/splice 96536412 60804817 1.59 * + +map/clear 1142127 1099066 1.04 +map/count 19659726 14647548 1.34 * +map/equal_range 36680687 18219086 2.01 * +map/erase/key 28892154 16037774 1.80 * +map/erase/pos 1209643 1185495 1.02 +map/erase/range 715402 670539 1.07 +map/find 21020992 13429575 1.57 * +map/insert 59530871 51120640 1.16 * +map/iteration 972825 1191946 0.82 * +map/lower_bound 18852651 12495034 1.51 * +map/operator[] 22889573 16676736 1.37 * +map/upper_bound 18603584 12406922 1.50 * + +set/clear 919555 882988 1.04 +set/count 17561110 12461084 1.41 * +set/equal_range 31522488 15230282 2.07 * +set/erase range 687582 564765 1.22 * +set/erase/pos 1044352 1045355 1.00 +set/erase/val 25525304 12940774 1.97 * +set/find 17140751 10704866 1.60 * +set/insert 56035051 45555664 1.23 * +set/iteration 682669 640831 1.07 +set/lower_bound 16339932 10475740 1.56 * +set/upper_bound 17779424 10652599 1.67 * + +sort/q_sort/TestObject[] 17000866 14823515 1.15 * +sort/q_sort/TestObject[]/sorted 6658559 3263328 2.04 * +sort/q_sort/vector 17476629 14953285 1.17 * +sort/q_sort/vector/sorted 6667034 3327435 2.00 * +sort/q_sort/vector 15391357 10820848 1.42 * +sort/q_sort/vector/sorted 6617122 3232949 2.05 * +sort/q_sort/vector 8343906 6014846 1.39 * +sort/q_sort/vector/sorted 3039430 1003127 3.03 * + +string/compare 1489709846 532664000 2.80 * +string/erase/pos,n 3528690 3439864 1.03 +string/find/p,pos,n 2521448321 443752189 5.68 * +string/find_first_not_of/p,pos,n 661206 137419 4.81 * +string/find_first_of/p,pos,n 54746434 8521335 6.42 * +string/find_last_of/p,pos,n 10607778 1212414 8.75 * +string/insert/pos,p 3445016 3360126 1.03 +string/iteration 580955636 579452556 1.00 +string/operator[] 
2206353 1987809 1.11 * +string/push_back 22421368 6007808 3.73 * +string/replace/pos,n,p,n 5138454 4464786 1.15 * +string/reserve 4922413418 335622 100.00 * +string/rfind/p,pos,n 1440308 380578 3.78 * +string/size 25355 25398 1.00 +string/swap 2122704 1490823 1.42 * + +string/compare 77222134 77443134 1.00 +string/erase/pos,n 1965344 1956521 1.00 +string/find/p,pos,n 2468091951 474205522 5.20 * +string/find_first_not_of/p,pos,n 660960 130211 5.08 * +string/find_first_of/p,pos,n 55020899 9240171 5.95 * +string/find_last_of/p,pos,n 10576210 1239053 8.54 * +string/insert/pos,p 1822756 1750880 1.04 +string/iteration 2617889 2540148 1.03 +string/operator[] 2254794 2256443 1.00 +string/push_back 12463022 5210321 2.39 * +string/replace/pos,n,p,n 3744862 2855260 1.31 * +string/reserve 1372046888 218815 100.00 * +string/rfind/p,pos,n 1446232 366902 3.94 * +string/size 26859 25431 1.06 +string/swap 2123350 1490509 1.42 * + +vector/erase 55164013 56417449 0.98 +vector/insert 55872973 56432664 0.99 +vector/iteration 1329102 1324623 1.00 +vector/operator[] 5264738 3136746 1.68 * +vector/push_back 14903245 13171175 1.13 * +vector/sort 88429095 88542171 1.00 +``` + +### Win32.VC71.STLPort.Release + +``` +EASTL version: 0.96.00 +Platform: Windows on X86 +Compiler: Microsoft Visual C++ compiler, version 1310 +Allocator: PPMalloc::GeneralAllocator. Thread safety enabled. +Build: Full optimization. Inlining enabled. + +Values are times to complete tests; smaller values are better. +Alarm indicates a greater than 10% difference. 
+ +Test STLPort EASTL Ratio Alarm +---------------------------------------------------------------------------------------- +algorithm/adj_find/vector 2741046 2731441 1.00 +algorithm/copy/vector 6065923 5085142 1.19 * +algorithm/copy/vector 158304 165555 0.96 +algorithm/copy_backward/vector 4710258 4896476 0.96 +algorithm/copy_backward/vector 146030 142630 1.02 +algorithm/count/vector 1395921 1406334 0.99 +algorithm/equal_range/vector 211692764 118969493 1.78 * +algorithm/fill/bool[] 366078 33737 10.85 * +algorithm/fill/char[]/'d' 33736 33771 1.00 +algorithm/fill/vector/'d' 28466 33720 0.84 * +algorithm/fill/vector/0 366086 33728 10.85 * +algorithm/fill/vector 466250 401591 1.16 * +algorithm/fill/vector 521603 693481 0.75 * +algorithm/fill_n/bool[] 599709 33762 17.76 * +algorithm/fill_n/char[] 599573 33711 17.79 * +algorithm/fill_n/vector 434971 1374084 0.32 * +algorithm/find_end/string/end 1494742 85349 17.51 * +algorithm/find_end/string/middle 1480700 687208 2.15 * +algorithm/find_end/string/none 1540540 1546431 1.00 +algorithm/lex_cmp/schar[] 921638 178797 5.15 * +algorithm/lex_cmp/vector 2623559 2643551 0.99 +algorithm/lex_cmp/vector 960899 183608 5.23 * +algorithm/lower_bound/vector 60630534 56531528 1.07 +algorithm/min_element/vector 4209022 2768527 1.52 * +algorithm/rand_shuffle/vector 13762010 15969052 0.86 * +algorithm/reverse/list 673387 731825 0.92 +algorithm/reverse/vector 634576 754511 0.84 * +algorithm/search/string 1262599 1387608 0.91 +algorithm/search_n/string 1166242 458592 2.54 * +algorithm/unique/vector 4912193 5336317 0.92 +algorithm/unique/vector 809387 809081 1.00 +algorithm/unique/vector 4371814 2414255 1.81 * +algorithm/upper_bound/vector 31899081 29555596 1.08 + +bitset<1500>/>>=/1 63308136 40553560 1.56 * STLPort is broken, neglects wraparound check. 
+bitset<1500>/count 62523178 22799473 2.74 * +bitset<1500>/flip 20302845 19919232 1.02 +bitset<1500>/reset 18892015 15403148 1.23 * +bitset<1500>/set() 15803302 17322192 0.91 +bitset<1500>/set(i) 2799271 2999310 0.93 +bitset<1500>/test 2999293 2799262 1.07 + +bitset<15>/>>=/1 1199239 3199256 0.37 * STLPort is broken, neglects wraparound check. +bitset<15>/count 3599461 2199231 1.64 * +bitset<15>/flip 1199231 1199188 1.00 +bitset<15>/reset 1199188 1199180 1.00 +bitset<15>/set() 1199214 1199180 1.00 +bitset<15>/set(i) 2599257 1399262 1.86 * +bitset<15>/test 2599274 2599283 1.00 + +bitset<35>/>>=/1 6643974 4599239 1.44 * STLPort is broken, neglects wraparound check. +bitset<35>/count 5151331 5399438 0.95 +bitset<35>/flip 1999404 1199273 1.67 * +bitset<35>/reset 9805285 1399313 7.01 * +bitset<35>/set() 2799279 1199248 2.33 * +bitset<35>/set(i) 2799246 1599241 1.75 * +bitset<35>/test 2999234 2999251 1.00 + +bitset<75>/>>=/1 7002045 6999333 1.00 STLPort is broken, neglects wraparound check. +bitset<75>/count 5999351 3002259 2.00 * +bitset<75>/flip 3599334 3599163 1.00 +bitset<75>/reset 9799344 3399218 2.88 * +bitset<75>/set() 3599232 3599062 1.00 +bitset<75>/set(i) 2799228 1599284 1.75 * +bitset<75>/test 2999250 2799339 1.07 + +deque/erase 127108651 115258113 1.10 +deque/insert 137727889 116552332 1.18 * +deque/iteration 7144182 6009899 1.19 * +deque/operator[] 34241222 20535039 1.67 * +deque/push_back 6585800 3932126 1.67 * +deque/push_front 6805865 3993513 1.70 * +deque/sort 395352323 348778188 1.13 * + +hash_map/clear 426640 447015 0.95 +hash_map/count 4359344 3883089 1.12 * +hash_map/erase pos 584392 458142 1.28 * +hash_map/erase range 221034 196078 1.13 * +hash_map/erase val 3539867 3790813 0.93 +hash_map/find 3966831 3811910 1.04 +hash_map/find_as/char* 11591612 4243710 2.73 * +hash_map/insert 16763887 16719194 1.00 +hash_map/iteration 909968 478609 1.90 * +hash_map/operator[] 4360041 4108313 1.06 + +hash_map/clear 302634 283722 1.07 +hash_map/count 916487 907426 
1.01 +hash_map/erase pos 388042 321385 1.21 * +hash_map/erase range 122680 116280 1.06 +hash_map/erase val 1710931 1729529 0.99 +hash_map/find 1089462 1346527 0.81 * +hash_map/insert 4560310 5072350 0.90 * +hash_map/iteration 960117 495354 1.94 * +hash_map/operator[] 1872830 1890595 0.99 + +heap (uint32_t[])/make_heap 3528418 3327257 1.06 +heap (uint32_t[])/pop_heap 63243859 61011853 1.04 +heap (uint32_t[])/push_heap 11602424 10045869 1.15 * +heap (uint32_t[])/sort_heap 52965362 48744729 1.09 + +heap (vector)/make_heap 13191456 13089711 1.01 +heap (vector)/pop_heap 148555656 144787742 1.03 +heap (vector)/push_heap 28696689 26618830 1.08 +heap (vector)/sort_heap 112473989 114018643 0.99 + +list/ctor(it) 80186731 74006287 1.08 +list/ctor(n) 6232311 6128007 1.02 +list/erase 344556374 212877808 1.62 * +list/find 39859075 14591347 2.73 * +list/insert 86935153 56138233 1.55 * +list/push_back 79569180 46700641 1.70 * +list/remove 785786758 324201016 2.42 * +list/reverse 45248186 24852759 1.82 * +list/size/1 219844 219496 1.00 +list/size/10 519563 519579 1.00 EASTL intentionally implements list::size as O(n). +list/size/100 4567194 101230266 0.05 * EASTL intentionally implements list::size as O(n). 
+list/splice 68321087 23601687 2.89 * + +map/clear 168011 180540 0.93 +map/count 4830439 5139287 0.94 +map/equal_range 8700090 6158531 1.41 * +map/erase/key 6696776 4617038 1.45 * +map/erase/pos 309273 333183 0.93 +map/erase/range 137419 136068 1.01 +map/find 4773498 4931352 0.97 +map/insert 9651877 9311699 1.04 +map/iteration 372946 416364 0.90 * +map/lower_bound 4784234 4915797 0.97 +map/operator[] 5040254 5183147 0.97 +map/upper_bound 4724292 4915984 0.96 + +set/clear 165300 173289 0.95 +set/count 4958654 4885086 1.02 +set/equal_range 8434134 5698681 1.48 * +set/erase range 145554 133960 1.09 +set/erase/pos 299914 324760 0.92 +set/erase/val 6506155 4335034 1.50 * +set/find 4866879 4556043 1.07 +set/insert 8340523 8957257 0.93 +set/iteration 294465 343442 0.86 * +set/lower_bound 4548095 4756498 0.96 +set/upper_bound 4559196 4521498 1.01 + +sort/q_sort/TestObject[] 7316766 7013894 1.04 +sort/q_sort/TestObject[]/sorted 1668439 1332885 1.25 * +sort/q_sort/vector 7331530 7017260 1.04 +sort/q_sort/vector/sorted 1601629 1247120 1.28 * +sort/q_sort/vector 7071643 7067869 1.00 +sort/q_sort/vector/sorted 2136390 1703799 1.25 * +sort/q_sort/vector 3292891 2943627 1.12 * +sort/q_sort/vector/sorted 653693 473612 1.38 * + +string/compare 356579259 432760228 0.82 * +string/erase/pos,n 3430422 3428645 1.00 +string/find/p,pos,n 229263402 225830975 1.02 +string/find_first_not_of/p,pos,n 187391 81404 2.30 * +string/find_first_of/p,pos,n 4411831 4413532 1.00 +string/find_last_of/p,pos,n 731655 726155 1.01 +string/insert/pos,p 3408628 3319726 1.03 +string/iteration 309993861 310333547 1.00 +string/operator[] 580839 579904 1.00 +string/push_back 3983338 2975553 1.34 * +string/replace/pos,n,p,n 4361095 4211504 1.04 +string/reserve 935141729 247010 100.00 * +string/rfind/p,pos,n 248956 223397 1.11 * +string/size 13311 13107 1.02 +string/swap 519129 579445 0.90 * + +string/compare 76695559 76828015 1.00 +string/erase/pos,n 1951566 1947282 1.00 +string/find/p,pos,n 185878944 185605039 
1.00 +string/find_first_not_of/p,pos,n 196877 81600 2.41 * +string/find_first_of/p,pos,n 4147685 4145356 1.00 +string/find_last_of/p,pos,n 605897 598222 1.01 +string/insert/pos,p 1781592 1768264 1.01 +string/iteration 921502 921272 1.00 +string/operator[] 361250 359873 1.00 +string/push_back 3363288 2530493 1.33 * +string/replace/pos,n,p,n 2682600 2633130 1.02 +string/reserve 672517501 78387 100.00 * +string/rfind/p,pos,n 226202 200013 1.13 * +string/size 11280 11109 1.02 +string/swap 519393 559759 0.93 + +vector/erase 55184856 55192217 1.00 +vector/insert 56764267 55682726 1.02 +vector/iteration 423122 424039 1.00 +vector/operator[] 1189397 860991 1.38 * +vector/push_back 5626609 4027317 1.40 * +vector/sort 49227036 49231362 1.00 +``` + +---------------------------------------------- +End of document diff --git a/doc/BestPractices.md b/doc/BestPractices.md new file mode 100644 index 0000000..6a0a571 --- /dev/null +++ b/doc/BestPractices.md @@ -0,0 +1,749 @@ +# EASTL Best Practices + +In this document we discuss best practices for using EASTL. The primary emphasis is on performance with a secondary emphasis on correctness and maintainability. Some best practices apply only to some situations, and these will be pointed out as we go along. In order to be easily digestible, we present these practices as a list of items in the tone of the Effective C++ series of books. + +## Summary + +The descriptions here are intentionally terse; this is to make them easier to visually scan. + +1. [Consider intrusive containers.](#consider-intrusive-containers) +2. [Consider fixed-size containers.](#consider-fixed-size-containers) +3. [Consider custom allocators.](#consider-custom-allocators) +4. [Consider hash tables instead of maps.](#consider-hash-tables-instead-of-maps) +5. [Consider a vector_map (a.k.a. sorted vector) for unchanging data.](#consider-a-vector_map-aka-sorted-vector-for-unchanging-data) +6. [Consider slist instead of list.](#consider-slist-instead-of-list) +7. 
[Avoid redundant end() and size() in loops.](#avoid-redundant-end-and-size-in-loops) +8. [Iterate containers instead of using operator\[\].](#iterate-containers-instead-of-using-operator) +9. [Learn to use the string class appropriately.](#learn-to-use-the-string-class-appropriately) +10. [Cache list size if you want size() to be O(1).](#cache-list-size-if-you-want-listsize-to-be-o1) +11. [Use empty() instead of size() when possible.](#use-empty-instead-of-size-when-possible) +12. [Know your container efficiencies.](#know-your-container-efficiencies) +13. [Use vector::reserve.](#use-vectorreserve) +14. [Use vector::set_capacity to trim memory usage.](#use-vectorset_capacity-to-trim-memory-usage) +15. [Use swap() instead of a manually implemented version.](#use-swap-instead-of-a-manually-implemented-version) +16. [Consider storing pointers instead of objects.](#consider-storing-pointers-instead-of-objects) +17. [Consider smart pointers instead of raw pointers.](#consider-smart-pointers-instead-of-raw-pointers) +18. [Use iterator pre-increment instead of post-increment.](#use-iterator-pre-increment-instead-of-post-increment) +19. [Make temporary references so the code can be traced/debugged.](#make-temporary-references-so-the-code-can-be-traceddebugged) +20. [Consider bitvector or bitset instead of vector\<bool\>.](#consider-bitvector-or-bitset-instead-of-vector) +21. [Vectors can be treated as contiguous memory.](#vectors-can-be-treated-as-contiguous-memory) +22. [Search hash_map\<string\> via find_as() instead of find().](#search-hash_map-via-find_as-instead-of-find) +23. [Take advantage of type_traits (e.g. EASTL_DECLARE_TRIVIAL_RELOCATE).](#take-advantage-of-type_traits-eg-eastl_declare_trivial_relocate) +24. [Name containers to track memory usage.](#name-containers-to-track-memory-usage) +25. [Learn the algorithms.](#learn-the-algorithms) +26. [Pass and return containers by reference instead of value.](#pass-and-return-containers-by-reference-instead-of-value) +27. 
[Consider using reset() for fast container teardown.](#consider-using-reset-for-fast-container-teardown) +28. [Consider using fixed_substring instead of copying strings.](#consider-using-fixed_substring-instead-of-copying-strings) +29. [Consider using vector::push_back(void).](#consider-using-vectorpush_backvoid) + +## Detail + +### Consider intrusive containers. + +Intrusive containers (such as intrusive_list) differ from regular containers (such as list) in that they use the stored objects to manage the linked list instead of using nodes allocated from a memory heap. The result is better usage of memory. Additionally intrusive_list objects can be removed from their list without knowing what list they belong to. To make an intrusive_list of Widgets, you have Widget inherit from intrusive_list_node or simply have mpPrev/mpNext member variables. + +To create an intrusive_list container, you can use the following code: + +```cpp +class Widget : public intrusive_list_node + +{ }; + + + +intrusive_list<Widget> widgetList; + +widgetList.push_back(someWidget); +``` + +### Consider fixed-size containers. + +Fixed-size containers (such as fixed_list) are variations of regular containers (such as list) in that they allocate from a fixed block of local memory instead of allocating from a generic heap. The result is better usage of memory due to reduced fragmentation, better cache behavior, and faster allocation/deallocation. The presence of fixed-size containers negates the most common complaint that people have about STL: that it fragments the heap or "allocates all over the place." + +EASTL fixed containers include: + +* fixed_list +* fixed_slist +* fixed_vector +* fixed_string +* fixed_map +* fixed_multimap +* fixed_set +* fixed_multiset +* fixed_hash_map +* fixed_hash_multimap +* fixed_hash_set +* fixed_hash_multiset + +To create a fixed_set, you can use the following code: + +```cpp +fixed_set<int, 250> intSet; // Create a set capable of holding 250 elements. 
+ +intSet.push_back(37); +``` + +### Consider custom allocators. + +While EASTL provides fixed-size containers in order to control container memory usage, EASTL lets you assign a custom allocator to any container. This lets you define your own memory pool. EASTL has a more flexible and powerful mechanism of doing this that standard STL, as EASTL understands object alignment requirements, allows for debug naming, allows for sharing allocators across containers, and allows dynamic allocator assignment. + +To create a list container that uses your custom allocator and uses block naming, you can use the following code: + +```cpp +list intList(pSomeAllocator, "graphics/intList"); + +intList.push_back(37); +``` + +### Consider hash tables instead of maps. + +Hash containers (such as hash_map) provide the same interface as associative containers (such as map) but have faster lookup and use less memory. The primary disadvantage relative to associative containers is that hash containers are not sorted. + +To make a hash_map (dictionary) of integers to strings, you can use the following code: +```cpp +hash_map stringTable; + +stringTable[37] = "hello"; +``` + +### Consider a vector_map (a.k.a. sorted vector) for unchanging data. + +You can improve speed, memory usage, and cache behavior by using a vector_map instead of a map (or vector_set instead of set, etc.). The primary disadvantage of vector_map is that insertions and removal of elements is O(n) instead of O(1). However, if your associative container is not going to be changing much or at all, you can benefit from using a vector_map. Consider calling reserve on the vector_map in order to set the desired capacity up front. + +To make a vector_set, you can use the following code: + +```cpp +vector_set intSet(16); // Create a vector_set with an initial capacity of 16. + +intSet.insert(37); +``` + +Note that you can use containers other than vector to implement vector_set. 
Here's how you do it with deque: + +```cpp +vector_set, EASTLAllocatorType, deque > intSet; + +intSet.insert(37); +``` + +### Consider slist instead of list. + +An slist is a singly-linked list; it is much like a list except that it can only be traversed in a forward direction and not a backward direction. The benefit is that each node is 4 bytes instead of 8 bytes. This is a small improvement, but if you don't need reverse iteration then it can be an improvement. There's also intrusive_slist as an option. + +To make an slist, you can use the following code: + +```cpp +slist intSlist; + +intSlist.push_front(37); +``` + +### Avoid redundant end() and size() in loops. + +Instead of writing code like this: + +```cpp +for(deque::iterator it = d.begin(); it != d.end(); ++it) + + ... +``` + +write code like this: + +```cpp +for(deque::iterator it = d.begin(), itEnd = d.end(); it != itEnd; ++it) + + ... +``` + +The latter avoids a function call and return of an object (which in deque's case happens to be more than just a pointer). The above only works when the container is unchanged or for containers that have a constant end value. But "constant end value" we mean containers which can be modified but end always remains the same. + +| Constant begin | Non-constant begin | Constant end | Non-constant end | +|------|------|------|------| +| array1 | string
vector
deque
intrusive_list
intrusive_slist
vector_map
vector_multimap
vector_set
vector_multiset
bit_vector
hash_map
hash_multimap
hash_set
hash_multiset
intrusive_hash_map
intrusive_hash_multimap
intrusive_hash_set
intrusive_hash_multiset | array
list
slist
intrusive_list
intrusive_slist
map
multimap
set
multiset
hash_map2
hash_multimap2
hash_set2
hash_multiset2
intrusive_hash_map
intrusive_hash_multimap
intrusive_hash_set
intrusive_hash_multiset | string
vector
deque
vector_map
vector_multimap
vector_set
vector_multiset
bit_vector | + +* 1 Arrays can be neither resized nor reallocated. +* 2 Constant end if the hashtable can't/won't re-hash. Non-constant if it can re-hash. + +### Iterate containers instead of using operator[]. + +It's faster to iterate random access containers via iterators than via operator[], though operator[] usage may look simpler. + +Instead of doing this: + +```cpp +for(unsigned i = 0, iEnd = intVector.size(); i != iEnd; ++i) + + intVector[i] = 37; +``` + +you can execute more efficiently by doing this: + +```cpp +for(vector::iterator it = intVector.begin(), itEnd = intVector.end(); it != itEnd; ++it) + + *it = 37; +``` + +### Learn to use the string class appropriately. + +Oddly enough, the most mis-used STL container is easily the string class. The tales of string abuse could rival the 1001 Arabian Nights. Most of the abuses involve doing things in a harder way than need be. In examining the historical mis-uses of string, it is clear that many of the problems stem from the user thinking in terms of C-style string operations instead of object-oriented strings. This explains why statements such as strlen(s.c_str()) are so common, whereas the user could just use s.length() instead and be both clearer and more efficient. + +Here we provide a table of actual collected examples of things done and how they could have been done instead. + +| What was written | What could have been written | +|------|------| +| `s = s.Left(i) + '+' + s.Right(s.length() - i - 1);` | `s[i] = '+';` | +| `string s(""); // This is the most commonly found misuse.` | `string s;` | +| `s = "";` | `s.clear();` | +| `s.c_str()[0] = 'u';` | `s[0] = 'u';` | +| `len = strlen(s.c_str());` | `len = s.length();` | +| `s = string("u");` | `s = "u";` | +| `puts(s + string("u"));` | `puts(s + "u");` | +| `string s(" ");`
`puts(s.c_str());` | `puts(" ");` | +| `s.sprintf("u");` | `s = "u";` | +| `char array[32];`
`sprintf(array, "%d", 10);`
`s = string(array);` | `s.sprintf("%d", 10);` | + +The chances are that if you want to do something with a string, there is a very basic way to do it. You don't want your code to appear in a future version of the above table. + +### Cache list size if you want list::size() to be O(1). + +EASTL's list, slist, intrusive_list, and intrusive_slist containers have a size() implementation which is O(n). That is, these containers don't keep a count (cache) of the current list size and when you call the size() function they iterate the list. This is by design and the reasoning behind it has been deeply debated and considered (and is discussed in the FAQ and the list header file). In summary, list doesn't cache its size because the only function that would benefit is the size function while many others would be negatively impacted and the memory footprint would be negatively impacted, yet list::size is not a very frequently called function in well-designed code. At the same time, nothing prevents the user from caching the size himself, though admittedly it adds some tedium and risk to the code writing process. + +Here's an example of caching the list size manually: + +```cpp +list intList; + + size_t n = 0; + + + + intList.push_back(37); + + ++n; + + intList.pop_front(); + + --n; +``` + +### Use empty() instead of size() when possible. + +All conventional containers have both an empty function and a size function. For all containers empty() executes with O(1) (constant time) efficiency. However, this is not so for size(), as some containers need to calculate the size and others need to do pointer subtraction (which may involve integer division) to find the size. + +### Know your container efficiencies. + +The above two practices lead us to this practice, which is a generalization of the above. We present a table of basic information for the conventional EASTL containers. The values are described at the bottom. 
+ +| Container | empty() efficiency | size() efficiency | operator[] efficiency | insert() efficiency | erase() efficiency | find() efficiency | sort efficiency | +|------|------|------|------|------|------|------|------| +| slist | 1 | O(n) | - | O(1) | O(1) | O(n) | O(n+) | +| list | 1 | n | - | 1 | 1 | n | n log(n) | +| intrusive_slist | 1 | n | - | 1 | 1 | 1 | n+ | +| intrusive_list | 1 | n | - | 1 | 1 | 1 | n log(n) | +| array | 1 | 1 | 1 | - | - | n | n log(n) | +| vector | 1 | 1a | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) | +| vector_set | 1 | 1a | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 | +| vector_multiset | 1 | 1a | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 | +| vector_map | 1 | 1a | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 | +| vector_multimap | 1 | 1a | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 | +| deque | 1 | 1a | 1 | 1 at begin or end, else n / 2 | 1 at begin or end, else n / 2 | n | n log(n) | +| bit_vector | 1 | 1a | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) | +| string, cow_string | 1 | 1a | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) | +| set | 1 | 1 | - | log(n) | log(n) | log(n) | 1 | +| multiset | 1 | 1 | - | log(n) | log(n) | log(n) | 1 | +| map | 1 | 1 | log(n) | log(n) | log(n) | log(n) | 1 | +| multimap | 1 | 1 | - | log(n) | log(n) | log(n) | 1 | +| hash_set | 1 | 1 | - | 1 | 1 | 1 | - | +| hash_multiset | 1 | 1 | - | 1 | 1 | 1 | - | +| hash_map | 1 | 1 | - | 1 | 1 | 1 | - | +| hash_multimap | 1 | 1 | - | 1 | 1 | 1 | - | +| intrusive_hash_set | 1 | 1 | - | 1 | 1 | 1 | - | +| intrusive_hash_multiset | 1 | 1 | - | 1 | 1 | 1 | - | +| intrusive_hash_map | 1 | 1 | - | 1 | 1 | 1 | - | +| intrusive_hash_multimap | 1 | 1 | - | 1 | 1 | 1 | - | + +Notes: + +* \- means that the operation does not exist. +* 1 means amortized constant time. Also known as O(1) +* n means time proportional to the container size. 
Also known as O(n) +* log(n) means time proportional to the natural logarithm of the container size. Also known as O(log(n)) +* n log(n) means time proportional to log(n) times the size of the container. Also known as O(n log(n)) +* n+ means that the time is at least n, and possibly higher. +* Inserting at the end of a vector may cause the vector to be resized; resizing a vector is O(n). However, the amortized time complexity for vector insertions at the end is constant. +* Sort assumes the usage of the best possible sort for a large container of random data. Some sort algorithms (e.g. quick_sort) require random access iterators and so the sorting of some containers requires a different sort algorithm. We do not include bucket or radix sorts, as they are always O(n). +* a vector, deque, string size is O(1) but involves pointer subtraction and thus integer division and so is not as efficient as containers that store the size directly. + +### Use vector::reserve. + +You can prevent vectors (and strings) from reallocating as you add items by specifying up front how many items you will be requiring. You can do this in the constructor or by calling the reserve function at any time. The capacity function returns the amount of space which is currently reserved. + +Here's how you could specify reserved capacity in a vector: + +```cpp +vector v(37); // Reserve space to hold up to 37 items. + + or + +vector v; // This empty construction causes to memory to be allocated or reserved. + + v.reserve(37); +``` + +The EASTL vector (and string) implementation looks like this: + +```cpp +template + + class vector { + + T* mpBegin; // Beginning of used element memory. + + T* mpEnd; // End of used element memory. + + T* mpCapacity; // End of storage capacity. Is >= mpEnd + + } +``` + +Another approach to being efficient with vector memory usage is to use fixed_vector. + +### Use vector::set_capacity to trim memory usage. 
+ +A commonly asked question about vectors and strings is, "How do I reduce the capacity of a vector?" The conventional solution for std STL is to use the somewhat non-obvious trick of using vector(v).swap(v). EASTL provides the same functionality via a member function called set_capacity() which is present in both the vector and string classes. + +An example of reducing a vector is the following: + +```cpp +vector v; + +... + + v.set_capacity(); +``` + +An example of resizing to zero and completely freeing the memory of a vector is the following: + +```cpp +vector v; + + ... + + v.set_capacity(0); +``` + +### Use swap() instead of a manually implemented version. + +The generic swap algorithm provides a basic version for any kind of object. However, each EASTL container provides a specialization of swap which is optimized for that container. For example, the list container implements swap by simply swapping the internal member pointers and not by moving individual elements. + +### Consider storing pointers instead of objects. + +There are times when storing pointers to objects is more efficient or useful than storing objects directly in containers. It can be more efficient to store pointers when the objects are big and the container may need to construct, copy, and destruct objects during sorting or resizing. Moving pointers is usually faster than moving objects. It can be useful to store pointers instead of objects when somebody else owns the objects or the objects are in another container. It might be useful for a Widget to be in a list and in a hash table at the same time. + +### Consider smart pointers instead of raw pointers. + +If you take the above recommendation and store objects as pointers instead of as objects, you may want to consider storing them as smart pointers instead of as regular pointers. This is particularly useful for when you want to delete the object when it is removed from the container. 
Smart pointers will automatically delete the pointed-to object when the smart pointer is destroyed. Otherwise, you will have to be careful about how you work with the list so that you don't generate memory leaks. Smart pointers implement a shared reference count on the stored pointer, as so any operation you do on a smart pointer container will do the right thing. Any pointer can be stored in a smart pointer, and custom new/delete mechanisms can work with smart pointers. The primary smart pointer is shared_ptr. + +Here is an example of creating and using a shared_ptr: + +```cpp +typedef shared_ptr WPtr; + + list wList; + + + + wList.push_back(WPtr(new Widget)); // The user may have operator new/delete overrides. + +wList.pop_back(); // Implicitly deletes the Widget. +``` + +Here is an example of creating and using a shared_ptr that uses a custom allocation and deallocation mechanism: + +```cpp +typedef shared_ptr WPtr; // WidgetDelete is a custom destroyer. + + list wList; + + + + wList.push_back(WPtr(WidgetCreate(Widget))); // WidgetCreate is a custom allocator. + +wList.pop_back(); // Implicitly calls WidgetDelete. +``` + +### Use iterator pre-increment instead of post-increment. + +Pre-increment (e.g. ++x) of iterators is better than post-increment (x++) when the latter is not specifically needed. It is common to find code that uses post-incrementing when it could instead use pre-incrementing; presumably this is due to post-increment looking a little better visually. The problem is that the latter constructs a temporary object before doing the increment. With built-in types such as pointers and integers, the compiler will recognize that the object is a trivial built-in type and that the temporary is not needed, but the compiler cannot do this for other types, even if the compiler sees that the temporary is not used; this is because the constructor may have important side effects and the compiler would be broken if it didn't construct the temporary object. 
+ +EASTL iterators are usually not trivial types and so it's best not to hope the compiler will do the best thing. Thus you should always play it safe an use pre-increment of iterators whenever post-increment is not required. + +Here is an example of using iterator pre-increment; for loops like this should always use pre-increment: + +```cpp +for(set::iterator it(intSet.begin()), itEnd(intSet.end()); it != itEnd; ++it) + + *it = 37; +``` + +### Make temporary references so the code can be traced/debugged. + +Users want to be able to inspect or modify variables which are referenced by iterators. While EASTL containers and iterators are designed to make this easier than other STL implementations, it makes things very easy if the code explicitly declares a reference to the iterated element. In addition to making the variable easier to debug, it also makes code easier to read and makes the debug (and possibly release) version of the application run more efficiently. + +Instead of doing this: + +```cpp +for(list::iterator it = wl.begin(), itEnd = wl.end(); it != itEnd; ++it) { + + (*it).x = 37; + + (*it).y = 38; + + (*it).z = 39; + + } +``` + +Consider doing this: + +```cpp +for(list::iterator it = wl.begin(), itEnd = wl.end(); it != itEnd; ++it) { + + Widget& w = *it; // The user can easily inspect or modify w here. + + w.x = 37; + + w.y = 38; + + w.z = 39; + + } +``` + +### Consider bitvector or bitset instead of vector. + +In EASTL, a vector of bool is exactly that. It intentionally does not attempt to make a specialization which implements a packed bit array. The bitvector class is specifically designed for this purpose. There are arguments either way, but if vector were allowed to be something other than an array of bool, it would go against user expectations and prevent users from making a true array of bool. There's a mechanism for specifically getting the bit packing, and it is bitvector. 
+ +Additionally there is bitset, which is not a conventional iterateable container but instead acts like bit flags. bitset may better suit your needs than bitvector if you need to do flag/bit operations instead of array operations. bitset does have an operator[], though. + +### Vectors can be treated as contiguous memory. + +EASTL vectors (and strings) guarantee that elements are present in a linear contiguous array. This means that you can use a vector as you would a C-style array by using the vector data() member function or by using &v[0]. + +To use a vector as a pointer to an array, you can use the following code: + +```cpp +struct Widget { + + uint32_t x; + + uint32_t y; + + }; + + + + vector v; + + + + quick_sort((uint64_t*)v.data(), (uint64_t*)(v.data() + v.size())); +``` + +### Search hash_map via find_as() instead of find(). + +EASTL hash tables offer a bonus function called find_as when lets you search a hash table by something other than the container type. This is particularly useful for hash tables of string objects that you want to search for by string literals (e.g. "hello") or char pointers. If you search for a string via the find function, your string literal will necessarily be converted to a temporary string object, which is inefficient. + +To use find_as, you can use the following code: + +```cpp +hash_map hashMap; + + hash_map::iterator it = hashMap.find_as("hello"); // Using default hash and compare. +``` + +### Take advantage of type_traits (e.g. EASTL_DECLARE_TRIVIAL_RELOCATE). + +EASTL includes a fairly serious type traits library that is on par with the one found in Boost but offers some additional performance-enhancing help as well. The type_traits library provides information about class *types*, as opposed to class instances. For example, the is_integral type trait tells if a type is one of int, short, long, char, uint64_t, etc. + +There are three primary uses of type traits: + +* Allowing for optimized operations on some data types. 
+* Allowing for different logic pathways based on data types. +* Allowing for compile-type assertions about data type expectations. + +Most of the type traits are automatically detected and implemented by the compiler. However, EASTL allows for the user to explicitly give the compiler hints about type traits that the compiler cannot know, via the EASTL_DECLARE declarations. If the user has a class that is relocatable (i.e. can safely use memcpy to copy values), the user can use the EASTL_DECLARE_TRIVIAL_RELOCATE declaration to tell the compiler that the class can be copied via memcpy. This will automatically significantly speed up some containers and algorithms that use that class. + +Here is an example of using type traits to tell if a value is a floating point value or not: + +```cpp +template + + DoSomething(T t) { + + assert(is_floating_point::value); + + } +``` + +Here is an example of declaring a class as relocatable and using it in a vector. + +```cpp +EASTL_DECLARE_TRIVIAL_RELOCATE(Widget); // Usually you put this at the Widget class declaration. + + vector wVector; + + wVector.erase(wVector.begin()); // This operation will be optimized via using memcpy. +``` + +The following is a full list of the currently recognized type traits. Most of these are implemented as of this writing, but if there is one that is missing, feel free to contact the maintainer of this library and request that it be completed. 
+ +* is_void +* is_integral +* is_floating_point +* is_arithmetic +* is_fundamental +* is_const +* is_volatile +* is_abstract +* is_signed +* is_unsigned +* is_array +* is_pointer +* is_reference +* is_member_object_pointer +* is_member_function_pointer +* is_member_pointer +* is_enum +* is_union +* is_class +* is_polymorphic +* is_function +* is_object +* is_scalar +* is_compound +* is_same +* is_convertible +* is_base_of +* is_empty +* is_pod +* is_aligned +* has_trivial_constructor +* has_trivial_copy +* has_trivial_assign +* has_trivial_destructor +* has_trivial_relocate1 +* has_nothrow_constructor +* has_nothrow_copy +* has_nothrow_assign +* has_virtual_destructor +* alignment_of +* rank +* extent +* +1 has_trivial_relocate is not found in Boost nor the C++ standard update proposal. However, it is very useful in allowing for the generation of optimized object moving operations. It is similar to the is_pod type trait, but goes further and allows non-pod classes to be categorized as relocatable. Such categorization is something that no compiler can do, as only the user can know if it is such. Thus EASTL_DECLARE_TRIVIAL_RELOCATE is provided to allow the user to give the compiler a hint. + +### Name containers to track memory usage. + +All EASTL containers which allocate memory have a built-in function called set_name and have a constructor argument that lets you specify the container name. This name is used in memory tracking and allows for the categorization and measurement of memory usage. You merely need to supply a name for your containers to use and it does the rest. + +Here is an example of creating a list and naming it "collision list": + +`list collisionList(allocator("collision list"));` + +or + +```cpp +list collisionList; + +collisionList.get_allocator().set_name("collision list"); +``` + +Note that EASTL containers do not copy the name contents but merely copy the name pointer. This is done for simplicity and efficiency. 
A user can get around this limitation by creating a persistently present string table. Additionally, the user can get around this by declaring static but non-const strings and modifying them at runtime. + +### Learn the algorithms. + +EASTL algorithms provide a variety of optimized implementations of fundamental algorithms. Many of the EASTL algorithms are the same as the STL algorithm set, though EASTL adds additional algorithms and additional optimizations not found in STL implementations such as Microsoft's. The copy algorithm, for example, will memcpy data types that have the has_trivial_relocate type trait instead of doing an element-by-element copy. + +The classifications we use here are not exactly the same as found in the C++ standard; they have been modified to be a little more intuitive. Not all the functions listed here may yet be available in EASTL as you read this. If you want some function then send a request to the maintainer. Detailed documentation for each algorithm is found in algorithm.h or the otherwise corresponding header file for the algorithm. 
+ +**Search** + +* find, find_if +* find_end +* find_first_of +* adjacent_find +* binary_search +* search, search_n +* lower_bound +* upper_bound +* equal_range + +**Sort** + +* is_sorted +* quick_sort +* insertion_sort +* shell_sort +* heap_sort +* merge_sort, merge_sort_buffer +* merge +* inplace_merge +* partial_sort +* stable_sort +* partial_sort_copy +* + +**Modifying** + +* fill, fill_n +* generate, generate_n +* random_shuffle +* swap +* iter_swap +* swap_ranges +* remove, remove_if +* remove_copy, remove_copy_if +* replace, replace_if +* replace_copy, replace_copy_if +* reverse +* reverse_copy +* rotate +* rotate_copy +* partition +* stable_partition +* transform +* next_permutation +* prev_permutation +* unique +* unique_copy + +**Non-Modifying** + +* for_each +* copy +* copy_backward +* count, count_if +* equal +* mismatch +* min +* max +* min_element +* max_element +* lexicographical_compare +* nth_element + +**Heap** + +* is_heap +* make_heap +* push_heap +* pop_heap +* change_heap +* sort_heap +* remove_heap + +**Set** + +* includes +* set_difference +* set_symmetric_difference +* set_intersection +* set_union + +### Pass and return containers by reference instead of value. + +If you aren't paying attention you might accidentally write code like this: + +```cpp +void DoSomething(list widgetList) { + + ... + +} +``` + +The problem with the above is that widgetList is passed by value and not by reference. Thus the a copy of the container is made and passed instead of a reference of the container being passed. This may seem obvious to some but this happens periodically and the compiler gives no warning and the code will often execute properly, but inefficiently. Of course there are some occasions where you really do want to pass values instead of references. + +### Consider using reset() for fast container teardown. + +EASTL containers have a reset function which unilaterally resets the container to a newly constructed state. 
The contents of the container are forgotten; no destructors are called and no memory is freed. This is a risky but power function for the purpose of implementing very fast temporary containers. There are numerous cases in high performance programming when you want to create a temporary container out of a scratch buffer area, use the container, and then just "vaporize" it, as it would be waste of time to go through the trouble of clearing the container and destroying and freeing the objects. Such functionality is often used with hash tables or maps and with a stack allocator (a.k.a. linear allocator). + +Here's an example of usage of the reset function and a PPMalloc-like StackAllocator: + +```cpp +pStackAllocator->push_bookmark(); + + hash_set, StackAllocator> wSet(pStackAllocator); + + + + wSet.reset(); + + pStackAllocator->pop_bookmark(); +``` + +### Consider using fixed_substring instead of copying strings. + +EASTL provides a fixed_substring class which uses a reference to a character segment instead of allocating its own string memory. This can be a more efficient way to work with strings under some circumstances. + +Here's an example of usage of fixed_substring: + +```cpp +basic_string str("hello world"); + + fixed_substring sub(str, 6, 5); // sub == "world" + +fixed_substring can refer to any character array and not just one that derives from a string object. +``` + +### Consider using vector::push_back(void). + +EASTL provides an alternative way to insert elements into containers that avoids copy construction and/or the creation of temporaries. Consider the following code: + +```cpp +vector widgetArray; + + widgetArray.push_back(Widget()); +``` + +The standard vector push_back function requires you to supply an object to copy from. This incurs the cost of the creation of a temporary and for some types of classes or situations this cost may be undesirable. 
It additionally requires that your contained class support copy-construction whereas you may not be able to support copy construction. As an alternative, EASTL provides a push_back(void) function which requires nothing to copy from but instead constructs the object in place in the container. So you can do this: + +```cpp +vector widgetArray; + + widgetArray.push_back(); + +widgetArray.back().x = 0; // Example of how to reference the new object. +``` + +Other containers with such copy-less functions include: + +```cpp +vector::push_back() + + deque::push_back() + + deque::push_front() + + list::push_back() + + list::push_front() + + slist::push_front() + + map::insert(const key_type& key) + + multimap::insert(const key_type& key) + + hash_map::insert(const key_type& key) + + hash_multimap::insert(const key_type& key) +``` + +Note that the map functions above allow you to insert a default value specified by key alone and not a value_type like with the other map insert functions. + +---------------------------------------------- +End of document diff --git a/doc/Bonus/tuple_vector_readme.md b/doc/Bonus/tuple_vector_readme.md new file mode 100644 index 0000000..f406ac5 --- /dev/null +++ b/doc/Bonus/tuple_vector_readme.md @@ -0,0 +1,416 @@ +## Introduction to tuple_vector + +`tuple_vector` is a data container that is designed to abstract and simplify +the handling of a "structure of arrays" layout of data in memory. In +particular, it mimics the interface of `vector`, including functionality to do +inserts, erases, push_backs, and random-access. It also provides a +`RandomAccessIterator` and corresponding functionality, making it compatible +with most STL (and STL-esque) algorithms such as ranged-for loops, `find_if`, +`remove_if`, or `sort`. 
+ +When used or applied properly, this container can improve performance of +some algorithms through cache-coherent data accesses or allowing for +sensible SIMD programming, while keeping the structure of a single +container, to permit a developer to continue to use existing algorithms in +STL and the like. + +## Review of "Structure of arrays" data layouts + +When trying to improve the performance of some code, it can sometimes be +desirable to transform how some data is stored in memory to be laid out not as +an "array of structures", but as a "structure of arrays". That is, instead of +storing a series of objects as a single contiguous chunk of memory, one or +more data members are instead stored as separate chunks of memory that are +handled and accessed in parallel to each other. + +This can be beneficial in two primary respects: + +1) To improve the cache coherency of the data accesses, e.g. by utilizing more +data that is loaded per cache line loaded from memory, and thereby reducing +the amount of time waiting on memory accesses from off-CPU memory. +This presentation from Mike Acton touches on this, among other things: +https://www.youtube.com/watch?v=rX0ItVEVjHc + +2) To allow the data to be more easily loaded and utilized by SIMD kernels, +by being able to load memory directly into a SIMD register. +This is touched on in this presentation from Andreas Fredriksson for writing +code with SIMD intrinsics: +http://www.gdcvault.com/play/1022249/SIMD-at-Insomniac-Games-How +...and as well in this guide for writing performant ISPC kernels: +https://ispc.github.io/perfguide.html + +## How TupleVecImpl works + +`tuple_vector` inherits from `TupleVecImpl`, which +provides the bulk of the functionality for those data containers. It manages +the memory allocated, marshals data members to each array of memory, generates +the necessary iterators, and so on. 
+ +When a `tuple_vector` is declared, it is alongside a list of types, or "tuple +elements", indicating what data to store in the container, similar to how `tuple` +operates. `TupleVecImpl` uses this list of tuple elements to then inherit from a series of +`TupleVecLeaf` structures, which each have their own pointer to an array of their +corresponding type in memory. When dereferencing the container, either to fetch a +tuple of references or just fetching pointers to the memory, it is these pointers +that are utilized or fetched. + +While each `TupleVecLeaf` contains a pointer to its own block of memory, they +are not individual memory allocations. When `TupleVecImpl` needs to grow its +capacity, it calculates the total size needed for a single allocation, taking +into account the number of objects for the container, the size of each tuple +element's type, and the alignment requirements for each type. Pointers into the +allocation for each tuple element are also determined at the same time, which +are passed to each `TupleVecLeaf`. From there, many of the interactions with +`TupleVecImpl`, to modify or access members of the container, then reference +each `TupleVecLeaf`'s data pointer in series, using parameter packs to repeat +each operation for each parent `TupleVecLeaf`. + +## How tuple_vector's iterator works + +`TupleVecImpl` provides a definition to an iterator type, `TupleVecIter`. +As mentioned above, `TupleVecIter` provides all of the functionality to operate +as a `RandomAccessIterator`. When it is dereferenced, it provides a tuple of +references, similar to `at()` or `operator[]` on `TupleVecImpl`, as opposed to +a reference of some other type. As well, a customization of `move_iterator` for +`TupleVecIter` is provided, which will return a tuple of rvalue-references. 
+ +The way that `TupleVecIter` operates internally is to track an index into the +container, as well as a copy of all of the `TupleVecImpl`'s `TupleVecLeaf` +pointers at the time of the iterator's construction. As a result, modifying the +iterator involves just changing the index, and dereferencing the iterator into +the tuple of references involves dereferencing each pointer with an offset +specified by that index. + +Of the various ways of handling the multitude of references, this tended to +provide the best code-generation. For example, having a tuple of pointers that +are collectively modified with each iterator modification resulted in the compiler +not being able to accurately determine which pointers were relevant to the final +output of some function, creating many redundant operations. Similarly, having +the iterator refer to the source `TupleVecImpl` for the series of pointers +often resulted in extra, unnecessary, data hops to the `TupleVecImpl` to repeatedly +fetch data that was not practically mutable, but theoretically mutable. While this +solution is the heaviest in terms of storage, the resulted assembly tends to be +competitive with traditional structure-of-arrays setups. + +## How to work with tuple_vector, and where to use it + +Put simply, `tuple_vector` can be used as a replacement for `vector`. For example, +instead of declaring a structure and vector as: + +``` +struct Entity +{ + bool active; + float lifetime; + Vec3 position; +} +vector entityVec; +``` + +...the `tuple_vector` equivalent of this can be defined as: + +``` +tuple_vector entityVec; +``` + +In terms of how `tuple_vector` is modified and accessed, it has a similar +featureset as `vector`, except where `vector` would accept or return a single +value, it instead accepts or returns a tuple of values or unstructured series +of equivalent arguments. 
+ +For example, the following functions can be used to access the data, either by +fetching a tuple of references to a series of specific values, or the data +pointers to the tuple elements: + +``` +tuple operator[](size_type) +tuple at(size_type) +tuple iterator::operator*() +tuple move_iterator::operator*() +tuple data() + +// extract the Ith tuple element pointer from the tuple_vector +template +T* get() +// e.g. bool* get<0>(), float* get<1>(), and Vec3* get<2>() + +// extract the tuple element pointer of type T from the tuple_vector +// note that this function can only be used if there is one instance +// of type T in the tuple_vector's elements +template +T* get() +// e.g. bool* get(), float* get(), and Vec3* get() +``` + +And `push_back(...)` has the following overloads, accepting either values or tuples as needed. + +``` +tuple push_back() +push_back(const bool&, const float&, const Vec3&) +push_back(tuple) +push_back(bool&&, float&&, Vec3&&) +push_back(tuple) +``` +...and so on, and so forth, for others like the constructor, `insert(...)`, +`emplace(...)`, `emplace_back(...)`, `assign(...)`, and `resize(...)`. + +As well, note that the tuple types that are accepted or returned for +`tuple_vector` have typedefs available in the case of not wanting to use +automatic type deduction: +``` +typedef eastl::tuple value_tuple; +typedef eastl::tuple reference_tuple; +typedef eastl::tuple const_reference_tuple; +typedef eastl::tuple ptr_tuple; +typedef eastl::tuple const_ptr_tuple; +typedef eastl::tuple rvalue_tuple; +``` +With this, and the fact that the iterator type satisfies +the `RandomAccessIterator` requirements, it is possible to use `tuple_vector` in +most ways and manners that `vector` was previously used, with few structural +differences. + +However, even if not using it strictly as a replacement for `vector`, it is +still useful as a tool for simplifying management of a traditional structure of +arrays. 
That is, it is possible to use `tuple_vector` to just perform a single +large memory allocation instead of a series of smaller memory allocations, +by sizing the `tuple_vector` as needed, fetching the necessary pointers with +`data()` or `get<...>()`, and carrying on normally. + +One example where this can be utilized is with ISPC integration. Given the +following ISPC function definition: + + export void simple(uniform float vin[], uniform float vfactors[], uniform float vout[], uniform int size); + +...which generates the following function prototype for C/C++ usage: + + extern void simple(float* vin, float* vfactors, float* vout, int32_t size); + +...this can be utilized with some raw float arrays: +``` +float* vin = new float[NumElements]; +float* vfactors = new float[NumElements]; +float* vout = new float[NumElements]; + +// Initialize input buffer +for (int i = 0; i < NumElements; ++i) +{ + vin[i] = (float)i; + vfactors[i] = (float)i / 2.0f; +} + +// Call simple() function from simple.ispc file +simple(vin, vfactors, vout, NumElements); + +delete vin; +delete vfactors; +delete vout; +``` +or, with `tuple_vector`: + +``` +tuple_vector simpleData(NumElements); +float* vin = simpleData.get<0>(); +float* vfactors = simpleData.get<1>(); +float* vout = simpleData.get<2>(); + +// Initialize input buffer +for (int i = 0; i < NumElements; ++i) +{ + vin[i] = (float)i; + vfactors[i] = (float)i / 2.0f; +} + +// Call simple() function from simple.ispc file +simple(vin, vfactors, vout, NumElements); +``` + +`simpleData` here only has a single memory allocation during its construction, +instead of the three in the first example, and also automatically releases the +memory when it falls out of scope. + +It is possible to also skip a memory allocation entirely, in some circumstances. +EASTL provides "fixed" counterparts of many data containers which allows for a +data container to have an inlined buffer of memory. 
For example, +`eastl::vector` has the following counterpart: + + eastl::fixed_vector + +This buffer allows for enough space to hold a `nodeCount` number of `T` objects, +skipping any memory allocation at all, until the requested size becomes +greater than `nodeCount` - assuming `enableOverflow` is True. + +There is a similar counterpart to `eastl::tuple_vector` available as well: + + eastl::fixed_tuple_vector + +This does the similar legwork in creating an inlined buffer, and all of the +functionality of `tuple_vector` otherwise is supported. Note the slight +difference in declaration, though: `nodeCount` and `enableOverflow` are defined +first, and `enableOverflow` is not a default parameter. This change arises out +of restrictions surrounding variadic templates, in that they must be declared +last, and cannot be mixed with default template parameters. + +Lastly, `eastl::vector` and other EASTL data containers support custom Memory Allocator +types, through their template parameters. For example, `eastl::vector`'s full declaration +is actually: + + eastl::vector + +However, because such a default template parameter cannot be used with +variadic templates, a separate type for `tuple_vector` is required for such a +definition: + + eastl::tuple_vector_alloc + +Note that `tuple_vector` uses EASTLAllocatorType as the allocator. + +## Performance comparisons/discussion + +A small benchmark suite for `tuple_vector` is included when running the +EASTLBenchmarks project. It provides the following output on a Core i7 3770k +(Skylake) at 3.5GHz, with DDR3-1600 memory. + +The `tuple_vector` benchmark cases compare total execution time of similar +algorithms run against `eastl::tuple_vector` and `std::vector`, such as +erasing or inserting elements, iterating through the array to find a specific +element, sum all of the elements together via operator[] access, or just +running `eastl::sort` on the data containers. 
More information about the +EASTLBenchmarks suite can be found in EASTL/doc/EASTL Benchmarks.html + +Benchmark | STD execution time | EASTL execution time | Ratio +--------- | -------- | ---------- | ----- +`tuple_vector/erase ` | 1.7 ms | 1.7 ms | 1.00 +`tuple_vector/erase ` | 104.6 ms | 106.3 ms | 0.98 +`tuple_vector/reallocate ` | 1.3 ms | 1.7 ms | 0.77 - + | | | +`tuple_vector/erase ` | 3.4 ms | 3.5 ms | 0.98 +`tuple_vector/insert ` | 3.4 ms | 3.4 ms | 0.99 +`tuple_vector/iteration ` | 56.3 us | 81.4 us | 0.69 - +`tuple_vector/operator[] ` | 67.4 us | 61.8 us | 1.09 +`tuple_vector/push_back ` | 1.3 ms | 818.3 us | 1.53 + +`tuple_vector/sort ` | 5.8 ms | 7.3 ms | 0.80 + | | | +`tuple_vector/erase ` | 34.7 ms | 32.9 ms | 1.05 +`tuple_vector/insert ` | 41.0 ms | 32.6 ms | 1.26 +`tuple_vector/iteration ` | 247.1 us | 80.5 us | 3.07 + +`tuple_vector/operator[]` | 695.7 us | 81.1 us | 8.58 + +`tuple_vector/push_back ` | 10.0 ms | 6.0 ms | 1.67 + +`tuple_vector/sort ` | 8.2 ms | 10.1 ms | 0.81 + | | | +`vector/erase ` | 1.3 ms | 1.2 ms | 1.05 +`vector/erase ` | 104.4 ms | 109.4 ms | 0.95 +`vector/reallocate ` | 1.5 ms | 1.5 ms | 0.95 + | | | +`vector/erase ` | 4.3 ms | 3.6 ms | 1.20 +`vector/insert ` | 4.8 ms | 4.8 ms | 1.01 +`vector/iteration ` | 71.5 us | 77.3 us | 0.92 +`vector/operator[] ` | 90.7 us | 87.2 us | 1.04 +`vector/push_back ` | 1.6 ms | 1.2 ms | 1.38 + +`vector/sort ` | 7.7 ms | 8.2 ms | 0.93 + +First off, `tuple_vector`'s performance versus `std::vector` is +comparable, as expected, as the `tuple_vector`'s management for one type +becomes very similar to just a regular vector. The major notable exception is +the iteration case, which runs `eastl::find_if`. This +performance differences is a consequence of the iterator design, and how +it works with indices, not a direct pointer, so the code generation suffers slightly +in this compute-bound scenario. 
This is worth noting as a demonstration of a +case where falling back to pointer-based iteration by fetching the `begin` and +`end` pointers of that tuple element may be preferable, instead of using the +iterator constructs. + +The set of `tuple_vector` tests are more interesting. +This is a comparison between a single `std::vector` with a +structure containing a `uint64` and 56 bytes of padding, and a `tuple_vector` with +two elements: one for `uint64` and one for 56 bytes of padding. The erase, +insert, push_back, and sort cases all perform at a similar relative rate as +they did in the `tuple_vector` tests - demonstrating that operations +that have to touch all of elements do not have a significant change in +performance. + +However, iteration and operator[] are very different, because +those only access the `uint64` member of both `vector` and `tuple_vector` to run +some operation. The iteration test now runs 3x faster whereas before it ran +0.7x as fast, and operator[] runs 8.5x faster, instead of 1.1x. This +demonstrates some of the utility of `tuple_vector`, in that these algorithms end +up being limited by the CPU's compute capabilities, as opposed to being +limited by how fast they can load memory in from DRAM. + +In a series of other tests, generally speaking, `tuple_vector` tends to perform +on par with manual management of multiple arrays in many algorithms and +operations, often even generating the same code. It should be noted that +significant degrees of inlining and optimization are required to get the most out +of `tuple_vector`. Compared to accessing a series of arrays or vectors, +`tuple_vector` does perform a multitude of extra trivial function calls internally +in order to manage the various elements, or interact with `eastl::tuple` through +its interface, so running in debug configurations can run significantly slower +in some cases, e.g. sometimes running at 0.2x the speed compared to vector. 
+ +## The problem of referencing tuple elements + +This will be experienced shortly after using `tuple_vector` in most capacities, +but it should be noted that the most significant drawback is that there is no +way to **symbolically** reference each tuple element of the `tuple_vector` - much +in the same way as `tuple`. For example, if translating a struct such as... + +``` +struct Entity +{ + float x, y, z; + float lifetime; +}; +``` +...to `tuple_vector`, it will exist as: + +``` +tuple_vector entityVec; +``` + +...and can only be accessed in a manner like `entityVec.get<3>()` to refer to +the `lifetime` member. With existing tools, the only good alternatives are to +encapsulate each float as a separate struct to give it unique typenames... + +``` +struct entityX { float val; }; +struct entityY { float val; }; +struct entityZ { float val; }; +struct entityLifetime { float val; }; + +tuple_vector entityVec; +``` +...and then access each tuple element by typename like +`entityVec.get()`; or, creating an enumerated value to replace +the indices... + +``` +enum EntityTypeEnum +{ + entityX = 0, + entityY = 1, + entityZ = 2, + entityLifetime = 3 +}; + +tuple_vector entityVec; +``` + +...and then access each tuple element by the enumerated value: +`entityVec.get()`. + +Either way, there is a fairly significant maintenance and readability issue +around this. This is arguably more severe than with `tuple` on its own +because that is generally not intended for structures with long lifetime. + +Ideally, if the language could be mutated to accommodate such a thing, it would +be good to have some combination of typenames and symbolic names in the +declaration, e.g. something like + +``` +tuple_vector entityVec; +``` +and be able to reference the tuple elements not just by typename or index, but +through their corresponding symbol, like `entityVec.get()`. 
Or, it may +be interesting if the necessary `get` functions could be even automatically +generated through a reflection system, e.g. `entityVec.get_lifetime()`. +All of this remains a pipe dream for now. diff --git a/doc/CMake/EASTL_Project_Integration.md b/doc/CMake/EASTL_Project_Integration.md new file mode 100644 index 0000000..4b014f9 --- /dev/null +++ b/doc/CMake/EASTL_Project_Integration.md @@ -0,0 +1,93 @@ +## Using EASTL in your own projects + +This page describes the steps needed to use EASTL in your own projects + +## Setting up your project + +### Using CMake + +Add to your CMakeLists.txt: + +```cmake +set(EASTL_ROOT_DIR C:/EASTL) +include_directories (${EASTL_ROOT_DIR}/include) +include_directories (${EASTL_ROOT_DIR}/test/packages/EAAssert/include) +include_directories (${EASTL_ROOT_DIR}/test/packages/EABase/include/Common) +include_directories (${EASTL_ROOT_DIR}/test/packages/EAMain/include) +include_directories (${EASTL_ROOT_DIR}/test/packages/EAStdC/include) +include_directories (${EASTL_ROOT_DIR}/test/packages/EATest/include) +include_directories (${EASTL_ROOT_DIR}/test/packages/EAThread/include) +set(EASTL_LIBRARY debug ${EASTL_ROOT_DIR}/build/Debug/EASTL.lib optimized ${EASTL_ROOT_DIR}/build/Release/EASTL.lib) +add_custom_target(NatVis SOURCES ${EASTL_ROOT_DIR}/doc/EASTL.natvis) +``` + +And then add the library into the linker + +``` +target_link_libraries(... ${EASTL_LIBRARY}) +``` + +### Using Visual Studio + +Using Visual Studio projecs directly you will need do the following steps: +- Add the include paths +- Add the library path +- Add the library dependency +- Add natvis (optional) + +> Note that in the examples below ${EASTL_ROOT_DIR} is the folder in which you stored EASTL. You could create an environment variable for this. 
+ +#### Add the include paths + +Add the following paths to your C/C++ -> General -> Additional include directories: +``` +${EASTL_ROOT_DIR}/include +${EASTL_ROOT_DIR}/test/packages/EAAssert/include +${EASTL_ROOT_DIR}/test/packages/EABase/include/Common +${EASTL_ROOT_DIR}/test/packages/EAMain/include) +${EASTL_ROOT_DIR}/test/packages/EAStdC/include) +${EASTL_ROOT_DIR}/test/packages/EATest/include) +${EASTL_ROOT_DIR}/test/packages/EAThread/include) +``` + +#### Add the library path + +Add the following library path to your Linker -> General -> Additional Library Directories: +``` +${EASTL_ROOT_DIR}/build/$(Configuration) +``` + +#### Add the library dependency + +Either add the following library to your Linker -> Input -> Additional Dependencies +``` +EASTL.lib +``` +Or in code use the following: +``` +#pragma comment(lib, "EASTL.lib") +``` + +#### Add natvis (optional) + +> Adding the natvis file to your project allows the debugger to use custom visualizers for the eastl data types. This greatly enhances the debugging experience. + +Add the natvis file anywhere in your solution: + +``` +Right-click your project: Add -> Existing item and then add the following file: +${EASTL_ROOT_DIR}/doc/EASTL.natvis +``` + +## Setting up your code + +### Overloading operator new[] + +EASTL requires you to have an overload for the operator new[], here is an example that just forwards to global new[]: + +```c +void* __cdecl operator new[](size_t size, const char* name, int flags, unsigned debugFlags, const char* file, int line) +{ + return new uint8_t[size]; +} +``` diff --git a/doc/Design.md b/doc/Design.md new file mode 100644 index 0000000..bda7378 --- /dev/null +++ b/doc/Design.md @@ -0,0 +1,374 @@ +# EASTL Design + +## Introduction + +EASTL (EA Standard Template Library) is designed to be a template library which encompasses and extends the functionality of standard C++ STL while improving it in various ways useful to game development. 
Much of EASTL's design is identical to standard STL, as the large majority of the STL is well-designed for many uses. The primary areas where EASTL deviates from standard STL implementations are essentially the following: + +* EASTL has a simplified and more flexible custom allocation scheme. +* EASTL has significantly easier to read code. +* EASTL has extension containers and algorithms. +* EASTL has optimizations designed for game development. + +Of the above items, the only one which is an incompatible difference with STL is the case of memory allocation. The method for defining a custom allocator for EASTL is slightly different than that of standard STL, though they are 90% similar. The 10% difference, however, is what makes EASTL generally easier and more powerful to work with than standard STL. Containers without custom allocators act identically between EASTL and standard STL. + +## Motivations + +Our motifications for making EASTL drive the design of EASTL. As identified in the EASTL RFC (Request for Comment), the primary reasons for implementing a custom version of the STL are: + +* Some STL implementations (especially Microsoft STL) have inferior performance characteristics that make them unsuitable for game development. EASTL is faster than all existing STL implementations. +* The STL is sometimes hard to debug, as most STL implementations use cryptic variable names and unusual data structures. +* STL allocators are sometimes painful to work with, as they have many requirements and cannot be modified once bound to a container. +* The STL includes excess functionality that can lead to larger code than desirable. It's not very easy to tell programmers they shouldn't use that functionality. +* The STL is implemented with very deep function calls. This results is unacceptable performance in non-optimized builds and sometimes in optimized builds as well. +* The STL doesn't support alignment of contained objects. 
+* STL containers won't let you insert an entry into a container without supplying an entry to copy from. This can be inefficient. +* Useful STL extensions (e.g. slist, hash_map, shared_ptr) found in existing STL implementations such as STLPort are not portable because they don't exist in other versions of STL or aren't consistent between STL versions. + +* The STL lacks useful extensions that game programmers find useful (e.g. intrusive_list) but which could be best optimized in a portable STL environment. +* The STL has specifications that limit our ability to use it efficiently. For example, STL vectors are not guaranteed to use contiguous memory and so cannot be safely used as an array. +* The STL puts an emphasis on correctness before performance, whereas sometimes you can get significant performance gains by making things less academcially pure. +* STL containers have private implementations that don't allow you to work with their data in a portable way, yet sometimes this is an important thing to be able to do (e.g. node pools). +* All existing versions of STL allocate memory in empty versions of at least some of their containers. This is not ideal and prevents optimizations such as container memory resets that can greatly increase performance in some situations. +* The STL is slow to compile, as most modern STL implementations are very large. + +* There are legal issues that make it hard for us to freely use portable STL implementations such as STLPort. +* We have no say in the design and implementation of the STL and so are unable to change it to work for our needs. + +## Prime Directives + +The implementation of EASTL is guided foremost by the following directives which are listed in order of importance. + +1. Efficiency (speed and memory usage) +2. Correctness +3. Portability +4. Readability + +Note that unlike commercial STL implementations which must put correctness above all, we put a higher value on efficiency. 
As a result, some functionality may have some usage limitation that is not present in other similar systems but which allows for more efficient operation, especially on the platforms of significance to us. + +Portability is significant, but not critical. Yes, EASTL must compile and run on all platforms that we will ship games for. But we don't take that to mean under all compilers that could be conceivably used for such platforms. For example, Microsoft VC6 can be used to compile Windows programs, but VC6's C++ support is too weak for EASTL and so you simply cannot use EASTL under VC6. + +Readability is something that EASTL achieves better than many other templated libraries, particularly Microsoft STL and STLPort. We make every attempt to make EASTL code clean and sensible. Sometimes our need to provide optimizations (particularly related to type_traits and iterator types) results in less simple code, but efficiency happens to be our prime directive and so it overrides all other considerations. + +## Thread Safety + +It's not simple enough to simply say that EASTL is thread-safe or thread-unsafe. However, we can say that with respect to thread safety that EASTL does the right thing. + +Individual EASTL containers are not thread-safe. That is, access to an instance of a container from multiple threads at the same time is unsafe if any of those accesses are modifying operations. A given container can be read from multiple threads simultaneously as well as any other standalone data structure. If a user wants to be able to have modifying access an instance of a container from multiple threads, it is up to the user to ensure that proper thread synchronization occurs. This usually means using a mutex. + +EASTL classes other than containers are the same as containers with respect to thread safety. EASTL functions (e.g. algorithms) are inherently thread-safe as they have no instance data and operate entirely on the stack. 
As of this writing, no EASTL function allocates memory and thus doesn't bring thread safety issues via that means. + +The user may well need to be concerned about thread safety with respect to memory allocation. If the user modifies containers from multiple threads, then allocators are going to be accessed from multiple threads. If an allocator is shared across multiple container instances (of the same type of container or not), then mutexes (as discussed above) the user uses to protect access to indivudual instances will not suffice to provide thread safety for allocators used across multiple instances. The conventional solution here is to use a mutex within the allocator if it is exected to be used by multiple threads. + +EASTL uses neither static nor global variables and thus there are no inter-instance dependencies that would make thread safety difficult for the user to implement. + +## Container Design + +All EASTL containers follow a set of consistent conventions. Here we define the prototypical container which has the minimal functionality that all (non-adapter) containers must have. Some containers (e.g. stack) are explicitly adapter containers and thus wrap or inherit the properties of the wrapped container in a way that is implementation specific. 
```cpp
+template <typename T, typename Allocator = EASTLAllocatorType>
+class container
+{
+public:
+    typedef container<T, Allocator>          this_type;
+    typedef T                                value_type;
+    typedef T*                               pointer;
+    typedef const T*                         const_pointer;
+    typedef T&                               reference;
+    typedef const T&                         const_reference;
+    typedef ptrdiff_t                        difference_type;
+    typedef impl_defined                     size_type;
+    typedef impl-defined                     iterator;
+    typedef impl-defined                     const_iterator;
+    typedef reverse_iterator<iterator>       reverse_iterator;
+    typedef reverse_iterator<const_iterator> reverse_const_iterator;
+    typedef Allocator                        allocator_type;
+
+public:
+    container(const allocator_type& allocator = allocator_type());
+    container(const this_type& x);
+
+    this_type& operator=(this_type& x);
+    void swap(this_type& x);
+    void reset();
+
+    allocator_type& get_allocator();
+    void set_allocator(allocator_type& allocator);
+
+    iterator begin();
+    const_iterator begin() const;
+    iterator end();
+    const_iterator end() const;
+
+    bool validate() const;
+    int validate_iterator(const_iterator i) const;
+
+protected:
+    allocator_type mAllocator;
+};
+
+template <typename T, typename Allocator>
+bool operator==(const container<T, Allocator>& a, const container<T, Allocator>& b);
+
+template <typename T, typename Allocator>
+bool operator!=(const container<T, Allocator>& a, const container<T, Allocator>& b);
+```
+* The reset function is a special extension function which unilaterally resets the container to an empty state without freeing the memory of the contained objects. This is useful for very quickly tearing down a container built into scratch memory. No memory is allocated by reset, and the container has no allocatedmemory after the reset is executed. +* The validate and validate_iterator functions provide explicit container and iterator validation. EASTL provides an option to do implicit automatic iterator and container validation, but full validation (which can be potentially extensive) has too much of a performance cost to execute implicitly, even in a debug build. So EASTL provides these explicit functions which can be called by the user at the appropriate time and in optimized builds as well as debug builds. + +## Allocator Design + +The most significant difference between EASTL and standard C++ STL is that standard STL containers are templated on an allocator class with the interface defined in std::allocator. 
std::allocator is defined in the C++ standard as this: + +```cpp +// Standard C++ allocator + + + + template + +class allocator + +{ + +public: + + typedef size_t size_type; + + typedef ptrdiff_t difference_type; + + typedef T* pointer; + + typedef const T* const_pointer; + + typedef T& + reference; + + typedef const +T& const_reference; + + typedef T value_type; + + + + template + + struct rebind { typedef allocator other; }; + + + + allocator() throw(); + + allocator(const allocator&) throw(); + + template + + allocator(const allocator&) throw(); + + + + ~allocator() +throw(); + + + + pointer + address(reference x) const; + + const_pointer address(const_reference x) +const; + + pointer allocate(size_type, typename +allocator::const_pointer hint = 0); + + void deallocate(pointer p, +size_type n); + + size_type max_size() const +throw(); + + void construct(pointer p, +const T& val); + + void destroy(pointer +p); + +}; +``` + +Each STL container needs to have an allocator templated on container type T associated with it. The problem with this is that allocators for containers are defined at the class level and not the instance level. This makes it painful to define custom allocators for containers and adds to code bloat. Also, it turns out that the containers don't actually use allocator but instead use allocator\::rebind\::other. Lastly, you cannot access this allocator after the container is constructed. There are some good academic reasons why the C++ standard works this way, but it results in a lot of unnecessary pain and makes concepts like memory tracking much harder to implement. + +What EASTL does is use a more familiar memory allocation pattern whereby there is only one allocator class interface and it is used by all containers. Additionally EASTL containers let you access their allocators and query them, name them, change them, etc. + +EASTL has chosen to make allocators not be copied between containers during container swap and assign operations. 
This means that if container A swaps its contents with container B, both containers retain their original allocators. Similarly, assigning container A to container B causes container B to retain its original allocator. Containers that are equivalent should report so via operator==; EASTL will do a smart swap if allocators are equal, and a brute-force swap otherwise. + +```cpp +// EASTL allocator + +class allocator +{ +public: +    allocator(const char* pName = NULL); + +    void* allocate(size_t n, int flags = 0); +    void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0); +    void  deallocate(void* p, size_t n); + +    const char* get_name() const; +    void        set_name(const char* pName); +}; + +allocator* GetDefaultAllocator(); +``` + +## Fixed Size Container Design + +EASTL supplies a set of fixed-size containers that the user can use, though the user can also implement their own versions. So in addition to class list there is class fixed_list. The fixed_list class implements a linked list via a fixed-size pool of contiguous memory which has no space overhead (unlike with a regular heap), doesn't cause fragmentation, and allocates very quickly. + +EASTL implements fixed containers via subclasses of regular containers which set the regular container's allocator to point to themselves. Thus the implementation for fixed_list is very tiny and consists of little more than constructor and allocator functions. This design has some advantages but has one small disadvantage. The primary advantages are primarily that code bloat is reduced and that the implementation is simple and the user can easily extend it. The primary disadvantage is that the parent list class ends up with a pointer to itself and thus has 4 bytes that could arguably be saved if system was designed differently. That different design would be to make the list class have a policy template parameter which specifies that it is a fixed pool container. 
EASTL chose not to follow the policy design because it would complicate the implementation, make it harder for the user to extend the container, and would potentially waste more memory due to code bloat than it would save due to the 4 byte savings it achieves in container instances. + +## Algorithm Design + +EASTL algorithms very much follow the philosophy of standard C++ algorithms, as this philosophy is sound and efficient. One of the primary aspects of algorithms is that they work on iterators and not containers. You will note for example that the find algorithm takes a first and last iterator as arguments and not a container. This has two primary benefits: it allows the user to specify a subrange of the container to search within and it allows the user to apply the find algorithm to sequences that aren't containers (e.g. a C array). + +EASTL algorithms are optimized at least as well as the best STL algorithms found in commercial libraries and are significantly optimized over the algorithms that come with the first-party STLs that come with compilers. Most significantly, EASTL algorithms take advantage of type traits of contained classes and take advantage of iterator types to optimize code generation. For example, if you resize an array of integers (or other "pod" type), EASTL will detect that this can be done with a memcpy instead of a slow object-by-object move as would Micrsoft STL. + +The optimizations found in EASTL algorithms and the supporting code in EASTL type traits consistts of some fairly tricky advanced C++ and while it is fairly easy to read, it requires a C++ expert (language lawyer, really) to implement confidently. The result of this is that it takes more effort to develop and maintain EASTL than it would to maintain a simpler library. However, the performance advantages have been deemed worth the tradeoff. 
+ +## Smart Pointer Design + +EASTL implements the following smart pointer types: + +* shared_ptr +* shared_array +* weak_ptr +* intrusive_ptr +* scoped_ptr +* scoped_array +* linked_ptr +* linked_array + +All but linked_ptr/linked_array are well-known smart pointers from the Boost library. The behaviour of these smart pointers is very similar to those from Boost with two exceptions: + +* EASTL smart pointers allow you to assign an allocator to them. +* EASTL shared_ptr implements deletion via a templated parameter instead of a dynamically allocated virtual member object interface. + +With respect to assigning an allocator, this gives EASTL more control over memory allocation and tracking, as Boost smart pointers unilaterally use global operator new to allocate memory from the global heap. + +With respect to shared_ptr deletion, EASTL's current design of using a templated parameter is questionable, but does have some reason. The advantage is that EASTL avoids a heap allocation, avoids virtual function calls, and avoids templated class proliferation. The disadvantage is that EASTL shared_ptr containers which hold void pointers can't call the destructors of their contained objects unless the user manually specifies a custom deleter template parameter. This is a case whereby EASTL is more efficient but less safe. We can revisit this topic in the future if it becomes an issue. + +## list::size is O(n) + +As of this writing, EASTL has three linked list classes: list, slist, and intrusive_list. In each of these classes, the size of the list is not cached in a member size variable. The result of this is that getting the size of a list is not a fast operation, as it requires traversing the list and counting the nodes. We could make the list::size function be fast by having a member mSize variable which tracks the size as we insert and delete items. There are reasons for having such functionality and reasons for not having such functionality. 
We currently choose to not have a member mSize variable as it would add four bytes to the class, add a tiny amount of processing to functions such as insert and erase, and would only serve to improve the size function, but no others. In the case of intrusive_list, it would do additional harm. The alternative argument is that the C++ standard states that std::list should be an O(1) operation (i.e. have a member size variable), that many C++ standard library list implementations do so, that the size is but an integer which is quick to update, and that many users expect to have a fast size function. In the final analysis, we are developing a library for game development and performance is paramount, so we choose to not cache the list size. The user can always implement a size cache himself. + +## basic_string doesn't use copy-on-write + +The primary benefit of CoW is that it allows for the sharing of string data between two string objects. Thus if you say this: + +```cpp +string a("hello"); +string b(a); +``` + +the "hello" will be shared between a and b. If you then say this: + +```cpp +a = "world"; +``` + +then `a` will release its reference to "hello" and leave b with the only reference to it. Normally this functionality is accomplished via reference counting and with atomic operations or mutexes. + +The C++ standard does not say anything about basic_string and CoW. However, for a basic_string implementation to be standards-conforming, a number of issues arise which dictate some things about how one would have to implement a CoW string. The discussion of these issues will not be rehashed here, as you can read the references below for better detail than can be provided in the space we have here. However, we can say that the C++ standard is sensible and that anything we try to do here to allow for an efficient CoW implementation would result in a generally unacceptable string interface. 
+ +The disadvantages of CoW strings are: + +* A reference count needs to exist with the string, which increases string memory usage. +* With thread safety, atomic operations and mutex locks are expensive, especially on weaker memory systems such as console gaming platforms. +* All non-const string accessor functions need to do a sharing check and the first such check needs to detach the string. Similarly, all string assignments need to do a sharing check as well. If you access the string before doing an assignment, the assignment doesn't result in a shared string, because the string has already been detached. +* String sharing doesn't happen the large majority of the time. In some cases, the total sum of the reference count memory can exceed any memory savings gained by the strings that share representations.  + +The addition of a cow_string class is under consideration for EASTL. There are conceivably some systems which have string usage patterns which would benefit from CoW sharing. Such functionality is best saved for a separate string implementation so that the other string uses aren't penalized. 
+ +This is a good starting HTML reference on the topic: + +> [http://www.gotw.ca/publications/optimizations.htm](http://www.gotw.ca/publications/optimizations.htm) + +Here is a well-known Usenet discussion on the topic: + +> [http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d](http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d) + +---------------------------------------------- +End of document diff --git a/doc/EASTL.natvis b/doc/EASTL.natvis new file mode 100644 index 0000000..39b3dba --- /dev/null +++ b/doc/EASTL.natvis @@ -0,0 +1,541 @@ + + + + + + + + ({(void*)mPair.mFirst} = {*mPair.mFirst}) + ({nullptr}) + + (void*)mPair.mFirst + *mPair.mFirst + + + + + ({(void*)mpValue} = {*mpValue}) + ({nullptr}) + + (void*)mpValue + *mpValue + mpRefCount->mRefCount + mpRefCount->mWeakRefCount + + + + + {((mpRefCount && mpRefCount->mRefCount) ? mpValue : nullptr)} + + mpRefCount && mpRefCount->mRefCount ? mpValue : nullptr + + + + + [{$T2}] {{}} + [{$T2}] {{ {*mValue} }} + [{$T2}] {{ {*mValue}, {*(mValue+1)} }} + [{$T2}] {{ {*mValue}, {*(mValue+1)}, {*(mValue+2)} }} + [{$T2}] {{ {*mValue}, {*(mValue+1)}, {*(mValue+2)}, {*(mValue+3)} }} + [{$T2}] {{ {*mValue}, {*(mValue+1)}, {*(mValue+2)}, {*(mValue+3)}, {*(mValue+4)} }} + [{$T2}] {{ {*mValue}, {*(mValue+1)}, {*(mValue+2)}, {*(mValue+3)}, {*(mValue+4)}, {*(mValue+5)} }} + [{$T2}] {{ {*mValue}, {*(mValue+1)}, {*(mValue+2)}, {*(mValue+3)}, {*(mValue+4)}, {*(mValue+5)}, ... 
}} + + $T2 + + $T2 + mValue + + + + + + "{mPair.mFirst.heap.mpBegin,sb}" + "{mPair.mFirst.sso.mData,sb}" + + mPair.mFirst.heap.mnSize + (mPair.mFirst.heap.mnCapacity & ~kHeapMask) + mPair.mFirst.heap.mpBegin,sb + + mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize + SSOLayout::SSO_CAPACITY + mPair.mFirst.sso.mData,sb + + !!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize & kSSOMask) + + + + + + {mPair.mFirst.heap.mpBegin,su} + {mPair.mFirst.sso.mData,su} + + mPair.mFirst.heap.mnSize + (mPair.mFirst.heap.mnCapacity & ~kHeapMask) + mPair.mFirst.heap.mpBegin,su + + mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize + SSOLayout::SSO_CAPACITY + mPair.mFirst.sso.mData,su + + !!(mPair.mFirst.sso.mRemainingSizeField.mnRemainingSize & kSSOMask) + + + + + ({first}, {second}) + + first + second + + + + + [{mpEnd - mpBegin}] {{}} + [{mpEnd - mpBegin}] {{ {*mpBegin} }} + [{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)} }} + [{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)}, {*(mpBegin+2)} }} + [{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)}, {*(mpBegin+2)}, {*(mpBegin+3)} }} + [{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)}, {*(mpBegin+2)}, {*(mpBegin+3)}, {*(mpBegin+4)} }} + [{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)}, {*(mpBegin+2)}, {*(mpBegin+3)}, {*(mpBegin+4)}, {*(mpBegin+5)} }} + [{mpEnd - mpBegin}] {{ {*mpBegin}, {*(mpBegin+1)}, {*(mpBegin+2)}, {*(mpBegin+3)}, {*(mpBegin+4)}, {*(mpBegin+5)}, ... }} + + mpEnd - mpBegin + mCapacityAllocator.mFirst - mpBegin + + mpEnd - mpBegin + mpBegin + + + + + + + [0] {{}} + + + [1] {{ {*mItBegin.mpCurrent} }} + + + [{(mItEnd.mpCurrentArrayPtr - mItBegin.mpCurrentArrayPtr) * $T3 + (mItEnd.mpCurrent-mItEnd.mpBegin) - (mItBegin.mpCurrent-mItBegin.mpBegin)}] + {{ + {*mItBegin.mpCurrent}, + ... 
+ }} + + + (mItEnd.mpCurrentArrayPtr - mItBegin.mpCurrentArrayPtr) * $T3 + (mItEnd.mpCurrent-mItEnd.mpBegin) - (mItBegin.mpCurrent-mItBegin.mpBegin) + + (mItEnd.mpCurrentArrayPtr - mItBegin.mpCurrentArrayPtr) * $T3 + (mItEnd.mpCurrent-mItEnd.mpBegin) - (mItBegin.mpCurrent-mItBegin.mpBegin) + mItBegin.mpCurrentArrayPtr[(mItBegin.mpCurrent-mItBegin.mpBegin + $i) / $T3][(mItBegin.mpCurrent-mItBegin.mpBegin + $i) % $T3] + + + + + + {*mpCurrent} + + *mpCurrent + *(*(mpCurrentArrayPtr-1) + (mpEnd-mpBegin) - 1) + *(mpCurrent-1) + **(mpCurrentArrayPtr+1) + *(mpCurrent+1) + mpCurrent == mpBegin + mpCurrent+1 == mpEnd + + + + + + + {c} + + c + + + + + + [0] {{}} + + + [1] {{ {((eastl::ListNode<$T1>*)mNodeAllocator.mFirst.mpNext)->mValue} }} + + + [2] + {{ + {((eastl::ListNode<$T1>*)mNodeAllocator.mFirst.mpNext)->mValue}, + {((eastl::ListNode<$T1>*)mNodeAllocator.mFirst.mpNext->mpNext)->mValue} + }} + + + [?] + {{ + {((eastl::ListNode<$T1>*)mNodeAllocator.mFirst.mpNext)->mValue}, + {((eastl::ListNode<$T1>*)mNodeAllocator.mFirst.mpNext->mpNext)->mValue}, + ... + }} + + + + Content of lists will repeat indefinitely. Keep that in mind! + + + mNodeAllocator.mFirst.mpNext + mpNext + ((eastl::ListNode<$T1>*)this)->mValue + + + + + + {mValue} + + mValue + *(eastl::ListNode<$T1>*)mpNext + *(eastl::ListNode<$T1>*)mpPrev + + Content of lists will repeat indefinitely. Keep that in mind! + + + The rest of the list follows: + + + (eastl::ListNode<$T1>*)mpNext->mpNext + (eastl::ListNode<$T1>*)mpNext + mValue + + + + + + {*mpNode} + + mpNode + + + + + + [0] {{}} + + + [1] + {{ + {((eastl::SListNode<$T1>*)mNode.mpNext)->mValue} + }} + + + [2] + {{ + {((eastl::SListNode<$T1>*)mNode.mpNext)->mValue}, + {((eastl::SListNode<$T1>*)mNode.mpNext->mpNext)->mValue} + }} + + + [?] + {{ + {((eastl::SListNode<$T1>*)mNode.mpNext)->mValue}, + {((eastl::SListNode<$T1>*)mNode.mpNext->mpNext)->mValue}, + ... 
+ }} + + + + mNode.mpNext + mpNext + ((eastl::SListNode<$T1>*)this)->mValue + + + + + + {mValue} + + mValue + *(eastl::SListNode<$T1>*)mpNext + + The rest of the list follows: + + + mpNext == nullptr ? nullptr : (eastl::SListNode<$T1>*)mpNext->mpNext + (eastl::SListNode<$T1>*)mpNext + mValue + + + + + + {*mpNode} + + *mpNode + + + + + [0] {{}} + [1] {{ {mAnchor.mpNext} }} + [?] {{ {mAnchor.mpNext}, ... }} + + + Content of intrusive lists will repeat indefinitely. Keep that in mind! + + + mAnchor.mpNext + mpNext + *this + + + + + + {*mpNode} + + *mpNode + + + + + + + [0] {{}} + + + [1] + {{ + {((eastl::rbtree_node<$T1>*)mAnchor.mpNodeLeft)->mValue} + }} + + + [{mnSize}] + {{ + {((eastl::rbtree_node<$T1>*)mAnchor.mpNodeLeft)->mValue}, + ... + }} + + + mnSize + + mnSize + mAnchor.mpNodeParent + mpNodeLeft + mpNodeRight + ((eastl::rbtree_node<$T1>*)this)->mValue + + + + + + + [0] {{}} + + + [1] + {{ + {((eastl::rbtree_node<$T2>*)mAnchor.mpNodeLeft)->mValue} + }} + + + [{mnSize}] + {{ + {((eastl::rbtree_node<$T2>*)mAnchor.mpNodeLeft)->mValue}, + ... + }} + + + mnSize + + mnSize + mAnchor.mpNodeParent + mpNodeLeft + mpNodeRight + ((eastl::rbtree_node<$T2>*)this)->mValue + + + + + + {mValue} + + mValue + + It is possible to expand parents that do not exist. + + *(eastl::rbtree_node<$T2>*)(mpNodeParent.value & (~uintptr_t(1))) + *(eastl::rbtree_node<$T2>*)mpNodeLeft + *(eastl::rbtree_node<$T2>*)mpNodeRight + + + + + {*mpNode} + + mpNode + + + + + + [{mnElementCount}] {{}} + [{mnElementCount}] {{ ... }} + + + mnBucketCount + mpBucketArray + + + + + + {mValue}, {*mpNext} + {mValue} + + + + this + mpNext + mValue + + + + + + {mpNode->mValue} + + mpNode->mValue + + + + + {*(mIterator-1)} + + mIterator-1 + + + + + {{count = {kSize}}} + + kSize + + + + + + kSize + + + bBitValue = ((mWord[iWord] >> iBitInWord) % 2) != 0 ? 
true : false + bBitValue + iBitInWord++ + + iWord++ + iBitInWord = 0 + + + + + + + + {c} + + c + + + + + {mpBegin,[mnCount]} + mpBegin,[mnCount] + + + + ({mFirst}, {mSecond}) + ({mSecond}) + ({mFirst}) + (empty) + (empty) + ({mFirst}, {mSecond}) + + + + + nullopt + {value()} + + value() + + + + + {$T1} to {$T2}} + + + + + {mRep} nanoseconds + + + + {mRep} microseconds + + + + {mRep} milliseconds + + + + {mRep} seconds + + + + {mRep} minutes + + + + {mRep} hours + + + + {mRep} duration with ratio = [{$T2} : {$T3}] + + + + + + empty + {mInvokeFuncPtr} + + + + + {*val} + + + + + empty + {m_storage.external_storage} + + + + + + + diff --git a/doc/FAQ.md b/doc/FAQ.md new file mode 100644 index 0000000..e80172b --- /dev/null +++ b/doc/FAQ.md @@ -0,0 +1,2290 @@ +# EASTL FAQ + +We provide a FAQ (frequently asked questions) list here for a number of commonly asked questions about EASTL and STL in general. Feel free to suggest new FAQ additions based on your own experience. + +## Information + +1. [What is EASTL?](#info1-what-is-eastl) +2. [What uses are EASTL suitable for?](#info2-what-uses-are-eastl-suitable-for) +3. [How does EASTL differ from standard C++ STL?](#info3-how-does-eastl-differ-from-standard-c-stl) +4. [Is EASTL thread-safe?](#info4-is-eastl-thread-safe) +5. [What platforms/compilers does EASTL support?](#info5-what-platformscompilers-does-eastl-support) +6. [Why is there EASTL when there is the STL?](#info6-why-is-there-eastl-when-there-is-the-stl) +7. [Can I mix EASTL with standard C++ STL?](#info7-can-i-mix-eastl-with-standard-c-stl) +8. [Where can I learn more about STL and EASTL?](#info8-where-can-i-learn-more-about-stl-and-eastl) +9. [What is the legal status of EASTL?](#info9-what-is-the-legal-status-of-eastl) +10. [Does EASTL deal with compiler exception handling settings?](#info10-does-eastl-deal-with-compiler-exception-handling-settings) +11. [What C++ language features does EASTL use (e.g. 
virtual functions)?](#info11-what-c-language-features-does-eastl-use-eg-virtual-functions) +12. [What compiler warning levels does EASTL support?](#info12-what-compiler-warning-levels-does-eastl-support) +13. [Is EASTL compatible with Lint?](#info13-is-eastl-compatible-with-lint) +14. [What compiler settings do I need to compile EASTL?](#info14-what-compiler-settings-do-i-need-to-compile-eastl) +15. [How hard is it to incorporate EASTL into my project?](#info15-how-hard-is-it-to-incorporate-eastl-into-my-project) +16. [Should I use EASTL instead of std STL or instead of my custom library?](#info16-should-i-use-eastl-instead-of-std-stl-or-instead-of-my-custom-library) +17. [I think I've found a bug. What do I do?](#info17-i-think-ive-found-a-bug-what-do-i-do) +18. [Can EASTL be used by third party EA developers?](#info18-can-eastl-be-used-by-third-party-ea-developers) + +## Performance + +1. [How efficient is EASTL compared to standard C++ STL implementations?](#perf1-how-efficient-is-eastl-compared-to-standard-c-stl-implementations) +2. [How efficient is EASTL in general?](#perf2-how-efficient-is-eastl-in-general) +3. [Strings don't appear to use the "copy-on-write" optimization. Why not?](#perf3-strings-dont-appear-to-use-the-copy-on-write-cow-optimization-why-not) +4. [Does EASTL cause code bloat, given that it uses templates?](#perf4-does-eastl-cause-code-bloat-given-that-it-uses-templates) +5. [Don't STL and EASTL containers fragment memory?](#perf5-dont-stl-and-eastl-containers-fragment-memory) +6. [I don't see container optimizations for equivalent scalar types such as pointer types. Why?](#perf6-i-dont-see-container-optimizations-for-equivalent-scalar-types-such-as-pointer-types-why) +7. [I've seen some STL's provide a default quick "node allocator" as the default allocator. Why doesn't EASTL do this?](#perf7-ive-seen-some-stls-provide-a-default-quick-node-allocator-as-the-default-allocator-why-doesnt-eastl-do-this) +8. 
[Templates sometimes seem to take a long time to compile. Why do I do about that?](#perf8-templates-sometimes-seem-to-take-a-long-time-to-compile-why-do-i-do-about-that) +9. [How do I assign a custom allocator to an EASTL container?](#cont8-how-do-i-assign-a-custom-allocator-to-an-eastl-container) +10. [How well does EASTL inline?](#perf10-how-well-does-eastl-inline) +11. [How do I control function inlining?](#perf11-how-do-i-control-function-inlining) +12. [C++ / EASTL seems to bloat my .obj files much more than C does.](#perf12-c--eastl-seems-to-bloat-my-obj-files-much-more-than-c-does) +13. [What are the best compiler settings for EASTL?](#perf13-what-are-the-best-compiler-settings-for-eastl) + +## Problems + +1. [I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?](#prob1-im-getting-screwy-behavior-in-sorting-algorithms-or-sorted-containers-whats-wrong) +2. [I am getting compiler warnings (e.g. C4244, C4242 or C4267) that make no sense. Why?](#prob2-i-am-getting-compiler-warnings-eg-c4244-c4242-or-c4267-that-make-no-sense-why) +3. [I am getting compiler warning C4530, which complains about exception handling and "unwind semantics." What gives?](#prob3-i-am-getting-compiler-warning-c4530-which-complains-about-exception-handling-and-unwind-semantics-what-gives) +4. [Why are tree-based containers hard to read with a debugger?](#prob4-why-are-tree-based-eastl-containers-hard-to-read-with-a-debugger) +5. [The EASTL source code is sometimes rather complicated looking. Why is that?](#prob5-the-eastl-source-code-is-sometimes-rather-complicated-looking-why-is-that) +6. [When I get compilation errors, they are very long and complicated looking. What do I do?](#prob6-when-i-get-compilation-errors-they-are-very-long-and-complicated-looking-what-do-i-do) +7. [Templates sometimes seem to take a long time to compile. Why do I do about that?](#prob7-templates-sometimes-seem-to-take-a-long-time-to-compile-why-do-i-do-about-that) +8. 
[I get the compiler error: "template instantiation depth exceeds maximum of 17. use -ftemplate-depth-NN to increase the maximum"](#prob8-i-get-the-compiler-error-template-instantiation-depth-exceeds-maximum-of-17-use--ftemplate-depth-nn-to-increase-the-maximum) +9. [I'm getting errors about min and max while compiling.](#prob9-im-getting-errors-about-min-and-max-while-compiling) +10. [C++ / EASTL seems to bloat my .obj files much more than C does.](#prob10-c--eastl-seems-to-bloat-my-obj-files-much-more-than-c-does) +11. [I'm getting compiler errors regarding operator new being previously defined.](#prob11-im-getting-compiler-errors-regarding-placement-operator-new-being-previously-defined) +12. [I'm getting errors related to wchar_t string functions such as wcslen().](#prob12-im-getting-errors-related-to-wchar_t-string--functions-such-as-wcslen) +13. [I'm getting compiler warning C4619: there is no warning number Cxxxx (e.g. C4217).](#prob13-im-getting-compiler-warning-c4619-there-is-no-warning-number-cxxxx-eg-c4217) +14. [My stack-based fixed_vector is not respecting the object alignment requirements.](#prob14-my-stack-based-fixed_vector-is-not-respecting-the-object-alignment-requirements) +15. [I am getting compiler errors when using GCC under XCode (Macintosh/iphone).](#prob15-i-am-getting-compiler-errors-when-using-gcc-under-xcode-macintoshiphone) +16. [I am getting linker errors about Vsnprintf8 or Vsnprintf16.](#prob16-i-am-getting-linker-errors-about-vsnprintf8-or-vsnprintf16) +17. [I am getting compiler errors about UINT64_C or UINT32_C.](#prob17-i-am-getting-compiler-errors-about-uint64_c-or-uint32_c) +18. [I am getting a crash with a global EASTL container.](#prob18-i-am-getting-a-crash-with-a-global-eastl-container) +19. [Why doesn't EASTL support passing NULL to functions with pointer arguments?](#prob19-why-doesnt-eastl-support-passing-null-string-functions) + +## Debug + +1. 
[How do I get VC++ mouse-overs to view templated data?](#debug1-how-do-i-set-the-vc-debugger-to-display-eastl-container-data-with-tooltips) +2. [How do I view containers if the visualizer/tooltip support is not present?](#debug2-how-do-i-view-containers-if-the-visualizertooltip-support-is-not-present) +3. [The EASTL source code is sometimes rather complicated looking. Why is that?](#debug3-the-eastl-source-code-is-sometimes-rather-complicated-looking-why-is-that) +4. [When I get compilation errors, they are very long and complicated looking. What do I do?](#debug4-when-i-get-compilation-errors-they-are-very-long-and-complicated-looking-what-do-i-do) +5. [How do I measure hash table balancing?](#debug5-how-do-i-measure-hash-table-balancing) + +## Containers + +1. [Why do some containers have "fixed" versions (e.g. fixed_list) but others(e.g. deque) don't have fixed versions?](#cont1-why-do-some-containers-have-fixed-versions-eg-fixed_list-but-otherseg-deque-dont-have-fixed-versions) +2. [Can I mix EASTL with standard C++ STL?](#cont2-can-i-mix-eastl-with-standard-c-stl) +3. [Why are there so many containers?](#cont3-why-are-there-so-many-containers) +4. [Don't STL and EASTL containers fragment memory?](#cont4-dont-stl-and-eastl-containers-fragment-memory) +5. [I don't see container optimizations for equivalent scalar types such as pointer types. Why?](#cont5-i-dont-see-container-optimizations-for-equivalent-scalar-types-such-as-pointer-types-why) +6. [What about alternative container and algorithm implementations (e.g. treaps, skip lists, avl trees)?](#cont6-what-about-alternative-container-and-algorithm-implementations-eg-treaps-skip-lists-avl-trees) +7. [Why are containers hard to read with a debugger?](#cont7-why-are-tree-based-eastl-containers-hard-to-read-with-a-debugger) +8. [How do I assign a custom allocator to an EASTL container?](#cont8-how-do-i-assign-a-custom-allocator-to-an-eastl-container) +9. 
[How do I set the VC++ debugger to display EASTL container data with tooltips?](#cont9-how-do-i-set-the-vc-debugger-to-display-eastl-container-data-with-tooltips) +10. [How do I use a memory pool with a container?](#cont10-how-do-i-use-a-memory-pool-with-a-container) +11. [How do I write a comparison (operator<()) for a struct that contains two or more members?](#cont11-how-do-i-write-a-comparison-operator-for-a-struct-that-contains-two-or-more-members) +12. [Why doesn't container X have member function Y?](#cont12-why-doesnt-container-x-have-member-function-y) +13. [How do I search a hash_map of strings via a char pointer efficiently? If I use map.find("hello") it creates a temporary string, which is inefficient.](#cont13-how-do-i-search-a-hash_map-of-strings-via-a-char-pointer-efficiently-if-i-use-mapfindhello-it-creates-a-temporary-string-which-is-inefficient) +14. [Why are set and hash_set iterators const (i.e. const_iterator)?](#cont14-why-are-set-and-hash_set-iterators-const-ie-const_iterator) +15. [How do I prevent my hash container from re-hashing?](#cont15-how-do-i-prevent-my-hash-container-from-re-hashing) +16. [Which uses less memory, a map or a hash_map?](#cont16-which-uses-less-memory-a-map-or-a-hash_map) +17. [How do I write a custom hash function?](#cont17-how-do-i-write-a-custom-hash-function) +18. [How do I write a custom compare function for a map or set?](#cont18-how-do-i-write-a-custom-compare-function-for-a-map-or-set) +19. [How do I force my vector or string capacity down to the size of the container?](#cont19-how-do-i-force-my-vector-or-string-capacity-down-to-the-size-of-the-container) +20. [How do I iterate a container while (selectively) removing items from it?](#cont20-how-do-i-iterate-a-container-while-selectively-removing-items-from-it) +21. [How do I store a pointer in a container?](#cont21-how-do-i-store-a-pointer-in-a-container) +22. [How do I make a union of two containers? difference? 
intersection?](#cont22-how-do-i-make-a-union-of-two-containers-difference-intersection) +23. [How do I override the default global allocator?](#cont23-how-do-i-override-the-default-global-allocator) +24. [How do I do trick X with the string container?](#cont24-how-do-i-do-trick-x-with-the-string-container) +25. [How do EASTL smart pointers compare to Boost smart pointers?](#cont25-how-do-eastl-smart-pointers-compare-to-boost-smart-pointers) +26. [How do your forward-declare an EASTL container?](#cont26-how-do-your-forward-declare-an-eastl-container) +27. [How do I make two containers share a memory pool?](#cont27-how-do-i-make-two-containers-share-a-memory-pool) +28. [Can I use a std (STL) allocator with EASTL?](#cont28-can-i-use-a-std-stl-allocator-with-eastl) +29. [What are the requirements of classes stored in containers?](#what-are-the-requirements-of-classes-stored-in-containers) + +## Algorithms + +1. [I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?](#algo1-im-getting-screwy-behavior-in-sorting-algorithms-or-sorted-containers-whats-wrong) +2. [How do I write a comparison (operator<()) for a struct that contains two or more members?](#algo2-how-do-i-write-a-comparison-operator-for-a-struct-that-contains-two-or-more-members) +3. [How do I sort something in reverse order?](#algo3-how-do-i-sort-something-in-reverse-order) +4. [I'm getting errors about min and max while compiling.](#algo4-im-getting-errors-about-min-and-max-while-compiling) +5. [Why don't algorithms take a container as an argument instead of iterators? A container would be more convenient.](#algo5-why-dont-algorithms-take-a-container-as-an-argument-instead-of-iterators-a-container-would-be-more-convenient) +6. [Given a container of pointers, how do I find an element by value (instead of by pointer)?](#algo6-given-a-container-of-pointers-how-do-i-find-an-element-by-value-instead-of-by-pointer) +7. [When do stored objects need to support opertor < vs. 
when do they need to support operator ==?](#algo7-when-do-stored-objects-need-to-support-operator--vs-when-do-they-need-to-support-operator-) +8. [How do I sort via pointers or array indexes instead of objects directly?](#algo8-how-do-i-sort-via-pointers-or-array-indexes-instead-of-objects-directly) + +## Iterators + +1. [What's the difference between iterator, const iterator, and const_iterator?](#iter1-whats-the-difference-between-iterator-const-iterator-and-const_iterator) +2. [How do I tell from an iterator what type of thing it is iterating?](#iter2-how-do-i-tell-from-an-iterator-what-type-of-thing-it-is-iterating) +3. [How do I iterate a container while (selectively) removing items from it?](#iter3-how-do-i-iterate-a-container-while-selectively-removing-items-from-it) +4. [What is an insert_iterator?](#iter4-what-is-an-insert_iterator) + +## Information + +### Info.1 What is EASTL? + +EASTL refers to "EA Standard Template Library." It is a C++ template library that is analogous to the template facilities of the C++ standard library, which are often referred to as the STL. EASTL consists of the following systems: + +* Containers +* Iterators +* Algorithms +* Utilities +* Smart pointers +* Type traits + +Of these, the last two (smart pointers and type traits) do not have analogs in standard C++. With respect to the other items, EASTL provides extensions and optimizations over the equivalents in standard C++ STL. + +EASTL is a professional-level implementation which outperforms commercial implementations (where functionality overlaps) and is significantly easier to read and debug. + +### Info.2 What uses are EASTL suitable for? + +EASTL is suitable for any place where templated containers and algorithms would be appropriate. Thus any C++ tools could use it and many C++ game runtimes could use it, especially 2005+ generation game platforms. EASTL has optimizations that make it more suited to the CPUs and memory systems found on console platforms. 
Additionally, EASTL has some type-traits and iterator-traits-derived template optimizations that make it generally more efficient than home-brew templated containers. + +### Info.3 How does EASTL differ from standard C++ STL? + +There are three kinds of ways that EASTL differs from standard STL: + +* EASTL equivalents to STL sometimes differ. +* EASTL implementations sometimes differ from STL implementations of the same thing. +* EASTL has functionality that doesn't exist in STL. + +With respect to item #1, the changes are such that they benefit game development and are not the type that could silently hurt you if you were more familiar with STL interfaces. + +With respect to item #2, where EASTL implementations differ from STL implementations it is almost always due to improvements being made in the EASTL versions or tradeoffs being made which are considered better for game development. + +With respect to item #3, there are a number of facilities that EASTL has that STL doesn't have, such as intrusive_list and slist containers, smart pointers, and type traits. All of these are facilities that assist in making more efficient game code and data. + +Ways in which EASTL is better than standard STL: + +* Has higher performance in release builds, sometimes dramatically so. +* Has significantly higher performance in debug builds, due to less call overhead. +* Has extended per-container functionality, particularly for game development. +* Has additional containers that are useful for high performance game development. +* Is easier to read, trace, and debug. +* Memory allocation is much simpler and more controllable. +* Has higher portability, as there is a single implementation for all platforms. +* Has support of object alignment, whereas such functionality is not natively supported by STL. +* We have control over it, so we can modify it as we like. +* Has stricter standards for container design and behavior, particularly as this benefits game development. 
+ +Ways in which EASTL is worse than standard STL: + +* Standard STL implementations are currently very reliable and weather-worn, whereas EASTL is less tested. +* Standard STL is automatically available with just about every C++ compiler vendor's library. +* Standard STL is supported by the compiler vendor and somewhat by the Internet community. + +#### EASTL coverage of std STL + +* list +* vector +* deque +* string +* set +* multiset +* map +* multimap +* bitset +* queue +* stack +* priority_queue +* memory +* numeric +* algorithm (all but inplace_merge, prev_permutation, next_permutation, nth_element, includes, unique_copy) +* utility +* functional +* iterator + +EASTL additions/amendments to std STL + +* allocators work in a simpler way. +* exception handling can be disabled. +* all containers expose/declare their node size, so you can make a node allocator for them. +* all containers have reset(), which unilaterally forgets their contents. +* all containers have validate() and validate_iterator() functions. +* all containers understand and respect object alignment requirements. +* all containers guarantee no memory allocation upon being newly created as empty. +* all containers and their iterators can be viewed in a debugger (no other STL does this, believe it or not). +* linear containers guarantee linear memory. +* vector has push_back(void). +* vector has a data() function. +* vector is actually a vector of type bool. +* vector and string have set_capacity(). +* string has sprintf(), append_sprintf(), trim(), compare_i(), make_lower(), make_upper(). +* deque allows you to specify the subarray size. +* list has a push_back(void) and push_back(void) function. +* hash_map, hash_set, etc. have find_as(). 
+ +EASTL coverage of TR1 (tr1 refers to proposed additions for the next C++ standard library, ~2008) + +* array +* type_traits (there are about 30 of these) +* unordered_set (EASTL calls it hash_set) +* unordered_multiset +* unordered_map +* unordered_multimap +* shared_ptr, shared_array, weak_ptr, scoped_ptr, scoped_array, intrusive_ptr + +EASTL additional functionality (not found elsewhere) + +* fixed_list +* fixed_slist +* fixed_vector +* fixed_string +* fixed_substring +* fixed_set +* fixed_multiset +* fixed_map +* fixed_multimap +* fixed_hash_set +* fixed_hash_multiset +* fixed_hash_map +* fixed_hash_multimap +* vector_set +* vector_multiset +* vector_map +* vector_multimap +* intrusive_list +* intrusive_slist +* intrusive_sdlist +* intrusive_hash_set +* intrusive_hash_multiset +* intrusive_hash_map +* intrusive_hash_multimap +* slist (STLPort's STL has this) +* heap +* linked_ptr, linked_array +* sparse_matrix (this is not complete as of this writing) +* ring_buffer +* compressed_pair +* call_traits +* binary_search_i, change_heap, find_first_not_of, find_last_of, find_last_not_of, identical +* comb_sort, bubble_sort, selection_sort, shaker_sort, bucket_sort +* equal_to_2, not_equal_to_2, str_equal_to, str_equal_to_i + +### Info.4 Is EASTL thread-safe? + +It's not simple enough to simply say that EASTL is thread-safe or thread-unsafe. However, we can say that with respect to thread safety that EASTL does the right thing. + +Individual EASTL containers are not thread-safe. That is, access to an instance of a container from multiple threads at the same time is unsafe if any of those accesses are modifying operations. A given container can be read from multiple threads simultaneously as well as any other standalone data structure. If a user wants to be able to have modifying access an instance of a container from multiple threads, it is up to the user to ensure that proper thread synchronization occurs. This usually means using a mutex. 
+
+EASTL classes other than containers are the same as containers with respect to thread safety. EASTL functions (e.g. algorithms) are inherently thread-safe as they have no instance data and operate entirely on the stack. As of this writing, no EASTL function allocates memory and thus doesn't bring thread safety issues via that means.
+
+The user may well need to be concerned about thread safety with respect to memory allocation. If the user modifies containers from multiple threads, then allocators are going to be accessed from multiple threads. If an allocator is shared across multiple container instances (of the same type of container or not), then mutexes (as discussed above) the user uses to protect access to individual instances will not suffice to provide thread safety for allocators used across multiple instances. The conventional solution here is to use a mutex within the allocator if it is expected to be used by multiple threads.
+
+EASTL uses neither static nor global variables and thus there are no inter-instance dependencies that would make thread safety difficult for the user to implement.
+
+### Info.5 What platforms/compilers does EASTL support?
+
+EASTL's support depends entirely on the compiler and not on the platform. EASTL works on any C++ compiler that completely conforms to the C++ language standard. Additionally, EASTL is 32 bit and 64 bit compatible. Since EASTL does not use the C or C++ standard library (with a couple small exceptions), it doesn't matter what kind of libraries are provided (or not provided) by the compiler vendor.
However, given that we need to work with some compilers that aren't 100% conforming to the language standard, it will be useful to make a list here of these that are supported and those that are not: + +| Compiler | Status | Notes | +|---------|--------|-------| +| GCC 2.9x | Supported | However, GCC 2.9x has some issues that you may run into that cause you to use EASTL facilities differently than a fully compliant compiler would allow. | +| GCC 3.x+ | Supported | This compiler is used by the Mac OSX, and Linux platforms. | +| MSVC 6.0 | Not supported | This compiler is too weak in the area of template and namespace support. | +| MSVC 7.0+ | Supported | This compiler is used by the PC and Win CE platforms | +| Borland 5.5+ | Not supported | Borland can successfully compile many parts of EASTL, but not all parts. | +| EDG | Supported | This is the compiler front end to some other compilers, such as Intel, and Comeau C++. | +| IBM XL 5.0+ | Supported | This compiler is sometimes used by PowerPC platforms such as Mac OSX and possibly future console platforms. | + +### Info.6 Why is there EASTL when there is the STL? + +The STL is largely a fine library for general purpose C++. However, we can improve upon it for our uses and gain other advantages as well. The primary motivations for the existence of EASTL are the following: + +* Some STL implementations (especially Microsoft STL) have inferior performance characteristics that make them unsuitable for game development. EASTL is faster than all existing STL implementations. +* The STL is sometimes hard to debug, as most STL implementations use cryptic variable names and unusual data structures. +* STL allocators are sometimes painful to work with, as they have many requirements and cannot be modified once bound to a container. +* The STL includes excess functionality that can lead to larger code than desirable. It's not very easy to tell programmers they shouldn't use that functionality. 
+* The STL is implemented with very deep function calls. This results in unacceptable performance in non-optimized builds and sometimes in optimized builds as well.
+* The STL doesn't support alignment of contained objects.
+* STL containers won't let you insert an entry into a container without supplying an entry to copy from. This can be inefficient.
+* Useful STL extensions (e.g. slist, hash_map, shared_ptr) found in existing STL implementations such as STLPort are not portable because they don't exist in other versions of STL or aren't consistent between STL versions.
+* The STL lacks useful extensions that game programmers find useful (e.g. intrusive_list) but which could be best optimized in a portable STL environment.
+* The STL has specifications that limit our ability to use it efficiently. For example, STL vectors are not guaranteed to use contiguous memory and so cannot be safely used as an array.
+* The STL puts an emphasis on correctness before performance, whereas sometimes you can get significant performance gains by making things less academically pure.
+* STL containers have private implementations that don't allow you to work with their data in a portable way, yet sometimes this is an important thing to be able to do (e.g. node pools).
+* All existing versions of STL allocate memory in empty versions of at least some of their containers. This is not ideal and prevents optimizations such as container memory resets that can greatly increase performance in some situations.
+* The STL is slow to compile, as most modern STL implementations are very large.
+* There are legal issues that make it hard for us to freely use portable STL implementations such as STLPort.
+* We have no say in the design and implementation of the STL and so are unable to change it to work for our needs.
+* Note that there isn't actually anything in the C++ standard called "STL." STL is a term that merely refers to the templated portion of the C++ standard library.
+
+### Info.7 Can I mix EASTL with standard C++ STL?
+
+This is possible to some degree, though the extent depends on the implementation of C++ STL. One of the things that limits interoperability is something called iterator categories. Containers and algorithms recognize iterator types via their category and STL iterator categories are not recognized by EASTL and vice versa.
+
+Things that you definitely can do:
+
+* #include both EASTL and standard STL headers from the same .cpp file.
+* Use EASTL containers to hold STL containers.
+* Construct an STL reverse_iterator from an EASTL iterator.
+* Construct an EASTL reverse_iterator from an STL iterator.
+
+Things that you probably will be able to do, though a given std STL implementation may prevent it:
+
+* Use STL containers in EASTL algorithms.
+* Use EASTL containers in STL algorithms.
+* Construct or assign to an STL container via iterators into an EASTL container.
+* Construct or assign to an EASTL container via iterators into an STL container.
+
+Things that you would be able to do if the given std STL implementation is bug-free:
+
+* Use STL containers to hold EASTL containers. Unfortunately, VC7.x STL has a confirmed bug that prevents this. Similarly, STLPort versions prior to v5 have a similar bug.
+
+Things that you definitely can't do:
+
+* Use an STL allocator directly with an EASTL container (though you can use one indirectly).
+* Use an EASTL allocator directly with an STL container (though you can use one indirectly).
+
+### Info.8 Where can I learn more about STL and EASTL?
+
+EASTL is close enough in philosophy and functionality to standard C++ STL that most of what you read about STL applies to EASTL. This is particularly useful with respect to container specifications. It would take a lot of work to document EASTL containers and algorithms in fine detail, whereas most standard STL documentation applies as-is to EASTL. We won't cover the differences here, as that's found in another FAQ entry.
+ +That being said, we provide a list of sources for STL documentation that may be useful to you, especially if you are less familiar with the concepts of STL and template programming in general. + +* The SGI STL web site. Includes a good STL reference. +* CodeProject STL introduction. +* Scott Meyers Effective STL book. +* The Microsoft online STL documentation. Microsoft links go bad every couple months, so try searching for STL at the * Microsoft MSDN site. +* The Dinkumware online STL documentation. +* The C++ standard, which is fairly readable. You can buy an electronic version for about $18 and in the meantime you can make do with draft revisions of it off the Internet by searching for "c++ draft standard". +* STL performance tips, by Pete Isensee +* STL algorithms vs. hand-written loops, by Scott Meyers. + +### Info.9 What is the legal status of EASTL? + +EASTL is usable for all uses within Electronic Arts, both for internal usage and for shipping products for all platforms. All source code was written by a single EA engineer. Any externally derived code would be explicitly stated as such and approved by the legal department if such code ever gets introduced. As of EASTL v1.0, the red_black_tree.cpp file contains two functions derived from the original HP STL and have received EA legal approval for usage in any product. + +### Info.10 Does EASTL deal with compiler exception handling settings? + +EASTL has automatic knowledge of the compiler's enabling/disabling of exceptions. If your compiler is set to disable exceptions, EASTL automatically detects so and executes without them. Also, you can force-enable or force-disable that setting to override the automatic behavior by #defining EASTL_EXCEPTIONS_ENABLED to 0 or 1. See EASTL's config.h for more information. + +### Info.11 What C++ language features does EASTL use (e.g. virtual functions)? + +EASTL uses the following C++ language features: + +* Template functions, classes, member functions. 
+* Multiple inheritance. +* Namespaces. +* Operator overloading. + +EASTL does not use the following C++ language features: + +* Virtual functions / interfaces. +* RTTI (dynamic_cast). +* Global and static variables. There are a couple class static const variables, but they act much like enums. +* Volatile declarations +* Template export. +* Virtual inheritance. + +EASTL may use the following C++ language features: + +* Try/catch. This is an option that the user can enable and it defaults to whatever the compiler is set to use. +* Floating point math. Hash containers have one floating point calculation, but otherwise floating point is not used. + +Notes: + +* EASTL uses rather little of the standard C or C++ library and uses none of the C++ template library (STL) and iostream library. The memcpy family of functions is one example EASTL C++ library usage. +* EASTL never uses global new / delete / malloc / free. All allocations are done via user-specified allocators, though a default allocator definition is available. + + +### Info.12 What compiler warning levels does EASTL support? + +For VC++ EASTL should compile without warnings on level 4, and should compile without warnings for "warnings disabled by default" except C4242, C4514, C4710, C4786, and C4820. These latter warnings are somewhat draconian and most EA projects have little choice but to leave them disabled. + +For GCC, EASTL should compile without warnings with -Wall. Extensive testing beyond that hasn't been done. + +However, due to the nature of templated code generation and due to the way compilers compile templates, unforeseen warnings may occur in user code that may or may not be addressible by modifying EASTL. + +### Info.13 Is EASTL compatible with Lint? + +As of EASTL 1.0, minimal lint testing has occurred. Testing with the November 2005 release of Lint (8.00t) demonstrated bugs in Lint that made its analysisnot very useful. 
For example, Lint seems to get confused about the C++ typename keyword and spews many errors with code that uses it. We will work with the makers of Lint to get this resolved so that Lint can provide useful information about EASTL.
+
+### Info.14 What compiler settings do I need to compile EASTL?
+
+EASTL consists mostly of header files with templated C++ code, but there are also a few .cpp files that need to be compiled and linked in order to use some of the modules. EASTL will compile in just about any environment. As mentioned elsewhere in this FAQ, EASTL can be compiled at the highest warning level of most compilers, transparently deals with compiler exception handling settings, is savvy to most or all compilation language options (e.g. wchar_t is built-in or not, for loop variables are local or not), and has almost no platform-specific or compiler-specific code. For the most part, you can just drop it in and it will work. The primary thing that needs to be in place is that EASTL .cpp files need to be compiled with the same struct padding/alignment settings as other code in the project. This of course is the same for just about any C++ source code library.
+
+See the Performance section of this FAQ for a discussion of the optimal compiler settings for EASTL performance.
+
+### Info.15 How hard is it to incorporate EASTL into my project?
+
+It's probably trivial.
+
+EASTL has only one dependency: EABase. And EASTL auto-configures itself for most compiler environments and for the most typical configuration choices. Since it is fairly highly warning-free, you won't likely need to modify your compiler warning settings, even if they're pretty strict. EASTL has a few .cpp files which need to be compiled if you want to use the modules associated with those files. You can just compile those files with your regular compiler settings. Alternatively, you can use one of the EASTL project files.
+ +In its default configuration, the only thing you need to provide to make EASTL work is to define implementations of the following operator new functions: + +```cpp +#include + +void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line); +void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line); +``` +The flags and debugFlags arguments correspond to PPMalloc/RenderWare GeneralAllocator/GeneralAllocatorDebug Malloc equivalents. + +### Info.16 Should I use EASTL instead of std STL or instead of my custom library? + +There are reasons you may want to use EASTL; there are reasons you may not want to use it. Ditto for std STL or any other library. Here we present a list of reasons (+ and -) for why you might want to use one or another. However, it should be noted that while EASTL contains functionality found in std STL, it has another ~40% of functionality not found in std STL, so EASTL and std STL (and whatever other template library you may have) are not mutually exclusive. + +**EASTL** +* \+ Has higher performance than any commercial STL, especially on console platforms. +* \+ Has extended functionality tailored for game development. +* \+ Is highly configurable, and we own it so it can be amended at will. Std STL is owned by a third party committee. +* \+ Is much easier to read and debug than other similar libraries, especiallly std STL. + + +* \- Is highly unit tested, but does not have the same level as std STL. +* \- Is more complicated than many users' lite template libraries, and may put off some beginners. +* \- EASTL + +**Std STL** + +* \+ Is highly portable; your STL code will likely compile and run anywhere. +* \+ Works without the need to install or download any package to use it. It just works. +* \+ Is highly reliable and supported by the compiler vendor. You can have confidence in it. 
+* \+ Some std STL versions (e.g. STLPort, VC8 STL) have better runtime debug checking than EASTL. + + +* \- Has (sometimes greatly) variable implementations, behavior, and performance between implementations. +* \- Is usually hard to read and debug. +* \- Doesn't support some of the needs of game development, such as aligned allocations, named allocations, intrusive containers, etc. +* \- Is not as efficient as EASTL, especially on console platforms. + +**Your own library** +(please forgive us for implying there may be weaknesses in your libraries) + +* \+ You have control over it and can make it work however you want. +* \+ You can fix bugs in it on the spot and have the fix in your codebase immediately. +* \+ Your own library can be highly integrated into your application code or development environment. + + +* \- Many custom libraries don't have the same level of testing as libraries such as std STL or EASTL. +* \- Many custom libraries don't have the same breadth or depth as std STL or especially EASTL. +* \- Many custom libraries don't have the level of performance tuning that std STL or especially EASTL has. + +### Info.17 I think I've found a bug. What do I do? + +**Verify that you indeed have a bug** + +There are various levels of bugs that can occur, which include the following: + +* Compiler warnings generated by EASTL. +* Compiler errors generated by EASTL (failure to compile well-formed code). +* Runtime misbehavior by EASTL (function does the wrong thing). +* Runtime crash or data corruption by EASTL. +* Mismatch between EASTL documentation and behavior. +* Mismatch between EASTL behavior and user's expections (mis-design). + +Any of the above items can be the fault of EASTL. However, the first four can also be the fault of the user. Your primary goal in verifying a potential bug is to determine if it is an EASTL bug or a user bug. Template errors can sometimes be hard to diagnose. 
It's probably best if you first show the problem to somebody you know to make sure you are not missing something obvious. Creating a reproducible case may be useful in helping convince yourself, but as is mentioned below, this is not required in order to report the bug. + +**Report the bug** + +The first place to try is the standard EA centralized tech support site. As of this writing (10/2005), that tech site is http://eatech/. Due to the frequent technology churn that seems to occur within Electronic Arts, the bug reporting system in place when you read this may not be the one that was in place when this FAQ entry was written. If the tech site route fails, consider directly contacting the maintainer of the EASTL package. + +In reporting a bug, it is nice if there is a simple reproducible case that can be presented. However, such a case requires time to create, and so you are welcome to initially simply state what you think the bug is without producing a simple reproducible case. It may be that this is a known bug or it may be possible to diagnose the bug without a reproducible case. If more information is needed then the step of trying to produce a reproducible case may be necessary. + +### Info.18 Can EASTL be used by third party EA developers? + +EASTL and other core technologies authored by EA (and not licensed from other companies) can be used in source and binary form by designated 3rd parties. The primary case where there is an issue is if the library contains platform specific code for a platform that the 3rd party is not licensed for. In that case the platform-specific code would need to be removed. This doesn’t apply to EASTL, nor many of the other core tech packages. + +## Performance + +### Perf.1 How efficient is EASTL compared to standard C++ STL implementations? 
+
+With respect to the functionality that is equivalent between EASTL and standard STL, the short answer to this is that EASTL is at least as efficient as other STL implementations and in a number of aspects is more so. EASTL has functionality such as intrusive_list and linked_ptr that don't exist in standard STL but are explicitly present to provide significant optimizations over standard STL.
+
+The medium length answer is that EASTL is significantly more efficient than Dinkumware STL, and Microsoft Windows STL. EASTL is generally more efficient than Metrowerks STL, but Metrowerks has a few tricks up its sleeve which EASTL doesn't currently implement. EASTL is roughly equal in efficiency to STLPort and GCC 3.x+ STL, though EASTL has some optimizations that these do not.
+
+The long answer requires a breakdown of the functionality between various versions of the STL.
+
+### Perf.2 How efficient is EASTL in general?
+
+This question is related to the question, "How efficient are templates?" If you understand the effects of templates then you can more or less see the answer for EASTL. Templates are more efficient than the alternative when they are used appropriately, but can be less efficient than the alternative when used under circumstances that don't call for them. The strength of templates is that the compiler sees all the code and data types at compile time and can often reduce statements to smaller and faster code than with conventional non-templated code. The weakness of templates is that they sometimes produce more code and can result in what is often called "code bloat". However, it's important to note that unused template functions result in no generated nor linked code, so if you have a templated class with 100 functions but you only use one, only that one function will be compiled.
+
+EASTL is a rather efficient implementation of a template library and pulls many tricks of the trade in terms of squeezing optimal performance out of the compiler.
The only way to beat it is to write custom code for the data types you are working with, and even then people are sometimes surprised to find that their hand-implemented algorithm works no better or even worse than the EASTL equivalent. But certainly there are ways to beat templates, especially if you resort to assembly language programming and some kinds of other non-generic tricks. + +### Perf.3 Strings don't appear to use the "copy-on-write" (CoW) optimization. Why not? + +**Short answer** +CoW provides a benefit for a small percentage of uses but provides a disadvantage for the large majority of uses. + +**Long answer** +The primary benefit of CoW is that it allows for the sharing of string data between two string objects. Thus if you say this: + +```cpp +string a("hello"); +string b(a); +``` + +the "hello" will be shared between a and b. If you then say this: + +```cpp +a = "world"; +``` + +then *a* will release its reference to "hello" and leave b with the only reference to it. Normally this functionality is accomplished via reference counting and with atomic operations or mutexes. + +The C++ standard does not say anything about basic_string and CoW. However, for a basic_string implementation to be standards-conforming, a number of issues arise which dictate some things about how one would have to implement a CoW string. The discussion of these issues will not be rehashed here, as you can read the references below for better detail than can be provided in the space we have here. However, we can say that the C++ standard is sensible and that anything we try to do here to allow for an efficient CoW implementation would result in a generally unacceptable string interface. + +The disadvantages of CoW strings are: + +* A reference count needs to exist with the string, which increases string memory usage. +* With thread safety, atomic operations and mutex locks are expensive, especially on weaker memory systems such as console gaming platforms. 
+* All non-const string accessor functions need to do a sharing check the the first such check needs to detach the string. Similarly, all string assignments need to do a sharing check as well. If you access the string before doing an assignment, the assignment doesn't result in a shared string, because the string has already been detached. +* String sharing doesn't happen the large majority of the time. In some cases, the total sum of the reference count memory can exceed any memory savings gained by the strings that share representations. + +The addition of a cow_string class is under consideration for EASTL. There are conceivably some systems which have string usage patterns which would benefit from CoW sharing. Such functionality is best saved for a separate string implementation so that the other string uses aren't penalized. + +References + +This is a good starting HTML reference on the topic: + http://www.gotw.ca/publications/optimizations.htm + +Here is a well-known Usenet discussion on the topic: + http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d + +### Perf.4 Does EASTL cause code bloat, given that it uses templates? + +The reason that templated functions and classes might cause an increase in code size because each template instantiation theoretically creates a unique piece of code. For example, when you compile this code: + +```cpp +template +const T min(const T a, const T b) + { return b < a ? b : a; } + +int i = min(3, 4); +double d = min(3.0, 4.0); +``` + +the compiler treats it as if you wrote this: + +```cpp +int min(const int a, const int b) + { return b < a ? b : a; } + +double min(const double a, const double b) + { return b < a ? b : a; } +``` + +Imagine this same effect happening with containers such as list and map and you can see how it is that templates can cause code proliferation. + +A couple things offset the possibility of code proliferation: inlining and folding. 
In practice the above 'min' function would be converted to inlined functions by the compiler which occupy only a few CPU instructions. In many of the simplest cases the inlined version actually occupies less code than the code required to push parameters on the stack and execute a function call. And they will execute much faster as well. + +Code folding (a.k.a. "COMDAT folding", "duplicate stripping", "ICF" / "identical code folding") is a compiler optimization whereby the compiler realizes that two independent functions have compiled to the same code and thus can be reduced to a single function. The Microsoft VC++ compiler (Since VS2005), and GCC (v 4.5+) can do these kinds of optimizations on all platforms. This can result, for example, in all templated containers of pointers (e.g. vector, vector, etc.) to be linked as a single implementation. This folding occurs at a function level and so individual member functions can be folded while other member functions are not. A side effect of this optimization is that you aren't likely to gain much much declaring containers of void* instead of the pointer type actually contained. + +The above two features reduce the extent of code proliferation, but certainly don't eliminate it. What you need to think about is how much code might be generated vs. what your alternatives are. Containers like vector can often inline completely away, whereas more complicated containers such as map can only partially be inlined. In the case of map, if you need an such a container for your Widgets, what alternatives do you have that would be more efficient than instantiating a map? This is up to you to answer. + +It's important to note that C++ compilers will throw away any templated functions that aren't used, including unused member functions of templated classes. However, some argue that by having many functions available to the user that users will choose to use that larger function set rather than stick with a more restricted set. 
+ +Also, don't be confused by syntax bloat vs. code bloat. In looking at templated libraries such as EASTL you will notice that there is sometimes a lot of text in the definition of a template implementation. But the actual underlying code is what you need to be concerned about. + +There is a good Usenet discussion on this topic at: http://groups.google.com/group/comp.lang.c++.moderated/browse_frm/thread/2b00649a935997f5 + +### Perf.5 Don't STL and EASTL containers fragment memory? + +They only fragment memory if you use them in a way that does so. This is no different from any other type of container used in a dynamic way. There are various solutions to this problem, and EASTL provides additional help as well: + +* For vectors, use the reserve function (or the equivalent constructor) to set aside a block of memory for the container. The container will not reallocate memory unless you try grow beyond the capacity you reserve. +* EASTL has "fixed" variations of containers which allow you to specify a fixed block of memory which the container uses for its memory. The container will not allocate any memory with these types of containers and all memory will be cache-friendly due to its locality. +* You can assign custom allocators to containers instead of using the default global allocator. You would typically use an allocator that has its own private pool of memory. +* Where possible, add all a container's elements to it at once up front instead of adding them over time. This avoids memory fragmentation and increase cache coherency. + +### Perf.6 I don't see container optimizations for equivalent scalar types such as pointer types. Why? + +Metrowerks (and no other, as of this writing) STL has some container specializations for type T* which maps them to type void*. The idea is that a user who declares a list of Widget* and a list of Gadget* will generate only one container: a list of void*. As a result, code generation will be smaller. 
Often this is done only in optimized builds, as such containers are harder to view in debug builds due to type information being lost. + +The addition of this optimization is under consideration for EASTL, though it might be noted that optimizing compilers such as VC++ are already capable of recognizing duplicate generated code and folding it automatically as part of link-time code generation (LTCG) (a.k.a. "whole program optimization"). This has been verified with VC++, as the following code and resulting disassembly demonstrate: + +```cpp +eastl::list intPtrList; +eastl::list toPtrList; + +eastl_size_t n1 = intPtrList.size(); +eastl_size_t n2 = toPtrList.size(); + +0042D288 lea edx,[esp+14h] +0042D28C call eastl::list::size (414180h) +0042D291 push eax +0042D292 lea edx,[esp+24h] +0042D296 call eastl::list::size (414180h) +``` + +Note that in the above case the compiler folded the two implementations of size() into a single implementation. + +### Perf.7 I've seen some STL's provide a default quick "node allocator" as the default allocator. Why doesn't EASTL do this? + +**Short answer** + +This is a bad, misguided idea. + +**Long answer** + +These node allocators implement a heap for all of STL with buckets for various sizes of allocations and implemented fixed-size pools for each of these buckets. These pools are attractive at first because they do well in STL comparison benchmarks, especially when thread safety is disabled. Such benchmarks make it impossible to truly compare STL implementations because you have two different allocators in use and in some cases allocator performance can dominate the benchmark. However, the real problem with these node allocators is that they badly fragment and waste memory. The technical discussion of this topic is outside the scope of this FAQ, but you can learn more about it by researching memory management on the Internet. 
Unfortunately, the people who implement STL libraries are generally not experts on the topic of memory management. A better approach, especially for game development, is for the user to decide when fixed-size pools are appropriate and use them via custom allocator assignment to containers.
+
+### Perf.8 Templates sometimes seem to take a long time to compile. What do I do about that?
+
+C++ compilers are generally slower than C compilers, and C++ templates are generally slower to compile than regular C++ code. EASTL has some extra functionality (such as type_traits and algorithm specializations) that is not found in most other template libraries and significantly improves performance and usefulness but adds to the amount of code that needs to be compiled. Ironically, we have a case where more source code generates faster and smaller object code.
+
+The best solution to the problem is to use pre-compiled headers, which are available on all modern (~2002+) compilers, such as VC6.0+, GCC 3.2+, and Metrowerks 7.0+. In terms of platforms this means all 2002+ platforms.
+
+Some users have been speeding up build times by creating project files that put all the source code in one large .cpp file. This has an effect similar to pre-compiled headers. It can go even faster than pre-compiled headers but has downsides in the way of convenience and portability.
+
+### Perf.10 How well does EASTL inline?
+
+EASTL is written in such a way as to be easier to inline than typical templated libraries such as STL. How is this so? It is so because EASTL reduces the inlining depth of many functions, particularly the simple ones. In doing so it makes the implementation less "academic" but entirely correct.
An example of this is the vector operator[] function, which is implemented like so with Microsoft STL: + +```cpp +reference operator[](size_type n) { + return *(begin() + n); +} +``` + +EASTL implements the function directly, like so: + +```cpp +reference operator[](size_type n) { + return *(mpBegin + n); +} +``` + +Both implementations are correct, but the EASTL implementation will run faster in debug builds, be easier to debug, and will be more likely to be inlined when the usage of this function is within a hierarchy of other functions being inlined. It is not so simple to say that the Microsoft version will always inline in an optimized build, as it could be part of a chain and cause the max depth to be exceeded. + +That being said, EASTL appears to inline fairly well under most circumstances, including with GCC, which is the poorest of the compilers in its ability to inline well. + +### Perf.11 How do I control function inlining? + +Inlining is an important topic for templated code, as such code often relies on the compiler being able to do good function inlining for maximum performance. GCC, VC++, and Metrowerks are discussed here. We discuss compilation-level inlining and function-level inlining here, though the latter is likely to be of more use to the user of EASTL, as it can externally control how EASTL is inlined. A related topic is GCC's template expansion depth, discussed elsewhere in this FAQ. We provide descriptions of inlining options here but don't currently have any advice on how to best use these with EASTL. + +Compilation-Level Inlining -- VC++ + +VC++ has some basic functionality to control inlining, and the compiler is pretty good at doing aggressive inlining when optimizing on for all platforms. + +> **#pragma inline_depth( [0... 255] )** +> +> Controls the number of times inline expansion can occur by controlling the number of times that a series of function calls can be expanded (from 0 to 255 times). 
This pragma controls the inlining of functions marked inline and or inlined automatically under the /Ob2 option. The inline_depth pragma controls the number of times a series of function calls can be expanded. For example, if the inline depth is 4, and if A calls B and B then calls C, all three calls will be expanded inline. However, if the closest inline expansion is 2, only A and B are expanded, and C remains as a function call. + +> **#pragma inline_recursion( [{on | off}] )** +> +> Controls the inline expansion of direct or mutually recursive function calls. Use this pragma to control functions marked as inline and or functions that the compiler automatically expands under the /Ob2 option. Use of this pragma requires an /Ob compiler option setting of either 1 or 2. The default state for inline_recursion is off. The inline_recursion pragma controls how recursive functions are expanded. If inline_recursion is off, and if an inline function calls itself (either directly or indirectly), the function is expanded only once. If inline_recursion is on, the function is expanded multiple times until it reaches the value set by inline_depth, the default value of 8, or a capacity limit. + +Compilation-Level Inlining -- GCC + +GCC has a large set of options to control function inlining. Some options are available only in GCC 3.0 and later and thus not present on older platforms. + + +> **-fno-default-inline** +> +> Do not make member functions inline by default merely because they are defined inside the class scope (C++ only). Otherwise, when you specify -O, member functions defined inside class scope are compiled inline by default; i.e., you don't need to add 'inline' in front of the member function name. +> +> **-fno-inline** +> +> Don't pay attention to the inline keyword. Normally this option is used to keep the compiler from expanding any functions inline. Note that if you are not optimizing, no functions can be expanded inline. 
+> +> **-finline-functions** +> +> Integrate all simple functions into their callers. The compiler heuristically decides which functions are simple enough to be worth integrating in this way. If all calls to a given function are integrated, and the function is declared static, then the function is normally not output as assembler code in its own right. Enabled at level -O3. +> +> **-finline-limit=n** +> +> By default, GCC limits the size of functions that can be inlined. This flag allows the control of this limit for functions that are explicitly marked as inline (i.e., marked with the inline keyword or defined within the class definition in c++). n is the size of functions that can be inlined in number of pseudo instructions (not counting parameter handling). pseudo-instructions are an internal representation of function size. The default value of n is 600. Increasing this value can result in more inlined code at the cost of compilation time and memory consumption. Decreasing usually makes the compilation faster and less code will be inlined (which presumably means slower programs). This option is particularly useful for programs that use inlining heavily such as those based on recursive templates with C++. +> +> Inlining is actually controlled by a number of parameters, which may be specified individually by using --param name=value. The -finline-limit=n option sets some of these parameters as follows: +> +> ``` +> max-inline-insns-single +> is set to n/2. +> max-inline-insns-auto +> is set to n/2. +> min-inline-insns +> is set to 130 or n/4, whichever is smaller. +> max-inline-insns-rtl +> is set to n. +> ``` +> +> See --param below for a documentation of the individual parameters controlling inlining. +> +> **-fkeep-inline-functions** +> +> Emit all inline functions into the object file, even if they are inlined where used. +> +> **--param name=value** +> +> In some places, GCC uses various constants to control the amount of optimization that is done. 
For example, GCC will not inline functions that contain more that a certain number of instructions. You can control some of these constants on the command-line using the --param option. +> +> max-inline-insns-single +> Several parameters control the tree inliner used in gcc. This number sets the maximum number of instructions (counted in GCC's internal representation) in a single function that the tree inliner will consider for inlining. This only affects functions declared inline and methods implemented in a class declaration (C++). The default value is 450. +> +> max-inline-insns-auto +> When you use -finline-functions (included in -O3), a lot of functions that would otherwise not be considered for inlining by the compiler will be investigated. To those functions, a different (more restrictive) limit compared to functions declared inline can be applied. The default value is 90. +> +>large-function-insns +> The limit specifying really large functions. For functions larger than this limit after inlining inlining is constrained by --param large-function-growth. This parameter is useful primarily to avoid extreme compilation time caused by non-linear algorithms used by the backend. This parameter is ignored when -funit-at-a-time is not used. The default value is 2700. +> +> large-function-growth +> Specifies maximal growth of large function caused by inlining in percents. This parameter is ignored when -funit-at-a-time is not used. The default value is 100 which limits large function growth to 2.0 times the original size. +> +> inline-unit-growth +> Specifies maximal overall growth of the compilation unit caused by inlining. This parameter is ignored when -funit-at-a-time is not used. The default value is 50 which limits unit growth to 1.5 times the original size. 
+> +> max-inline-insns-recursive +> max-inline-insns-recursive-auto +> Specifies maximum number of instructions out-of-line copy of self recursive inline function can grow into by performing recursive inlining. For functions declared inline --param max-inline-insns-recursive is taken into acount. For function not declared inline, recursive inlining happens only when -finline-functions (included in -O3) is enabled and --param max-inline-insns-recursive-auto is used. The default value is 450. +> +> max-inline-recursive-depth +> max-inline-recursive-depth-auto +> Specifies maximum recursion depth used by the recursive inlining. For functions declared inline --param max-inline-recursive-depth is taken into acount. For function not declared inline, recursive inlining happens only when -finline-functions (included in -O3) is enabled and --param max-inline-recursive-depth-auto is used. The default value is 450. +> +> inline-call-cost +> Specify cost of call instruction relative to simple arithmetics operations (having cost of 1). Increasing this cost disqualify inlinining of non-leaf functions and at same time increase size of leaf function that is believed to reduce function size by being inlined. In effect it increase amount of inlining for code having large abstraction penalty (many functions that just pass the argumetns to other functions) and decrease inlining for code with low abstraction penalty. Default value is 16. +> +> **-finline-limit=n** +> +> By default, GCC limits the size of functions that can be inlined. This flag allows the control of this limit for functions that are explicitly marked as inline (i.e., marked with the inline keyword or defined within the class definition in c++). n is the size of functions that can be inlined in number of pseudo instructions (not counting parameter handling). The default value of n is 600. Increasing this value can result in more inlined code at the cost of compilation time and memory consumption. 
Decreasing usually makes the compilation faster and less code will be inlined (which presumably means slower programs). This option is particularly useful for programs that use inlining heavily such as those based on recursive templates with C++. + +Inlining is actually controlled by a number of parameters, which may be specified individually by using --param name=value. The -finline-limit=n option sets some of these parameters as follows: + +``` +max-inline-insns-single + is set to n/2. +max-inline-insns-auto + is set to n/2. +min-inline-insns + is set to 130 or n/4, whichever is smaller. +max-inline-insns-rtl + is set to n. +``` + +See below for a documentation of the individual parameters controlling inlining. + +Note: pseudo instruction represents, in this particular context, an abstract measurement of function's size. In no way, it represents a count of assembly instructions and as such its exact meaning might change from one release to an another. + +GCC additionally has the -Winline compiler warning, which emits a warning whenever a function declared as inline was not inlined. + +Compilation-Level Inlining -- Metrowerks + +Metrowerks has a number of pragmas (and corresponding compiler settings) to control inlining. These include always_inline, inline_depth, inline_max_size, and inline max_total_size. + +> ``` +> #pragma always_inline on | off | reset +> ``` +> +> Controls the use of inlined functions. If you enable this pragma, the compiler ignores all inlining limits and attempts to inline all functions where it is legal to do so. This pragma is deprecated. Use the inline_depth pragma instead. +> +> ``` +> #pragma inline_depth(n) +> #pragma inline_depth(smart) +> ``` +> +> Controls how many passes are used to expand inline function. Sets the number of passes used to expand inline function calls. The number n is an integer from 0 to 1024 or the smart specifier. It also represents the distance allowed in the call chain from the last function up. 
For example, if d is the total depth of a call chain, then functions below (d-n) are inlined if they do not exceed the inline_max_size and inline_max_total_size settings which are discussed directly below. +> +> ``` +> #pragma inline_max_size(n); +> #pragma inline_max_total_size(n); +> ``` +> +> The first pragma sets the maximum function size to be considered for inlining; the second sets the maximum size to which a function is allowed to grow after the functions it calls are inlined. Here, n is the number of statements, operands, and operators in the function, which turns out to be roughly twice the number of instructions generated by the function. However, this number can vary from function to function. For the inline_max_size pragma, the default value of n is 256; for the inline_max_total_size pragma, the default value of n is 10000. The smart specifier is the default mode, with four passes where the passes 2-4 are limited to small inline functions. All inlineable functions are expanded if inline_depth is set to 1-1024. + +Function-Level Inlining -- VC++ + +> To force inline usage under VC++, you use this: +> +> ``` +> __forceinline void foo(){ ... } +> ``` +> +> It should be noted that __forceinline has no effect if the compiler is set to disable inlining. It merely tells the compiler that when inlining is enabled that it shouldn't use its judgment to decide if the function should be inlined but instead to always inline it. +> +> To disable inline usage under VC++, you need to use this: +> +> ``` +> #pragma inline_depth(0) // Disable inlining. +> void foo() { ... } +> #pragma inline_depth() // Restore default. +> ``` +> +> The above is essentially specifying compiler-level inlining control within the code for a specific function. + +**Function-Level Inlining -- GCC / Metrowerks** + +> To force inline usage under GCC 3.1+, you use this: +> +> `inline void foo() __attribute__((always_inline)) { ... 
}` +> +> or +> +> `inline __attribute__((always_inline)) void foo() { ... }` +> +> To disable inline usage under GCC 3+, you use this: +> +> `void foo() __attribute__((noinline)) { ... }` +> +> or +> +> `inline __attribute__((noinline)) void foo() { ... }` + +EABase has some wrappers for this, such as EA_FORCE_INLINE. + +### Perf.12 C++ / EASTL seems to bloat my .obj files much more than C does. + +There is no need to worry. The way most C++ compilers compile templates, they compile all seen template code into the current .obj module, which results in larger .obj files and duplicated template code in multiple .obj files. However, the linker will (and in fact must) select only a single version of any given function for the application, and these linked functions will usually be located contiguously. + +Additionally, the debug information for template definitions is usually larger than that for non-templated C++ definitions, which itself is sometimes larger than C defintions due to name decoration. + +### Perf.13 What are the best compiler settings for EASTL? + +We will discuss various aspects of this topic here. As of this writing, more EASTL research on this topic has been done on Microsoft compiler platforms (e.g. Win32) than GCC platforms. Thus currently this discussion focuses on VC++ optimization. Some of the concepts are applicable to GCC, though. EASTL has been sucessfully compiled and tested (the EASTL unit test) on our major development platforms with the highest optimization settings enabled, including GCC's infamous -O3 level. + +**Optimization Topics** + +* Function inlining. +* Optimization for speed vs. optimization for size. +* Link-time code generation (LTCG). +* Profile-guided optimization (PGO). + +**Function inlining** + +EASTL is a template library and inlining is important for optimal speed. Compilers have various options for enabling inlining and those options are discussed in this FAQ in detail. 
Most users will want to enable some form of inlining when compiling EASTL and other templated libraries. Users that are most concerned about the compiler's inlining increasing code size may want to try the 'inline only functions marked as inline' compiler option. Here is a table of normalized results from the benchmark project (Win32 platform): +| | Inlining Disabled | Inline only 'inline' | Inline any | +|------|------|------|------| +| **Application size** | 100K | 86K | 86K | +| **Execution time** | 100 | 75 | 75 | + +The above execution times are highly simplified versions of the actual benchmark data but convey a sense of the general average behaviour that can be expected. In practice, simple functions such as vector::operator[] will execute much faster with inlining enabled but complex functions such as map::insert may execute no faster with inlining enabled. + +**Optimization for Speed / Size** + +Optimization for speed results in the compiler inlining more code than it would otherwise. This results in the inlined code executing faster than if it was not inlined. As mentioned above, basic function inlining can result in smaller code as well as faster code, but after a certain point highly inlined code becomes greater in size than less inlined code and the performance advantages of inlining start to lessen. The EASTL Benchmark project is a medium sized application that is about 80% templated and thus acts as a decent measure of the practical tradeoff between speed and size. Here is a table of normalized results from the benchmark project (Windows platform): +| | Size | Speed | Speed + LTCG | Speed + LTCG + PGO | +|------|------|------|------|------| +| **Application size** | 80K | 100K | 98K | 98K | +| **Execution time** | 100 | 90 | 83 | 75 | + +What the above table is saying is that if you are willing to have your EASTL code be 20% larger, it will be 10% faster. 
Note that it doesn't mean that your app will be 20% larger, only the templated code in it like EASTL will be 20% larger. + +**Link-time code generation (LTCG)** + +LTCG is a mechanism whereby the compiler compiles the application as if it was all in one big .cpp file instead of separate .cpp files that don't see each other. Enabling LTCG optimizations is done by simply setting some compiler and linker settings and results in slower link times. The benchmark results are presented above and for the EASTL Benchmark project show some worthwhile improvement. + +**Profile-guided optimization (PGO)** + +PGO is a mechanism whereby the compiler uses profiling information from one or more runs to optimize the compilation and linking of an application. Enabling PGO optimizations is done by setting some linker settings and doing some test runs of the application, then linking the app with the test run results. Doing PGO optimizations is a somewhat time-consuming task but the benchmark results above demonstrate that for the EASTL Benchmark project that PGO is worth the effort. + +## Problems + +### Prob.1 I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong? + +It may be possible that you are seeing floating point roundoff problems. Many STL algorithms require object comparisons to act consistently. However, floating point values sometimes compare differently between uses because in one situation a value might be in 32 bit form in system memory, whereas in another situation that value might be in an FPU register with a different precision. These are difficult problems to track down and aren't the fault of EASTL or whatever similar library you might be using. There are various solutions to the problem, but the important thing is to find a way to force the comparisons to be consistent. 
+ +The code below was an example of this happening, whereby the object pA->mPos was stored in system memory while pB->mPos was stored in a register and comparisons were inconsistent and a crash ensued. + +```cpp +class SortByDistance : public binary_function<WorldTreeObject*, WorldTreeObject*, bool> +{ +private: + Vector3 mOrigin; + +public: + SortByDistance(Vector3 origin) { + mOrigin = origin; + } + + bool operator()(WorldTreeObject* pA, WorldTreeObject* pB) const { + return (((WorldObject*)pA)->mPos - mOrigin).GetLength() + < (((WorldObject*)pB)->mPos - mOrigin).GetLength(); + } +}; +``` + +Another thing to watch out for is the following mistake: + +```cpp +struct ValuePair +{ + uint32_t a; + uint32_t b; +}; + +// Improve speed by casting the struct to uint64_t +bool operator<(const ValuePair& vp1, const ValuePair& vp2) + { return *(uint64_t*)&vp1 < *(uint64_t*)&vp2; } +``` + +The problem is that the ValuePair struct has 32 bit alignment but the comparison assumes 64 bit alignment. The code above has been observed to crash on the PowerPC 64-based machines. The resolution is to declare ValuePair as having 64 bit alignment. + +### Prob.2 I am getting compiler warnings (e.g. C4244, C4242 or C4267) that make no sense. Why? + +One cause of this occurs with VC++ when you have code compiled with the /Wp64 (detect 64 bit portability issues) option. This causes pointer types to have a hidden flag called __w64 attached to them by the compiler. So 'ptrdiff_t' is actually known by the compiler as '__w64 int', while 'int' is known by the compilers as simply 'int'. A problem occurs here when you use templates. For example, let's say we have this templated function + +``` cpp +template <typename T> +T min(const T a, const T b) { + return b < a ? 
b : a; +} +``` + +If you compile this code: + +```cpp +ptrdiff_t a = min(ptrdiff_t(0), ptrdiff_t(1)); +int b = min((int)0, (int)1); +``` + +You will get the following warning for the second line, which is somewhat nonsensical: + +`warning C4244: 'initializing' : conversion from 'const ptrdiff_t' to 'int', possible loss of data` + +This could probably be considered a VC++ bug, but in the meantime you have little choice but to ignore the warning or disable it. + +### Prob.3 I am getting compiler warning C4530, which complains about exception handling and "unwind semantics." What gives? + +VC++ has a compiler option (/EHsc) that allows you to enable/disable exception handling stack unwinding but still enable try/catch. This is useful because it can save a lot in the way of code generation for your application. Disabling stack unwinding will decrease the size of your executable on at least the Win32 platform by 10-12%. + +If you have stack unwinding disabled, but you have try/catch statements, VC++ will generate the following warning: + +`warning C4530: C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc` + +As of EASTL v1.0, this warning has been disabled within EASTL for EASTL code. However, non-EASTL code such as std STL code may still cause this warning to be triggered. In this case there is not much you can do about this other than to disable the warning. + +### Prob.4 Why are tree-based EASTL containers hard to read with a debugger? + +**Short answer** + +Maximum performance and design mandates. + +**Long answer** + +You may notice that when you have a tree-based container (e.g. set, map) in the debugger that it isn't automatically able to recognize the tree nodes as containing instances of your contained object. You can get the debugger to do what you want with casting statements in the debug watch window, but this is not an ideal solution. 
The reason this is happening is that node-based containers always use an anonymous node type as the base class for container nodes. This is primarily done for performance, as it allows the node manipulation code to exist as a single non-templated library of functions and it saves memory because containers will have one or two base nodes as container 'anchors' and you don't want to allocate a node of the size of the user data when you can just use a base node. See list.h for an example of this and some additional in-code documentation on this. + +Additionally, EASTL has the design mandate that an empty container constructs no user objects. This is both for performance reasons and because it doing so would skew the user's tracking of object counts and might possibly break some expectation the user has about object lifetimes. + +Currently this debug issue exists only with tree-based containers. Other node-based containers such as list and slist use a trick to get around this problem in debug builds. + +See [Debug.2](#debug2-how-do-i-view-containers-if-the-visualizertooltip-support-is-not-present) for more. + +### Prob.5 The EASTL source code is sometimes rather complicated looking. Why is that? + +**Short answer** + +Maximum performance. + +**Long answer** +EASTL uses templates, type_traits, iterator categories, redundancy reduction, and branch reduction in order to achieve optimal performance. A side effect of this is that there are sometimes a lot of template parameters and multiple levels of function calls due to template specialization. The ironic thing about this is that this makes the code (an optimized build, at least) go faster, not slower. In an optimized build the compiler will see through the calls and template parameters and generate a direct optimized inline version. + +As an example of this, take a look at the implementation of the copy implementation in algorithm.h. 
If you are copying an array of scalar values or other trivially copyable values, the compiler will see how the code directs this to the memcpy function and will generate nothing but a memcpy in the final code. For non-memcpyable data types the compiler will automatically understand how to do the right thing. + +EASTL's primary objective is maximal performance, and it has been deemed worthwhile to make the code a little less obvious in order to achieve this goal. Every case where EASTL does something in an indirect way is by design and usually this is for the purpose of achieving the highest possible performance. + +### Prob.6 When I get compilation errors, they are very long and complicated looking. What do I do? + +Assuming the bugs are all worked out of EASTL, these errors really do indicate that you have something wrong. EASTL is intentionally very strict about types, as it tries to minimize the chance of user errors. Unfortunately, there is no simple resolution to the problem of long compiler errors other than to deal with them. On the other hand, once you've dealt with them a few times, you tend to realize that most of the time they are the same kinds of errors and they become easier to deal with. + +Top five approaches to dealing with long compilation errors: + +1. Look at the line where the compilation error occurred and ignore the text of the error and just look at obvious things that might be wrong. +2. Consider the most common typical causes of templated compilation errors and consider if any of these might be your problem. Usually one of them is. +3. Either read through the error (it's not as hard as it may look on the surface) or copy the error to a text file and remove the extraneous text. +4. Compile the code under GCC instead of MSVC, as GCC warnings and errors tend to be more helpful than MSVC's. Possibly also consider compiling an isolated version under Comeau C++'s free online compiler at www.comeaucomputing.com or the Dinkumware online compiler at http://dinkumware.com/exam/. +5. 
Try using an STL filter (http://www.bdsoft.com/tools/stlfilt.html) which automatically boils down template errors to simpler forms. We haven't tried this yet with EASTL. Also there is the more generic TextFilt (http://textfilt.sourceforge.net/). + +Top five causes of EASTL compilation errors: + +1. const-correctness. Perhaps a quarter of container template errors are due to the user not specifying const correctly. +2. Missing hash function. hash_map, hash_set, etc. require that you either specify a hash function or one exists for your class. See functional.h for examples of declarations of hash functions for common data types. +3. Missing operators. Various containers and algorithms require that certain operators exist for your contained classes. For example, list requires that you can test contained objects for equivalence (i.e. operator==), while map requires that you can test contained objects for "less-ness" (operator <). If you define a Widget class and don't have a way to compare two Widgets, you will get errors when trying to put them into a map. +4. Specifying the wrong data type. For example, it is a common mistake to forget that when you insert into a map, you need to insert a pair of objects and not just your key or value type. +5. Incorrect template parameters. When declaring a template instantiation (e.g. map >) you simply need to get the template parameters correct. Also note that when you have ">>" next to each other that you need to separate them by one space (e.g. "> >"). + +### Prob.7 Templates sometimes seem to take a long time to compile. Why do I do about that? + +C++ compilers are generally slower than C compilers, and C++ templates are generally slower to compile than regular C++ code. EASTL has some extra functionality (such as type_traits and algorithm specializations) that is not found in most other template libraries and significantly improves performance and usefulness but adds to the amount of code that needs to be compiled. 
Ironically, we have a case where more source code generates faster and smaller object code. + +The best solution to the problem is to use pre-compiled headers, which are available on all modern ~2002+) compilers, such as VC6.0+, GCC 3.2+, and Metrowerks 7.0+. In terms of platforms this means all 2002+ platforms. + +Some users have been speeding up build times by creating project files that put all the source code in one large .cpp file. This has an effect similar to pre-compiled headers. It can go even faster than pre-compiled headers but has downsides in the way of convenience and portability. + +### Prob.8 I get the compiler error: "template instantiation depth exceeds maximum of 17. use -ftemplate-depth-NN to increase the maximum". + +This is a GCC error that occurs when a templated function calls a templated function which calls a templated function, etc. past a depth of 17. You can use the GCC command line argument -ftemplate-depth-40 (or some other high number) to get around this. As note below, the syntax starting with GCC 4.5 has changed slightly. + +The primary reason you would encounter this with EASTL is type traits that are used by algorithms. The type traits library is a (necessarily) highly templated set of types and functions which adds at most about nine levels of inlining. The copy and copy_backward algorithms have optimized pathways that add about four levels of inlining. If you have just a few more layers on top of that in container or user code then the default limit of 17 can be exceeded. We are investigating ways to reduce the template depth in the type traits library, but only so much can be done, as most compilers don't support type traits natively. Metrowerks is the current exception. + +From the GCC documentation: + +``` +-ftemplate-depth-n + +Set the maximum instantiation depth for template classes to n. 
+A limit on the template instantiation depth is needed to detect +endless recursions during template class instantiation. ANSI/ISO +C++ conforming programs must not rely on a maximum depth greater than 17. +Note that starting with GCC 4.5 the syntax is -ftemplate-depth=N instead of -ftemplate-depth-n. +``` + +### Prob.9 I'm getting errors about min and max while compiling. + +You need to define NOMINMAX under VC++ when this occurs, as it otherwise defines min and max macros that interfere. There may be equivalent issues with other compilers. Also, VC++ has a specific header file which defines min and max macros but which doesn't pay attention to NOMINMAX and so in that case there is nothing to do but not include that file or to undefine min and max. minmax.h is not a standard file and its min and max macros are not standard C or C++ macros or functions. + +### Prob.10 C++ / EASTL seems to bloat my .obj files much more than C does. + +There is no need to worry. The way most C++ compilers compile templates, they compile all seen template code into the current .obj module, which results in larger .obj files and duplicated template code in multiple .obj files. However, the linker will (and must) select only a single version of any given function for the application, and these linked functions will usually be located contiguously. + +### Prob.11 I'm getting compiler errors regarding placement operator new being previously defined. + +This can happen if you are attempting to define your own versions of placement new/delete. The C++ language standard does not allow the user to override these functions. Section 18.4.3 of the standard states: + +> Placement forms +> 1. These functions are reserved, a C++ program may not define functions that displace the versions in the Standard C++ library. 
+ +You may find that #defining __PLACEMENT_NEW_INLINE seems to fix your problems under VC++, but it can fail under some circumstances and is not portable and fails with other compilers, which don't have an equivalent workaround. + +### Prob.12 I'm getting errors related to wchar_t string functions such as wcslen(). + +EASTL requires EABase-related items that the following be so. If not, then EASTL gets confused about what types it can pass to wchar_t related functions. + +* The #define EA_WCHAR_SIZE is equal to sizeof(wchar_t). +* If sizeof(wchar_t) == 2, then char16_t is typedef'd to wchar_t. +* If sizeof(wchar_t) == 4, then char32_t is typedef'd to wchar_t. + +EABase v2.08 and later automatically does this for most current generation and all next generation platforms. With GCC 2.x, the user may need to predefine EA_WCHAR_SIZE to the appropriate value, due to limitations with the GCC compiler. Note that GCC defaults to sizeof(wchar_t) ==4, but it can be changed to 2 with the -fshort_wchar compiler command line argument. If you are using EASTL without EABase, you will need to make sure the above items are correctly defined. + +### Prob.13 I'm getting compiler warning C4619: there is no warning number Cxxxx (e.g. C4217). + +Compiler warning C4619 is a VC++ warning which is saying that the user is attempting to enable or disable a warning which the compiler doesn't recognize. This warning only occurs if the user has the compiler set to enable warnings that are normally disabled, regardless of the warning level. The problem, however, is that there is no easy way for user code to tell what compiler warnings any given compiler version will recognize. That's why Microsoft normally disables this warning. + +The only practical solution we have for this is for the user to disable warning 4619 globally or an a case-by-case basis. EA build systems such as nant/framework 2's eaconfig will usually disable 4619. 
In general, global enabling of 'warnings that are disabled by default' often results in quandaries such as this.
+
+### Prob.14 My stack-based fixed_vector is not respecting the object alignment requirements.
+
+EASTL fixed_* containers rely on the compiler-supplied alignment directives, such as that implemented by EA_PREFIX_ALIGN. This is normally a good thing because it allows the memory to be local with the container. However, as documented by Microsoft at http://msdn2.microsoft.com/en-us/library/83ythb65(VS.71).aspx, this doesn't work for stack variables. The two primary means of working around this are:
+
+* Use something like AlignedObject<> from the EAStdC package's EAAllocator.h file.
+* Use eastl::vector with a custom allocator and have it provide aligned memory. EASTL automatically recognizes that the objects are aligned and will call the aligned version of your allocator allocate() function. You can get this aligned memory from the stack, if you need it, somewhat like how AlignedObject<> works.
+
+### Prob.15 I am getting compiler errors when using GCC under XCode (Macintosh/iphone).
+
+The XCode environment has a compiler option which causes it to evaluate include directories recursively. So if you specify /a/b/c as an include directory, it will consider all directories underneath c to also be include directories. This option is enabled by default, though many XCode users disable it, as it is a somewhat dangerous option. The result of enabling this option with EASTL is that the wrong header file can be picked up by the compiler when you write an #include statement. The solution is to disable this compiler option. It's probably a good idea to disable this option anyway, as it typically causes problems for users yet provides minimal benefits.
+
+### Prob.16 I am getting linker errors about Vsnprintf8 or Vsnprintf16.
+
+EASTL requires the user to provide a function called Vsnprintf8 if the string::sprintf function is used.
vsnprintf is not a standard C function, but most C standard libraries provide some form of it, though in some ways their implementations differ, especially in what the return value means. Also, most implementations of vsnprintf are slow, mostly due to mutexes related to locale functionality. And you can't really use vendor vsnprintf on an SPU due to the heavy standard library size. EASTL is stuck because it doesn't want to depend on something with these problems. EAStdC provides a single consistent fast lightweight, yet standards-conforming, implementation in the form of Vsnprintf(char8_t*, ...), but EASTL can't have a dependency on EAStdC. So the user must provide an implementation, even if all it does is call EAStdC's Vsnprintf or the vendor vsnprintf for that matter. + +Example of providing Vsnprintf8 via EAStdC: + +```cpp +#include + +int Vsnprintf8(char8_t* pDestination, size_t n, const char8_t* pFormat, va_list arguments) +{ + return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments); +} + +int Vsnprintf16(char16_t* pDestination, size_t n, const char16_t* pFormat, va_list arguments) +{ + return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments); +} +``` + +Example of providing Vsnprintf8 via C libraries: + +```cpp +#include + +int Vsnprintf8(char8_t* p, size_t n, const char8_t* pFormat, va_list arguments) +{ + #ifdef _MSC_VER + return vsnprintf_s(p, n, _TRUNCATE, pFormat, arguments); + #else + return vsnprintf(p, n, pFormat, arguments); + #endif +} + +int Vsnprintf16(char16_t* p, size_t n, const char16_t* pFormat, va_list arguments) +{ + #ifdef _MSC_VER + return vsnwprintf_s(p, n, _TRUNCATE, pFormat, arguments); + #else + return vsnwprintf(p, n, pFormat, arguments); // Won't work on Unix because its libraries implement wchar_t as int32_t. + #endif +} +``` + +### Prob.17 I am getting compiler errors about UINT64_C or UINT32_C. 
+
+This is usually an order-of-include problem that comes about due to the implementation of __STDC_CONSTANT_MACROS in C++ Standard libraries. The C++ <stdint.h> header file defines UINT64_C only if __STDC_CONSTANT_MACROS has been defined by the user or the build system; the compiler doesn't automatically define it. The failure you are seeing occurs because user code is #including a system header before #including EABase and without defining __STDC_CONSTANT_MACROS itself or globally. EABase defines __STDC_CONSTANT_MACROS and #includes the appropriate system header. But if the system header was already previously #included and __STDC_CONSTANT_MACROS was not defined, then UINT64_C doesn't get defined by anybody.
+
+The real solution that the C++ compiler and standard library wants is for the app to globally define __STDC_CONSTANT_MACROS itself in the build.
+
+### Prob.18 I am getting a crash with a global EASTL container.
+
+This is usually due to the compiler's lack of support for global (and static) C++ class instances. The crash is happening because the global variable exists but its constructor was not called on application startup and its member data is zeroed bytes. To handle this you need to manually initialize such variables. There are two primary ways:
+
+Failing code:
+
+```cpp
+eastl::list<int> gIntList; // Global variable.
+
+void DoSomething()
+{
+    gIntList.push_back(1); // Crash. gIntList was never constructed.
+}
+```
+
+Declaring a pointer solution:
+
+```cpp
+eastl::list<int>* gIntList = NULL;
+
+void DoSomething()
+{
+    if(!gIntList) // Or move this to an init function.
+        gIntList = new eastl::list<int>;
+
+    gIntList->push_back(1); // Success
+}
+```
+
+Manual constructor call solution:
+
+```cpp
+eastl::list<int> gIntList;
+
+void InitSystem()
+{
+    new(&gIntList) eastl::list<int>;
+}
+
+void DoSomething()
+{
+    gIntList.push_back(1); // Success
+}
+```
+
+### Prob.19 Why doesn't EASTL support passing NULL string functions?
+
+The primary argument is to make functions safer for use.
Why crash on NULL pointer access when you can make the code safe? That's a good argument. The counter argument, which EASTL currently makes, is:
+
+> It breaks consistency with the C++ STL library and C libraries, which require strings to be valid.
+>
+> It makes the code slower and bigger for all users, though few need NULL checks.
+> The specification for how to handle NULL is simple for some cases but not simple for others. Operator < below is a case where the proper handling of it in a consistent way is not simple, as all comparison code (<, >, ==, !=, >=, <=) in EASTL must universally and consistently handle the case where either or both sides are NULL. A NULL string seems similar to an empty string, but doesn't always work out so simply.
+>
+> What about other invalid string pointers? NULL is merely one invalid value of many, with its only distinction being that sometimes it's intentionally NULL (as opposed to being NULL due to not being initialized).
+> How and where to implement the NULL checks in such a way as to do it efficiently is not always simple, given that public functions call public functions.
+>
+> It's arguable (and in fact the intent of the C++ standard library) that using pointers that are NULL is a user/app mistake. If we really want to be safe then we should be using string objects for everything. You may not entirely buy this argument in practice, but on the other hand one might ask why is the caller of EASTL using a NULL pointer in the first place? The answer of course is that somebody gave it to him.
+
+## Debug
+
+### Debug.1 How do I set the VC++ debugger to display EASTL container data with tooltips?
+
+See [Cont.9](#cont9-how-do-i-set-the-vc-debugger-to-display-eastl-container-data-with-tooltips)
+
+### Debug.2 How do I view containers if the visualizer/tooltip support is not present?
+
+Here is a table of answers about how to manually inspect containers in the debugger.
+
+| Container | Approach |
+|------|------|
+| slist
fixed_slist | slist is a singly-linked list. Look at the slist mNode variable. You can walk the list by looking at mNode.mpNext, etc. | +| list
fixed_list | list is a doubly-linked list. Look at the list mNode variable. You can walk the list forward by looking at mNode.mpNext, etc. and backward by looking at mpPrev, etc. | +| intrusive_list
intrusive_slist† | Look at the list mAnchor node. This lets you walk forward and backward in the list via mpNext and mpPrev. | +| array | View the array mValue member in the debugger. It's simply a C style array. | +| vector
fixed_vector | View the vector mpBegin value in the debugger. If the string is long, use ", N" to limit the view length, as with someVector.mpBegin, 32 | +| vector_set
vector_multiset
vector_map
vector_multimap | These are containers that are implemented as a sorted vector, deque, or array. They are searched via a standard binary search. You can view them the same way you view a vector or deque. | +| deque | deque is implemented as an array of arrays, where the arrays implement successive equally-sized segments of the deque. The mItBegin deque member points the deque begin() position. | +| bitvector | Look at the bitvector mContainer variable. If it's a vector, then see vector above. | +| bitset | Look at the bitset mWord variable. The bitset is nothing but one or more uint32_t mWord items. | +| set
multiset
fixed_set
fixed_multiset | The set containers are implemented as a tree of elements. The set mAnchor.mpNodeParent points to the top of the tree; the mAnchor.mpNodeLeft points to the far left node of the tree (set begin()); the mAnchor.mpNodeRight points to the right of the tree (set end()). | +| map
multimap
fixed_map
fixed_multimap | The map containers are implemented as a tree of pairs, where pair.first is the map key and pair.second is the map value. The map mAnchor.mpNodeParent points to the top of the tree; the mAnchor.mpNodeLeft points to the far left node of the tree (map begin()); the mAnchor.mpNodeRight points to the right of the tree (map end()). | +| hash_map
hash_multimap
fixed_hash_map
fixed_hash_multimap | hash tables in EASTL are implemented as an array of singly-linked lists. The array is the mpBucketArray member. Each element in the list is a pair, where the first element of the pair is the map key and the second is the map value. | +| intrusive_hash_map
intrusive_hash_multimap
intrusive_hash_set
intrusive_hash_multiset | intrusive hash tables in EASTL are implemented very similarly to regular hash tables. See the hash_map and hash_set entries for more info. | +| hash_set
hash_multiset
fixed_hash_set
fixed_hash_map | hash tables in EASTL are implemented as an array of singly-linked lists. The array is the mpBucketArray member. | +| basic_string
fixed_string
fixed_substring | View the string mpBegin value in the debugger. If the string is long, use ", N" to limit the view length, as with someString.mpBegin, 32 | +| heap | A heap is an array of data (e.g. EASTL vector) which is organized in a tree whereby the highest priority item is array[0], The next two highest priority items are array[1] and [2]. Underneath [1] in priority are items [3] and [4], and underneath item [2] in priority are items [5] and [6]. etc. | +| stack | View the stack member c value in the debugger. That member will typically be a list or deque. | +| queue | View the queue member c value in the debugger. That member will typically be a list or deque. | +| priority_queue | View the priority_queue member c value in the debugger. That member will typically be a vector or deque which is organized as a heap. See the heap section above for how to view a heap. | +| smart_ptr | View the mpValue member. | + +### Debug.3 The EASTL source code is sometimes rather complicated looking. Why is that? + +**Short answer** + +Maximum performance. + +**Long answer** + +EASTL uses templates, type_traits, iterator categories, redundancy reduction, and branch reduction in order to achieve optimal performance. A side effect of this is that there are sometimes a lot of template parameters and multiple levels of function calls due to template specialization. The ironic thing about this is that this makes the code (an optimized build, at least) go faster, not slower. In an optimized build the compiler will see through the calls and template parameters and generate a direct optimized inline version. + +As an example of this, take a look at the implementation of the copy implementation in algorithm.h. If you are copying an array of scalar values or other trivially copyable values, the compiler will see how the code directs this to the memcpy function and will generate nothing but a memcpy in the final code. 
For non-memcpyable data types the compiler will automatically understand how to do the right thing.
+
+EASTL's primary objective is maximal performance, and it has been deemed worthwhile to make the code a little less obvious in order to achieve this goal. Every case where EASTL does something in an indirect way is by design and usually this is for the purpose of achieving the highest possible performance.
+
+### Debug.4 When I get compilation errors, they are very long and complicated looking. What do I do?
+
+Assuming the bugs are all worked out of EASTL, these errors really do indicate that you have something wrong. EASTL is intentionally very strict about types, as it tries to minimize the chance of user errors. Unfortunately, there is no simple resolution to the problem of long compiler errors other than to deal with them. On the other hand, once you've dealt with them a few times, you tend to realize that most of the time they are the same kinds of errors and become much easier to deal with.
+
+Top five approaches to dealing with long compilation errors:
+
+1. Look at the line where the compilation error occurred and ignore the text of the error and just look at obvious things that might be wrong.
+2. Consider the most common typical causes of templated compilation errors and consider if any of these might be your problem. Usually one of them is.
+3. Either read through the error (it's not as hard as it may look on the surface) or copy the error to a text file and remove the extraneous parts.
+4. Compile the code under GCC instead of MSVC, as GCC warnings and errors tend to be more helpful than MSVC's. Possibly also consider compiling an isolated version under Comeau C++'s free online compiler at www.comeaucomputing.com or the Dinkumware online compiler at http://dinkumware.com/exam/.
+5. Try using an STL filter (http://www.bdsoft.com/tools/stlfilt.html) which automatically boils down template errors to simpler forms. We haven't tried this yet with EASTL.
Also there is the more generic TextFilt (http://textfilt.sourceforge.net/). + +Top five causes of EASTL compilation errors: + +1. const-correctness. Perhaps a quarter of container template errors are due to the user not specifying const correctly. +2. Missing hash function. hash_map, hash_set, etc. require that you either specify a hash function or one exists for your class. See functional.h for examples of declarations of hash functions for common data types. +3. Missing operators. Various containers and algorithms require that certain operators exist for your contained classes. For example, list requires that you can test contained objects for equivalence (i.e. operator==), while map requires that you can test contained objects for "less-ness" (operator <). If you define a Widget class and don't have a way to compare two Widgets, you will get errors when trying to put them into a map. +4. Specifying the wrong data type. For example, it is a common mistake to forget that when you insert into a map, you need to insert a pair of objects and not just your key or value type. +5. Incorrect template parameters. When declaring a template instantiation (e.g. map >) you simply need to get the template parameters correct. Also note that when you have ">>" next to each other that you need to separate them by one space (e.g. "> >"). + +### Debug.5 How do I measure hash table balancing? + +The following functionality lets you spelunk hash container layout. + +* There is the load_factor function which tells you the overall hashtable load, but doesn't tell you if a load is unevenly distributed. +* You can control the load factor and thus the automated bucket redistribution with set_load_factor. +* The local_iterator begin(size_type n) and local_iterator end(size_type) functions lets you iterate each bucket individually. You can use this to examine the elements in a bucket. 
+* You can use the above to get the size of any bucket, but there is also simply the bucket_size(size_type n) function. +* The bucket_count function tells you the count of buckets. So with this you can completely visualize the layout of the hash table. +* There is also iterator find_by_hash(hash_code_t c), for what it's worth. + +The following function draws an ASCII bar graph of the hash table for easy visualization of bucket distribution: + +```cpp +#include +#include +#include + +template +void VisualizeHashTableBuckets(const HashTable& h) +{ + eastl_size_t bucketCount = h.bucket_count(); + eastl_size_t largestBucketSize = 0; + + for(eastl_size_t i = 0; i < bucketCount; i++) + largestBucketSize = eastl::max_alt(largestBucketSize, h.bucket_size(i)); + + YourPrintFunction("\n --------------------------------------------------------------------------------\n"); + + for(eastl_size_t i = 0; i < bucketCount; i++) + { + const eastl_size_t k = h.bucket_size(i) * 80 / largestBucketSize; + + char buffer[16]; + sprintf(buffer, "%3u|", (unsigned)i); + YourPrintFunction(buffer); + + for(eastl_size_t j = 0; j < k; j++) + YourPrintFunction("*"); + + YourPrintFunction("\n"); + } + + YourPrintFunction(" --------------------------------------------------------------------------------\n"); +} +``` + +This results in a graph that looks like the following (with one horizontal bar per bucket). This hashtable has a large number of collisions in each of its 10 buckets. 
+ +``` + ------------------------------------------------------ + 0|******************************************** + 1|************************************************ + 2|*************************************** + 3|******************************************** + 4|***************************************************** + 5|************************************************* + 6|**************************************** + 7|*********************************************** + 8|******************************************** + 9|************************************** +10|******************************************** + ----------------------------------------------------- +``` + +## Containers + +### Cont.1 Why do some containers have "fixed" versions (e.g. fixed_list) but others(e.g. deque) don't have fixed versions? + +Recall that fixed containers are those that are implemented via a single contiguous block of memory and don't use a general purpose heap to allocate memory from. For example, fixed_list is a list container that implements its list by a user-configurable fixed block of memory. Such containers have an upper limit to how many items they can hold, but have the advantage of being more efficient with memory use and memory access coherency. + +The reason why some containers don't have fixed versions is that such functionality doesn't make sense with these containers. Containers which don't have fixed versions include: + +``` +array, deque, bitset, stack, queue, priority_queue, +intrusive_list, intrusive_hash_map, intrusive_hash_set, +intrusive_hash_multimap, intrusive_hash_multimap, +vector_map, vector_multimap, vector_set, vector_multiset. +``` + +Some of these containers are adapters which wrap other containers and thus there is no need for a fixed version because you can just wrap a fixed container. In the case of intrusive containers, the user is doing the allocation and so there are no memory allocations. 
In the case of array, the container is a primitive type which doesn't allocate memory. In the case of deque, its primary purpose for being is to dynamically resize and thus the user would likely be better off using a fixed_vector.
+
+### Cont.2 Can I mix EASTL with standard C++ STL?
+
+This is possible to some degree, though the extent depends on the implementation of C++ STL. One of the things that limits interoperability is something called iterator categories. Containers and algorithms recognize iterator types via their category and STL iterator categories are not recognized by EASTL and vice versa.
+
+Things that you definitely can do:
+
+* #include both EASTL and standard STL headers from the same .cpp file.
+* Use EASTL containers to hold STL containers.
+* Construct an STL reverse_iterator from an EASTL iterator.
+* Construct an EASTL reverse_iterator from an STL iterator.
+
+Things that you probably will be able to do, though a given std STL implementation may prevent it:
+
+* Use STL containers in EASTL algorithms.
+* Use EASTL containers in STL algorithms.
+* Construct or assign to an STL container via iterators into an EASTL container.
+* Construct or assign to an EASTL container via iterators into an STL container.
+
+Things that you would be able to do if the given std STL implementation is bug-free:
+
+* Use STL containers to hold EASTL containers. Unfortunately, VC7.x STL has a confirmed bug that prevents this. Similarly, STLPort versions prior to v5 have a similar bug.
+
+Things that you definitely can't do:
+
+* Use an STL allocator directly with an EASTL container (though you can use one indirectly).
+* Use an EASTL allocator directly with an STL container (though you can use one indirectly).
+
+### Cont.3 Why are there so many containers?
+
+EASTL has a large number of container types (e.g. vector, list, set) and often has a number of variations of given types (list, slist, intrusive_list, fixed_list).
The reason for this is that each container is tuned and to a specific need and there is no single container that works for all needs. The more the user is concerned about squeezing the most performance out of their system, the more the individual container variations become significant. It's important to note that having additional container types generally does not mean generating additional code or code bloat. Templates result in generated code regardless of what templated class they come from, and so for the most part you get optimal performance by choosing the optimal container for your needs. + +### Cont.4 Don't STL and EASTL containers fragment memory? + +They only fragment memory if you use them in a way that does so. This is no different from any other type of container used in a dynamic way. There are various solutions to this problem, and EASTL provides additional help as well: + +For vectors, use the reserve function (or the equivalent constructor) to set aside a block of memory for the container. The container will not reallocate memory unless you try grow beyond the capacity you reserve. +EASTL has "fixed" variations of containers which allow you to specify a fixed block of memory which the container uses for its memory. The container will not allocate any memory with these types of containers and all memory will be cache-friendly due to its locality. +You can assign custom allocators to containers instead of using the default global allocator. You would typically use an allocator that has its own private pool of memory. +Where possible, add all a container's elements to it at once up front instead of adding them over time. This avoids memory fragmentation and increase cache coherency. + +### Cont.5 I don't see container optimizations for equivalent scalar types such as pointer types. Why? + +Metrowerks (and no other, as of this writing) STL has some container specializations for type T* which maps them to type void*. 
The idea is that a user who declares a list of Widget* and a list of Gadget* will generate only one container: a list of void*. As a result, code generation will be smaller. Often this is done only in optimized builds, as such containers are harder to view in debug builds due to type information being lost. + +The addition of this optimization is under consideration for EASTL, though it might be noted that optimizing compilers such as VC++ are already capable of recognizing duplicate generated code and folding it automatically as part of link-time code generation (LTCG) (a.k.a. "whole program optimization"). This has been verified with VC++, as the following code and resulting disassembly demonstrate: + +```cpp +eastl::list intPtrList; +eastl::list toPtrList; + +eastl_size_t n1 = intPtrList.size(); +eastl_size_t n2 = toPtrList.size(); + +0042D288 lea edx,[esp+14h] +0042D28C call eastl::list::size (414180h) +0042D291 push eax +0042D292 lea edx,[esp+24h] +0042D296 call eastl::list::size (414180h) +``` + +Note that in the above case the compiler folded the two implementations of size() into a single implementation. + +### Cont.6 What about alternative container and algorithm implementations (e.g. treaps, skip lists, avl trees)? + +EASTL chooses to implement some alternative containers and algorithms and not others. It's a matter of whether or not the alternative provides truly complementary or improved functionality over existing containers. The following is a list of some implemented and non-implemented alternatives and the rationale behind each: + +Implemented: + +* intrusive_list, etc. -- Saves memory and improves cache locality. +* vector_map, etc. -- Saves memory and improves cache locality. +* ring_buffer -- Useful for some types of operations and has no alternative. +* shell_sort -- Useful sorting algorithm. +* sparse_matrix -- Useful for some types of operations and has no alternative. 
+ +Not implemented: + +* skip lists (alternative to red-black tree) -- These use more memory and usually perform worse than rbtrees. +* treap (alternative to red-black tree) -- These are easier and smaller than rbtrees, but perform worse. +* avl tree (alternative to red-black tree) -- These have slightly better search performance than rbtrees, but significantly worse * * insert/remove performance. +* btree (alternative to red-black tree) -- These are no better than rbtrees. + +If you have an idea of something that should be implemented, please suggest it or even provide at least a prototypical implementation. + +### Cont.7 Why are tree-based EASTL containers hard to read with a debugger? + +**Short answer** + +Maximum performance and design mandates. + +**Long answer** + +You may notice that when you have a tree-based container (e.g. set, map) in the debugger that it isn't automatically able to recognize the tree nodes as containing instances of your contained object. You can get the debugger to do what you want with casting statements in the debug watch window, but this is not an ideal solution. The reason this is happening is that node-based containers always use an anonymous node type as the base class for container nodes. This is primarily done for performance, as it allows the node manipulation code to exist as a single non-templated library of functions and it saves memory because containers will have one or two base nodes as container 'anchors' and you don't want to allocate a node of the size of the user data when you can just use a base node. See list.h for an example of this and some additional in-code documentation on this. + +Additionally, EASTL has the design mandate that an empty container constructs no user objects. This is both for performance reasons and because it doing so would skew the user's tracking of object counts and might possibly break some expectation the user has about object lifetimes. 
+ +Currently this debug issue exists only with tree-based containers. Other node-based containers such as list and slist use a trick to get around this problem in debug builds. + +### Cont.8 How do I assign a custom allocator to an EASTL container? + +There are two ways of doing this: + +1. Use the set_allocator function that is present in each container. +2. Specify a new allocator type via the Allocator template parameter that is present in each container. + +For item #1, EASTL expects that you provide an instance of an allocator of the type that EASTL recognizes. This is simple but has the disadvantage that all such allocators must be of the same class. The class would need to have C++ virtual functions in order to allow a given instance to act differently from another instance. + +For item #2, you specify that the container use your own allocator class. The advantage of this is that your class can be implemented any way you want and doesn't require virtual functions for differentiation from other instances. Due to the way C++ works your class would necessarily have to use the same member function names as the default allocator class type. In order to make things easier, we provide a skeleton allocator here which you can copy and fill in with your own implementation. + +```cpp +class custom_allocator +{ +public: + custom_allocator(const char* pName = EASTL_NAME_VAL("custom allocator")) + { + #if EASTL_NAME_ENABLED + mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + + // Possibly do something here. + } + + custom_allocator(const allocator& x, const char* pName = EASTL_NAME_VAL("custom allocator")); + { + #if EASTL_NAME_ENABLED + mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + + // Possibly copy from x here. + } + + ~custom_allocator(); + { + // Possibly do something here. + } + + custom_allocator& operator=(const custom_allocator& x) + { + // Possibly copy from x here. 
+ return *this; + } + + void* allocate(size_t n, int flags = 0) + { + // Implement the allocation here. + } + + void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0) + { + // Implement the allocation here. + } + + void deallocate(void* p, size_t n) + { + // Implement the deallocation here. + } + + const char* get_name() const + { + #if EASTL_NAME_ENABLED + return mpName; + #else + return "custom allocator"; + #endif + } + + void set_name(const char* pName) + { + #if EASTL_NAME_ENABLED + mpName = pName; + #endif + } + +protected: + // Possibly place instance data here. + + #if EASTL_NAME_ENABLED + const char* mpName; // Debug name, used to track memory. + #endif +}; + + +inline bool operator==(const allocator& a, const allocator& b) +{ + // Provide a comparison here. +} + +inline bool operator!=(const allocator& a, const allocator& b) +{ + // Provide a negative comparison here. +} +``` + +Here's an example of how to use the above custom allocator: + +```cpp +// Declare a Widget list and have it default construct. +list widgetList; + +// Declare a Widget list and have it construct with a copy of some global allocator. +list widgetList2(gSomeGlobalAllocator); + +// Declare a Widget list and have it default construct, but assign +// an underlying implementation after construction. +list widgetList; +widgetList.get_allocator().mpIAllocator = new WidgetAllocatorImpl; +``` + +### Cont.9 How do I set the VC++ debugger to display EASTL container data with tooltips? + +Visual Studio supports this via the AutoExp.dat file, an example of which is [present](./html/AutoExp.dat) with this documentation. + +Sometimes the AutoExp.dat doesn't seem to work. Avery Lee's explanation: + +> If I had to take a guess, the problem is most likely in the cast to the concrete node type. These are always tricky because, for some strange reason, the debugger is whitespace sensitive with regard to specifying template types. 
You might try manually checking one of the routines of the specific map instantiation and checking that the placement of whitespace and const within the template expression still matches exactly. In some cases the compiler uses different whitespace rules depending on the value type which makes it impossible to correctly specify a single visualizer – this was the case for eastl::list<>, for which I was forced to include sections for both cases. The downside is that you have a bunch of (error) entries either way. + +### Cont.10 How do I use a memory pool with a container? + +Using custom memory pools is a common technique for decreasing memory fragmentation and increasing memory cache locality. EASTL gives you the flexibility of defining your own memory pool systems for containers. There are two primary ways of doing this: + +* Assign a custom allocator to a container. eastl::fixed_pool provides an implementation. +* Use one of the EASTL fixed containers, such as fixed_list. + +**Custom Allocator** + +In the custom allocator case, you will want to create a memory pool and assign it to the container. For purely node-based containers such as list, slist, map, set, multimap, and multiset, your pool simply needs to be able to allocate list nodes. Each of these containers has a member typedef called node_type which defines the type of node allocated by the container. So if you have a memory pool that has a constructor that takes the size of pool items and the count of pool items, you would do this (assuming that MemoryPool implements the Allocator interface): + +```cpp +typedef list WidgetList; // Declare your WidgetList type. + +MemoryPool myPool(sizeof(WidgetList::node_type), 100); // Make a pool of 100 Widget nodes. +WidgetList myList(&myPool); // Create a list that uses the pool. 
+```

In the case of containers that are array-based, such as vector and basic_string, memory pools don't work very well as these containers work on a realloc-basis instead of by adding incremental nodes. What you want to do with these containers is assign a sufficient block of memory to them and reserve() the container's capacity to the size of the memory.

In the case of mixed containers which are partly array-based and partly node based, such as hash containers and deque, you can use a memory pool for the nodes but will need a single array block to supply for the buckets (hash containers and deque both use a bucket-like system).

You might consider using eastl::fixed_pool as such an allocator, as it provides such functionality and allows the user to provide the actual memory used for the pool. Here is some example code:

```cpp
char buffer[256];

list myList;
myList.get_allocator().init(buffer, 256);
```

**Fixed Container**

In the fixed container case, the container does all the work for you. To use a list which implements a private pool of memory, just declare it like so:

```cpp
fixed_list fixedList; // Declare a fixed_list that can hold 100 Widgets
```

### Cont.11 How do I write a comparison (operator<()) for a struct that contains two or more members?

See [Algo.2](#algo2-how-do-i-write-a-comparison-operator-for-a-struct-that-contains-two-or-more-members).

### Cont.12 Why doesn't container X have member function Y?

Why don't the list or vector containers have a find() function? Why doesn't the vector container have a sort() function? Why doesn't the string container have a mid() function? These are common examples of such questions.

The answer usually boils down to two reasons:

* The functionality exists in a more centralized location elsewhere, such as the algorithms.
* The functionality can be had by using other member functions.
+ +In the case of find and sort functions not being part of containers, the find algorithm and sort algorithm are centralized versions that apply to any container. Additionally, the algorithms allow you to specify a sub-range of the container on which to apply the algorithm. So in order to find an element in a list, you would do this: + +`list::iterator i = find(list.begin(), list.end(), 3);` + +And in order to sort a vector, you would do this: + +```cpp +quick_sort(v.begin(), v.end()); // Sort the entire array. + +quick_sort(&v[3], &v[8]); // Sort the items at the indexes in the range of [3, 8). +``` + +In the case of functionality that can be had by using other member functions, note that EASTL follows the philosophy that duplicated functionality should not exist in a container, with exceptions being made for cases where mistakes and unsafe practices commonly happen if the given function isn't present. In the case of string not having a mid function, this is because there is a string constructor that takes a sub-range of another string. So to make a string out of the middle of another, you would do this: + +`string strMid(str, 3, 5); // Make a new string of the characters from the source range of [3, 3+5).` + +It might be noted that the EASTL string class is unique among EASTL containers in that it sometimes violates the minimum functionality rule. This is so because the std C++ string class similarly does so and EASTL aims to be compatible. + +### Cont.13 How do I search a hash_map of strings via a char pointer efficiently? If I use map.find("hello") it creates a temporary string, which is inefficient. + +The problem is illustrated with this example: + +```cpp +map swMap; + ... +map::iterator it = swMap.find("blue"); // A temporary string object is created here. +``` + +In this example, the find function expects a string object and not a string literal and so (silently!) creates a temporary string object for the duration of the find. 
+There are two solutions to this problem:

* Make the map a map of char pointers instead of string objects. Don't forget to write a custom compare or else the default comparison function will compare pointer values instead of string contents.
* Use the EASTL hash_map::find_as function, which allows you to find an item in a hash container via an alternative key than the one the hash table uses.

### Cont.14 Why are set and hash_set iterators const (i.e. const_iterator)?

The situation is illustrated with this example:

```cpp
set intSet;

intSet.insert(1);
set::iterator i = intSet.begin();
*i = 2; // Error: iterator i is const.
```

In this example, the iterator is a regular iterator and not a const_iterator, yet the compiler gives an error when trying to change the iterator value. The reason this is so is that a set is an ordered container and changing the value would make it out of order. Thus, set and multiset iterators are always const_iterators. If you need to change the value and are sure the change will not alter the container order, use const_cast or declare mutable member variables for your contained object. This resolution is the one blessed by the C++ standardization committee.

### Cont.15 How do I prevent my hash container from re-hashing?

If you want to make a hashtable never re-hash (i.e. increase/reallocate its bucket count), call set_max_load_factor with a very high value such as 100000.f.

Similarly, you can control the bucket growth factor with the rehash_policy function. By default, when buckets reallocate, they reallocate to about twice their previous count. You can control that value with the example code here:

```cpp
hash_set hashSet;
hashSet.rehash_policy().mfGrowthFactor = 1.5f;
```

### Cont.16 Which uses less memory, a map or a hash_map?

A hash_map will virtually always use less memory. A hash_map will use an average of two pointers per stored element, while a map uses three pointers per stored element.
+

### Cont.17 How do I write a custom hash function?

You can look at the existing hash functions in functional.h, but we provide a couple examples here.

To write a specific hash function for a Widget class, you would do this:

```cpp
struct WidgetHash {
    size_t operator()(const Widget& w) const
        { return w.id; }
};

hash_set widgetHashSet;
```

To write a generic (templated) hash function for a set of similar classes (in this case that have an id member), you would do this:

```cpp
template
struct GeneralHash {
    size_t operator()(const T& t) const
        { return t.id; }
};

hash_set > widgetHashSet;
hash_set > doggetHashSet;
```

### Cont.18 How do I write a custom compare function for a map or set?

The sorted containers require that an operator< exist for the stored values or that the user provide a suitable custom comparison function. A custom comparison function can be implemented like so:

```cpp
struct WidgetLess {
    bool operator()(const Widget& w1, const Widget& w2) const
        { return w1.id < w2.id; }
};

set wSet;
```

It's important that your comparison function be consistent in its behaviour, else the container will either be unsorted or a crash will occur. This concept is called "strict weak ordering."

### Cont.19 How do I force my vector or string capacity down to the size of the container?

You can simply use the set_capacity() member function which is present in both vector and string. This is a function that is not present in std STL vector and string functions.

```cpp
eastl::vector x;
x.set_capacity(); // Shrink x's capacity to be equal to its size.

eastl::vector x;
x.set_capacity(0); // Completely clear x.
```

To compact your vector or string in a way that would also work with std STL you need to do the following.

How to shrink a vector's capacity to be equal to its size:

```cpp
std::vector x;
std::vector(x).swap(x); // Shrink x's capacity.
+```

How to completely clear a std::vector (size = 0, capacity = 0, no allocation):

```cpp
std::vector x;
std::vector().swap(x); // Completely clear x.
```

### Cont.20 How do I iterate a container while (selectively) removing items from it?

All EASTL containers have an erase function which takes an iterator as an argument and returns an iterator to the next item. Thus, you can erase items from a container while iterating it like so:

```cpp
set intSet;

set::iterator i = intSet.begin();

while(i != intSet.end())
{
    if(*i & 1) // Erase all odd integers from the container.
        i = intSet.erase(i);
    else
        ++i;
}
```

### Cont.21 How do I store a pointer in a container?

The problem with storing pointers in containers is that clearing the container will not free the pointers automatically. There are two conventional resolutions to this problem:

* Manually free pointers when removing them from containers.
* Store the pointer as a smart pointer instead of a "raw" pointer.

The advantage of the former is that it makes the user's intent obvious and prevents the possibility of smart pointer "thrashing" with some containers. The disadvantage of the former is that it is more tedious and error-prone.

The advantage of the latter is that your code will be cleaner and will always be error-free. The disadvantage is that it is perhaps slightly obfuscating and with some uses of some containers it can cause smart pointer thrashing, whereby a resize of a linear container (e.g. vector) can cause shared pointers to be repeatedly incremented and decremented with no net effect.

It's important that you use a shared smart pointer and not an unshared one such as C++ auto_ptr, as the latter will result in crashes upon linear container resizes. Here we provide an example of how to create a list of smart pointers:

```cpp
list< shared_ptr > wList;

wList.push_back(shared_ptr(new Widget));
wList.pop_back(); // The Widget will be freed.
+``` + +### Cont.22 How do I make a union of two containers? difference? intersection? + +The best way to accomplish this is to sort your container (or use a sorted container such as set) and then apply the set_union, set_difference, or set_intersection algorithms. + +### Cont.23 How do I override the default global allocator? + +There are multiple ways to accomplish this. The allocation mechanism is defined in EASTL/internal/config.h and in allocator.h/cpp. Overriding the default global allocator means overriding these files, overriding what these files refer to, or changing these files outright. Here is a list of things you can do, starting with the simplest: + +* Simply provide the following versions of operator new (which EASTL requires, actually): +```cpp +void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line); +void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line); +``` +* Predefine the config.h macros for EASTLAlloc, EASTLFree, etc. See config.h for this. +* Override config.h entirely via EASTL_USER_CONFIG_HEADER. See config.h for this. +* Provide your own version of allocator.h/cpp +* Provide your own version of config.h. + +If you redefine the allocator class, you can make it work however you want. + +Note that config.h defines EASTLAllocatorDefault, which returns the default allocator instance. As documented in config.h, this is not a global allocator which implements all container allocations but is the allocator that is used when EASTL needs to allocate memory internally. There are very few cases where EASTL allocates memory internally, and in each of these it is for a sensible reason that is documented to behave as such. + +### Cont.24 How do I do trick X with the string container? + +There seem to be many things users want to do with strings. 
+Perhaps the most commonly requested EASTL container extensions are string class shortcut functions. While some of these requests are being considered, we provide some shortcut functions here.

**find_and_replace**

```cpp
template
void find_and_replace(String& s, const typename String::value_type* pFind, const typename String::value_type* pReplace)
{
    for(size_t i = 0; (i = s.find(pFind)) != String::npos; )
        s.replace(i, eastl::CharStrlen(pFind), pReplace);
}

Example:
    find_and_replace(s, "hello", "hola");
```

**trim front (multiple chars)**

```cpp
template
void trim_front(String& s, const typename String::value_type* pValues)
{
    s.erase(0, s.find_first_not_of(pValues));
}

Example:
    trim_front(s, " \t\n\r");
```

**trim back (multiple chars)**

```cpp
template
void trim_back(String& s, const typename String::value_type* pValues)
{
    s.resize(s.find_last_not_of(pValues) + 1);
}

Example:
    trim_back(s, " \t\n\r");
```

**prepend**

```cpp
template
void prepend(String& s, const typename String::value_type* p)
{
    s.insert(0, p);
}

Example:
    prepend(s, "log: ");
```

**begins_with**

```cpp
template
bool begins_with(const String& s, const typename String::value_type* p)
{
    return s.compare(0, eastl::CharStrlen(p), p) == 0;
}

Example:
    if(begins_with(s, "log: ")) ...
```

**ends_with**

```cpp
template
bool ends_with(const String& s, const typename String::value_type* p)
{
    const typename String::size_type n1 = s.size();
    const typename String::size_type n2 = eastl::CharStrlen(p);
    return ((n1 >= n2) && s.compare(n1 - n2, n2, p) == 0);
}

Example:
    if(ends_with(s, "test.")) ...
```

**tokenize**

Here is a simple tokenization function that acts very much like the C strtok function.
+

```cpp
template
size_t tokenize(const String& s, const typename String::value_type* pDelimiters,
                String* resultArray, size_t resultArraySize)
{
    size_t n = 0;
    typename String::size_type lastPos = s.find_first_not_of(pDelimiters, 0);
    typename String::size_type pos = s.find_first_of(pDelimiters, lastPos);

    while((n < resultArraySize) && ((pos != String::npos) || (lastPos != String::npos)))
    {
        resultArray[n++].assign(s, lastPos, pos - lastPos);
        lastPos = s.find_first_not_of(pDelimiters, pos);
        pos = s.find_first_of(pDelimiters, lastPos);
    }

    return n;
}

Example:
    string resultArray[32];
    tokenize(s, " \t", resultArray, 32);
```

### Cont.25 How do EASTL smart pointers compare to Boost smart pointers?

EASTL's smart pointers are nearly identical to Boost (including all that crazy member template and dynamic cast functionality in shared_ptr), but are not using the Boost source code. EA legal has already stated that it is fine to have smart pointer classes with the same names and functionality as those present in Boost. EA legal specifically looked at the smart pointer classes in EASTL for this. There are two differences between EASTL smart pointers and Boost smart pointers:

* EASTL smart pointers don't have thread safety built-in. It was deemed that this is too much overhead and that thread safety is something best done at a higher level. By coincidence the C++ library proposal to add shared_ptr also omits the thread safety feature. FWIW, I put a thread-safe shared_ptr in EAThread, though it doesn't attempt to do all the fancy member template things that Boost shared_ptr does. Maybe I'll add that some day if people care.
* EASTL shared_ptr object deletion goes through a deletion object instead of through a virtual function interface. 95% of the time this makes no difference (aside from being more efficient), but the primary case where it matters is when you have shared_ptr and assign to it something like "new Widget".
+The problem is that shared_ptr doesn't know what destructor to call and so doesn't call a destructor unless you specify a custom destructor object as part of the template specification. I don't know what to say about this one, as it is less safe, but forcing everybody to have the overhead of additional templated classes and virtual destruction functions doesn't seem to be in the spirit of high performance or lean game development.

There is the possibility of making a shared_ptr_boost which is completely identical to Boost shared_ptr. So perhaps that will be done some day.

### Cont.26 How do you forward-declare an EASTL container?

Here are some examples of how to do this:

```cpp
namespace eastl
{
    template class basic_string;
    typedef basic_string string8; // Forward declare EASTL's string8 type.

    template class vector;
    typedef vector CharArray;

    template class hash_set;

    template class map;
}
```

The forward declaration can be used to declare a pointer or reference to such a class. It cannot be used to declare an instance of a class or refer to class data, static or otherwise. Nevertheless, forward declarations for pointers and references are useful for reducing the number of header files a header file needs to include.

### Cont.27 How do I make two containers share a memory pool?

EASTL (and std STL) allocators are specified by value semantics and not reference semantics. Value semantics is more powerful (because a value can also be a reference, but not the other way around), but is not always what people expect if they're used to writing things the other way.
+

Here is some example code:

```cpp
struct fixed_pool_reference
{
public:
    fixed_pool_reference()
    {
        mpFixedPool = NULL;
    }

    fixed_pool_reference(eastl::fixed_pool& fixedPool)
    {
        mpFixedPool = &fixedPool;
    }

    fixed_pool_reference(const fixed_pool_reference& x)
    {
        mpFixedPool = x.mpFixedPool;
    }

    fixed_pool_reference& operator=(const fixed_pool_reference& x)
    {
        mpFixedPool = x.mpFixedPool;
        return *this;
    }

    void* allocate(size_t /*n*/, int /*flags*/ = 0)
    {
        return mpFixedPool->allocate();
    }

    void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
    {
        return mpFixedPool->allocate();
    }

    void deallocate(void* p, size_t /*n*/)
    {
        return mpFixedPool->deallocate(p);
    }

    const char* get_name() const
    {
        return "fixed_pool_reference";
    }

    void set_name(const char* /*pName*/)
    {
    }

protected:
    friend bool operator==(const fixed_pool_reference& a, const fixed_pool_reference& b);
    friend bool operator!=(const fixed_pool_reference& a, const fixed_pool_reference& b);

    eastl::fixed_pool* mpFixedPool;
};

inline bool operator==(const fixed_pool_reference& a, const fixed_pool_reference& b)
{
    return (a.mpFixedPool == b.mpFixedPool);
}

inline bool operator!=(const fixed_pool_reference& a, const fixed_pool_reference& b)
{
    return (a.mpFixedPool != b.mpFixedPool);
}
```

Example usage of the above:

```cpp
typedef eastl::list IntList;

IntList::node_type buffer[2];
eastl::fixed_pool myPool(buffer, sizeof(buffer), sizeof(IntList::node_type), 2);

IntList myList1(myPool);
IntList myList2(myPool);

myList1.push_back(37);
myList2.push_back(39);
```

### Cont.28 Can I use a std (STL) allocator with EASTL?

No. EASTL allocators are similar in interface to std STL allocators, but not 100% compatible. If it was possible to make them compatible with std STL allocators but also match the design of EASTL then compatibility would exist.
+The primary reasons for lack of compatibility are:

* EASTL allocators have a different allocate function signature.
* EASTL allocators have as many as four extra required functions: ctor(name), get_name(), set_name(), allocate(size, align, offset).
* EASTL allocators have an additional allocate function specifically for aligned allocations, as listed directly above.

### What are the requirements of classes stored in containers?

Class types stored in containers must have:

* a public copy constructor
* a public assignment operator
* a public destructor
* an operator < that compares two such classes (sorted containers only).
* an operator == that compares two such classes (hash containers only).

Recall that the compiler generates basic versions of these functions for you when you don't implement them yourself, so you can omit any of the above if the compiler-generated version is sufficient.

For example, the following code will act incorrectly, because the user forgot to implement an assignment operator. The compiler-generated assignment operator will assign the refCount value, which the user doesn't want, and which will be called by the vector during resizing.

```cpp
struct NotAPod
{
    NotAPod(const NotAPod&) {} // Intentionally don't copy the refCount

    int refCount; // refCounts should not be copied between NotAPod instances.
};

eastl::vector v;
```

## Algorithms

### Algo.1 I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?

It may be possible that you are seeing floating point roundoff problems. Many STL algorithms require object comparisons to act consistently. However, floating point values sometimes compare differently between uses because in one situation a value might be in 32 bit form in system memory, whereas in another situation that value might be in an FPU register with a different precision.
+These are difficult problems to track down and aren't the fault of EASTL or whatever similar library you might be using. There are various solutions to the problem, but the important thing is to find a way to force the comparisons to be consistent.

The code below was an example of this happening, whereby the object pA->mPos was stored in system memory while pB->mPos was stored in a register and comparisons were inconsistent and a crash ensued.

```cpp
class SortByDistance : public binary_function
{
private:
    Vector3 mOrigin;

public:
    SortByDistance(Vector3 origin) {
        mOrigin = origin;
    }

    bool operator()(WorldTreeObject* pA, WorldTreeObject* pB) const {
        return (((WorldObject*)pA)->mPos - mOrigin).GetLength()
             < (((WorldObject*)pB)->mPos - mOrigin).GetLength();
    }
};
```

### Algo.2 How do I write a comparison (operator<()) for a struct that contains two or more members?

For a struct with two members such as the following:

```cpp
struct X {
    Blah m1;
    Blah m2;
};
```

You would write the comparison function like this:

```cpp
bool operator<(const X& a, const X& b) {
    return (a.m1 == b.m1) ? (a.m2 < b.m2) : (a.m1 < b.m1);
}
```

or, using only operator < but more instructions:

```cpp
bool operator<(const X& a, const X& b) {
    return (a.m1 < b.m1) || (!(b.m1 < a.m1) && (a.m2 < b.m2));
}
```

For a struct with three members, you would have:

```cpp
bool operator<(const X& a, const X& b) {
    if(a.m1 != b.m1)
        return (a.m1 < b.m1);
    if(a.m2 != b.m2)
        return (a.m2 < b.m2);
    return (a.m3 < b.m3);
}
```

And a somewhat messy implementation if you wanted to use only operator <.

Note also that you can use the above technique to implement operator < for spatial types such as vectors, points, and rectangles. You would simply treat the members of the struct as an array of values and ignore the fact that they have spatial meaning. All operator < cares about is that things order consistently.
+

```cpp
bool operator<(const Point2D& a, const Point2D& b) {
    return (a.x == b.x) ? (a.y < b.y) : (a.x < b.x);
}
```

### Algo.3 How do I sort something in reverse order?

Normally sorting puts the lowest value items first in the sorted range. You can change this by simply reversing the comparison. For example:

`sort(intVector.begin(), intVector.end(), greater());`

It's important that you use operator > instead of >=. The comparison function must return false for every case where values are equal.

### Algo.4 I'm getting errors about min and max while compiling.

You need to define NOMINMAX under VC++ when this occurs, as it otherwise defines min and max macros that interfere. There may be equivalent issues with other compilers. Also, VC++ has a specific header file which defines min and max macros but which doesn't pay attention to NOMINMAX and so in that case there is nothing to do but not include that file or to undefine min and max. minmax.h is not a standard file and its min and max macros are not standard C or C++ macros or functions.

### Algo.5 Why don't algorithms take a container as an argument instead of iterators? A container would be more convenient.

Having algorithms that use containers instead of iterators would reduce functionality with no increase in performance. This is because the use of iterators allows for the application of algorithms to sub-ranges of containers and allows for the application of algorithms to containers that aren't formal C++ objects, such as C-style arrays.

Providing additional algorithms that use containers would introduce redundancy with respect to the existing algorithms that use iterators.

### Algo.6 Given a container of pointers, how do I find an element by value (instead of by pointer)?

Functions such as find_if help you find a T element in a container of Ts.
+But if you have a container of pointers such as vector, these functions will enable you to find an element that matches a given Widget* pointer, but they don't let you find an element that matches a given Widget object.

You can write your own iterating 'for' loop and compare values, or you can use a generic function object to do the work if this is a common task:

```cpp
template
struct dereferenced_equal
{
    const T& mValue;

    dereferenced_equal(const T& value) : mValue(value) { }
    bool operator()(const T* pValue) const { return *pValue == mValue; }
};

...

find_if(container.begin(), container.end(), dereferenced_equal(someWidget));
```

### Algo.7 When do stored objects need to support operator < vs. when do they need to support operator ==?

Any object which is sorted needs to have operator < defined for it, implicitly via operator < or explicitly via a user-supplied Compare function. Sets and map containers require operator <, while sort, binary search, and min/max algorithms require operator <.

Any object which is compared for equality needs to have operator == defined for it, implicitly via operator == or explicitly via a user-supplied BinaryPredicate function. Hash containers require operator ==, while many of the algorithms other than those mentioned above for operator < require operator ==.

Some algorithms and containers require neither < nor ==. Interestingly, no algorithm or container requires both < and ==.

### Algo.8 How do I sort via pointers or array indexes instead of objects directly?

Pointers

```cpp
vector toArray;
vector topArray;

for(eastl_size_t i = 0; i < 32; i++)
    toArray.push_back(TestObject(rng.RandLimit(20)));
for(eastl_size_t i = 0; i < 32; i++) // This needs to be a second loop because the addresses might change in the first loop due to container resizing.
+ topArray.push_back(&toArray[i]);

struct TestObjectPtrCompare
{
    bool operator()(TestObject* a, TestObject* b)
        { return a->mX < b->mX; }
};

quick_sort(topArray.begin(), topArray.end(), TestObjectPtrCompare());
```

Array indexes

```cpp
vector toArray;
vector toiArray;

for(eastl_size_t i = 0; i < 32; i++)
{
    toArray.push_back(TestObject(rng.RandLimit(20)));
    toiArray.push_back(i);
}

struct TestObjectIndexCompare
{
    vector* mpArray;

    TestObjectIndexCompare(vector* pArray) : mpArray(pArray) { }
    TestObjectIndexCompare(const TestObjectIndexCompare& x) : mpArray(x.mpArray){ }
    TestObjectIndexCompare& operator=(const TestObjectIndexCompare& x) { mpArray = x.mpArray; return *this; }

    bool operator()(eastl_size_t a, eastl_size_t b)
        { return (*mpArray)[a] < (*mpArray)[b]; }
};

quick_sort(toiArray.begin(), toiArray.end(), TestObjectIndexCompare(&toArray));
```

Array indexes (simpler version using toArray as a global variable)

```cpp
vector toArray;
vector toiArray;

for(eastl_size_t i = 0; i < 32; i++)
{
    toArray.push_back(TestObject(rng.RandLimit(20)));
    toiArray.push_back(i);
}

struct TestObjectIndexCompare
{
    bool operator()(eastl_size_t a, eastl_size_t b)
        { return toArray[a] < toArray[b]; }
};

quick_sort(toiArray.begin(), toiArray.end(), TestObjectIndexCompare());
```

## Iterators

### Iter.1 What's the difference between iterator, const iterator, and const_iterator?

An iterator can be modified and the item it points to can be modified.
A const iterator cannot be modified, but the items it points to can be modified.
A const_iterator can be modified, but the items it points to cannot be modified.
A const const_iterator cannot be modified, nor can the items it points to.
+ +This situation is much like with char pointers: + +| Iterator type | Pointer equivalent | +|------|------| +| iterator | char* | +| const iterator | char* const | +| const_iterator | const char* | +| const const_iterator | const char* const | + +### Iter.2 How do I tell from an iterator what type of thing it is iterating? + +Use the value_type typedef from iterator_traits, as in this example + +```cpp +template +void DoSomething(Iterator first, Iterator last) +{ + typedef typename iterator_traits::value_type; + + // use value_type +} +``` + +### Iter.3 How do I iterate a container while (selectively) removing items from it? + +All EASTL containers have an erase function which takes an iterator as an argument and returns an iterator to the next item. Thus, you can erase items from a container while iterating it like so: + +```cpp +set intSet; +set::iterator i = intSet.begin(); + +while(i != intSet.end()) +{ + if(*i & 1) // Erase all odd integers from the container. + i = intSet.erase(i); + else + ++i; +} +``` + +### Iter.4 What is an insert_iterator? + +An insert_iterator is a utility class which is like an iterator except that when you assign a value to it, the insert_iterator inserts the value into the container (via insert()) and increments the iterator. Similarly, there are front_insert_iterator and back_insert_iterator, which are similar to insert_iterator except that assigning a value to them causes then to call push_front and push_back, respectively, on the container. These utilities may seem a slightly abstract, but they have uses in generic programming. + +---------------------------------------------- +End of document diff --git a/doc/Glossary.md b/doc/Glossary.md new file mode 100644 index 0000000..550209d --- /dev/null +++ b/doc/Glossary.md @@ -0,0 +1,93 @@ +# EASTL Glossary + +This document provides definitions to various terms related to EASTL. Items that are capitalized are items that are used as template parameters. 
+ +| | | +|------|------| +| adapter | An adapter is something that encapsulates a component to provide another interface, such as a C++ class which makes a stack out of a list. | +| algorithm | Algorithms are standalone functions which manipulate data which usually but not necessarily comes from a container. Some algorithms change the data while others don't. Examples are reverse, sort, find, and remove. | +| associative container | An associative container is a variable-sized container that supports efficient retrieval of elements (values) based on keys. It supports insertion and removal of elements, but differs from a sequence in that it does not provide a mechanism for inserting an element at a specific position. Associative containers include map, multimap, set, multiset, hash_map, hash_multimap, hash_set, hash_multiset. | +| array | An array is a C++ container which directly implements a C-style fixed array but which adds STL container semantics to it. | +| basic_string | A templated string class which is usually used to store char or wchar_t strings. | +| begin | The function used by all conventional containers to return the first item in the container. | +| BidirectionalIterator | An input iterator which is like ForwardIterator except it can be read in a backward direction as well. | +| BinaryOperation  | A function which takes two arguments and returns a value (which will usually be assigned to a third object). | +| BinaryPredicate | A function which takes two arguments and returns true if some criteria is met (e.g. they are equal). | +| binder1st, binder2nd | These are function objects which convert one function object into another.  In particular, they implement a binary function whereby you can specify one of the arguments.This is a somewhat abstract concept but has its uses. | +| bit vector | A specialized container that acts like vector but is implemented via one bit per entry. 
STL vector<bool> is usually implemented as a bit vector but EASTL avoids this in favor of a specific bit vector container. | +| bitset | An extensible yet efficient implementation of bit flags. Not strictly a conventional STL container and not the same thing as vector<bool> or a bit_vector, both of which are formal iterate-able containers. | +| capacity | Refers to the amount of total storage available in an array-based container such as vector, string, and array. Capacity is always >= container size and is > size in order to provide extra space for a container to grow into. | +| const_iterator | An iterator whose iterated items cannot be modified. A const_iterator is akin to a const pointer such as 'const char*'. | +| container | A container is an object that stores other objects (its elements), and that has methods for accessing its elements. In particular, every type that is a model of container has an associated iterator type that can be used to iterate through the container's elements. | +| copy constructor | A constructor for a type which takes another object of that type as its argument. For a hypothetical Widget class, the copy constructor is of the form Widget(const Widget& src); | +| Compare | A function which takes two arguments and returns true if the first is ordered before the second. | +| deque | The name deque is pronounced "deck" and stands for "double-ended queue."

A deque is very much like a vector: like vector, it is a sequence that supports random access to elements, constant time insertion and removal of elements at the end of the sequence, and linear time insertion and removal of elements in the middle.

The main way in which deque differs from vector is that deque also supports constant time insertion and removal of elements at the beginning of the sequence. Additionally, deque does not have any member functions analogous to vector's capacity() and reserve(), and does not provide the guarantees on iterator validity that are associated with those member functions. | +| difference_type | The typedef'd type used by all conventional containers and iterators to define the distance between two iterators. It is usually the same thing as the C/C++ ptrdiff_t data type. | +| empty | The function used by all conventional containers to tell if a container has a size of zero. In many cases empty is more efficient than checking for size() == 0. | +| element | An element refers to a member of a container. | +| end | The function used by all conventional containers to return one-past the last item in the container. | +| equal_range | equal_range is a version of binary search: it attempts to find the element value in an ordered range [first, last). The value returned by equal_range is essentially a combination of the values returned by lower_bound and upper_bound: it returns a pair of iterators i and j such that i is the first position where value could be inserted without violating the ordering and j is the last position where value could be inserted without violating the ordering. It follows that every element in the range [i, j) is equivalent to value, and that [i, j) is the largest subrange of [first, last) that has this property. | +| explicit instantiation | Explicit instantiation lets you create an instantiation of a templated class or function without actually using it in your code. Since this is useful when you are creating library files that use templates for distribution, uninstantiated template definitions are not put into object files. An example of the syntax for explicit instantiation is:
`template class vector<char>;`
`template void min<int>(int, int);`
`template void min(int, int);` | +| ForwardIterator | An input iterator which is like InputIterator except it can be reset back to the beginning. | +| Function | A function which takes one argument and applies some operation to the target. | +| function object, functor | A function object or functor is a class that has the function-call operator (operator()) defined. | +| Generator | A function which takes no arguments and returns a value (which will usually be assigned to an object). | +| hash_map, hash_multimap, hash_set, hash_multiset | The hash containers are implementations of map, multimap, set, and multiset via a hashtable instead of via a tree. Searches are O(1) (fast) but the container is not sorted. | +| heap | A heap is a data structure which is not necessarily sorted but is organized such that the highest priority item is at the top. A heap is synonymous with a priority queue and has numerous applications in computer science. | +| InputIterator | An input iterator (iterator you read from) which allows reading each element only once and only in a forward direction. | +| intrusive_list, intrusive_hash_map, etc. | Intrusive containers are containers which don't allocate memory but instead use their contained object to manage the container's memory. While list allocates nodes (with mpPrev/mpNext pointers) that contain the list items, intrusive_list doesn't allocate nodes but instead the container items have the mpPrev/mpNext pointers. | +| intrusive_ptr | intrusive_ptr is a smart pointer which doesn't allocate memory but instead uses the contained object to manage lifetime via addref and release functions. | +| iterator | An iterator is the fundamental entity of reading and enumerating values in a container. Much like a pointer can be used to walk through a character array, an iterator is used to walk through a linked list. | +| iterator category | An iterator category defines the functionality the iterator provides. 
The conventional iterator categories are InputIterator, ForwardIterator, BidirectionalIterator, RandomAccessIterator, and OutputIterator. See the definitions of each of these for more information.Iterator category is synonymous with iterator_tag. | +| iterator_tag | See iterator category. | +| key_type, Key | A Key or key_type is the identifier used by associative (a.k.a. dictionary) containers (e.g. map, hash_map) to identify the type used to index the mapped_type. If you have a dictionary of strings that you access by an integer id, the ids are the keys and the strings are the mapped types. | +| lexicographical compare | A lexicographical compare is a comparison of two containers that compares them element by element, much like the C strcmp function compares two strings. | +| linked_ptr | A linked_ptr is a shared smart pointer which implements object lifetime via a linked list of all linked_ptrs that are referencing the object. linked_ptr, like intrusive_ptr, is a non-memory-allocating alternative to shared_ptr. | +| list | A list is a doubly linked list. It is a sequence that supports both forward and backward traversal, and (amortized) constant time insertion and removal of elements at the beginning or the end, or in the middle. Lists have the important property that insertion and splicing do not invalidate iterators to list elements, and that even removal invalidates only the iterators that point to the elements that are removed. The ordering of iterators may be changed (that is, list::iterator might have a different predecessor or successor after a list operation than it did before), but the iterators themselves will not be invalidated or made to point to different elements unless that invalidation or mutation is explicit. | +| lower_bound | lower_bound is a version of binary search: it attempts to find the element value in an ordered range [first, last). Specifically, it returns the first position where value could be inserted without violating the ordering. 
| +| map | Map is a sorted associative container that associates objects of type Key with objects of type T. Map is a pair associative container, meaning that its value type is pair. It is also a unique associative container, meaning that no two elements have the same key. It is implemented with a tree structure. | +| mapped_type | A mapped_type is a typedef used by associative containers to identify the container object which is accessed by a key. If you have a dictionary of strings that you access by an integer id, the ids are the keys and the strings are the mapped types. | +| member template | A member template is a templated function of a templated class. Thus with a member template function there are two levels of templating -- the class and the function. | +| multimap,  | Multimap is a sorted associative container that associates objects of type Key with objects of type T. multimap is a pair associative container, meaning that its value type is pair. It is also a multiple associative container, meaning that there is no limit on the number of elements with the same key.It is implemented with a tree structure. | +| multiset | Multiset is a sorted associative container that stores objects of type Key. Its value type, as well as its key type, is Key. It is also a multiple associative container, meaning that two or more elements may be identical. It is implemented with a tree structure. | +| node | A node is a little holder class used by many containers to hold the contained items. A linked-list, for example, defines a node which has three members: mpPrev, mpNext, and T (the contained object). | +| npos | npos is used by the string class to identify a non-existent index. Some string functions return npos to indicate that the function failed. | +| rel_ops | rel_ops refers to "relational operators" and is a set of templated functions which provide operator!= for classes that  have only operator== and provide operator > for classes that have only operator <, etc. 
Unfortunately, rel_ops have a habit of polluting the global operator space and creating conflicts. They must be used with discretion. | +| reverse_iterator | A reverse_iterator is an iterator which wraps a bidirectional or random access iterator and allows the iterator to be read in reverse direction. The difference between using reverse_iterators and just decrementing regular iterators is that reverse_iterators use operator++ to move backwards and thus work in any algorithm that calls ++ to move through a container. | +| OutputIterator | An output iterator (iterator you write to) which allows writing each element only once in only in a forward direction. | +| POD | POD means Plain Old Data. It refers to C++ classes which act like built-in types and C structs. These are useful to distinguish because some algorithms can be made more efficient when they can detect that they are working with PODs instead of regular classes.  | +| Predicate | A function which takes one argument returns true if the argument meets some criteria. | +| priority_queue | A priority_queue is an adapter container which implements a heap via a random access container such as vector or deque. | +| queue | A queue is an adapter container which implements a FIFO (first-in, first-out) container with which you can add items to the back and get items from the front. | +| RandomAccessIterator | An input iterator which can be addressed like an array. It is a superset of all other input iterators. | +| red-black tree | A red-black tree is a binary tree which has the property of being always balanced. The colors red and black are somewhat arbitrarily named monikers for nodes used to measure the balance of the tree. Red-black trees are considered the best all-around data structure for sorted containers. | +| scalar | A scalar is a data type which is implemented via a numerical value. In C++ this means integers, floating point values, enumerations, and pointers.  
| scoped_ptr | A scoped_ptr is a smart pointer which is the same as C++ auto_ptr except that it cannot be copied. | +| set | Set is a sorted associative container that stores objects of type Key. Its value type, as well as its key type, is Key. It is also a unique associative container, meaning that no two elements are the same. It is implemented with a tree structure. | +| sequence | A sequence is a variable-sized container whose elements are arranged in a strict linear (though not necessarily contiguous) order. It supports insertion and removal of elements. Sequence containers include vector, deque, array, list, slist. | +| size | All conventional containers have a size member function which returns the count of elements in the container. The efficiency of the size function differs between containers. | +| size_type | The type that a container uses to define its size and counts. This is similar to the C/C++ size_t type but may be specialized for the container. | +| skip list | A skip-list is a type of container which is an alternative to a binary tree for finding data. | +| shared_ptr | A shared_ptr is a smart pointer which allows multiple references (via multiple shared_ptrs) to the same object. When the last shared_ptr goes away, the pointer is freed. shared_ptr is implemented via a shared count between all instances. | +| slist | An slist is like a list but is singly-linked instead of doubly-linked. It can only be iterated in a forward-direction. | +| smart pointer | Smart pointer is a term that identifies a family of utility classes which store pointers and free them when the class instance goes out of scope. Examples of smart pointers are shared_ptr, linked_ptr, intrusive_ptr, and scoped_ptr. | +| splice | Splicing refers to the moving of a subsequence of one Sequence into another Sequence. | +| stack | A stack is an adapter container which implements LIFO (last-in, first-out) access via another container such as a list or deque. 
| +| STL | Standard Template Library.  | +| StrictWeakOrdering | A BinaryPredicate that compares two objects, returning true if the first precedes the second. Like Compare but has additional requirements. Used for sorting routines.

This predicate must satisfy the standard mathematical definition of a strict weak ordering. A StrictWeakOrdering has to behave the way that "less than" behaves: if a is less than b then b is not less than a, if a is less than b and b is less than c then a is less than c, and so on. | +| string | See basic_string. | +| T | T is the template parameter name used by most containers to identify the contained element type.  | +| template parameter | A template parameter is the templated type used to define a template function or class. In the declaration 'template class vector{ },'  T is a template parameter. | +| template specialization | A template specialization is a custom version of a template which overrides the default version and provides alternative functionality, often for the purpose of providing improved or specialized functionality. | +| treap | A tree-like structure implemented via a heap. This is an alternative to a binary tree (e.g. red-black tree), skip-list, and sorted array as a mechanism for a fast-access sorted container. | +| type traits | Type traits are properties of types. If you have a templated type T and you want to know if it is a pointer, you would use the is_pointer type trait. If you want to know if the type is a POD, you would use the is_pod type trait. Type traits are very useful for allowing the implementation of optimized generic algorithms and for asserting that types have properties expected by the function or class contract. For example, you can use type_traits to tell if a type can be copied via memcpy instead of a slower element-by-element copy. | +| typename | Typename is a C++ keyword used in templated function implementations which identifies to the compiler that the following expression is a type and not a value. It is used extensively in EASTL, particularly in the algorithms. | +| UnaryOperation | A function which takes one argument and returns a value (which will usually be assigned to second object). 
| +| upper_bound | upper_bound is a version of binary search: it attempts to find the element value in an ordered range [first, last). Specifically, it returns the last position where value could be inserted without violating the ordering. | +| value_type, Value | A value_type is a typedef used by all containers to identify the elements they contain. In most cases value_type is simply the same thing as the user-supplied T template parameter. The primary exception is the associative containers whereby value_type is the pair of key_type and mapped_type. | +| vector | A vector is a Sequence that supports random access to elements, constant time insertion and removal of elements at the end, and linear time insertion and removal of elements at the beginning or in the middle. The number of elements in a vector may vary dynamically; memory management is automatic. Vector is the simplest of the container classes, and in many cases the most efficient. | +| vector_map, vector_multimap, vector_set, vector_multiset | These are containers that implement the functionality of map, multimap, set, and multiset via a vector or deque instead of a tree. They use less memory and find items faster, but are slower to modify and modification invalidates iterators. | +| weak_ptr | A weak_ptr is an adjunct to shared_ptr which doesn't increment the reference on the contained object but can safely tell you if the object still exists and access it if so. It has uses in preventing circular references in shared_ptrs. | + +---------------------------------------------- +End of document diff --git a/doc/Gotchas.md b/doc/Gotchas.md new file mode 100644 index 0000000..aefe362 --- /dev/null +++ b/doc/Gotchas.md @@ -0,0 +1,134 @@ +# EASTL Gotchas + +There are some cases where the EASTL design results in "gotchas" or behavior that isn't necessarily what the new user would expect. These are all situations in which this behavior may be undesirable. 
One might ask, "Why not change EASTL to make these gotchas go away?" The answer is that in each case making the gotchas go away would either be impossible or would compromise the functionality of the library. + +## Summary + +The descriptions here are intentionally terse; this is to make them easier to visually scan. + +1. [map::operator[] can create elements.](#mapoperator-can-create-elements) +2. [char* converts to string silently.](#char-converts-to-string-silently) +3. [char* is compared by ptr and not by contents.](#char-is-compared-by-ptr-and-not-by-contents) +4. [Iterators can be invalidated by container mutations.](#iterators-can-be-invalidated-by-container-mutations) +5. [Vector resizing may cause ctor/dtor cascades.](#vector-resizing-may-cause-ctordtor-cascades) +6. [Vector and string insert/push_back/resize can reallocate.](#vector-and-string-insertpush_backresize-can-reallocate) +7. [Deriving from containers may not work.](#deriving-from-containers-may-not-work) +8. [set::iterator is const_iterator.](#setiterator-is-const_iterator) +9. [Inserting elements means copying by value.](#inserting-elements-means-copying-by-value) +10. [Containers of pointers can leak if you aren't careful.](#containers-of-pointers-can-leak-if-you-arent-careful) +11. [Containers of auto_ptrs can crash.](#containers-of-auto_ptrs-can-crash) +12. [Remove algorithms don't actually remove elements.](#remove-algorithms-dont-actually-remove-elements) +13. [list::size() is O(n).](#listsize-is-on) +14. [vector and deque::size() may incur integer division.](#vector-and-dequesize-may-incur-integer-division) +15. [Be careful making custom Compare functions.](#be-careful-making-custom-compare-functions) +16. [Comparisons involving floating point are dangerous.](#comparisons-involving-floating-point-are-dangerous) +17. [Writing beyond string::size and vector::size is dangerous.](#writing-beyond-stringsize-and-vectorsize-is-dangerous) +18. 
[Container operator=() doesn't copy allocators.](#container-operator-doesnt-copy-allocators) + +## Detail + +### map::operator[] can create elements. + +By design, map operator[] creates a value for you if it isn't already present. The reason for this is that the alternative behavior would be to throw an exception, and such behavior isn't desirable. The resolution is to simply use the map::find function instead of operator[]. + +### char* converts to string silently. + +The string class has a non-explicit constructor that takes char* as an argument. Thus if you pass char* to a function that takes a string object, a temporary string will be created. In some cases this is undesirable behavior but the user may not notice it right away, as the compiler gives no warnings. The reason that the string constructor from char* is not declared explicit is that doing so would prevent the user from using expressions such as: string s = "hello". In this example, no temporary string object is created, but the syntax is not possible if the char* constructor is declared explicit. Thus a decision to make the string char* constructor explicit involves tradeoffs. + +There is an EASTL configuration option called EASTL_STRING_EXPLICIT which makes the string char* ctor explicit and avoids the behaviour described above. + +### char* is compared by ptr and not by contents. + +If you have a set of strings declared as set<const char*>, the find function will compare via the pointer value and not the string contents. The workaround is to make a set of string objects or, better, to supply a custom string comparison function to the set. The workaround is not to declare a global operator< for type char*, as that could cause other systems to break. + +### Iterators can be invalidated by container mutations + +With some containers, modifications of them may invalidate iterators into them. With other containers, modifications of them only invalidate an iterator if the modification involves the element that iterator refers to. 
Containers in the former category include vector, deque, basic_string (string), vector_map, vector_multimap, vector_set, and vector_multiset. Containers in the latter category include list, slist, map, multimap, multiset, all hash containers, and all intrusive containers. + +### Vector resizing may cause ctor/dtor cascades. + +If elements are inserted into a vector in middle of the sequence, the elements from the insertion point to the end will be copied upward. This will necessarily cause a series of element constructions and destructions as the elements are copied upward. Similarly, if an element is appended to a vector but the vector capacity is exhausted and needs to be reallocated, the entire vector will undergo a construction and destruction pass as the values are copied to the new storage. This issue exists for deque as well, though to a lesser degree. For vector, the resolution is to reserve enough space in your vector to prevent such reallocation. For deque the resolution is to set its subarray size to enough to prevent such reallocation. Another solution that can often be used is to take advantage of the has_trivial_relocate type trait, which can cause such moves to happen via memcpy instead of via ctor/dtor calls. If your class can be safely memcpy'd, you can use EASTL_DECLARE_TRIVIAL_RELOCATE to tell the compiler it can be memcpy'd. Note that built-in scalars (e.g. int) already are automatically memcpy'd by EASTL. + +### Vector and string insert/push_back/resize can reallocate. + +If you create an empty vector and use push_back to insert 100 elements, the vector will reallocate itself at least three or four times during the operation. This can be an undesirable thing. The best thing to do if possible is to reserve the size you will need up front in the vector constructor or before you add any elements. + +### Deriving from containers may not work. + +EASTL containers are not designed with the guarantee that they can be arbitrarily subclassed. 
This is by design and is done for performance reasons, as such guarantees would likely involve making containers use virtual functions. However, some types of subclassing can be successful and EASTL does such subclassing internally to its advantage. The primary problem with subclassing results when a parent class function calls a function that the user wants to override. The parent class cannot see the overridden function and silent unpredictable behavior will likely occur. If your derived container acts strictly as a wrapper for the container then you will likely be able to successfully subclass it. + +### set::iterator is const_iterator. + +The reason this is so is that a set is an ordered container and changing the value referred to by an iterator could make the set be out of order. Thus, set and multiset iterators are always const_iterators. If you need to change the value and are sure the change will not alter the container order, use const_cast or declare mutable member variables for your contained object. This resolution is the one blessed by the C++ standardization committee. This issue is addressed in more detail in the EASTL FAQ. + +### Inserting elements means copying by value. + +When you insert an element into a (non-intrusive) container, the container makes a copy of the element. There is no provision to take over ownership of an object from the user. The exception to this is of course when you use a container of pointers instead of a container of values. See the entry below regarding containers of pointers. Intrusive containers (e.g. intrusive_list) do in fact take over the user-provided value, and thus provide another advantage over regular containers in addition to avoiding memory allocation. + +### Containers of pointers can leak if you aren't careful. + +Containers of points don't know or care about the possibility that the pointer may have been allocated and need to be freed. Thus if you erase such elements from a container they are not freed. 
The resolution is to manually free the pointers when removing them or to instead use a container of smart pointers (shared smart pointers, in particular). This issue is addressed in more detail in the EASTL FAQ and the auto_ptr-related entry below. + +### Containers of auto_ptrs can crash + +We suggested above that the user can use a container of smart pointers to automatically manage contained pointers. However, you don't want to use auto_ptr, as auto_ptrs cannot be safely assigned to each other; doing so results in a stale pointer and most likely a crash. + +### Remove algorithms don't actually remove elements. + +Algorithms such as remove, remove_if, remove_heap, and unique do not erase elements from the sequences they work on. Instead, they return an iterator to the new end of the sequence and the user must call erase with that iterator in order to actually remove the elements from the container. This behavior exists because algorithms work on sequences via iterators and don't know how to work with containers. Only the container can know how to best erase its own elements. In each case, the documentation for the algorithm reminds the user of this behavior. Similarly, the copy algorithm copies elements from one sequence to another and doesn't modify the size of the destination sequence. So the destination must hold at least as many items as the source, and if it holds more items, you may want to erase the items at the end after the copy. + +### list::size() is O(n). + +By this we mean that calling size() on a list will iterate the list and add the size as it goes. Thus, getting the size of a list is not a fast operation, as it requires traversing the list and counting the nodes. We could make list::size() be fast by having a member mSize variable. There are reasons for having such functionality and reasons for not having such functionality. 
We currently choose to not have a member mSize variable as it would add four bytes to the class, add processing to functions such as insert and erase, and would only serve to improve the size function, but no other function. The alternative argument is that the C++ standard states that std::list should be an O(1) operation (i.e. have a member size variable), most C++ standard library list implementations do so, the size is but an integer which is quick to update, and many users expect to have a fast size function. All of this applies to slist and intrusive_list as well. + +Note that EASTL's config.h file has an option in it to cause list and slist to cache their size with an mSize variable and thus make size() O(1). This option is disabled by default. + +### vector and deque::size() may incur integer division. + +Some containers (vector and deque in particular) calculate their size by pointer subtraction. For example, the implementation of vector::size() is 'return mpEnd - mpBegin'. This looks like a harmless subtraction, but if the size of the contained object is not an even power of two then the compiler will likely need to do an integer division to calculate the value of the subtracted pointers. One might suggest that vector use mpBegin and mnSize as member variables instead of mpBegin and mpEnd, but that would incur costs in other vector operations. The suggested workaround is to iterate a vector instead of using a for loop and operator[] and for those cases where you do use a for loop and operator[], get the size once at the beginning of the loop instead of repeatedly during the condition test. + +### Be careful making custom Compare functions. + +A Compare function compares two values and returns true if the first is less than the second. This is easy to understand for integers and strings, but harder to get right for more complex structures. Many a time have people decided to come up with a fancy mechanism for comparing values and made mistakes. 
The FAQ has a couple entries related to this. See http://blogs.msdn.com/oldnewthing/archive/2003/10/23/55408.aspx for a story about how this can go wrong by being overly clever. + +### Comparisons involving floating point are dangerous. + +Floating point comparisons between two values that are very nearly equal can result in inconsistent results. Similarly, floating point comparisons between NaN values will always generate inconsistent results, as NaNs by definition always compare as non-equal. You thus need to be careful when using comparison functions that work with floating point values. Conversions to integral values may help the problem, but not necessarily. + +### Writing beyond string::size and vector::size is dangerous. + +A trick that often comes to mind when working with strings is to set the string capacity to some maximum value, strcpy data into it, and then resize the string when done. This can be done with EASTL, but only if you resize the string to the maximum value and not reserve the string to the maximum value. The reason is that when you resize a string from size (n) to size (n + count), the count characters are zeroed and overwrite the characters that you strcpyd. + +The following code is broken: + +```cpp +string mDataDir; + + + mDataDir.reserve(kMaxPathLength); // reserve + strcpy(&mDataDir[0], "blah/blah/blah"); + +mDataDir.resize(strlen(&mDataDir[0])); // Overwrites your blah/... with 00000... +``` + +This following code is OK: + +```cpp +string mDataDir; + + + mDataDir.resize(kMaxPathLength); // resize + strcpy(&mDataDir[0], "blah/blah/blah"); + +mDataDir.resize(strlen(&mDataDir[0])); +``` + +### Container operator=() doesn't copy allocators. + +EASTL container assignment (e.g. vector::operator=(const vector&)) doesn't copy the allocator. There are good and bad reasons for doing this, but that's how it acts. 
So you need to be aware that you must assign the allocator separately or make a container subclass which overrides operator=() and does this.
We can say with some confidence that you are not likely to find better implementations of these (commercial or otherwise), as these are the result of years of wisdom and diligent work. For a detailed list of EASTL modules, see EASTL Modules.html. + +## EASTL Suitability + +What uses are EASTL suitable for? Essentially any situation in tools and shipping applications where the functionality of EASTL is useful. Modern compilers are capable of producing good code with templates and many people are using them in both current generation and future generation applications on multiple platforms from embedded systems to servers and mainframes. + +---------------------------------------------- +End of document \ No newline at end of file diff --git a/doc/Maintenance.md b/doc/Maintenance.md new file mode 100644 index 0000000..82bdb80 --- /dev/null +++ b/doc/Maintenance.md @@ -0,0 +1,195 @@ +# EASTL Maintenance + +## Introduction + +The purpose of this document is to provide some necessary background for anybody who might do work on EASTL. Writing generic templated systems like EASTL can be surprisingly tricky. There are numerous details of the C++ language that you need to understand which don't usually come into play during the day-to-day C++ coding that many people do. It is easy to make a change to some function that seems proper and works for your test case but either violates the design expectations or simply breaks under other circumstances. + +It may be useful to start with an example. Here we provide an implementation of the count algorithm which is seems simple enough. Except it is wrong and while it will compile in some cases it won't compile in others: + +```cpp +int count(InputIterator first, InputIterator last, const T& value) +{ +     int result = 0; + +     for(; first < last; ++first){ +         if(*first == value) +             ++result; +     } + +     return result; + } + ``` + +The problem is with the comparison 'first < last'. 
The count algorithm takes an InputIterator and operator< is not guaranteed to exist for any given InputIterator (and indeed while operator< exists for vector::iterator, it doesn't exist for list::iterator). The comparison in the above algorithm must instead be implemented as 'first != last'. If we were working with a RandomAccessIterator then 'first < last' would be valid. + +In the following sections we cover various topics of interest regarding the development and maintentance of EASTL. Unfortunately, this document can't cover every aspect of EASTL maintenance issues, but at least it should give you a sense of the kinds of issues. + +## C++ Language Standard + +First and foremost, you need to be familiar with the C++ standard. In particular, the sections of the standard related to containers, algorithms, and iterators are of prime significance. We'll talk about some of this in more detail below. Similarly, a strong understanding of the basic data types is required. What is the difference between ptrdiff_t and intptr_t; unsigned int and size_t; char and signed char? + +In addition to the C++ language standard, you'll want to be familiar with the C++ Defect Report. This is a continuously updated document which lists flaws in the original C++ language specification and the current thinking as the resolutions of those flaws. You will notice various references to the Defect Report in EASTL source code. + +Additionally, you will want to be familiar with the C++ Technical Report 1 (as of this writing there is only one). This document is the evolving addendum to the C++ standard based on both the Defect Report and based on desired additions to the C++ language and standard library. + +Additionally, you will probably want to have some familiarity with Boost. It also helps to keep an eye on comp.std.c++ Usenet discussions. However, watch out for what people say on Usenet. They tend to defend GCC, Unix, std STL, and C++ to a sometimes unreasonable degree. 
Many discussions ignore performance implications and concentrate only on correctness and sometimes academic correctness above usability. + +## Language Use + +Macros are (almost) not allowed in EASTL. A prime directive of EASTL is to be easier to read by users and most of the time macros are an impedence to this. So we avoid macros at all costs, even if it ends up making our development and maintenance more difficult. That being said, you will notice that the EASTL config.h file uses macros to control various options. This is an exception to the rule; when we talk about not using macros, we mean with the EASTL implementation itself. + +EASTL assumes a compliant and intelligent C++ compiler, and thus all language facilities are usable. However, we nevertheless choose to stay away from some language functionality. The primary language features we avoid are: + +* RTTI (run-time-type-identification) (this is deemed too costly) +* Template export (few compilers support this) +* Exception specifications (most compilers ignore them) + +Use of per-platform or per-compiler code should be avoided when possible but where there is a significant advantage to be gained it can and indeed should be used. An example of this is the GCC __builtin_expect feature, which allows the user to give the compiler a hint about whether an expression is true or false. This allows for the generation of code that executes faster due to more intelligent branch prediction. + +## Prime Directives + +The implementation of EASTL is guided foremost by the following directives which are listed in order of importance. + +1. Efficiency (speed and memory usage) +2. Correctness (doesn't have bugs) +3. Portability (works on all required platforms with minimal specialized code) +4. Readability (code is legible and comments are present and useful) + +Note that unlike commercial STL implementations which must put correctness above all, we put a higher value on efficiency. 
As a result, some functionality may have some usage limitation that is not present in other similar systems but which allows for more efficient operation, especially on the platforms of significance to us. + +Portability is significant, but not critical. Yes, EASTL must compile and run on all platforms that we will ship games for. But we don't take that to mean under all compilers that could be conceivably used for such platforms. For example, Microsoft VC6 can be used to compile Windows programs, but VC6's C++ support is too weak for EASTL and so you simply cannot use EASTL under VC6. + +Readability is something that EASTL achieves better than many other templated libraries, particularly Microsoft STL and STLPort. We make every attempt to make EASTL code clean and sensible. Sometimes our need to provide optimizations (particularly related to type_traits and iterator types) results in less simple code, but efficiency happens to be our prime directive and so it overrides all other considerations. + +## Coding Conventions + +Here we provide a list of coding conventions to follow when maintaining or adding to EASTL, starting with the three language use items from above: + +* No RTTI use. +* No use of exception specifications (e.g. appending the 'throw' declarator to a function). +* No use of exception handling itself except where explicitly required by the implementation (e.g. vector::at). +* Exception use needs to savvy to EASTL_EXCEPTIONS_ENABLED. +* No use of macros (outside of config.h). Macros make things more difficult for the user. +* No use of static or global variables. +* No use of global new, delete, malloc, or free. All memory must be user-specifyable via an Allocator parameter (default-specified or explicitly specified). +* Containers use protected member data and functions as opposed to private. This is because doing so allows subclasses to extend the container without the creation of intermediary functions. 
Recall from our [prime directives](#Prime_Directives) above that performance and simplicity overrule all. +* No use of multithreading primitives.  +* No use of the export keyword. +* We don't have a rule about C-style casts vs. C++ static_cast<>, etc. We would always use static_cast except that debuggers can't evaluate them and so in practice they can get in the way of debugging and tracing. However, if the cast is one that users don't tend to need to view in a debugger, C++ casts are preferred. +* No external library dependencies whatsoever, including standard STL. EASTL is dependent on only EABase and the C++ compiler.  +* All code must be const-correct. This isn't just for readability -- compilation can fail unless const-ness is used correctly everywhere.  +* Algorithms do not refer to containers; they refer only to iterators. +* Algorithms in general do not allocate memory. If such a situation arises, there should be a version of the algorithm which allows the user to provide the allocator. +* No inferior implementations. No facility should be added to EASTL unless it is of professional quality. +* The maintainer should emulate the EASTL style of code layout, regardless of the maintainer's personal preferences. When in Rome, do as the Romans do. EASTL uses 4 spaces for indents, which is how the large majority of code within EA is written. +* No major changes should be done without consulting a peer group. + +## Compiler Issues + +Historically, templates are the feature of C++ that has given C++ compilers the most fits. We are still working with compilers that don't completely and properly support templates. Luckily, most compilers are now good enough to handle what EASTL requires. Nevertheless, there are precautions we must take. + +It turns out that the biggest problem in writing portable EASTL code is that VC++ allows you to make illegal statements which are not allowed by other compilers. 
For example, VC++ will allow you to neglect using the typename keyword in template references, whereas GCC (especially 3.4+) requires it. + +In order to feel comfortable that your EASTL code is C++ correct and is portable, you must do at least these two things: + +* Test under at least VS2005, GCC 3.4+, GCC 4.4+, EDG, and clang. +* Test all functions that you write, as compilers will often skip the compilation of a template function if it isn't used. + +The two biggest issues to watch out for are 'typename' and a concept called "dependent names". In both cases VC++ will accept non-conforming syntax whereas most other compilers will not. Whenever you reference a templated type (and not a templated value) in a template, you need to prefix it by 'typename'. Whenever your class function refers to a base class member (data or function), you need to refer to it by "this->", "base_type::", or by placing a "using" statement in your class to declare that you will be referencing the given base class member. + +## Iterator Issues + +The most important thing to understand about iterators is the concept of iterator types and their designated properties. In particular, we need to understand the difference between InputIterator, ForwardIterator, BidirectionalIterator, RandomAccessIterator, and OutputIterator. These differences dictate both how we implement our algorithms and how we implement our optimizations. Please read the C++ standard for a reasonably well-implemented description of these iterator types. 
+ +Here's an example from EASTL/algorithm.h which demonstrates how we use iterator types to optimize the reverse algorithm based on the kind of iterator passed to it: + +```cpp +template +inline void reverse_impl(BidirectionalIterator first, BidirectionalIterator last, bidirectional_iterator_tag) +{ + for(; (first != last) && (first != --last); ++first) // We are not allowed to use operator <, <=, >, >= with + iter_swap(first, last); // a generic (bidirectional or otherwise) iterator. +} + + +template +inline void reverse_impl(RandomAccessIterator first, RandomAccessIterator last, random_access_iterator_tag) +{ + for(; first < --last; ++first) // With a random access iterator, we can use operator < to more efficiently implement + iter_swap(first, last); // this algorithm. A generic iterator doesn't necessarily have an operator < defined. +} + + +template +inline void reverse(BidirectionalIterator first, BidirectionalIterator last) +{ + typedef typename iterator_traits::iterator_category IC; + reverse_impl(first, last, IC()); +} +``` + +## Exception Handling + +You will notice that EASTL uses try/catch in some places (particularly in containers) and uses the EASTL_EXCEPTIONS_ENABLED define. For starters, any EASTL code that uses try/catch should always be wrapped within #if EASTL_EXCEPTIONS_ENABLED (note: #if, not #ifdef). + +This is simple enough, but what you may be wondering is how it is that EASTL decides to use try/catch for some sections of code and not for others. EASTL follows the C++ standard library conventions with respect to exception handling, and you will see similar exception handling in standard STL. The code that you need to wrap in try/catch is code that can throw a C++ exception (not to be confused with CPU exception) and needs to have something unwound (or fixed) as a result. The important thing is that the container be in a valid state after encountering such exceptions. 
In general the kinds of things that require such try/catch are: + +* Memory allocation failures (which throw exceptions) +* Constructor exceptions + +Take a look at the cases in EASTL where try/catch is used and see what it is doing. + +## Type Traits + +EASTL provides a facility called type_traits which is very similar to the type_traits being proposed by the C++ TR1 (see above). type_traits are useful because they tell you about properties of types at compile time. This allows you to do things such as assert that a data type is scalar or that a data type is const. The way we put them to use in EASTL is to take advantage of them to implement different pathways for functions based on types. For example, we can copy a contiguous array of scalars much faster via memcpy than we can via a for loop, though we could not safely employ the for loop for a non-trivial C++ class. + +As mentioned in the GeneralOptimizations section below, EASTL should take advantage of type_traits information to the extent possible to achive maximum effiiciency. + +## General Optimizations + +One of the primary goals of EASTL is to achieve the highest possible efficiency. In cases where EASTL functionality overlaps standard C++ STL functionality, standard STL implementations provided by compiler vendors are a benchmark upon which EASTL strives to beat. Indeed EASTL is more efficient than all other current STL implementations (with some exception in the case of some Metrowerks STL facilities). Here we list some of the things to look for when considering optimization of EASTL code These items can be considered general optimization suggestions for any code, but this particular list applies to EASTL: + +* Take advantage of type_traits to the extent possible (e.g. to use memcpy to move data instead of a for loop when possible). +* Take advantage of iterator types to the extent possible. 
+* Take advantage of the compiler's expectation that if statements are expected to evaluate as true and for loop conditions are expected to evaluate as false. +* Make inline-friendly code. This often means avoiding temporaries to the extent possible. +* Minimize branching (i.e. minimize 'if' statements). Where branching is used, make it so that 'if' statements execute as true. +* Use EASTL_LIKELY/EASTL_UNLIKELY to give branch hints to the compiler when you are confident it will be beneficial. +* Use restricted pointers (EABase's EA_RESTRICT or various compiler-specific versions of __restrict). +* Compare unsigned values to < max instead of comparing signed values to >= 0 && < max. +* Employ power of 2 integer math instead of math with any kind of integer. +* Use template specialization where possible to implement improved functionality. +* Avoid function calls when the call does something trivial. This improves debug build speed (which matters) and sometimes release build speed as well, though sometimes makes the code intent less clear. A comment next to the code saying what call it is replacing makes the intent clear without sacrificing performance. + +## Unit Tests + +Writing robust templated containers and algorithms is difficult or impossible without a heavy unit test suite in place. EASTL has a pretty extensive set of unit tests for all containers and algorithms. While the successful automated unit testing of shipping application programs may be a difficult thing to pull off, unit testing of libraries such as this is of huge importance and cannot be understated. + +* When making a new unit test, start by copying one of the existing unit tests and follow its conventions. +* Test containers of both scalars and classes. +* Test algorithms on both container iterators (e.g. vector.begin()) and pointer iterators (e.g. int*). 
+* Make sure that algorithm or container member functions which take iterators work with the type of iterator they claim to (InputIterator, ForwardIterator, BidirectionalIterator, RandomAccessIterator).  +* Test for const-correctness. If a user is allowed to modify something that is supposed to be const, silent errors can go undetected. +* Make sure that unit tests cover all functions and all pathways of the tested code. This means that in writing the unit test you need to look at the source code to understand all the pathways. +* Consider using a random number generator (one is provided in the test library) to do 'monkey' testing whereby unexpected input is given to a module being tested. When doing so, make sure you seed the generator in a way that problems can be reproduced. +* While we avoid macros in EASTL user code, macros to assist in unit tests aren't considered a problem. However, consider that a number of macros could be replaced by templated functions and thus be easier to work with. +* Unit tests don't need to be efficient; feel free to take up all the CPU power and time you need to test a module sufficiently. +* EASTL containers are not thread-safe, by design. Thus there is no need to do multithreading tests as long as you stay away from the usage of static and global variables. +* Unit tests must succeed with no memory leaks and of course no memory corruption. The heap system should be configured to test for this, and heap validation functions are available to the unit tests while in the middle of runs. + +## Things to Keep in Mind + +* When referring to EASTL functions and types from EASTL code, make sure to preface the type with the EASTL namespace. If you don't do this you can get collisions due to the compiler not knowing if it should use the EASTL namespace or the namespace of the templated type for the function or type. +* Newly constructed empty containers do no memory allocation. 
Some STL and other container libraries allocate an initial node from the class memory allocator. EASTL containers by design never do this. If a container needs an initial node, that node should be made part of the container itself or be a static empty node object. +* Empty containers (new or otherwise) contain no constructed objects, including those that might be in an 'end' node. Similarly, no user object (e.g. of type T) should be constructed unless required by the design and unless documented in the cotainer/algorithm contract.  +* When creating a new container class, it's best to copy from an existing similar class to the extent possible. This helps keep the library consistent and resolves subtle problems that can happen in the construction of containers. +* Be very careful about tweaking the code. It's easy to think (for example) that a > could be switch to a >= where instead it is a big deal. Just about every line of code in EASTL has been thought through and has a purpose. Unit tests may or may not currently test every bit of EASTL, so you can't necessarily rely on them to give you 100% confidence in changes. If you are not sure about something, contact the original author and he will tell you for sure. +* Algorithm templates always work with iterators and not containers. A given container may of course implement an optimized form or an algorithm itself. +* Make sure everything is heavily unit tested. If somebody finds a bug, fix the bug and make a unit test to make sure the bug doesn't happen again. +* It's easy to get iterator categories confused or forgotten while implementing algorithms and containers. +* Watch out for the strictness of GCC 3.4+. There is a bit of syntax — especially related to templates — that other compilers accept but GCC 3.4+ will not. +* Don't forget to update the config.h EASTL_VERSION define before publishing. +* The vector and string classes define iterator to be T*. 
We want to always leave this so — at least in release builds — as this gives some algorithms an advantage that optimizers cannot get around. + +---------------------------------------------- +End of document diff --git a/doc/Modules.md b/doc/Modules.md new file mode 100644 index 0000000..fe13f0c --- /dev/null +++ b/doc/Modules.md @@ -0,0 +1,89 @@ +# EASTL Modules + +## Introduction + +We provide here a list of all top-level modules present or planned for future presence in EASTL. In some cases (e.g. algorithm), the module consists of many smaller submodules which are not described in detail here. In those cases you should consult the source code for those modules or consult the detailed documentation for those modules. This document is a high level overview and not a detailed document. + +## Module List + +| Module | Description | +|------|------| +| config | Configuration header. Allows for changing some compile-time options. | +| slist
fixed_slist | Singly-linked list.
fixed_slist is a version which is implemented via a fixed block of contiguous memory.| +| list
fixed_list | Doubly-linked list. | +| intrusive_list
intrusive_slist | List whereby the contained item provides the node implementation. | +| array | Wrapper for a C-style array which extends it to act like an STL container. | +| vector
fixed_vector | Resizable array container. +| vector_set
vector_multiset | Set implemented via a vector instead of a tree. Speed and memory use is improved but resizing is slower. | +| vector_map
vector_multimap | Map implemented via a vector instead of a tree. Speed and memory use is improved but resizing is slower. | +| deque | Double-ended queue, but also with random access. Acts like a vector but insertions and removals are efficient. | +| bit_vector | Implements a vector of bool, but the actual storage is done with one bit per bool. Not the same thing as a bitset. | +| bitset | Implements an efficient arbitrarily-sized bitfield. Note that this is not strictly the same thing as a vector of bool (bit_vector), as it is optimized to act like an arbitrary set of flags and not to be a generic container which can be iterated, inserted, removed, etc. | +| set
multiset
fixed_set
fixed_multiset | A set is a sorted unique collection, multiset is sorted but non-unique collection. | +| map
multimap
fixed_map
fixed_multimap | A map is a sorted associative collection implemented via a tree. It is also known as dictionary. | +| hash_map
hash_multimap
fixed_hash_map
fixed_hash_multimap | Map implemented via a hash table. | +| intrusive_hash_map
intrusive_hash_multimap
intrusive_hash_set
intrusive_hash_multiset | hash_map whereby the contained item provides the node implementation, much like intrusive_list. | +| hash_set
hash_multiset
fixed_hash_set
fixed_hash_map | Set implemented via a hash table. +| basic_string
fixed_string
fixed_substring | basic_string is a character string/array.
fixed_substring is a string which is a reference to a range within another string or character array.
cow_string is a string which implements copy-on-write. | +| algorithm | min/max, find, binary_search, random_shuffle, reverse, etc. | +| sort | Sorting functionality, including functionality not in STL. quick_sort, heap_sort, merge_sort, shell_sort, insertion_sort, etc. | +| numeric | Numeric algorithms: accumulate, inner_product, partial_sum, adjacent_difference, etc. | +| heap | Heap structure functionality: make_heap, push_heap, pop_heap, sort_heap, is_heap, remove_heap, etc. | +| stack | Adapts any container into a stack. | +| queue | Adapts any container into a queue. | +| priority_queue | Implements a conventional priority queue via a heap structure. | +| type_traits | Type information, useful for writing optimized and robust code. Also used for implementing optimized containers and algorithms. | +| utility | pair, make_pair, rel_ops, etc. | +| functional | Function objects. | +| iterator | Iteration for containers and algorithms. | +| smart_ptr | Smart pointers: shared_ptr, shared_array, weak_ptr, scoped_ptr, scoped_array, linked_ptr, linked_array, intrusive_ptr. | +  + +## Module Behaviour + +The overhead sizes listed here refer to an optimized release build; debug builds may add some additional overhead. Some of the overhead sizes may be off by a little bit (usually at most 4 bytes). This is because the values reported here are those that refer to when EASTL's container optimizations have been complete. These optimizations may not have been completed as you are reading this. 
+ +| Container |Stores | Container Overhead (32 bit) | Container Overhead (64 bit) | Node Overhead (32 bit) | Node Overhead (64 bit) | Iterator category | size() efficiency | operator[] efficiency | Insert efficiency | Erase via Iterator efficiency | Find efficiency | Sort efficiency | +|------|------|------|------|------|------|------|------|------|------|------|------|------| +| slist | T | 8 | 16 | 4 | 8 | f | n | - | 1 | 1 | n | n+ | +| list | T | 12 | 24 | 8 | 16 | b | n | - | 1 | 1 | n | n log(n) | +| intrusive_slist | T | 4 | 8 | 4 | 8 | f | n | - | 1 | 1 | 1 | n+ | +| intrusive_list | T | 8 | 16 | 8 | 16 | b | n | - | 1 | 1 | 1 | n log(n) | +| array | T | 0 | 0 | 0 | 0 | r | 1 | 1 | - | - | n | n log(n) | +| vector | T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) | +| vector_set | T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 | +| vector_multiset | T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 | +| vector_map | Key, T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 | +| vector_multimap | Key, T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | log(n) | 1 | +| deque | T | 44 | 84 | 0 | 0 | r | 1 | 1 | 1 at begin or end, else n / 2 | 1 at begin or end, else n / 2 | n | n log(n) | +| bit_vector | bool | 8 | 16 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) | +| string (all types) | T | 16 | 32 | 0 | 0 | r | 1 | 1 | 1 at end, else n | 1 at end, else n | n | n log(n) | +| set | T | 24 | 44 | 16 | 28 | b | 1 | - | log(n) | log(n) | log(n) | 1 | +| multiset | T | 24 | 44 | 16 | 28 | b | 1 | - | log(n) | log(n) | log(n) | 1 | +| map | Key, T | 24 | 44 | 16 | 28 | b | 1 | log(n) | log(n) | log(n) | log(n) | 1 | +| multimap | Key, T | 24 | 44 | 16 | 28 | b | 1 | - | log(n) | log(n) | log(n) | 1 | +| hash_set | T | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - | +| hash_multiset | T 
| 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - | +| hash_map | Key, T | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - | +| hash_multimap | Key, T | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - | +| intrusive_hash_set | T | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - | +| intrusive_hash_multiset | T | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - | +| intrusive_hash_map | T (Key == T) | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - | +| intrusive_hash_multimap | T (Key == T)  | 16 | 20 | 4 | 8 | b | 1 | - | 1 | 1 | 1 | - | + +* \- means that the operation does not exist. +* 1 means amortized constant time. Also known as O(1) +* n means time proportional to the container size. Also known as O(n) +* log(n) means time proportional to the natural logarithm of the container size. Also known as O(log(n)) +* n log(n) means time proportional to log(n) times the size of the container. Also known as O(n log(n)) +* n+ means that the time is at least n, and possibly higher. +* Iterator meanings are: f = forward iterator; b = bidirectional iterator, r = random iterator. +* Overhead indicates approximate per-element overhead memory required in bytes. Overhead doesn't include possible additional overhead that may be imposed by the memory heap used to allocate nodes. General heaps tend to have between 4 and 16 bytes of overhead per allocation, depending on the heap. +* Some overhead values are dependent on the structure alignment characteristics in effect. The values reported here are those that would be in effect for a system that requires pointers to be aligned on boundaries of their size and allocations with a minimum of 4 bytes (thus one byte values get rounded up to 4). +* Some overhead values are dependent on the size_type used by containers. We assume a size_type of 4 bytes, even for 64 bit machines, as this is the EASTL default. +* Inserting at the end of a vector may cause the vector to be resized; resizing a vector is O(n). 
However, the amortized time complexity for vector insertions at the end is constant. +* Sort assumes the usage of the best possible sort for a large container of random data. Some sort algorithms (e.g. quick_sort) require random access iterators and so the sorting of some containers requires a different sort algorithm. We do not include bucket or radix sorts, as they are always O(n). +* Some containers (e.g. deque, hash*) have unusual data structures that make per-container and per-node overhead calculations not quite account for all memory. + +---------------------------------------------- +End of document diff --git a/doc/html/EASTL Benchmarks.html b/doc/html/EASTL Benchmarks.html new file mode 100644 index 0000000..70ff23f --- /dev/null +++ b/doc/html/EASTL Benchmarks.html @@ -0,0 +1,330 @@ + + + + EASTL Benchmarks + + + + + + + + + + +

EASTL Benchmarks

+

Introduction

+

This document provides a number of benchmark results of EASTL. + Where possible, these benchmarks are implemented as comparisons + with equivalent functionality found in other libraries such as + compiler STL libraries or other well-known libraries. These + comparison benchmarks concentrate on highlighting the differences + between implementations rather than the similarities. In many + mundane cases -- such as accessing a vector element via operator [] + -- virtually all vector/array implementations you are likely to run + into will have identical performance.
+ + + +
+ + + +It's also important to note that the platform you run on can make a + significant difference in the results. On a modern 3+GHz Windows PC + many operations are fast due to large memory caches, intelligent + branch prediction, and parallel instruction execution. However, on + embedded or console systems none of these may be the case. +
+ + + +
+ + + +While EASTL generally outperforms std STL, there are some benchmarks + here in which EASTL is slower than std STL. There are three primary +explanations of this:

+
    + + + +
  1. EASTL is making some kind of speed, memory, or design tradeoff +that results in the given speed difference. In many such cases, EASTL +goes slower on one benchmark in order to go faster on another benchmark +deemed more important. This explanation constitutes about 60% of the +cases.
  2. + + + +
  3. Compiler optimizations and resulting code generation is +coincidentally favoring one kind of implementation over another, often +when they are visually virtually identical. This explanation +constitutes about 30% of the cases.
  4. + + + +
  5. EASTL is simply not yet as optimized as it could be. This +explanation constitutes about 10% of the cases (as of this writing +there are about three such functions throughout EASTL).
  6. + + + +
+ + + +

Benchmarks

+

Below is a table of links to detailed benchmark results derived from + the Benchmark test present in the EASTL package. The detailed results + are present below the table. Additional platforms will be added as + results become available for those platforms. Debug benchmarks are + present because (lack of) debug performance can be significant for + highly templated libraries. EASTL has specific optimizations to enhance + debug performance relative to other standard libraries; in some cases + it is 10x or more faster than alternatives (though there are exceptions where EASTL is slower). Feel free to submit results + for additional compilers/platforms.
+ + + +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PlatformCompilerSTL typeBuildResults
Win32VC++ 7.1Microsoft (Dinkumware)DebugDetail
Win32VC++ 7.1Microsoft (Dinkumware)ReleaseDetail
Win32VC++ 7.1STLPortDebugDetail
Win32VC++ 7.1STLPortReleaseDetail
+ + + + + + + + + + + + + +

+ + + + + Win32.VC71.MS.Debug

+
+
EASTL version: 0.96.00
Platform: Windows on X86
Compiler: Microsoft Visual C++ compiler, version 1310
Allocator: PPMalloc::GeneralAllocatorDebug. Thread safety enabled.
Build: Debug. Inlining disabled. STL debug features disabled.

Values are times to complete tests; smaller values are better.
Alarm indicates a greater than 10% difference.

Test VC++ EASTL Ratio Alarm
----------------------------------------------------------------------------------------
algorithm/adj_find/vector<TestObject> 33061345 6497757 5.09 *
algorithm/copy/vector<LargePOD> 5844906 4876076 1.20 *
algorithm/copy/vector<uint32_t> 1634346 166065 9.84 *
algorithm/copy_backward/vector<LargePOD> 4515974 4638892 0.97
algorithm/copy_backward/vector<uint32_t> 1821168 121746 14.96 *
algorithm/count/vector<uint64_t> 17048884 2720766 6.27 *
algorithm/equal_range/vector<uint64_t> 1111147812 448756888 2.48 *
algorithm/fill/bool[] 1728722 91936 18.80 *
algorithm/fill/char[]/'d' 1299200 33745 38.50 *
algorithm/fill/vector<char>/'d' 10205092 33796 100.00 *
algorithm/fill/vector<char>/0 10200748 33805 100.00 *
algorithm/fill/vector<uint64_t> 10416538 1399687 7.44 *
algorithm/fill/vector<void*> 10221837 1307700 7.82 *
algorithm/fill_n/bool[] 1399033 34196 40.91 *
algorithm/fill_n/char[] 1299225 33754 38.49 *
algorithm/fill_n/vector<uint64_t> 5961637 1371900 4.35 *
algorithm/find_end/string/end 16569373 2657372 6.24 *
algorithm/find_end/string/middle 16558638 20242410 0.82 *
algorithm/find_end/string/none 16811207 40480468 0.42 *
algorithm/lex_cmp/schar[] 1749674 194429 9.00 *
algorithm/lex_cmp/vector<TestObject> 32824195 5253587 6.25 *
algorithm/lex_cmp/vector<uchar> 29852034 202658 100.00 *
algorithm/lower_bound/vector<TestObject> 798624462 350027935 2.28 *
algorithm/min_element/vector<TestObject> 21675298 5314676 4.08 *
algorithm/rand_shuffle/vector<uint64_t> 84236190 43677506 1.93 *
algorithm/reverse/list<TestObject> 3007292 2105799 1.43 *
algorithm/reverse/vector<TestObject> 2974618 2124796 1.40 *
algorithm/search/string<char> 16228158 3594268 4.52 *
algorithm/search_n/string<char> 16926985 1522096 11.12 *
algorithm/unique/vector<TestObject> 54206243 9988002 5.43 *
algorithm/unique/vector<uint32_t> 26940079 1741991 15.47 *
algorithm/unique/vector<uint64_t> 47621344 5213127 9.13 *
algorithm/upper_bound/vector<uint32_t> 372381295 137901552 2.70 *

bitset<1500>/>>=/1 90196544 92539832 0.97
bitset<1500>/count 50753832 53742117 0.94
bitset<1500>/flip 86935875 85121117 1.02
bitset<1500>/reset 78153837 79922611 0.98
bitset<1500>/set() 79214968 79360658 1.00
bitset<1500>/set(i) 11300589 12199651 0.93
bitset<1500>/test 11282679 13186450 0.86 *

bitset<15>/>>=/1 10500577 6000559 1.75 *
bitset<15>/count 4000356 6399753 0.63 *
bitset<15>/flip 7268877 5647944 1.29 *
bitset<15>/reset 8564235 5800163 1.48 *
bitset<15>/set() 9935523 5914012 1.68 *
bitset<15>/set(i) 11199703 12503637 0.90 *
bitset<15>/test 10600623 12899592 0.82 *

bitset<35>/>>=/1 13076052 6599834 1.98 *
bitset<35>/count 4800384 11500330 0.42 *
bitset<35>/flip 7915439 5816313 1.36 *
bitset<35>/reset 9400049 5803180 1.62 *
bitset<35>/set() 10701152 5840316 1.83 *
bitset<35>/set(i) 11342936 12271128 0.92
bitset<35>/test 10670799 13099682 0.81 *

bitset<75>/>>=/1 14198834 17151088 0.83 *
bitset<75>/count 5795530 8576373 0.68 *
bitset<75>/flip 8516703 8922995 0.95
bitset<75>/reset 9999970 8526095 1.17 *
bitset<75>/set() 11124877 9009686 1.23 *
bitset<75>/set(i) 11300563 12531618 0.90 *
bitset<75>/test 11031913 13100523 0.84 *

deque<ValuePair>/erase 743801706 335646802 2.22 *
deque<ValuePair>/insert 742331809 341912866 2.17 *
deque<ValuePair>/iteration 29097030 16315827 1.78 *
deque<ValuePair>/operator[] 49859598 24026313 2.08 *
deque<ValuePair>/push_back 424807033 34497608 12.31 *
deque<ValuePair>/push_front 402313373 38006322 10.59 *
deque<ValuePair>/sort 725101017 581796551 1.25 *

hash_map<string, uint32_t>/clear 559462 961019 0.58 *
hash_map<string, uint32_t>/count 53377807 8091448 6.60 *
hash_map<string, uint32_t>/erase pos 613573 858084 0.72 *
hash_map<string, uint32_t>/erase range 5488748 461134 11.90 *
hash_map<string, uint32_t>/erase val 35760096 16379858 2.18 *
hash_map<string, uint32_t>/find 43490335 10324823 4.21 *
hash_map<string, uint32_t>/find_as/char* 49343818 8617139 5.73 *
hash_map<string, uint32_t>/insert 107420281 168690439 0.64 *
hash_map<string, uint32_t>/iteration 2456356 1255153 1.96 *
hash_map<string, uint32_t>/operator[] 47209502 12581624 3.75 *

hash_map<uint32_t, TestObject>/clear 533172 546449 0.98
hash_map<uint32_t, TestObject>/count 28667432 2899997 9.89 *
hash_map<uint32_t, TestObject>/erase pos 683239 538289 1.27 *
hash_map<uint32_t, TestObject>/erase range 9632676 253037 38.07 *
hash_map<uint32_t, TestObject>/erase val 25466026 7752188 3.29 *
hash_map<uint32_t, TestObject>/find 20048253 4678502 4.29 *
hash_map<uint32_t, TestObject>/insert 71085798 37686187 1.89 *
hash_map<uint32_t, TestObject>/iteration 1460318 1338317 1.09
hash_map<uint32_t, TestObject>/operator[] 23226692 7888748 2.94 *

heap (uint32_t[])/make_heap 5399966 6961305 0.78 *
heap (uint32_t[])/pop_heap 108060534 103511318 1.04
heap (uint32_t[])/push_heap 22595661 16640688 1.36 *
heap (uint32_t[])/sort_heap 93559424 83076731 1.13 *

heap (vector<TestObject>)/make_heap 91770743 21724870 4.22 *
heap (vector<TestObject>)/pop_heap 1175599317 284007398 4.14 *
heap (vector<TestObject>)/push_heap 207804541 45918046 4.53 *
heap (vector<TestObject>)/sort_heap 970394145 208321477 4.66 *

list<TestObject>/ctor(it) 805539509 760938607 1.06
list<TestObject>/ctor(n) 80959236 75106995 1.08
list<TestObject>/erase 1052543704 1044976137 1.01
list<TestObject>/find 97785267 75970884 1.29 *
list<TestObject>/insert 873895175 807051107 1.08
list<TestObject>/push_back 812797710 780742425 1.04
list<TestObject>/remove 1850600714 1436980599 1.29 *
list<TestObject>/reverse 180270465 80466636 2.24 *
list<TestObject>/size/1 440148 599642 0.73 *
list<TestObject>/size/10 439433 1329817 0.33 * EASTL intentionally implements list::size as O(n).
list<TestObject>/size/100 439595 11030060 0.04 * EASTL intentionally implements list::size as O(n).
list<TestObject>/splice 177106094 69383027 2.55 *

map<TestObject, uint32_t>/clear 508283 470807 1.08
map<TestObject, uint32_t>/count 43145354 14280357 3.02 *
map<TestObject, uint32_t>/equal_range 38594004 16520447 2.34 *
map<TestObject, uint32_t>/erase/key 33948082 16123175 2.11 *
map<TestObject, uint32_t>/erase/pos 578332 455201 1.27 * MS uses a code bloating implementation of erase.
map<TestObject, uint32_t>/erase/range 387345 284538 1.36 *
map<TestObject, uint32_t>/find 22897224 12766100 1.79 *
map<TestObject, uint32_t>/insert 61665800 47286928 1.30 *
map<TestObject, uint32_t>/iteration 1977202 745391 2.65 *
map<TestObject, uint32_t>/lower_bound 19892941 12260928 1.62 *
map<TestObject, uint32_t>/operator[] 24199084 15429634 1.57 *
map<TestObject, uint32_t>/upper_bound 19842409 12064441 1.64 *

set<uint32_t>/clear 1027625 1000901 1.03
set<uint32_t>/count 39730182 13329565 2.98 *
set<uint32_t>/equal_range 34681649 14768827 2.35 *
set<uint32_t>/erase range 841458 602030 1.40 *
set<uint32_t>/erase/pos 1380485 1084303 1.27 * MS uses a code bloating implementation of erase.
set<uint32_t>/erase/val 31617425 13344023 2.37 *
set<uint32_t>/find 19582428 10788864 1.82 *
set<uint32_t>/insert 61434014 48232086 1.27 *
set<uint32_t>/iteration 1512057 667820 2.26 *
set<uint32_t>/lower_bound 18394885 10402785 1.77 *
set<uint32_t>/upper_bound 17189083 10554425 1.63 *

sort/q_sort/TestObject[] 87088799 15037988 5.79 *
sort/q_sort/TestObject[]/sorted 21502892 3284299 6.55 *
sort/q_sort/vector<TestObject> 87962047 15004677 5.86 *
sort/q_sort/vector<TestObject>/sorted 21396523 3341163 6.40 *
sort/q_sort/vector<ValuePair> 80334589 10429161 7.70 *
sort/q_sort/vector<ValuePair>/sorted 22133295 3230553 6.85 *
sort/q_sort/vector<uint32> 72195388 5940302 12.15 *
sort/q_sort/vector<uint32>/sorted 19635171 995495 19.72 *

string<char16_t>/compare 523013373 534722089 0.98
string<char16_t>/erase/pos,n 3446597 3439492 1.00
string<char16_t>/find/p,pos,n 383873158 441902786 0.87 *
string<char16_t>/find_first_not_of/p,pos,n 174157 134131 1.30 *
string<char16_t>/find_first_of/p,pos,n 11715423 8520944 1.37 *
string<char16_t>/find_last_of/p,pos,n 1871556 1226457 1.53 *
string<char16_t>/insert/pos,p 3624877 3357058 1.08
string<char16_t>/iteration 6766787933 581916665 11.63 *
string<char16_t>/operator[] 4820827 2335579 2.06 *
string<char16_t>/push_back 59812962 6757466 8.85 *
string<char16_t>/replace/pos,n,p,n 4371279 4459713 0.98
string<char16_t>/reserve 2307530 1919386 1.20 *
string<char16_t>/rfind/p,pos,n 734826 372615 1.97 *
string<char16_t>/size 41608 28866 1.44 *
string<char16_t>/swap 1033932 1490994 0.69 *

string<char8_t>/compare 63086797 64194771 0.98
string<char8_t>/erase/pos,n 2045687 1960270 1.04
string<char8_t>/find/p,pos,n 123872549 471364764 0.26 *
string<char8_t>/find_first_not_of/p,pos,n 140013 130271 1.07
string<char8_t>/find_first_of/p,pos,n 8051906 8749994 0.92
string<char8_t>/find_last_of/p,pos,n 1318835 1230715 1.07
string<char8_t>/insert/pos,p 1770610 1724234 1.03
string<char8_t>/iteration 28112136 2544475 11.05 *
string<char8_t>/operator[] 4810525 2255841 2.13 *
string<char8_t>/push_back 54869634 6127447 8.95 *
string<char8_t>/replace/pos,n,p,n 2737578 2847900 0.96
string<char8_t>/reserve 1123395 394902 2.84 *
string<char8_t>/rfind/p,pos,n 737299 368518 2.00 *
string<char8_t>/size 42245 26801 1.58 *
string<char8_t>/swap 1036142 1491028 0.69 *

vector<uint64>/erase 56417135 55770251 1.01
vector<uint64>/insert 56617761 56100468 1.01
vector<uint64>/iteration 10413895 1291269 8.06 *
vector<uint64>/operator[] 23507193 3479390 6.76 *
vector<uint64>/push_back 34687939 13806627 2.51 *
vector<uint64>/sort 256886550 84669657 3.03 *

+ + + + + +
+ + + + + +

+ + + + + Win32.VC71.MS.Release

+
+
EASTL version: 0.96.00
Platform: Windows on X86
Compiler: Microsoft Visual C++ compiler, version 1310
Allocator: PPMalloc::GeneralAllocator. Thread safety enabled.
Build: Full optimization. Inlining enabled.

Values are times to complete tests; smaller values are better.
Alarm indicates a greater than 10% difference.

Test VC++ EASTL Ratio Alarm
----------------------------------------------------------------------------------------
algorithm/adj_find/vector<TestObject> 2783546 2750660 1.01
algorithm/copy/vector<LargePOD> 6474025 4972738 1.30 *
algorithm/copy/vector<uint32_t> 157267 173162 0.91
algorithm/copy_backward/vector<LargePOD> 4836406 4374780 1.11 *
algorithm/copy_backward/vector<uint32_t> 104780 120912 0.87 *
algorithm/count/vector<uint64_t> 1368440 1368696 1.00
algorithm/equal_range/vector<uint64_t> 114199387 102783938 1.11 *
algorithm/fill/bool[] 253215 27353 9.26 *
algorithm/fill/char[]/'d' 253164 27404 9.24 *
algorithm/fill/vector<char>/'d' 253105 27362 9.25 *
algorithm/fill/vector<char>/0 253275 27353 9.26 *
algorithm/fill/vector<uint64_t> 397001 394323 1.01
algorithm/fill/vector<void*> 547196 642362 0.85 *
algorithm/fill_n/bool[] 229177 27361 8.38 *
algorithm/fill_n/char[] 228845 27404 8.35 *
algorithm/fill_n/vector<uint64_t> 565233 1376822 0.41 *
algorithm/find_end/string/end 2107116 82356 25.59 *
algorithm/find_end/string/middle 2111672 664283 3.18 *
algorithm/find_end/string/none 2110423 1519596 1.39 *
algorithm/lex_cmp/schar[] 741021 176162 4.21 *
algorithm/lex_cmp/vector<TestObject> 2610494 2642183 0.99
algorithm/lex_cmp/vector<uchar> 697595 167866 4.16 *
algorithm/lower_bound/vector<TestObject> 62462233 58146664 1.07
algorithm/min_element/vector<TestObject> 4350385 2671227 1.63 *
algorithm/rand_shuffle/vector<uint64_t> 10868261 11300818 0.96
algorithm/reverse/list<TestObject> 483718 470024 1.03
algorithm/reverse/vector<TestObject> 476739 484322 0.98
algorithm/search/string<char> 2560387 1259496 2.03 *
algorithm/search_n/string<char> 2770991 458524 6.04 *
algorithm/unique/vector<TestObject> 4194520 4658910 0.90 *
algorithm/unique/vector<uint32_t> 538730 787924 0.68 *
algorithm/unique/vector<uint64_t> 3169829 2575636 1.23 *
algorithm/upper_bound/vector<uint32_t> 27495562 25321593 1.09

bitset<1500>/>>=/1 33464228 33469719 1.00
bitset<1500>/count 18736116 18814903 1.00
bitset<1500>/flip 19299309 18605438 1.04
bitset<1500>/reset 22200487 15262847 1.45 *
bitset<1500>/set() 14418193 17557319 0.82 *
bitset<1500>/set(i) 1599250 1599199 1.00
bitset<1500>/test 1599241 1599233 1.00

bitset<15>/>>=/1 2199222 2264442 0.97
bitset<15>/count 1399406 1399193 1.00
bitset<15>/flip 1266712 1199197 1.06
bitset<15>/reset 1399364 1399109 1.00
bitset<15>/set() 1199197 999201 1.20 *
bitset<15>/set(i) 1599258 1462952 1.09
bitset<15>/test 1599275 1599224 1.00

bitset<35>/>>=/1 2599266 1933376 1.34 *
bitset<35>/count 2599240 2592559 1.00
bitset<35>/flip 1693124 1199188 1.41 *
bitset<35>/reset 1399406 999201 1.40 *
bitset<35>/set() 1599403 1199205 1.33 *
bitset<35>/set(i) 1599241 1599190 1.00
bitset<35>/test 1599250 1599232 1.00

bitset<75>/>>=/1 4199332 4199213 1.00
bitset<75>/count 2999497 2199341 1.36 *
bitset<75>/flip 2399499 1830178 1.31 *
bitset<75>/reset 2199468 1199197 1.83 *
bitset<75>/set() 1999387 1199851 1.67 *
bitset<75>/set(i) 1599266 1599198 1.00
bitset<75>/test 1599241 1662651 0.96

deque<ValuePair>/erase 90444165 37113253 2.44 *
deque<ValuePair>/insert 93299349 36175167 2.58 *
deque<ValuePair>/iteration 2756414 2122076 1.30 *
deque<ValuePair>/operator[] 5117969 4632075 1.10
deque<ValuePair>/push_back 30300757 3060357 9.90 *
deque<ValuePair>/push_front 25498529 2808392 9.08 *
deque<ValuePair>/sort 142283047 111292464 1.28 *

hash_map<string, uint32_t>/clear 146769 389699 0.38 *
hash_map<string, uint32_t>/count 13059434 3460324 3.77 *
hash_map<string, uint32_t>/erase pos 184246 331925 0.56 *
hash_map<string, uint32_t>/erase range 382432 167237 2.29 *
hash_map<string, uint32_t>/erase val 6187898 3302114 1.87 *
hash_map<string, uint32_t>/find 11289369 3459024 3.26 *
hash_map<string, uint32_t>/find_as/char* 13559192 3662387 3.70 *
hash_map<string, uint32_t>/insert 17514012 14095176 1.24 *
hash_map<string, uint32_t>/iteration 801014 218450 3.67 *
hash_map<string, uint32_t>/operator[] 11457065 3690385 3.10 *

hash_map<uint32_t, TestObject>/clear 141865 265379 0.53 *
hash_map<uint32_t, TestObject>/count 1766045 703613 2.51 *
hash_map<uint32_t, TestObject>/erase pos 172337 218458 0.79 *
hash_map<uint32_t, TestObject>/erase range 537846 102340 5.26 *
hash_map<uint32_t, TestObject>/erase val 2220132 1441787 1.54 *
hash_map<uint32_t, TestObject>/find 1612994 1043953 1.55 *
hash_map<uint32_t, TestObject>/insert 7141547 4348056 1.64 *
hash_map<uint32_t, TestObject>/iteration 199512 169328 1.18 *
hash_map<uint32_t, TestObject>/operator[] 1831733 1519707 1.21 *

heap (uint32_t[])/make_heap 3366247 1949093 1.73 *
heap (uint32_t[])/pop_heap 57280514 53779440 1.07
heap (uint32_t[])/push_heap 9700217 7582935 1.28 *
heap (uint32_t[])/sort_heap 47227751 46131948 1.02

heap (vector<TestObject>)/make_heap 11458442 11510819 1.00
heap (vector<TestObject>)/pop_heap 122897267 119061132 1.03
heap (vector<TestObject>)/push_heap 21688481 21176220 1.02
heap (vector<TestObject>)/sort_heap 90867380 88869523 1.02

list<TestObject>/ctor(it) 74591104 69845817 1.07
list<TestObject>/ctor(n) 6243998 5838582 1.07
list<TestObject>/erase 299509298 206013676 1.45 *
list<TestObject>/find 40927185 14514243 2.82 *
list<TestObject>/insert 71277251 47234534 1.51 *
list<TestObject>/push_back 73780527 44116725 1.67 *
list<TestObject>/remove 786197776 326434612 2.41 *
list<TestObject>/reverse 49283128 25029678 1.97 *
list<TestObject>/size/1 159741 139400 1.15 *
list<TestObject>/size/10 159324 346579 0.46 * EASTL intentionally implements list::size as O(n).
list<TestObject>/size/100 159188 97235419 0.00 * EASTL intentionally implements list::size as O(n).
list<TestObject>/splice 63548584 19322931 3.29 *

map<TestObject, uint32_t>/clear 167408 170501 0.98
map<TestObject, uint32_t>/count 10213685 4748346 2.15 *
map<TestObject, uint32_t>/equal_range 9515053 5677558 1.68 *
map<TestObject, uint32_t>/erase/key 6646260 4302300 1.54 *
map<TestObject, uint32_t>/erase/pos 297135 327938 0.91 MS uses a code bloating implementation of erase.
map<TestObject, uint32_t>/erase/range 148614 163702 0.91
map<TestObject, uint32_t>/find 5637531 4767055 1.18 *
map<TestObject, uint32_t>/insert 9591128 9030349 1.06
map<TestObject, uint32_t>/iteration 323595 325261 0.99
map<TestObject, uint32_t>/lower_bound 5398239 4784089 1.13 *
map<TestObject, uint32_t>/operator[] 5631250 5141166 1.10
map<TestObject, uint32_t>/upper_bound 5436336 4762431 1.14 *

set<uint32_t>/clear 155983 156026 1.00
set<uint32_t>/count 9635965 4392146 2.19 *
set<uint32_t>/equal_range 8504157 5247832 1.62 *
set<uint32_t>/erase range 140488 119408 1.18 *
set<uint32_t>/erase/pos 260678 286697 0.91 MS uses a code bloating implementation of erase.
set<uint32_t>/erase/val 6008225 4012825 1.50 *
set<uint32_t>/find 5145432 4381945 1.17 *
set<uint32_t>/insert 8087129 8697251 0.93
set<uint32_t>/iteration 271507 304538 0.89 *
set<uint32_t>/lower_bound 4666228 4404250 1.06
set<uint32_t>/upper_bound 4623600 4402974 1.05

sort/q_sort/TestObject[] 9596169 5578652 1.72 *
sort/q_sort/TestObject[]/sorted 602463 1016132 0.59 *
sort/q_sort/vector<TestObject> 9674828 5430199 1.78 *
sort/q_sort/vector<TestObject>/sorted 606908 1111647 0.55 *
sort/q_sort/vector<ValuePair> 6284194 3423452 1.84 *
sort/q_sort/vector<ValuePair>/sorted 711629 569364 1.25 *
sort/q_sort/vector<uint32> 5453379 2916146 1.87 *
sort/q_sort/vector<uint32>/sorted 537047 419144 1.28 *

string<char16_t>/compare 435083295 251985824 1.73 *
string<char16_t>/erase/pos,n 3454842 3451858 1.00
string<char16_t>/find/p,pos,n 401954723 165298157 2.43 *
string<char16_t>/find_first_not_of/p,pos,n 131452 65374 2.01 *
string<char16_t>/find_first_of/p,pos,n 11657444 4144515 2.81 *
string<char16_t>/find_last_of/p,pos,n 1604248 567571 2.83 *
string<char16_t>/insert/pos,p 3398734 3355460 1.01
string<char16_t>/iteration 218856504 218771844 1.00
string<char16_t>/operator[] 714161 240023 2.98 *
string<char16_t>/push_back 34968235 2444897 14.30 *
string<char16_t>/replace/pos,n,p,n 4226693 4198498 1.01
string<char16_t>/reserve 1901765 390805 4.87 *
string<char16_t>/rfind/p,pos,n 195483 150985 1.29 *
string<char16_t>/size 11169 11245 0.99
string<char16_t>/swap 1459280 419807 3.48 *

string<char8_t>/compare 63071275 77209580 0.82 *
string<char8_t>/erase/pos,n 2008652 1944494 1.03
string<char8_t>/find/p,pos,n 123201023 167536164 0.74 *
string<char8_t>/find_first_not_of/p,pos,n 93372 67864 1.38 *
string<char8_t>/find_first_of/p,pos,n 7542492 3375758 2.23 *
string<char8_t>/find_last_of/p,pos,n 933972 583576 1.60 *
string<char8_t>/insert/pos,p 1737213 1750847 0.99
string<char8_t>/iteration 893834 899130 0.99
string<char8_t>/operator[] 817879 313437 2.61 *
string<char8_t>/push_back 20857734 2004410 10.41 *
string<char8_t>/replace/pos,n,p,n 2578696 2607655 0.99
string<char8_t>/reserve 915127 85289 10.73 *
string<char8_t>/rfind/p,pos,n 196103 148894 1.32 *
string<char8_t>/size 11619 11220 1.04
string<char8_t>/swap 1461056 419874 3.48 *

vector<uint64>/erase 55235116 55284587 1.00
vector<uint64>/insert 55166046 55142755 1.00
vector<uint64>/iteration 553954 509719 1.09
vector<uint64>/operator[] 1284239 798516 1.61 *
vector<uint64>/push_back 5399549 3867959 1.40 *
vector<uint64>/sort 43636314 42619952 1.02
+ + + + + +
+ + + + + +

+ + Win32.VC71.STLPort.Debug

+
+
EASTL version: 0.96.00
Platform: Windows on X86
Compiler: Microsoft Visual C++ compiler, version 1310
Allocator: PPMalloc::GeneralAllocatorDebug. Thread safety enabled.
Build: Debug. Inlining disabled. STL debug features disabled.

Values are times to complete tests; smaller values are better.
Alarm indicates a greater than 10% difference.

Test STLPort EASTL Ratio Alarm
----------------------------------------------------------------------------------------
algorithm/adj_find/vector<TestObject> 5661170 5689517 1.00
algorithm/copy/vector<LargePOD> 5573815 5124428 1.09
algorithm/copy/vector<uint32_t> 148273 125782 1.18 *
algorithm/copy_backward/vector<LargePOD> 5429791 4834510 1.12 *
algorithm/copy_backward/vector<uint32_t> 156765 163038 0.96
algorithm/count/vector<uint64_t> 2730922 2730072 1.00
algorithm/equal_range/vector<uint64_t> 639366489 452896251 1.41 *
algorithm/fill/bool[] 1299326 27361 47.49 *
algorithm/fill/char[]/'d' 27378 27361 1.00
algorithm/fill/vector<char>/'d' 34459 27361 1.26 *
algorithm/fill/vector<char>/0 1299224 27361 47.48 *
algorithm/fill/vector<uint64_t> 1400647 1400145 1.00
algorithm/fill/vector<void*> 1308779 1309085 1.00
algorithm/fill_n/bool[] 1299156 27352 47.50 *
algorithm/fill_n/char[] 1299258 27369 47.47 *
algorithm/fill_n/vector<uint64_t> 1451162 1313632 1.10
algorithm/find_end/string/end 13089999 2526412 5.18 *
algorithm/find_end/string/middle 12627412 20190101 0.63 *
algorithm/find_end/string/none 12704185 40728803 0.31 *
algorithm/lex_cmp/schar[] 1749844 195806 8.94 *
algorithm/lex_cmp/vector<TestObject> 5060968 4799882 1.05
algorithm/lex_cmp/vector<uchar> 1668354 189490 8.80 *
algorithm/lower_bound/vector<TestObject> 450240945 353437573 1.27 *
algorithm/min_element/vector<TestObject> 5861744 5326371 1.10
algorithm/rand_shuffle/vector<uint64_t> 40780449 45780090 0.89 *
algorithm/reverse/list<TestObject> 2657678 2130627 1.25 *
algorithm/reverse/vector<TestObject> 2666424 2124889 1.25 *
algorithm/search/string<char> 3110379 3613460 0.86 *
algorithm/search_n/string<char> 3061665 1521261 2.01 *
algorithm/unique/vector<TestObject> 12423684 9485439 1.31 *
algorithm/unique/vector<uint32_t> 3718699 1726596 2.15 *
algorithm/unique/vector<uint64_t> 6205110 4591631 1.35 *
algorithm/upper_bound/vector<uint32_t> 185391094 139336317 1.33 *

bitset<1500>/>>=/1 120666960 92449816 1.31 * STLPort is broken, neglects wraparound check.
bitset<1500>/count 201709793 52874726 3.81 *
bitset<1500>/flip 87360297 81737071 1.07
bitset<1500>/reset 23950178 77390323 0.31 *
bitset<1500>/set() 84608107 76912011 1.10
bitset<1500>/set(i) 18023620 12229604 1.47 *
bitset<1500>/test 18006553 13276396 1.36 *

bitset<15>/>>=/1 11935904 6012695 1.99 * STLPort is broken, neglects wraparound check.
bitset<15>/count 9368581 6022742 1.56 *
bitset<15>/flip 11600706 6533635 1.78 *
bitset<15>/reset 5830957 5874690 0.99
bitset<15>/set() 11695328 5701621 2.05 *
bitset<15>/set(i) 16363205 12570216 1.30 *
bitset<15>/test 16743172 13201452 1.27 *

bitset<35>/>>=/1 22950918 6774457 3.39 * STLPort is broken, neglects wraparound check.
bitset<35>/count 12655309 11736256 1.08
bitset<35>/flip 13738575 5800042 2.37 *
bitset<35>/reset 15561434 5800510 2.68 *
bitset<35>/set() 13564283 5600709 2.42 *
bitset<35>/set(i) 18519689 12199973 1.52 *
bitset<35>/test 18000569 13103566 1.37 *

bitset<75>/>>=/1 25579525 16669664 1.53 * STLPort is broken, neglects wraparound check.
bitset<75>/count 18740698 8480492 2.21 *
bitset<75>/flip 13555630 8300335 1.63 *
bitset<75>/reset 15200133 8200000 1.85 *
bitset<75>/set() 14408112 8001959 1.80 *
bitset<75>/set(i) 18137741 12374257 1.47 *
bitset<75>/test 18422135 13100038 1.41 *

deque<ValuePair>/erase 651933790 326443043 2.00 *
deque<ValuePair>/insert 659786183 333304660 1.98 *
deque<ValuePair>/iteration 23734592 16173706 1.47 *
deque<ValuePair>/operator[] 59126816 23911774 2.47 *
deque<ValuePair>/push_back 58056988 31859266 1.82 *
deque<ValuePair>/push_front 57780891 31743199 1.82 *
deque<ValuePair>/sort 818414195 596568113 1.37 *

hash_map<string, uint32_t>/clear 3422133 2204517 1.55 *
hash_map<string, uint32_t>/count 9869545 8624924 1.14 *
hash_map<string, uint32_t>/erase pos 3256350 2069299 1.57 *
hash_map<string, uint32_t>/erase range 3230203 1151392 2.81 *
hash_map<string, uint32_t>/erase val 16860362 15939778 1.06
hash_map<string, uint32_t>/find 10286971 9920910 1.04
hash_map<string, uint32_t>/find_as/char* 118136025 9458468 12.49 *
hash_map<string, uint32_t>/insert 188948336 174490082 1.08
hash_map<string, uint32_t>/iteration 4037049 2021036 2.00 *
hash_map<string, uint32_t>/operator[] 11472127 12887699 0.89 *

hash_map<uint32_t, TestObject>/clear 2522264 1331848 1.89 *
hash_map<uint32_t, TestObject>/count 3210739 2897063 1.11 *
hash_map<uint32_t, TestObject>/erase pos 1862281 1304783 1.43 *
hash_map<uint32_t, TestObject>/erase range 698079 579606 1.20 *
hash_map<uint32_t, TestObject>/erase val 8806722 7041298 1.25 *
hash_map<uint32_t, TestObject>/find 3604875 4709645 0.77 *
hash_map<uint32_t, TestObject>/insert 40785711 40376342 1.01
hash_map<uint32_t, TestObject>/iteration 3064088 1508834 2.03 *
hash_map<uint32_t, TestObject>/operator[] 6053742 8176906 0.74 *

heap (uint32_t[])/make_heap 5799813 5738596 1.01
heap (uint32_t[])/pop_heap 113775168 102076134 1.11 *
heap (uint32_t[])/push_heap 21649151 16854845 1.28 *
heap (uint32_t[])/sort_heap 97535213 83290735 1.17 *

heap (vector<TestObject>)/make_heap 22215557 22277063 1.00
heap (vector<TestObject>)/pop_heap 275392171 277340039 0.99
heap (vector<TestObject>)/push_heap 51479442 47342577 1.09
heap (vector<TestObject>)/sort_heap 214474736 218497540 0.98

list<TestObject>/ctor(it) 767753795 753421427 1.02
list<TestObject>/ctor(n) 74185322 73386245 1.01
list<TestObject>/erase 1021003824 1033873589 0.99
list<TestObject>/find 77666072 74917622 1.04
list<TestObject>/insert 788071150 774188737 1.02
list<TestObject>/push_back 760490154 737327348 1.03
list<TestObject>/remove 1682511938 1434771006 1.17 *
list<TestObject>/reverse 87237327 80394623 1.09
list<TestObject>/size/1 3828111 599530 6.39 *
list<TestObject>/size/10 9600605 1329535 7.22 * EASTL intentionally implements list::size as O(n).
list<TestObject>/size/100 62952334 15022551 4.19 * EASTL intentionally implements list::size as O(n).
list<TestObject>/splice 96536412 60804817 1.59 *

map<TestObject, uint32_t>/clear 1142127 1099066 1.04
map<TestObject, uint32_t>/count 19659726 14647548 1.34 *
map<TestObject, uint32_t>/equal_range 36680687 18219086 2.01 *
map<TestObject, uint32_t>/erase/key 28892154 16037774 1.80 *
map<TestObject, uint32_t>/erase/pos 1209643 1185495 1.02
map<TestObject, uint32_t>/erase/range 715402 670539 1.07
map<TestObject, uint32_t>/find 21020992 13429575 1.57 *
map<TestObject, uint32_t>/insert 59530871 51120640 1.16 *
map<TestObject, uint32_t>/iteration 972825 1191946 0.82 *
map<TestObject, uint32_t>/lower_bound 18852651 12495034 1.51 *
map<TestObject, uint32_t>/operator[] 22889573 16676736 1.37 *
map<TestObject, uint32_t>/upper_bound 18603584 12406922 1.50 *

set<uint32_t>/clear 919555 882988 1.04
set<uint32_t>/count 17561110 12461084 1.41 *
set<uint32_t>/equal_range 31522488 15230282 2.07 *
set<uint32_t>/erase range 687582 564765 1.22 *
set<uint32_t>/erase/pos 1044352 1045355 1.00
set<uint32_t>/erase/val 25525304 12940774 1.97 *
set<uint32_t>/find 17140751 10704866 1.60 *
set<uint32_t>/insert 56035051 45555664 1.23 *
set<uint32_t>/iteration 682669 640831 1.07
set<uint32_t>/lower_bound 16339932 10475740 1.56 *
set<uint32_t>/upper_bound 17779424 10652599 1.67 *

sort/q_sort/TestObject[] 17000866 14823515 1.15 *
sort/q_sort/TestObject[]/sorted 6658559 3263328 2.04 *
sort/q_sort/vector<TestObject> 17476629 14953285 1.17 *
sort/q_sort/vector<TestObject>/sorted 6667034 3327435 2.00 *
sort/q_sort/vector<ValuePair> 15391357 10820848 1.42 *
sort/q_sort/vector<ValuePair>/sorted 6617122 3232949 2.05 *
sort/q_sort/vector<uint32> 8343906 6014846 1.39 *
sort/q_sort/vector<uint32>/sorted 3039430 1003127 3.03 *

string<char16_t>/compare 1489709846 532664000 2.80 *
string<char16_t>/erase/pos,n 3528690 3439864 1.03
string<char16_t>/find/p,pos,n 2521448321 443752189 5.68 *
string<char16_t>/find_first_not_of/p,pos,n 661206 137419 4.81 *
string<char16_t>/find_first_of/p,pos,n 54746434 8521335 6.42 *
string<char16_t>/find_last_of/p,pos,n 10607778 1212414 8.75 *
string<char16_t>/insert/pos,p 3445016 3360126 1.03
string<char16_t>/iteration 580955636 579452556 1.00
string<char16_t>/operator[] 2206353 1987809 1.11 *
string<char16_t>/push_back 22421368 6007808 3.73 *
string<char16_t>/replace/pos,n,p,n 5138454 4464786 1.15 *
string<char16_t>/reserve 4922413418 335622 100.00 *
string<char16_t>/rfind/p,pos,n 1440308 380578 3.78 *
string<char16_t>/size 25355 25398 1.00
string<char16_t>/swap 2122704 1490823 1.42 *

string<char8_t>/compare 77222134 77443134 1.00
string<char8_t>/erase/pos,n 1965344 1956521 1.00
string<char8_t>/find/p,pos,n 2468091951 474205522 5.20 *
string<char8_t>/find_first_not_of/p,pos,n 660960 130211 5.08 *
string<char8_t>/find_first_of/p,pos,n 55020899 9240171 5.95 *
string<char8_t>/find_last_of/p,pos,n 10576210 1239053 8.54 *
string<char8_t>/insert/pos,p 1822756 1750880 1.04
string<char8_t>/iteration 2617889 2540148 1.03
string<char8_t>/operator[] 2254794 2256443 1.00
string<char8_t>/push_back 12463022 5210321 2.39 *
string<char8_t>/replace/pos,n,p,n 3744862 2855260 1.31 *
string<char8_t>/reserve 1372046888 218815 100.00 *
string<char8_t>/rfind/p,pos,n 1446232 366902 3.94 *
string<char8_t>/size 26859 25431 1.06
string<char8_t>/swap 2123350 1490509 1.42 *

vector<uint64>/erase 55164013 56417449 0.98
vector<uint64>/insert 55872973 56432664 0.99
vector<uint64>/iteration 1329102 1324623 1.00
vector<uint64>/operator[] 5264738 3136746 1.68 *
vector<uint64>/push_back 14903245 13171175 1.13 *
vector<uint64>/sort 88429095 88542171 1.00
+ + + + + +
+ + + + + +

+ + + + + Win32.VC71.STLPort.Release

+
+
EASTL version: 0.96.00
Platform: Windows on X86
Compiler: Microsoft Visual C++ compiler, version 1310
Allocator: PPMalloc::GeneralAllocator. Thread safety enabled.
Build: Full optimization. Inlining enabled.

Values are times to complete tests; smaller values are better.
Alarm indicates a greater than 10% difference.

Test STLPort EASTL Ratio Alarm
----------------------------------------------------------------------------------------
algorithm/adj_find/vector<TestObject> 2741046 2731441 1.00
algorithm/copy/vector<LargePOD> 6065923 5085142 1.19 *
algorithm/copy/vector<uint32_t> 158304 165555 0.96
algorithm/copy_backward/vector<LargePOD> 4710258 4896476 0.96
algorithm/copy_backward/vector<uint32_t> 146030 142630 1.02
algorithm/count/vector<uint64_t> 1395921 1406334 0.99
algorithm/equal_range/vector<uint64_t> 211692764 118969493 1.78 *
algorithm/fill/bool[] 366078 33737 10.85 *
algorithm/fill/char[]/'d' 33736 33771 1.00
algorithm/fill/vector<char>/'d' 28466 33720 0.84 *
algorithm/fill/vector<char>/0 366086 33728 10.85 *
algorithm/fill/vector<uint64_t> 466250 401591 1.16 *
algorithm/fill/vector<void*> 521603 693481 0.75 *
algorithm/fill_n/bool[] 599709 33762 17.76 *
algorithm/fill_n/char[] 599573 33711 17.79 *
algorithm/fill_n/vector<uint64_t> 434971 1374084 0.32 *
algorithm/find_end/string/end 1494742 85349 17.51 *
algorithm/find_end/string/middle 1480700 687208 2.15 *
algorithm/find_end/string/none 1540540 1546431 1.00
algorithm/lex_cmp/schar[] 921638 178797 5.15 *
algorithm/lex_cmp/vector<TestObject> 2623559 2643551 0.99
algorithm/lex_cmp/vector<uchar> 960899 183608 5.23 *
algorithm/lower_bound/vector<TestObject> 60630534 56531528 1.07
algorithm/min_element/vector<TestObject> 4209022 2768527 1.52 *
algorithm/rand_shuffle/vector<uint64_t> 13762010 15969052 0.86 *
algorithm/reverse/list<TestObject> 673387 731825 0.92
algorithm/reverse/vector<TestObject> 634576 754511 0.84 *
algorithm/search/string<char> 1262599 1387608 0.91
algorithm/search_n/string<char> 1166242 458592 2.54 *
algorithm/unique/vector<TestObject> 4912193 5336317 0.92
algorithm/unique/vector<uint32_t> 809387 809081 1.00
algorithm/unique/vector<uint64_t> 4371814 2414255 1.81 *
algorithm/upper_bound/vector<uint32_t> 31899081 29555596 1.08

bitset<1500>/>>=/1 63308136 40553560 1.56 * STLPort is broken, neglects wraparound check.
bitset<1500>/count 62523178 22799473 2.74 *
bitset<1500>/flip 20302845 19919232 1.02
bitset<1500>/reset 18892015 15403148 1.23 *
bitset<1500>/set() 15803302 17322192 0.91
bitset<1500>/set(i) 2799271 2999310 0.93
bitset<1500>/test 2999293 2799262 1.07

bitset<15>/>>=/1 1199239 3199256 0.37 * STLPort is broken, neglects wraparound check.
bitset<15>/count 3599461 2199231 1.64 *
bitset<15>/flip 1199231 1199188 1.00
bitset<15>/reset 1199188 1199180 1.00
bitset<15>/set() 1199214 1199180 1.00
bitset<15>/set(i) 2599257 1399262 1.86 *
bitset<15>/test 2599274 2599283 1.00

bitset<35>/>>=/1 6643974 4599239 1.44 * STLPort is broken, neglects wraparound check.
bitset<35>/count 5151331 5399438 0.95
bitset<35>/flip 1999404 1199273 1.67 *
bitset<35>/reset 9805285 1399313 7.01 *
bitset<35>/set() 2799279 1199248 2.33 *
bitset<35>/set(i) 2799246 1599241 1.75 *
bitset<35>/test 2999234 2999251 1.00

bitset<75>/>>=/1 7002045 6999333 1.00 STLPort is broken, neglects wraparound check.
bitset<75>/count 5999351 3002259 2.00 *
bitset<75>/flip 3599334 3599163 1.00
bitset<75>/reset 9799344 3399218 2.88 *
bitset<75>/set() 3599232 3599062 1.00
bitset<75>/set(i) 2799228 1599284 1.75 *
bitset<75>/test 2999250 2799339 1.07

deque<ValuePair>/erase 127108651 115258113 1.10
deque<ValuePair>/insert 137727889 116552332 1.18 *
deque<ValuePair>/iteration 7144182 6009899 1.19 *
deque<ValuePair>/operator[] 34241222 20535039 1.67 *
deque<ValuePair>/push_back 6585800 3932126 1.67 *
deque<ValuePair>/push_front 6805865 3993513 1.70 *
deque<ValuePair>/sort 395352323 348778188 1.13 *

hash_map<string, uint32_t>/clear 426640 447015 0.95
hash_map<string, uint32_t>/count 4359344 3883089 1.12 *
hash_map<string, uint32_t>/erase pos 584392 458142 1.28 *
hash_map<string, uint32_t>/erase range 221034 196078 1.13 *
hash_map<string, uint32_t>/erase val 3539867 3790813 0.93
hash_map<string, uint32_t>/find 3966831 3811910 1.04
hash_map<string, uint32_t>/find_as/char* 11591612 4243710 2.73 *
hash_map<string, uint32_t>/insert 16763887 16719194 1.00
hash_map<string, uint32_t>/iteration 909968 478609 1.90 *
hash_map<string, uint32_t>/operator[] 4360041 4108313 1.06

hash_map<uint32_t, TestObject>/clear 302634 283722 1.07
hash_map<uint32_t, TestObject>/count 916487 907426 1.01
hash_map<uint32_t, TestObject>/erase pos 388042 321385 1.21 *
hash_map<uint32_t, TestObject>/erase range 122680 116280 1.06
hash_map<uint32_t, TestObject>/erase val 1710931 1729529 0.99
hash_map<uint32_t, TestObject>/find 1089462 1346527 0.81 *
hash_map<uint32_t, TestObject>/insert 4560310 5072350 0.90 *
hash_map<uint32_t, TestObject>/iteration 960117 495354 1.94 *
hash_map<uint32_t, TestObject>/operator[] 1872830 1890595 0.99

heap (uint32_t[])/make_heap 3528418 3327257 1.06
heap (uint32_t[])/pop_heap 63243859 61011853 1.04
heap (uint32_t[])/push_heap 11602424 10045869 1.15 *
heap (uint32_t[])/sort_heap 52965362 48744729 1.09

heap (vector<TestObject>)/make_heap 13191456 13089711 1.01
heap (vector<TestObject>)/pop_heap 148555656 144787742 1.03
heap (vector<TestObject>)/push_heap 28696689 26618830 1.08
heap (vector<TestObject>)/sort_heap 112473989 114018643 0.99

list<TestObject>/ctor(it) 80186731 74006287 1.08
list<TestObject>/ctor(n) 6232311 6128007 1.02
list<TestObject>/erase 344556374 212877808 1.62 *
list<TestObject>/find 39859075 14591347 2.73 *
list<TestObject>/insert 86935153 56138233 1.55 *
list<TestObject>/push_back 79569180 46700641 1.70 *
list<TestObject>/remove 785786758 324201016 2.42 *
list<TestObject>/reverse 45248186 24852759 1.82 *
list<TestObject>/size/1 219844 219496 1.00
list<TestObject>/size/10 519563 519579 1.00 EASTL intentionally implements list::size as O(n).
list<TestObject>/size/100 4567194 101230266 0.05 * EASTL intentionally implements list::size as O(n).
list<TestObject>/splice 68321087 23601687 2.89 *

map<TestObject, uint32_t>/clear 168011 180540 0.93
map<TestObject, uint32_t>/count 4830439 5139287 0.94
map<TestObject, uint32_t>/equal_range 8700090 6158531 1.41 *
map<TestObject, uint32_t>/erase/key 6696776 4617038 1.45 *
map<TestObject, uint32_t>/erase/pos 309273 333183 0.93
map<TestObject, uint32_t>/erase/range 137419 136068 1.01
map<TestObject, uint32_t>/find 4773498 4931352 0.97
map<TestObject, uint32_t>/insert 9651877 9311699 1.04
map<TestObject, uint32_t>/iteration 372946 416364 0.90 *
map<TestObject, uint32_t>/lower_bound 4784234 4915797 0.97
map<TestObject, uint32_t>/operator[] 5040254 5183147 0.97
map<TestObject, uint32_t>/upper_bound 4724292 4915984 0.96

set<uint32_t>/clear 165300 173289 0.95
set<uint32_t>/count 4958654 4885086 1.02
set<uint32_t>/equal_range 8434134 5698681 1.48 *
set<uint32_t>/erase range 145554 133960 1.09
set<uint32_t>/erase/pos 299914 324760 0.92
set<uint32_t>/erase/val 6506155 4335034 1.50 *
set<uint32_t>/find 4866879 4556043 1.07
set<uint32_t>/insert 8340523 8957257 0.93
set<uint32_t>/iteration 294465 343442 0.86 *
set<uint32_t>/lower_bound 4548095 4756498 0.96
set<uint32_t>/upper_bound 4559196 4521498 1.01

sort/q_sort/TestObject[] 7316766 7013894 1.04
sort/q_sort/TestObject[]/sorted 1668439 1332885 1.25 *
sort/q_sort/vector<TestObject> 7331530 7017260 1.04
sort/q_sort/vector<TestObject>/sorted 1601629 1247120 1.28 *
sort/q_sort/vector<ValuePair> 7071643 7067869 1.00
sort/q_sort/vector<ValuePair>/sorted 2136390 1703799 1.25 *
sort/q_sort/vector<uint32> 3292891 2943627 1.12 *
sort/q_sort/vector<uint32>/sorted 653693 473612 1.38 *

string<char16_t>/compare 356579259 432760228 0.82 *
string<char16_t>/erase/pos,n 3430422 3428645 1.00
string<char16_t>/find/p,pos,n 229263402 225830975 1.02
string<char16_t>/find_first_not_of/p,pos,n 187391 81404 2.30 *
string<char16_t>/find_first_of/p,pos,n 4411831 4413532 1.00
string<char16_t>/find_last_of/p,pos,n 731655 726155 1.01
string<char16_t>/insert/pos,p 3408628 3319726 1.03
string<char16_t>/iteration 309993861 310333547 1.00
string<char16_t>/operator[] 580839 579904 1.00
string<char16_t>/push_back 3983338 2975553 1.34 *
string<char16_t>/replace/pos,n,p,n 4361095 4211504 1.04
string<char16_t>/reserve 935141729 247010 100.00 *
string<char16_t>/rfind/p,pos,n 248956 223397 1.11 *
string<char16_t>/size 13311 13107 1.02
string<char16_t>/swap 519129 579445 0.90 *

string<char8_t>/compare 76695559 76828015 1.00
string<char8_t>/erase/pos,n 1951566 1947282 1.00
string<char8_t>/find/p,pos,n 185878944 185605039 1.00
string<char8_t>/find_first_not_of/p,pos,n 196877 81600 2.41 *
string<char8_t>/find_first_of/p,pos,n 4147685 4145356 1.00
string<char8_t>/find_last_of/p,pos,n 605897 598222 1.01
string<char8_t>/insert/pos,p 1781592 1768264 1.01
string<char8_t>/iteration 921502 921272 1.00
string<char8_t>/operator[] 361250 359873 1.00
string<char8_t>/push_back 3363288 2530493 1.33 *
string<char8_t>/replace/pos,n,p,n 2682600 2633130 1.02
string<char8_t>/reserve 672517501 78387 100.00 *
string<char8_t>/rfind/p,pos,n 226202 200013 1.13 *
string<char8_t>/size 11280 11109 1.02
string<char8_t>/swap 519393 559759 0.93

vector<uint64>/erase 55184856 55192217 1.00
vector<uint64>/insert 56764267 55682726 1.02
vector<uint64>/iteration 423122 424039 1.00
vector<uint64>/operator[] 1189397 860991 1.38 *
vector<uint64>/push_back 5626609 4027317 1.40 *
vector<uint64>/sort 49227036 49231362 1.00
+ + + + + +
+ + + + + +

+ + + + + + + + +
+ + + +
+End of document
+ + + +
+ + + +
+ + + +
+ + + +
+ + + + + diff --git a/doc/html/EASTL Best Practices.html b/doc/html/EASTL Best Practices.html new file mode 100644 index 0000000..bc0792e --- /dev/null +++ b/doc/html/EASTL Best Practices.html @@ -0,0 +1,1001 @@ + + + + EASTL Best Practices + + + + + + + +

EASTL Best Practices

+

In this document we discuss best practices for using EASTL. The primary emphasis is on performance with a secondary + emphasis on correctness and maintainability. Some best practices apply only to some situations, and these will be + pointed out as we go along. In order to be easily digestible, we present these practices as a list of items in the tone + of the Effective C++ series of books.

+

Summary

+

The descriptions here are intentionally terse; this is to make them easier to visually scan.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
1Consider intrusive containers.
2Consider fixed-size containers.
3Consider custom allocators.
4Consider hash tables instead of maps.
5Consider a vector_map (a.k.a. sorted vector) for unchanging data.
6Consider slist instead of list.
7Avoid redundant end() and size() in loops.
8Iterate containers instead of using operator[].
9Learn to use the string class appropriately.
10Cache list size if you want size() to be O(1).
11Use empty() instead of size() when possible.
12Know your container efficiencies.
13Use vector::reserve.
14Use vector::set_capacity to trim memory usage.
15Use swap() instead of a manually implemented version.
16Consider storing pointers instead of objects.
17Consider smart pointers instead of raw pointers.
18Use iterator pre-increment instead of post-increment.
19Make temporary references so the code can be traced/debugged.
20Consider bitvector or bitset instead of vector<bool>.
21Vectors can be treated as contiguous memory.
22Search hash_map<string> via find_as() instead of find().
23Take advantage of type_traits (e.g. EASTL_DECLARE_TRIVIAL_RELOCATE).
24Name containers to track memory usage.
25Learn the algorithms.
26Pass and return containers by reference instead of value.
27Consider using reset_lose_memory() for fast container teardown.
28Consider using fixed_substring instead of copying strings.
29Consider using vector::push_back(void).
+

Detail

+

1 + Consider intrusive containers. +

+

Intrusive containers (such as intrusive_list) differ from regular containers (such as list) in that they use the stored objects to manage the linked list instead of using nodes allocated from a memory heap. The result is better usage of memory. Additionally intrusive_list objects can be removed from their list without knowing what list they belong to. To make an intrusive_list of Widgets, you have Widget inherit from intrusive_list_node or simply have mpPrev/mpNext member variables.

+

To create an intrusive_list container, you can use the following code:

+

class Widget : public intrusive_list_node
+{ };
+
+intrusive_list<Widget> widgetList;
+widgetList.push_back(someWidget);

+

+

2 + Consider fixed-size containers. +

+

Fixed-size containers (such as fixed_list) are variations of regular containers (such as list) in that they allocate from a fixed block of local memory instead of allocating from a generic heap. The result is better usage of memory due to reduced fragmentation, better cache behavior, and faster allocation/deallocation. The presence of fixed-size containers negate the most common complaint that people have about STL: that it fragments the heap or "allocates all over the place."

+

EASTL fixed containers include:

+
    +
  • fixed_list
  • +
  • fixed_slist
  • +
  • fixed_vector
  • +
  • fixed_string
  • +
  • fixed_map
  • +
  • fixed_multimap
  • +
  • fixed_set
  • +
  • fixed_multiset
  • +
  • fixed_hash_map
  • +
  • fixed_hash_multimap
  • +
  • fixed_hash_set
  • +
  • fixed_hash_multiset
  • +
+

To create a fixed_set, you can use the following code:

+

fixed_set<int, 25> intSet; // Create a set capable of holding 25 elements.
+intSet.insert(37);

+

+

3 + Consider custom allocators. +

+

While EASTL provides fixed-size containers in order to control container memory usage, EASTL lets you assign a custom allocator to any container. This lets you define your own memory pool. EASTL has a more flexible and powerful mechanism of doing this than standard STL, as EASTL understands object alignment requirements, allows for debug naming, allows for sharing allocators across containers, and allows dynamic allocator assignment.

+

To create a list container that uses your custom allocator and uses block naming, you can use the following code:

+

list<int> intList(pSomeAllocator, "graphics/intList");
+intList.push_back(37);

+

4 +Consider hash tables instead of maps.

+

Hash containers (such as hash_map) provide the same interface as associative containers (such as map) but have faster lookup and use less memory. The primary disadvantage relative to associative containers is that hash containers are not sorted.

+

To make a hash_map (dictionary) of integers to strings, you can use the following code:

+

hash_map<int, const char*> stringTable;
+stringTable[37] = "hello";

+

5 + Consider a vector_map (a.k.a. sorted vector) for unchanging data. +

+

You can improve speed, memory usage, and cache behavior by using a vector_map instead of a map (or vector_set instead of set, etc.). The primary disadvantage of vector_map is that insertion and removal of elements are O(n) instead of O(log n). However, if your associative container is not going to be changing much or at all, you can benefit from using a vector_map. Consider calling reserve on the vector_map in order to set the desired capacity up front.

+

To make a vector_set, you can use the following code:

+

vector_set<int> intSet(16); // Create a vector_set with an initial capacity of 16.
+intSet.insert(37);

+

Note that you can use containers other than vector to implement vector_set. Here's how you do it with deque:

+

vector_set<int, less<int>, EASTLAllocatorType, deque<int> > intSet;
+intSet.insert(37);

+

6 + Consider slist instead of list. +

+

An slist is a singly-linked list; it is much like a list except that it can only be traversed in a forward direction and not a backward direction. The benefit is that each node is 4 bytes instead of 8 bytes. This is a small improvement, but if you don't need reverse iteration then it can be an improvement. There's also intrusive_slist as an option.

+

To make an slist, you can use the following code:

+

slist<int> intSlist;
+intSlist.push_front(37);

+

7 +Avoid redundant end() and size() in loops.

+

Instead of writing code like this:
+

+
for(deque<int>::iterator it = d.begin(); it != d.end(); ++it)
+    ...
+write code like this:
+
+
for(deque<int>::iterator it = d.begin(), itEnd = d.end(); it != itEnd; ++it)
+    ...
+The latter avoids a function call and return of an object (which in deque's case happens to be more than just a pointer). The above only works when the container is unchanged or for containers that have a constant end value. By "constant end value" we mean containers which can be modified but end always remains the same.
+ + + + + + + + + + + + + + + +
Constant beginNon-constant beginConstant endNon-constant end
array1string
+ vector
+ deque
+ intrusive_list
+ intrusive_slist
+ vector_map
+ vector_multimap
+ vector_set
+ vector_multiset
+ bit_vector
+ hash_map
+ hash_multimap
+ hash_set
+ hash_multiset
+ intrusive_hash_map
+ intrusive_hash_multimap
+ intrusive_hash_set
+ intrusive_hash_multiset
array
+ list
+ slist
+ intrusive_list
+ intrusive_slist
+ map
+ multimap
+ set
+ multiset
+ hash_map2
+ hash_multimap2
+ hash_set2
+ hash_multiset2
+ intrusive_hash_map
+ intrusive_hash_multimap
+ intrusive_hash_set
+ intrusive_hash_multiset
string
+ vector
+ deque
+ vector_map
+ vector_multimap
+ vector_set
+ vector_multiset
+ bit_vector
+
1 Arrays can be neither resized nor reallocated.
+ 2 Constant end if the hashtable can't/won't re-hash. Non-constant if it can re-hash.
+

8 +Iterate containers instead of using operator[]. +

+

It's faster to iterate random access containers via iterators than via operator[], though operator[] usage may look simpler.

+

Instead of doing this:

+

for(unsigned i = 0, iEnd = intVector.size(); i != iEnd; ++i)
+    intVector[i] = 37;

+

you can execute more efficiently by doing this:

+

for(vector<int>::iterator it = intVector.begin(), itEnd = intVector.end(); it != itEnd; ++it)
+    *it = 37;

+

9 +Learn to use the string class appropriately.

+

Oddly enough, the most mis-used STL container is easily the string class. The tales of string abuse could rival the 1001 Arabian Nights. Most of the abuses involve doing things in a harder way than need be. In examining the historical mis-uses of string, it is clear that many of the problems stem from the user thinking in terms of C-style string operations instead of object-oriented strings. This explains why statements such as strlen(s.c_str()) are so common, whereas the user could just use s.length() instead and be both clearer and more efficient.
+
+Here we provide a table of actual collected examples of things done and how they could have been done instead.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
What was writtenWhat could have been written

+ s = s.Left(i) + '+' + s.Right(s.length() - i - 1);
+
+
s[i] = '+';

+ string s(""); // This is the most commonly found misuse.
+
+
string s;

+ s = "";
+
+
s.clear();

+ s.c_str()[0] = 'u';
+
+
s[0] = 'u';

+ len = strlen(s.c_str());
+
+
len = s.length();

+ s = string("u");
+
s = "u";

+ puts(s + string("u"));
+
+
puts(s + "u");

+ string s(" ");
+ puts(s.c_str());
+
+
puts(" ");

+ s.sprintf("u");
+
+
s = "u";

+ char array[32];
+ sprintf(array, "%d", 10);
+ s = string(array);
+
+
s.sprintf("%d", 10);
+


+The chances are that if you want to do something with a string, there is a very basic way to do it. You don't want your code to appear in a future version of the above table.

+

10 +Cache list size if you want list::size() to be O(1).

+

EASTL's list, slist, intrusive_list, and intrusive_slist containers have a size() implementation which is O(n). That is, these containers don't keep a count (cache) of the current list size and when you call the size() function they iterate the list. This is by design and the reasoning behind it has been deeply debated and considered (and is discussed in the FAQ and the list header file). In summary, list doesn't cache its size because the only function that would benefit is the size function while many others would be negatively impacted and the memory footprint would be negatively impacted, yet list::size is not a very frequently called function in well-designed code. At the same time, nothing prevents the user from caching the size himself, though admittedly it adds some tedium and risk to the code writing process.
+
+Here's an example of caching the list size manually:
+

+
list<int> intList;
+ size_t    n = 0;
+
+ intList.push_back(37);
+ ++n;
+ intList.pop_front();
+ --n;
+

11 +Use empty() instead of size() when possible. +

+

All conventional containers have both an empty function and a size function. For all containers empty() executes with O(1) (constant time) efficiency. However, this is not so for size(), as some containers need to calculate the size and others need to do pointer subtraction (which may involve integer division) to find the size.

+

12 +Know your container efficiencies.

+

The above two practices lead us to this practice, which is a generalization of the above. + We present a table of basic information for the conventional EASTL containers. The values are described at the + bottom.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Container

empty() efficiencysize() efficiencyoperator[] efficiency

insert() efficiency

erase() efficiency

find() efficiency

sort efficiency

slist1O(n)-O(1)O(1)O(n)O(n+)

list

1n-

1

1

n

n log(n)

intrusive_slist1n-111n+
intrusive_list1n-111n log(n)
array111--nn log(n)
vector11a11 at end, else n1 at end, else nnn log(n)
vector_set11a11 at end, else n1 at end, else nlog(n)1
vector_multiset11a11 at end, else n1 at end, else nlog(n)1
vector_map11a11 at end, else n1 at end, else nlog(n)1
vector_multimap11a11 at end, else n1 at end, else nlog(n)1
deque11a11 at begin or end,
+ else n / 2
1 at begin or end,
+ else n / 2
nn log(n)
bit_vector11a11 at end, else n1 at end, else nnn log(n)
string, cow_string11a11 at end, else n1 at end, else nnn log(n)
set11-log(n)log(n)log(n)1
multiset11-log(n)log(n)log(n)1
map11log(n)log(n)log(n)log(n)1
multimap11-log(n)log(n)log(n)1
hash_set11-111-
hash_multiset11-1
11-
hash_map11-111-
hash_multimap11-111-
intrusive_hash_set11-111-
intrusive_hash_multiset11-111-
intrusive_hash_map11-111-
intrusive_hash_multimap11-111-
+


+ Notes: +

+
    +
  • - means that the operation does not exist.
  • +
  • 1 means amortized constant time. Also known as O(1)
  • +
  • n means time proportional to the container size. Also known as O(n)
  • +
  • log(n) means time proportional to the natural logarithm of the container size. Also known as O(log(n))
  • +
  • n log(n) means time proportional to log(n) times the size of the container. Also known as O(n log(n))
  • +
  • n+ means that the time is at least n, and possibly higher.
  • +
  • Inserting at the end of a vector may cause the vector to be resized; resizing a vector is O(n). However, the amortized time complexity for vector insertions at the end is constant.
  • +
  • Sort assumes the usage of the best possible sort for a large container of random data. Some sort algorithms (e.g. quick_sort) require random access iterators and so the sorting of some containers requires a different sort algorithm. We do not include bucket or radix sorts, as they are always O(n).
  • +
  • a vector, deque, string size is O(1) but involves pointer subtraction and thus integer division and so is not as efficient as containers that store the size directly.
  • +
+

13 +Use vector::reserve.

+

You can prevent vectors (and strings) from reallocating as you add items by specifying up front how many items you will be requiring. You can do this in the constructor or by calling the reserve function at any time. The capacity function returns the amount of space which is currently reserved.
+
+Here's how you could specify reserved capacity in a vector:
+

+
vector<Widget> v(37);   // Reserve space to hold up to 37 items.
+    or
+vector<Widget> v;       // This empty construction causes no memory to be allocated or reserved.
+ v.reserve(37);
+
+The EASTL vector (and string) implementation looks like this: +template <typename T>
+ class vector {
+    T* mpBegin;     // Beginning of used element memory.
+    T* mpEnd;       // End of used element memory.
+    T* mpCapacity;  // End of storage capacity. Is >= mpEnd
+
}
+Another approach to being efficient with vector memory usage is to use fixed_vector. +

14 +Use vector::set_capacity to trim memory usage.

+

A commonly asked question about vectors and strings is, "How do I reduce the capacity of a vector?" The conventional solution for std STL is to use the somewhat non-obvious trick of using vector<Widget>(v).swap(v). EASTL provides the same functionality via a member function called set_capacity() which is present in both the vector and string classes. 
+
+An example of reducing a vector is the following:

+vector<Widget> v;
+...
+
v.set_capacity();
+An example of resizing to zero and completely freeing the memory of a vector is the following:
+
+
vector<Widget> v;
+ ...
+
v.set_capacity(0);
+

15 Use swap() instead of a manually implemented version.

+

The generic swap algorithm provides a basic version for any kind of object. However, each EASTL container provides a specialization of swap which is optimized for that container. For example, the list container implements swap by simply swapping the internal member pointers and not by moving individual elements.

+

16 +Consider storing pointers instead of objects.

+

There are times when storing pointers to objects is more efficient or useful than storing objects directly in containers. It can be more efficient to store pointers when the objects are big and the container may need to construct, copy, and destruct objects during sorting or resizing. Moving pointers is usually faster than moving objects. It can be useful to store pointers instead of objects when somebody else owns the objects or the objects are in another container. It might be useful for a Widget to be in a list and in a hash table at the same time.

+

17 + Consider smart pointers instead of raw pointers. +

+

If you take the above recommendation and store objects as pointers instead of as objects, you may want to consider storing them as smart pointers instead of as regular pointers. This is particularly useful for when you want to delete the object when it is removed from the container. Smart pointers will automatically delete the pointed-to object when the smart pointer is destroyed. Otherwise, you will have to be careful about how you work with the list so that you don't generate memory leaks. Smart pointers implement a shared reference count on the stored pointer, and so any operation you do on a smart pointer container will do the right thing. Any pointer can be stored in a smart pointer, and custom new/delete mechanisms can work with smart pointers. The primary smart pointer is shared_ptr.

+

Here is an example of creating and using a shared_ptr:

+

typedef shared_ptr<Widget> WPtr;
+ list<WPtr> wList;
+
+ wList.push_back(WPtr(new Widget)); // The user may have operator new/delete overrides.
+wList.pop_back();                  // Implicitly deletes the Widget.

+

Here is an example of creating and using a shared_ptr that uses a custom allocation and deallocation mechanism:

+

typedef shared_ptr<Widget, EASTLAllocatorType, WidgetDelete> WPtr; // WidgetDelete is a custom destroyer.
+ list<WPtr> wList;
+
+ wList.push_back(WPtr(WidgetCreate(Widget))); // WidgetCreate is a custom allocator.
+wList.pop_back();                            // Implicitly calls WidgetDelete.

+

18 + Use iterator pre-increment instead of post-increment. +

+

Pre-increment (e.g. ++x) of iterators is better than post-increment (x++) when the latter is not specifically needed. It is common to find code that uses post-incrementing when it could instead use pre-incrementing; presumably this is due to post-increment looking a little better visually. The problem is that the latter constructs a temporary object before doing the increment. With built-in types such as pointers and integers, the compiler will recognize that the object is a trivial built-in type and that the temporary is not needed, but the compiler cannot do this for other types, even if the compiler sees that the temporary is not used; this is because the constructor may have important side effects and the compiler would be broken if it didn't construct the temporary object.

+

EASTL iterators are usually not trivial types and so it's best not to hope the compiler will do the best thing. Thus you should always play it safe and use pre-increment of iterators whenever post-increment is not required.

+

Here is an example of using iterator pre-increment; for loops like this should always use pre-increment:

+

for(set<int>::iterator it(intSet.begin()), itEnd(intSet.end()); it != itEnd; ++it)
+     *it = 37;

+

19 + Make temporary references so the code can be traced/debugged. +

+

Users want to be able to inspect or modify variables which are referenced by iterators. While EASTL containers and iterators are designed to make this easier than other STL implementations, it makes things very easy if the code explicitly declares a reference to the iterated element. In addition to making the variable easier to debug, it also makes code easier to read and makes the debug (and possibly release) version of the application run more efficiently.

+

Instead of doing this:

+

for(list<Widget>::iterator it = wl.begin(), itEnd = wl.end(); it != itEnd; ++it) {
+     (*it).x = 37;
+     (*it).y = 38;
+     (*it).z = 39;
+ }

+

Consider doing this:

+

for(list<Widget>::iterator it = wl.begin(), itEnd = wl.end(); it != itEnd; ++it) {
+     Widget& w = *it; // The user can easily inspect or modify w here.
+     w.x = 37;
+     w.y = 38;
+     w.z = 39;
+ }

+

20 + Consider bitvector or bitset instead of vector<bool>.

+

In EASTL, a vector of bool is exactly that. It intentionally does not attempt to make a specialization which implements a packed bit array. The bitvector class is specifically designed for this purpose. There are arguments either way, but if vector<bool> were allowed to be something other than an array of bool, it would go against user expectations and prevent users from making a true array of bool. There's a mechanism for specifically getting the bit packing, and it is bitvector.

+

Additionally there is bitset, which is not a conventional iterateable container but instead acts like bit flags. bitset may better suit your needs than bitvector if you need to do flag/bit operations instead of array operations. bitset does have an operator[], though.

+

21 +Vectors can be treated as contiguous memory.

+

EASTL vectors (and strings) guarantee that elements are present in a linear contiguous array. This means that you can use a vector as you would a C-style array by using the vector data() member function or by using &v[0].

+

To use a vector as a pointer to an array, you can use the following code:

+

struct Widget {
+     uint32_t x;
+     uint32_t y;
+ };
+
+ vector<Widget> v;
+
+ quick_sort((uint64_t*)v.data(), (uint64_t*)(v.data() + v.size()));

+

22 +Search hash_map<string> via find_as() instead of find().

+

EASTL hash tables offer a bonus function called find_as which lets you search a hash table by something other than the container type. This is particularly useful for hash tables of string objects that you want to search for by string literals (e.g. "hello") or char pointers. If you search for a string via the find function, your string literal will necessarily be converted to a temporary string object, which is inefficient.

+

To use find_as, you can use the following code:

+

hash_map<string, int> hashMap;
+ hash_map<string, int>::iterator it = hashMap.find_as("hello"); // Using default hash and compare.

+

23 +Take advantage of type_traits (e.g. EASTL_DECLARE_TRIVIAL_RELOCATE).

+

EASTL includes a fairly serious type traits library that is on par with the one found in Boost but offers some additional performance-enhancing help as well. The type_traits library provides information about class types, as opposed to class instances. For example, the is_integral type trait tells if a type is one of int, short, long, char, uint64_t, etc.
+
+There are three primary uses of type traits:

+
    +
  • Allowing for optimized operations on some data types.
  • +
  • Allowing for different logic pathways based on data types.
  • +
  • Allowing for compile-type assertions about data type expectations.
  • +
+Most of the type traits are automatically detected and implemented by the compiler. However, EASTL allows for the user to explicitly give the compiler hints about type traits that the compiler cannot know, via the EASTL_DECLARE declarations. If the user has a class that is relocatable (i.e. can safely use memcpy to copy values), the user can use the EASTL_DECLARE_TRIVIAL_RELOCATE declaration to tell the compiler that the class can be copied via memcpy. This will automatically significantly speed up some containers and algorithms that use that class.
+
+Here is an example of using type traits to tell if a value is a floating point value or not:
+
+
template <typename T>
+ DoSomething(T t) {
+    assert(is_floating_point<T>::value);
+ }
+Here is an example of declaring a class as relocatable and using it in a vector.
+
+
EASTL_DECLARE_TRIVIAL_RELOCATE(Widget); // Usually you put this at the Widget class declaration.
+ vector<Widget> wVector;
+ wVector.erase(wVector.begin());         // This operation will be optimized via using memcpy.
+The following is a full list of the currently recognized type traits. Most of these are implemented as of this writing, but if there is one that is missing, feel free to contact the maintainer of this library and request that it be completed. +
    +
  • is_void
  • +
  • is_integral
  • +
  • is_floating_point
  • +
  • is_arithmetic
  • +
  • is_fundamental
  • +
  • is_const
  • +
  • is_volatile
  • +
  • is_abstract
  • +
  • is_signed
  • +
  • is_unsigned
  • +
  • is_array
  • +
  • is_pointer
  • +
  • is_reference
  • +
  • is_member_object_pointer
  • +
  • is_member_function_pointer
  • +
  • is_member_pointer
  • +
  • is_enum
  • +
  • is_union
  • +
  • is_class
  • +
  • is_polymorphic
  • +
  • is_function
  • +
  • is_object
  • +
  • is_scalar
  • +
  • is_compound
  • +
  • is_same
  • +
  • is_convertible
  • +
  • is_base_of
  • +
  • is_empty
  • +
  • is_pod
  • +
  • is_aligned
  • +
  • has_trivial_constructor
  • +
  • has_trivial_copy
  • +
  • has_trivial_assign
  • +
  • has_trivial_destructor
  • +
  • has_trivial_relocate1
  • +
  • has_nothrow_constructor
  • +
  • has_nothrow_copy
  • +
  • has_nothrow_assign
  • +
  • has_virtual_destructor
  • +
  • alignment_of
  • +
  • rank
  • +
  • extent
  • +
+1 has_trivial_relocate is not found in Boost nor the C++ standard update proposal. However, it is very useful in allowing for the generation of optimized object moving operations. It is similar to the is_pod type trait, but goes further and allows non-pod classes to be categorized as relocatable. Such categorization is something that no compiler can do, as only the user can know if it is such. Thus EASTL_DECLARE_TRIVIAL_RELOCATE  is provided to allow the user to give the compiler a hint. +

24 +Name containers to track memory usage. +

+

All EASTL containers which allocate memory have a built-in function called set_name and have a constructor argument that lets you specify the container name. This name is used in memory tracking and allows for the categorization and measurement of memory usage. You merely need to supply a name for your container to use and it does the rest.

+

Here is an example of creating a list and naming it "collision list":

+

list<CollisionData> collisionList(allocator("collision list"));or
+ list<CollisionData> collisionList;
+collisionList.get_allocator().set_name("collision list");

+

Note that EASTL containers do not copy the name contents but merely copy the name pointer. This is done for simplicity and efficiency. A user can get around this limitation by creating a persistently present string table. Additionally, the user can get around this by declaring static but non-const strings and modifying them at runtime.

+

25 +Learn the algorithms.

+

EASTL algorithms provide a variety of optimized implementations of fundamental algorithms. Many of the EASTL algorithms are the same as the STL algorithm set, though EASTL adds additional algorithms and additional optimizations not found in STL implementations such as Microsoft's. The copy algorithm, for example, will memcpy data types that have the has_trivial_relocate type trait instead of doing an element-by-element copy.
+
+ The classifications we use here are not exactly the same as found in the C++ standard; they have been modified to be a little more intuitive. Not all the functions listed here may be yet available in EASTL as you read this. If you want some function then send a request to the maintainer. Detailed documentation for each algorithm is found in algorithm.h or the otherwise corresponding header file for the algorithm.
+
+ Search

+
    +
  • find, find_if
  • +
  • find_end
  • +
  • find_first_of
  • +
  • adjacent_find
  • +
  • binary_search
  • +
  • search, search_n
  • +
  • lower_bound
  • +
  • upper_bound
  • +
  • equal_range
  • +
+

Sort

+
    +
  • is_sorted
  • +
  • quick_sort
  • +
  • insertion_sort
  • +
  • shell_sort
  • +
  • heap_sort
  • +
  • merge_sort, merge_sort_buffer
  • +
  • merge
  • +
  • inplace_merge
  • +
  • partial_sort
  • +
  • stable_sort
  • +
  • partial_sort_copy
  • +
  • <other sort functions found in the EASTL bonus directories>
  • +
+

Modifying

+
    +
  • fill, fill_n
  • +
  • generate, generate_n
  • +
  • random_shuffle
  • +
  • swap
  • +
  • iter_swap
  • +
  • swap_ranges
  • +
  • remove, remove_if
  • +
  • remove_copy, remove_copy_if
  • +
  • replace, replace_if
  • +
  • replace_copy, replace_copy_if
  • +
  • reverse
  • +
  • reverse_copy
  • +
  • rotate
  • +
  • rotate_copy
  • +
  • partition
  • +
  • stable_partition
  • +
  • transform
  • +
  • next_permutation
  • +
  • prev_permutation
  • +
  • unique
  • +
  • unique_copy
  • +
+

Non-Modifying

+
    +
  • for_each
  • +
  • copy
  • +
  • copy_backward
  • +
  • count, count_if
  • +
  • equal
  • +
  • mismatch
  • +
  • min
  • +
  • max
  • +
  • min_element
  • +
  • max_element
  • +
  • lexicographical_compare
  • +
  • nth_element
  • +
+

Heap

+
    +
  • is_heap
  • +
  • make_heap
  • +
  • push_heap
  • +
  • pop_heap
  • +
  • change_heap
  • +
  • sort_heap
  • +
  • remove_heap
  • +
+

Set

+
    +
  • includes
  • +
  • set_difference
  • +
  • set_symmetric_difference
  • +
  • set_intersection
  • +
  • set_union
  • +
+

26 +Pass and return containers by reference instead of value.

+

If you aren't paying attention you might accidentally write code like this:

+

void DoSomething(list<Widget> widgetList) {
+     ...
+}

+

The problem with the above is that widgetList is passed by value and not by reference. Thus a copy of the container is made and passed instead of a reference of the container being passed. This may seem obvious to some but this happens periodically and the compiler gives no warning and the code will often execute properly, but inefficiently. Of course there are some occasions where you really do want to pass values instead of references.

+

27 +Consider using reset_lose_memory() for fast container teardown.

+

EASTL containers have a reset function which unilaterally resets the container to a newly constructed state. The contents of the container are forgotten; no destructors are called and no memory is freed. This is a risky but powerful function for the purpose of implementing very fast temporary containers. There are numerous cases in high performance programming when you want to create a temporary container out of a scratch buffer area, use the container, and then just "vaporize" it, as it would be a waste of time to go through the trouble of clearing the container and destroying and freeing the objects. Such functionality is often used with hash tables or maps and with a stack allocator (a.k.a. linear allocator).

+

Here's an example of usage of the reset function and a PPMalloc-like StackAllocator:

+

pStackAllocator->push_bookmark();
+ hash_set<Widget, less<Widget>, StackAllocator> wSet(pStackAllocator);
+<use wSet>
+ wSet.reset_lose_memory();
+ pStackAllocator->pop_bookmark();

+

+

28 +Consider using fixed_substring instead of copying strings. +

+

EASTL provides a fixed_substring class which uses a reference to a character segment instead of allocating its own string memory. This can be a more efficient way to work with strings under some circumstances.

+

Here's an example of usage of fixed_substring:

+

basic_string<char> str("hello world");
+ fixed_substring<char> sub(str, 6, 5); // sub == "world"

+

fixed_substring can refer to any character array and not just one that derives from a string object.

+

29 + Consider using vector::push_back(void).

+

EASTL provides an alternative way to insert elements into containers that avoids copy construction and/or the creation of temporaries. Consider the following code:

+

vector<Widget> widgetArray;
+ widgetArray.push_back(Widget());

+

The standard vector push_back function requires you to supply an object to copy from. This incurs the cost of the creation of a temporary and for some types of classes or situations this cost may be undesirable. It additionally requires that your contained class support copy-construction whereas you may not be able to support copy construction. As an alternative, EASTL provides a push_back(void) function which requires nothing to copy from but instead constructs the object in place in the container. So you can do this:

+

vector<Widget> widgetArray;
+ widgetArray.push_back();
+widgetArray.back().x = 0; // Example of how to reference the new object.

+

Other containers with such copy-less functions include:

+

vector::push_back()
+ deque::push_back()
+ deque::push_front()
+ list::push_back()
+ list::push_front()
+ slist::push_front()
+ map::insert(const key_type& key)
+ multimap::insert(const key_type& key)
+ hash_map::insert(const key_type& key)
+ hash_multimap::insert(const key_type& key)

+

Note that the map functions above allow you to insert a default value specified by key alone and not a value_type like with the other map insert functions.

+
+

End of document
+
+
+
+

+ + diff --git a/doc/html/EASTL Design.html b/doc/html/EASTL Design.html new file mode 100644 index 0000000..479dacc --- /dev/null +++ b/doc/html/EASTL Design.html @@ -0,0 +1,424 @@ + + + + EASTL Design + + + + + + +

EASTL Design

+

Introduction

+

EASTL (EA Standard Template Library) is designed to be a template library which encompasses and extends the + functionality of standard C++ STL while improving it in various ways useful to game development. Much of EASTL's design + is identical to standard STL, as the large majority of the STL is well-designed for many uses. The primary areas where +EASTL deviates from standard STL implementations are essentially the following:

+
    +
  • EASTL has a simplified and more flexible custom allocation scheme.
  • +
  • EASTL has significantly easier to read code.
  • +
  • EASTL has extension containers and algorithms.
  • +
  • EASTL has optimizations designed for game development.
  • +
+

Of the above items, the only one which is an incompatible difference with STL is the case of memory allocation. The + method for defining a custom allocator for EASTL is slightly different than that of standard STL, though they are 90% + similar. The 10% difference, however, is what makes EASTL generally easier and more powerful to work with than standard +STL. Containers without custom allocators act identically between EASTL and standard STL.

+

Motivations

+

Our motivations for making EASTL drive the design of EASTL. As identified in the EASTL RFC (Request for Comment), the + primary reasons for implementing a custom version of the STL are: +

+
    +
  • Some STL implementations (especially Microsoft STL) have inferior +performance characteristics that make them unsuitable for game development. EASTL is faster than all existing STL +implementations.
  • +
  • The STL is sometimes hard to debug, as most STL implementations use cryptic variable names and unusual data +structures.
  • +
  • STL allocators are sometimes painful to work with, as they have many requirements and cannot be modified once bound +to a container.
  • +
  • The STL includes excess functionality that can lead to larger code than desirable. It's not very easy to tell +programmers they shouldn't use that functionality.
  • +
  • The STL is implemented with very deep function calls. This results in unacceptable performance in non-optimized +builds and sometimes in optimized builds as well.
  • +
  • The STL doesn't support alignment of contained objects.
  • +
  • STL containers won't let you insert an entry into a container without supplying an entry to copy from. This can be +inefficient.
  • +
  • Useful STL extensions (e.g. slist, hash_map, shared_ptr) found in existing STL implementations such as STLPort are +not portable because they don't exist in other versions of STL or aren't consistent between STL versions.
  • +
  • The STL lacks useful extensions that game programmers find useful (e.g. intrusive_list) but which could be best +optimized in a portable STL environment.
  • +
  • The STL has specifications that limit our ability to use it efficiently. For example, STL vectors are not +guaranteed to use contiguous memory and so cannot be safely used as an array.
  • +
  • The STL puts an emphasis on correctness before performance, whereas sometimes you can get significant performance +gains by making things less academically pure.
  • +
  • STL containers have private implementations that don't allow you to work with their data in a portable way, yet +sometimes this is an important thing to be able to do (e.g. node pools).
  • +
  • All existing versions of STL allocate memory in empty versions of at least some of their containers. This is not +ideal and prevents optimizations such as container memory resets that can greatly increase performance in some +situations.
  • +
  • The STL is slow to compile, as most modern STL implementations are very large.
  • +
  • There are legal issues that make it hard for us to freely use portable STL implementations such as STLPort.
  • +
  • We have no say in the design and implementation of the STL and so are unable to change it to work for our +needs.
  • +
+

Prime Directives

+

The implementation of EASTL is guided foremost by the +following directives which are listed in order of importance.

+
    +
  1. Efficiency (speed and memory usage)
  2. +
  3. Correctness
  4. +
  5. Portability
  6. +
  7. Readability
  8. +
+

Note that unlike commercial STL implementations which must put correctness above all, we put a higher value on + efficiency. As a result, some functionality may have some usage limitation that is not present in other similar systems +but which allows for more efficient operation, especially on the platforms of significance to us.

+

Portability is significant, but not critical. Yes, EASTL must compile and run on all platforms that we will ship games + for. But we don't take that to mean under all compilers that could be conceivably used for such platforms. For example, + Microsoft VC6 can be used to compile Windows programs, but VC6's C++ support is too weak for EASTL and so you simply +cannot use EASTL under VC6.

+

Readability is something that EASTL achieves better than many other templated libraries, particularly Microsoft STL and + STLPort. We make every attempt to make EASTL code clean and sensible. Sometimes our need to provide optimizations + (particularly related to type_traits and iterator types) results in less simple code, but efficiency happens to be our +prime directive and so it overrides all other considerations.

+

Thread Safety

+

It's not simple enough to simply say that EASTL is thread-safe or thread-unsafe. However, we can say that with respect +to thread safety that EASTL does the right thing.

+

Individual EASTL containers are not thread-safe. That is, access to an instance of a container from multiple + threads at the same time is unsafe if any of those accesses are modifying operations. A given container can be read + from multiple threads simultaneously as well as any other standalone data structure. If a user wants to be able to have + modifying access to an instance of a container from multiple threads, it is up to the user to ensure that proper thread +synchronization occurs. This usually means using a mutex.

+

EASTL classes other than containers are the same as containers with respect to thread safety. EASTL functions (e.g. + algorithms) are inherently thread-safe as they have no instance data and operate entirely on the stack. As of this +writing, no EASTL function allocates memory and thus doesn't bring thread safety issues via that means.

+

The user may well need to be concerned about thread safety with respect to memory allocation. If the user modifies + containers from multiple threads, then allocators are going to be accessed from multiple threads. If an allocator is + shared across multiple container instances (of the same type of container or not), then mutexes (as discussed above) + the user uses to protect access to individual instances will not suffice to provide thread safety for allocators used + across multiple instances. The conventional solution here is to use a mutex within the allocator if it is expected to be +used by multiple threads.

+

EASTL uses neither static nor global variables and thus there are no inter-instance dependencies that would make +thread safety difficult for the user to implement.

+

Container Design

+

All EASTL containers follow a set of consistent conventions. Here we define the prototypical container which has the + minimal functionality that all (non-adapter) containers must have. Some containers (e.g. stack) are explicitly adapter + containers and thus wrap or inherit the properties of the wrapped container in a way that is implementation + specific.
+

+
template <class T, class Allocator = +EASTLAllocator>
+class container
+{
+public:
+    typedef container<T, Allocator>            this_type;
+    typedef +T                      +            value_type;
+    typedef T*                    +             pointer;
+    typedef const T*                        +   const_pointer;
+    typedef +T&                                 reference;
+ +    typedef const +T&                           const_reference;
+ +    typedef +ptrdiff_t                          difference_type;
+ +    typedef +impl_defined                       size_type;
+ +    typedef impl-defined                   +    iterator;
+    typedef impl-defined                   +    const_iterator;
+    typedef reverse_iterator<iterator>         reverse_iterator;
+    typedef reverse_iterator<const_iterator>   reverse_const_iterator;
+    typedef Allocator                  +        allocator_type;
+
+public:
+    container(
const +allocator_type& allocator = allocator_type());
+    container(const
this_type& +x);
+
+    
this_type& +operator=(this_type& x);
+    void swap(
this_type& x);
+    void reset();
+
+    allocator_type& get_allocator();
+    void            set_allocator(allocator_type& allocator);
+
+    iterator       begin();
+    const_iterator begin() const;
+    iterator       end();
+    const_iterator end() const;
+
+    bool validate() const;
    int  validate_iterator(const_iterator i) +const;

+protected:
+    allocator_type mAllocator;
+};
+
+template <class T,
class +Allocator>
+bool operator==(const container<T, Allocator>& a, const container<T,
Allocator>& b);
+
+template <class T,
class +Allocator>
+bool operator!=(const container<T,
Allocator>& a, const +container<T, Allocator>& +b);
+
+Notes: +
    +
  • Swapped containers do not swap their allocators.
  • +
  • Newly constructed empty containers do no memory allocation. Some STL and other container libraries allocate an +initial node from the class memory allocator. EASTL containers by design never do this. If a container needs an initial +node, that node should be made part of the container itself or be a static empty node object.
  • +
  • Empty containers (new or otherwise) contain no constructed objects, including those that might be in an 'end' node. +Similarly, no user object (e.g. of type T) should be constructed unless required by the design and unless documented in +the container/algorithm contract. 
  • +
  • The reset function is a special extension function which unilaterally resets the container to an empty state +without freeing the memory of the contained objects. This is useful for very quickly tearing down a container built +into scratch memory. No memory is allocated by reset, and the container has no allocated memory after the reset is +executed.
  • +
  • The validate and validate_iterator functions provide explicit container and iterator validation. EASTL provides an option to do implicit automatic iterator and container validation, but full validation (which can be potentially extensive) has too much of a performance cost to execute implicitly, even in a debug build. So EASTL provides these explicit functions which can be called by the user at the appropriate time and in optimized builds as well as debug builds.
  • +
+

Allocator Design

+

The most significant difference between EASTL and standard C++ STL is that standard STL containers are templated on an + allocator class with the interface defined in std::allocator. std::allocator is defined in the C++ standard as + this:
+

+
// Standard C++ allocator
+
+ template <class T>
+class allocator

+{
+public:
+    typedef size_t    size_type;
+    typedef ptrdiff_t difference_type;
+    typedef T*        pointer;
+    typedef const T*  const_pointer;
+    typedef T&       + reference;
+    typedef const +T&  const_reference;
+    typedef T         value_type;
+
+    template <class U>
+    struct rebind { typedef allocator<U> other; };

+
+    allocator() throw();
+    allocator(const allocator&) throw();
+    template <class U>
+    allocator(const allocator<U>&) throw();
+
+
   ~allocator() +throw();
+
+
    pointer   +    address(reference x) const;
+    const_pointer address(const_reference x) +const;
+    pointer       allocate(size_type, typename +allocator<void>::const_pointer hint = 0);
+    void          deallocate(pointer p, +size_type n);
+    size_type     max_size() const +throw();
+    void          construct(pointer p, +const T& val);
+    void          destroy(pointer +p);
+};
+

Each STL container needs to have an allocator templated on container type T associated with it. The problem with this +is that allocators for containers are defined at the class level and not the instance level. This makes it painful to +define custom allocators for containers and adds to code bloat. Also, it turns out that the containers don't actually +use allocator<T> but instead use allocator<T>::rebind<U>::other. Lastly, you cannot access this +allocator after the container is constructed. There are some good academic reasons why the C++ standard works this way, +but it results in a lot of unnecessary pain and makes concepts like memory tracking much harder to implement.

+

What EASTL does is use a more familiar memory allocation pattern whereby there is only one allocator class interface + and it is used by all containers. Additionally EASTL containers let you access their allocators and query them, name +them, change them, etc.

+

EASTL has chosen to make allocators not be copied between containers during container swap and assign operations. This + means that if container A swaps its contents with container B, both containers retain their original allocators. + Similarly, assigning container A to container B causes container B to retain its original allocator. Containers that + are equivalent should report so via operator==; EASTL will do a smart swap if allocators are equal, and a brute-force + swap otherwise.
+

+
// EASTL allocator
+
+class allocator
+{
+public:
+    allocator(const char* pName = NULL);
+
+    void* allocate(size_t n, int flags = 0);
+    void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0);
+    void  deallocate(void* p, size_t n);
+
+    const char* get_name() const;
+    void        set_name(const char* pName);
+};
+
+allocator* GetDefaultAllocator();
+

Fixed Size Container Design

+

EASTL supplies a set of fixed-size containers that the user can use, though the user can also implement their own + versions. So in addition to class list there is class fixed_list. The fixed_list class implements a linked list via a + fixed-size pool of contiguous memory which has no space overhead (unlike with a regular heap), doesn't cause +fragmentation, and allocates very quickly.

+

EASTL implements fixed containers via subclasses of regular containers which set the regular container's allocator to + point to themselves. Thus the implementation for fixed_list is very tiny and consists of little more + than constructor and allocator functions. This design has some advantages but has one small disadvantage. The + primary advantages are primarily that code bloat is reduced and that the implementation is simple and the user can + easily extend it. The primary disadvantage is that the parent list class ends up with a pointer to itself and thus has + 4 bytes that could arguably be saved if the system were designed differently. That different design would be to make the + list class have a policy template parameter which specifies that it is a fixed pool container. EASTL chose not to + follow the policy design because it would complicate the implementation, make it harder for the user to extend the + container, and would potentially waste more memory due to code bloat than it would save due to the 4 byte savings it +achieves in container instances.

+

Algorithm Design

+

EASTL algorithms very much follow the philosophy of standard C++ algorithms, as this philosophy is sound and efficient. + One of the primary aspects of algorithms is that they work on iterators and not containers. You will note for example + that the find algorithm takes a first and last iterator as arguments and not a container. This has two primary + benefits: it allows the user to specify a subrange of the container to search within and it allows the user to apply +the find algorithm to sequences that aren't containers (e.g. a C array).

+

EASTL algorithms are optimized at least as well as the best STL algorithms found in commercial libraries and are + significantly optimized over the algorithms that come with the first-party STLs that come with compilers. Most significantly, EASTL algorithms take advantage of type traits of contained classes and + take advantage of iterator types to optimize code generation. For example, if you resize an array of integers (or other "pod" type), EASTL will detect that this can be done with a memcpy instead of a slow object-by-object move as would +Microsoft STL.

+

The optimizations found in EASTL algorithms and the supporting code in EASTL type traits consists of some fairly + tricky advanced C++ and while it is fairly easy to read, it requires a C++ expert (language lawyer, really) to + implement confidently. The result of this is that it takes more effort to develop and maintain EASTL than it would to +maintain a simpler library. However, the performance advantages have been deemed worth the tradeoff.

+

Smart Pointer Design

+

EASTL implements the following smart pointer types:

+
    +
  • shared_ptr
  • +
  • shared_array
  • +
  • weak_ptr
  • +
  • intrusive_ptr
  • +
  • scoped_ptr
  • +
  • scoped_array
  • +
  • linked_ptr
  • +
  • linked_array
  • +
+All but linked_ptr/linked_array are well-known smart pointers from the Boost library. The behaviour of these smart +pointers is very similar to those from Boost with two exceptions: +
    +
  • EASTL smart pointers allow you to assign an allocator to them.
  • +
  • EASTL shared_ptr implements deletion via a templated parameter instead of a dynamically allocated virtual +member object interface.
  • +
+

With respect to assigning an allocator, this gives EASTL more control over memory allocation and tracking, as Boost +smart pointers unilaterally use global operator new to allocate memory from the global heap.

+

With respect to shared_ptr deletion, EASTL's current design of using a templated parameter is questionable, but does + have some reason. The advantage is that EASTL avoids a heap allocation, avoids virtual function calls, and avoids + templated class proliferation. The disadvantage is that EASTL shared_ptr containers which hold void pointers can't call + the destructors of their contained objects unless the user manually specifies a custom deleter template parameter. This + is a case whereby EASTL is more efficient but less safe. We can revisit this topic in the future if it becomes an + issue.

+

list::size is O(n)

+

As of this writing, EASTL has three linked list classes: list, slist, and intrusive_list. In each of these classes, the + size of the list is not cached in a member size variable. The result of this is that getting the size of a list is not + a fast operation, as it requires traversing the list and counting the nodes. We could make the list::size function be + fast by having a member mSize variable which tracks the size as we insert and delete items. There are reasons for + having such functionality and reasons for not having such functionality. We currently choose to not have a member mSize + variable as it would add four bytes to the class, add a tiny amount of processing to functions such as insert and + erase, and would only serve to improve the size function, but no others. In the case of intrusive_list, it would do + additional harm. The alternative argument is that the C++ standard states that std::list::size should be an O(1) + operation (i.e. have a member size variable), that many C++ standard library list implementations do so, that the + size is but an integer which is quick to update, and that many users expect to have a fast size function. In the final + analysis, we are developing a library for game development and performance is paramount, so we choose to not cache the +list size. The user can always implement a size cache himself.

+

basic_string doesn't use copy-on-write

+

The primary benefit of CoW is that it allows for the sharing of string data between two string objects. Thus if you say + this:

+

string a("hello");
+ string b(a);

+

the "hello" will be shared between a and b. If you then say this:

+

a = "world";

+

then a will release its reference to "hello" and leave b with the only + reference to it. Normally this functionality is accomplished via reference counting and with atomic operations or +mutexes.

+

The C++ standard does not say anything about basic_string and CoW. However, for a basic_string implementation to be + standards-conforming, a number of issues arise which dictate some things about how one would have to implement a CoW + string. The discussion of these issues will not be rehashed here, as you can read the references below for better + detail than can be provided in the space we have here. However, we can say that the C++ standard is sensible + and that anything we try to do here to allow for an efficient CoW implementation would result in a generally +unacceptable string interface.

+

The disadvantages of CoW strings are:

+
    +
  • A reference count needs to exist with the string, which increases string memory usage.
  • +
  • With thread safety, atomic operations and mutex locks are expensive, especially on weaker memory systems such +as console gaming platforms.
  • +
  • All non-const string accessor functions need to do a sharing check, and the first such check needs to detach the string. Similarly, all string assignments need to do a sharing check as well. If you access the string before doing an assignment, the assignment doesn't result in a shared string, because the string has already been detached.
  • +
  • String sharing doesn't happen the large majority of the time. In some cases, the total sum of the reference +count memory can exceed any memory savings gained by the strings that share representations. 
  • +
+

The addition of a cow_string class is under consideration for EASTL. There are conceivably some systems which have + string usage patterns which would benefit from CoW sharing. Such functionality is best saved for a separate +string implementation so that the other string uses aren't penalized.

+

This is a good starting HTML reference on the topic:

+
+

+ http://www.gotw.ca/publications/optimizations.htm

+
+

Here is a well-known Usenet discussion on the topic:

+
+

http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d

+
+
+End of document
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ + diff --git a/doc/html/EASTL FAQ.html b/doc/html/EASTL FAQ.html new file mode 100644 index 0000000..04b1578 --- /dev/null +++ b/doc/html/EASTL FAQ.html @@ -0,0 +1,2385 @@ + + + + EASTL FAQ + + + + + + + +

EASTL FAQ

+

We provide a FAQ (frequently asked questions) list here for a number of commonly asked questions about EASTL and STL in +general. Feel free to suggest new FAQ additions based on your own experience.

+

Information

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
1What is EASTL?
2What uses are EASTL suitable for?
3
How does EASTL differ from standard C++ +STL?
4Is EASTL thread-safe?
5What platforms/compilers does EASTL support?
6Why is there EASTL when there is the STL?
7Can I mix EASTL with standard C++ STL?
8Where can I learn more about STL and EASTL?
9What is the legal status of EASTL?
10Does EASTL deal with compiler exception handling settings?
11What C++ language features does EASTL use (e.g. virtual functions)?
12What compiler warning levels does EASTL support?
13Is EASTL compatible with Lint?
14What compiler settings do I need to compile EASTL?
15How hard is it to incorporate EASTL into my project?
16Should I use EASTL instead of std STL or instead of my custom library?
17I think I've found a bug. What do I do?
18Can EASTL be used by third party EA developers?
+

Performance +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
1How efficient is EASTL compared to standard C++ STL implementations?
2How efficient is EASTL in general?
3Strings don't appear to use the "copy-on-write" optimization. Why not?
4Does EASTL cause code bloat, given that it uses templates?
5Don't STL and EASTL containers fragment memory?
6I don't see container optimizations for equivalent scalar types such as pointer types. +Why?
7I've seen some STL's provide a default quick "node allocator" as the default allocator. Why +doesn't EASTL do this?
8Templates sometimes seem to take a long time to compile. What do I do about that?
9How do I assign a custom allocator to an EASTL container?
10How well does EASTL inline?
11How do I control function inlining?
12C++ / EASTL seems to bloat my .obj files much more than C does.
13What are the best compiler settings for EASTL?
+

Problems

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
1I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?
2I am getting compiler warnings (e.g. C4244, C4242 or C4267) that make no sense. Why?
3I am getting compiler warning C4530, which complains about exception handling and "unwind +semantics." What gives?
4Why are tree-based containers hard to read with a debugger?
5The EASTL source code is sometimes rather complicated looking. Why is that?
6When I get compilation errors, they are very long and complicated looking. What do I do?
7Templates sometimes seem to take a long time to compile. What do I do about that?
8I get the compiler error: "template instantiation depth exceeds maximum of 17. use +-ftemplate-depth-NN to increase the maximum"
9I'm getting errors about min and max while compiling.
10C++ / EASTL seems to bloat my .obj files much more than C does.
11I'm getting compiler errors regarding operator new being previously defined.
12I'm getting errors related to wchar_t string  functions such as wcslen.
13I'm getting compiler warning C4619: there is no warning number Cxxxx (e.g. C4217).
14My stack-based fixed_vector is not respecting the object alignment requirements.
15I am getting compiler errors when using GCC under XCode (Macintosh/iphone).
16I am getting linker errors about Vsnprintf8 or Vsnprintf16.
17I am getting compiler errors about UINT64_C or UINT32_C.
18I am getting a crash with a global EASTL container.
19Why doesn't EASTL support passing NULL to functions with pointer arguments?
+

Debug

+ + + + + + + + + + + + + + + + + + + + + + + +
1How do I get VC++ mouse-overs to view templated data?
2How do I view containers if the visualizer/tooltip support is not present?
3The EASTL source code is sometimes rather complicated looking. Why is that?
4When I get compilation errors, they are very long and complicated looking. What do I +do?
5How do I measure hash table balancing?
+

Containers

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
1Why do some containers have "fixed" versions (e.g. fixed_list) but others(e.g. deque) don't have +fixed versions?
2Can I mix EASTL with standard C++ STL?
3Why are there so many containers?
4Don't STL and EASTL containers fragment memory?
5I don't see container optimizations for equivalent scalar types such as pointer types. +Why?
6What about alternative container and algorithm implementations (e.g. treaps, skip lists, avl +trees)?
7Why are containers hard to read with a debugger?
8How do I assign a custom allocator to an EASTL container?
9How do I set the VC++ debugger to display EASTL container data with tooltips?
10How do I use a memory pool with a container?
11How do I write a comparison (operator<()) for a struct that contains two or more +members?
12Why doesn't container X have member function Y?
13How do I search a hash_map of strings via a char pointer efficiently? If I use map.find("hello") +it creates a temporary string, which is inefficient.
14Why are set and hash_set iterators const (i.e. const_iterator)?
15How do I prevent my hash container from re-hashing?
16Which uses less memory, a map or a hash_map?
17How do I write a custom hash function?
18How do I write a custom compare function for a map or set?
19How do I force my vector or string capacity down to the size of the container?
20How do I iterate a container while (selectively) removing items from it?
21How do I store a pointer in a container?
22How do I make a union of two containers? difference? intersection?
23How do I override the default global allocator?
24How do I do trick X with the string class?
25How do EASTL smart pointers compare to Boost smart pointers?
26How do your forward-declare an EASTL container?
27How do I make two containers share a memory pool?
28Can I use a std (STL) allocator with EASTL?
29 What are the requirements of classes stored in containers?
+

Algorithms

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
1I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?
2How do I write a comparison (operator<()) for a struct that contains two or more +members?
3How do I sort something in reverse order?
4I'm getting errors about min and max while compiling.
5Why don't algorithms take a container as an argument instead of iterators? A container would be +more convenient.
6Given a container of pointers, how do I find an element by value (instead of by +pointer)?
7When do stored objects need to support operator < vs. when do they need to support operator ==?
8How do I sort via pointers or array indexes instead of objects directly?
+

Iterators

+ + + + + + + + + + + + + + + + + + + +
1What's the difference between iterator, const iterator, and const_iterator?
2How do I tell from an iterator what type of thing it is iterating?
3How do I iterate a container while (selectively) removing items from it?
4What is an insert_iterator?
+


+ Information +

+

Info.1 +What is EASTL?

+

EASTL refers to "EA Standard Template Library." It is a C++ template library that is analogous to the template facilities of the C++ standard library, which are often referred to as the STL. EASTL consists of the following systems:

+
    +
  • Containers
  • +
  • Iterators
  • +
  • Algorithms
  • +
  • Utilities
  • +
  • Smart pointers
  • +
  • Type traits
  • +
+

EASTL provides extensions and optimizations over the equivalents in standard C++ STL.

+

EASTL is a professional-level implementation which outperforms commercial implementations (where functionality overlaps) and is significantly easier to read and debug.

+

Info.2 +What uses are EASTL suitable for?

+

EASTL is suitable for any place where templated containers and algorithms would be appropriate. Thus any C++ tools could use it and many C++ game runtimes could use it, especially 2005+ generation game platforms. EASTL has optimizations that make it more suited to the CPUs and memory systems found on console platforms. Additionally, EASTL has some type-traits and iterator-traits-derived template optimizations that make it generally more efficient than home-brew templated containers.

+

Info.3 +How does EASTL differ from standard C++ STL?

+

There are three kinds of ways that EASTL differs from standard STL:

+
    +
  1. EASTL equivalents to STL sometimes differ.
  2. +
  3. EASTL implementations sometimes differ from STL implementations of the same thing.
  4. +
  5. EASTL has functionality that doesn't exist in STL.
  6. +
+

With respect to item #1, the changes are such that they benefit game development and not the type that could silently hurt you if you were more familiar with STL interfaces.

+

With respect to item #2, where EASTL implementations differ from STL implementations it is almost always due to improvements being made in the EASTL versions or tradeoffs being made which are considered better for game development.

+

With respect to item #3, there are a number of facilities that EASTL has that STL doesn't have, such as intrusive_list and slist containers, smart pointers, and type traits. All of these are facilities that assist in making more efficient game code and data.

+

Ways in which EASTL is better than standard STL:

+
    +
  • Has higher performance in release builds, sometimes dramatically so.
  • +
  • Has significantly higher performance in debug builds, due to less call overhead.
  • +
  • Has extended per-container functionality, particularly for game development.
  • +
  • Has additional containers that are useful for high performance game development.
  • +
  • Is easier to read, trace, and debug.
  • +
  • Memory allocation is much simpler and more controllable.
  • +
  • Has higher portability, as there is a single implementation for all platforms.
  • +
  • Has support of object alignment, whereas such functionality is not natively supported by STL.
  • +
  • We have control over it, so we can modify it as we like.
  • +
  • Has stricter standards for container design and behavior, particularly as this benefits game development.
  • +
+

Ways in which EASTL is worse than standard STL:

+
    +
  • Standard STL implementations are currently very reliable and weather-worn, whereas EASTL is less tested.
  • +
  • Standard STL is automatically available with just about every C++ compiler vendor's library.
  • +
  • Standard STL is supported by the compiler vendor and somewhat by the Internet community.
  • +
+

EASTL coverage of std STL

+
    +
  • list
  • +
  • vector
  • +
  • deque
  • +
  • string
  • +
  • set
  • +
  • multiset
  • +
  • map
  • +
  • multimap
  • +
  • bitset
  • +
  • queue
  • +
  • stack
  • +
  • priority_queue
  • +
  • memory
  • +
  • numeric
  • +
  • algorithm (all but inplace_merge, prev_permutation, next_permutation, nth_element, includes, unique_copy)
  • +
  • utility
  • +
  • functional
  • +
  • iterator
  • +
  • string_view
  • +
  • variant
  • +
  • any
  • +
  • optional
  • +
+

EASTL additions/amendments to std STL

+
    +
  • allocators work in a simpler way.
  • +
  • exception handling can be disabled.
  • +
  • all containers expose/declare their node size, so you can make a node allocator for them.
  • +
  • all containers have reset_lose_memory(), which unilaterally forgets their contents.
  • +
  • all containers have validate() and validate_iterator() functions.
  • +
  • all containers understand and respect object alignment requirements.
  • +
  • all containers guarantee no memory allocation upon being newly created as empty.
  • +
  • all containers and their iterators can be viewed in a debugger (no other STL does this, believe it or not).
  • +
  • linear containers guarantee linear memory.
  • +
  • vector has push_back(void).
  • +
  • vector has a data() function.
  • +
  • vector<bool> is actually a vector of type bool.
  • +
  • vector and string have set_capacity().
  • +
  • string has sprintf(), append_sprintf(), trim(), compare_i(), make_lower(), make_upper().
  • +
  • deque allows you to specify the subarray size.
  • +
  • list has a push_back(void) and push_front(void) function.
  • +
  • hash_map, hash_set, etc. have find_as().
  • +
+

EASTL coverage of TR1 (tr1 refers to proposed additions for the next C++ standard library, ~2008)

+
    +
  • array
  • +
  • type_traits (there are about 30 of these)
  • +
  • unordered_set (EASTL calls it hash_set)
  • +
  • unordered_multiset
  • +
  • unordered_map
  • +
  • unordered_multimap
  • +
  • shared_ptr, shared_array, weak_ptr, scoped_ptr, scoped_array, intrusive_ptr
  • +
+

EASTL additional functionality (not found elsewhere)

+
    +
  • fixed_list
  • +
  • fixed_slist
  • +
  • fixed_vector
  • +
  • fixed_string
  • +
  • fixed_substring
  • +
  • fixed_set
  • +
  • fixed_multiset
  • +
  • fixed_map
  • +
  • fixed_multimap
  • +
  • fixed_hash_set
  • +
  • fixed_hash_multiset
  • +
  • fixed_hash_map
  • +
  • fixed_hash_multimap
  • +
  • fixed_function
  • +
  • vector_set
  • +
  • vector_multiset
  • +
  • vector_map
  • +
  • vector_multimap
  • +
  • intrusive_list
  • +
  • intrusive_slist
  • +
  • intrusive_sdlist
  • +
  • intrusive_hash_set
  • +
  • intrusive_hash_multiset
  • +
  • intrusive_hash_map
  • +
  • intrusive_hash_multimap
  • +
  • slist (STLPort's STL has this)
  • +
  • heap
  • +
  • linked_ptr, linked_array
  • +
  • sparse_matrix (this is not complete as of this writing)
  • +
  • ring_buffer
  • +
  • compressed_pair
  • +
  • call_traits
  • +
  • binary_search_i, change_heap, find_first_not_of, find_last_of, find_last_not_of, identical
  • +
  • comb_sort, bubble_sort, selection_sort, shaker_sort, bucket_sort
  • +
  • equal_to_2, not_equal_to_2, str_equal_to, str_equal_to_i
    +
  • +
+

Info.4 +Is EASTL thread-safe? +

+

It's not simple enough to simply say that EASTL is thread-safe or thread-unsafe. However, we can say that with respect to thread safety that EASTL does the right thing.

+

Individual EASTL containers are not thread-safe. That is, access to an instance of a container from multiple threads at the same time is unsafe if any of those accesses are modifying operations. A given container can be read from multiple threads simultaneously, as can any other standalone data structure. If a user wants to be able to have modifying access to an instance of a container from multiple threads, it is up to the user to ensure that proper thread synchronization occurs. This usually means using a mutex.

+

EASTL classes other than containers are the same as containers with respect to thread safety. EASTL functions (e.g. algorithms) are inherently thread-safe as they have no instance data and operate entirely on the stack. As of this writing, no EASTL function allocates memory and thus doesn't bring thread safety issues via that means.

+

The user may well need to be concerned about thread safety with respect to memory allocation. If the user modifies containers from multiple threads, then allocators are going to be accessed from multiple threads. If an allocator is shared across multiple container instances (of the same type of container or not), then mutexes (as discussed above) the user uses to protect access to individual instances will not suffice to provide thread safety for allocators used across multiple instances. The conventional solution here is to use a mutex within the allocator if it is expected to be used by multiple threads.

+

EASTL uses neither static nor global variables and thus there are no inter-instance dependencies that would make thread safety difficult for the user to implement.

+

Info.5 +What platforms/compilers does EASTL support?

+

EASTL's support depends entirely on the compiler and not on the platform. EASTL works on any C++ compiler that completely conforms to the C++ language standard. Additionally, EASTL is 32 bit and 64 bit compatible. Since EASTL does not use the C or C++ standard library (with a couple small exceptions), it doesn't matter what kind of libraries are provided (or not provided) by the compiler vendor. However, given that we need to work with some compilers that aren't 100% conforming to the language standard, it will be useful to make a list here of those that are supported and those that are not:

+
+ + + + + + + + + + + + + + + + + + + + + +
CompilerStatusNotes
GCC 3.x+Not SupportedNot officially supported due to migration to Clang.
MSVC 12.0+SupportedThis compiler is used by the Windows based platforms
Clang 4.0+SupportedThis compiler is used by the Linux based platforms
+
+

Info.6 +Why is there EASTL when there is the STL?

+

The STL is largely a fine library for general purpose C++. However, we can improve upon it for our uses and gain other advantages as well. The primary motivations for the existence of EASTL are the following:

+
    +
  • Some STL implementations (especially Microsoft STL) have inferior performance characteristics that make them unsuitable for game development. EASTL is faster than all existing STL implementations.
  • +
  • The STL is sometimes hard to debug, as most STL implementations use cryptic variable names and unusual data structures.
  • +
  • STL allocators are sometimes painful to work with, as they have many requirements and cannot be modified once bound to a container.
  • +
  • The STL includes excess functionality that can lead to larger code than desirable. It's not very easy to tell programmers they shouldn't use that functionality.
  • +
  • The STL is implemented with very deep function calls. This results in unacceptable performance in non-optimized builds and sometimes in optimized builds as well.
  • +
  • The STL doesn't support alignment of contained objects.
  • +
  • STL containers won't let you insert an entry into a container without supplying an entry to copy from. This can be inefficient.
  • +
  • Useful STL extensions (e.g. slist, hash_map, shared_ptr) found in existing STL implementations such as STLPort are not portable because they don't exist in other versions of STL or aren't consistent between STL versions.
    +
  • +
  • The STL lacks useful extensions that game programmers find useful (e.g. intrusive_list) but which could be best optimized in a portable STL environment.
  • +
  • The STL puts an emphasis on correctness before performance, whereas sometimes you can get significant performance gains by making things less academically pure.
  • +
  • STL containers have private implementations that don't allow you to work with their data in a portable way, yet sometimes this is an important thing to be able to do (e.g. node pools).
  • +
  • All existing versions of STL allocate memory in empty versions of at least some of their containers. This is not ideal and prevents optimizations such as container memory resets that can greatly increase performance in some situations.
  • +
  • The STL is slow to compile, as most modern STL implementations are very large.
    +
  • +
  • There are legal issues that make it hard for us to freely use portable STL implementations such as STLPort.
  • +
  • We have no say in the design and implementation of the STL and so are unable to change it to work for our needs.
  • +
+

Note that there isn't actually anything in the C++ standard called "STL." STL is a term that merely refers to the templated portion of the C++ standard library.

+

Info.7 +Can I mix EASTL with standard C++ STL?

+

This is possible to some degree, though the extent depends on the implementation of C++ STL. One of the things that limits interoperability is something called iterator categories. Containers and algorithms recognize iterator types via their category, and STL iterator categories are not recognized by EASTL and vice versa.
+
+Things that you definitely can do:

+
    +
  • #include both EASTL and standard STL headers from the same .cpp file.
  • +
  • Use EASTL containers to hold STL containers.
  • +
  • Construct an STL reverse_iterator from an EASTL iterator.
  • +
  • Construct an EASTL reverse_iterator from an STL iterator.
  • +
+

Things that you probably will be able to do, though a given std STL implementation may prevent it: +

+
    +
  • Use STL containers in EASTL algorithms.
  • +
  • Use EASTL containers in STL algorithms.
  • +
  • Construct or assign to an STL container via iterators into an EASTL container.
  • +
  • Construct or assign to an EASTL container via iterators into an STL container.
  • +
+

Things that you would be able to do if the given std STL implementation is bug-free: +

+
    +
  • Use STL containers to hold EASTL containers. Unfortunately, VC7.x STL has a confirmed bug that prevents this. Similarly, STLPort versions prior to v5 have a similar bug.
  • +
+

Things that you definitely can't do:

+
    +
  • Use an STL allocator directly with an EASTL container (though you can use one indirectly).
  • +
  • Use an EASTL allocator directly with an STL container (though you can use one indirectly).
  • +
+

Info.8 +Where can I learn more about STL and EASTL? +

+

EASTL is close enough in philosophy and functionality to standard C++ STL that most of what you read about STL applies to EASTL. This is particularly useful with respect to container specifications. It would take a lot of work to document EASTL containers and algorithms in fine detail, whereas most standard STL documentation applies as-is to EASTL. We won't cover the differences here, as that's found in another FAQ entry.

+

That being said, we provide a list of sources for STL documentation that may be useful to you, especially if you are less familiar with the concepts of STL and template programming in general.

+
    +
  • The SGI STL web site. Includes a good STL reference.
  • +
  • CodeProject STL introduction.
  • +
  • Scott Meyers Effective STL book.
  • +
  • The Microsoft online STL documentation. Microsoft links go bad every couple months, so try searching for STL at the Microsoft MSDN site.
  • +
  • The Dinkumware online STL documentation. 
  • +
  • The C++ standard, which is fairly readable. You can buy an electronic version for about $18 and in the meantime you can make do with draft revisions of it off the Internet by searching for "c++ draft standard".
  • +
  • STL performance tips, by Pete Isensee
  • +
  • STL algorithms vs. hand-written loops, by Scott Meyers.
  • +
  • cppreference.com
  • +
  • isocpp.org
  • +
+

Info.9 +What is the legal status of EASTL?

+

EASTL is usable for all uses within Electronic Arts, both for internal usage and for shipping products for all platforms. Any externally derived code would be explicitly stated as such and approved by the legal department if such code ever gets introduced. As of EASTL v1.0, the red_black_tree.cpp file contains two functions derived from the original HP STL and have received EA legal approval for usage in any product.

+

Info.10 +Does EASTL deal with compiler exception handling settings?

+

EASTL has automatic knowledge of the compiler's enabling/disabling of exceptions. If your compiler is set to disable exceptions, EASTL automatically detects so and executes without them. Also, you can force-enable or force-disable that setting to override the automatic behavior by #defining EASTL_EXCEPTIONS_ENABLED to 0 or 1. See EASTL's config.h for more information.

+

Info.11 + What C++ language features does EASTL use (e.g. virtual +functions)?

+

EASTL uses the following C++ language features:

+
    +
  • Template functions, classes, member functions.
  • +
  • Multiple inheritance.
  • +
  • Namespaces.
  • +
  • Operator overloading.
  • +
+

EASTL does not use the following C++ language features: +

+
    +
  • Virtual functions / interfaces.
  • +
  • RTTI (dynamic_cast).
  • +
  • Global and static variables. There are a couple class static const variables, but they act much like enums.
  • +
  • Volatile declarations
  • +
  • Template export.
  • +
  • Virtual inheritance.
  • +
+

EASTL may use the following C++ language features: +

+
    +
  • Try/catch. This is an option that the user can enable and it defaults to whatever the compiler is set to use.
  • +
  • Floating point math. Hash containers have one floating point calculation, but otherwise floating point is not used.
  • +
+

Notes: +

+
    +
  • EASTL uses rather little of the standard C or C++ library and uses none of the C++ template library (STL) and iostream library. The memcpy family of functions is one example EASTL C++ library usage.
  • +
  • EASTL never uses global new / delete / malloc / free. All allocations are done via user-specified allocators, though a default allocator definition is available.
  • +
+

Info.12 + What compiler warning levels does EASTL support? +

+

For VC++ EASTL should compile without warnings on level 4, and should compile without warnings for "warnings disabled by default" except C4242, C4514, C4710, C4786, and C4820. These latter warnings are somewhat draconian and most EA projects have little choice but to leave them disabled.

+

For GCC, EASTL should compile without warnings with -Wall. Extensive testing beyond that hasn't been done.

+

However, due to the nature of templated code generation and due to the way compilers compile templates, unforeseen warnings may occur in user code that may or may not be addressable by modifying EASTL.

+

Info.13 + Is EASTL compatible with Lint? +

+

As of EASTL 1.0, minimal lint testing has occurred. Testing with the November 2005 release of Lint (8.00t) demonstrated bugs in Lint that made its analysis not very useful. For example, Lint seems to get confused about the C++ typename keyword and spews many errors with code that uses it. We will work with the makers of Lint to get this resolved so that Lint can provide useful information about EASTL.

+

Info.14 + What compiler settings do I need to compile EASTL? +

+

EASTL consists mostly of header files with templated C++ code, but there are also a few .cpp files that need to be compiled and linked in order to use some of the modules. EASTL will compile in just about any environment. As mentioned elsewhere in this FAQ, EASTL can be compiled at the highest warning level of most compilers, transparently deals with compiler exception handling settings, is savvy to most or all compilation language options (e.g. wchar_t is built-in or not, for loop variables are local or not), and has almost no platform-specific or compiler-specific code. For the most part, you can just drop it in and it will work. The primary thing that needs to be in place is that EASTL .cpp files need to be compiled with the same struct padding/alignment settings as other code in the project. This of course is the same for just about any C++ source code library.

+

See the Performance section of this FAQ for a discussion of the optimal compiler settings for EASTL performance.

+

Info.15 +How hard is it to incorporate EASTL into my project?

+

It's probably trivial.
+
+EASTL has only one dependency: EABase. And EASTL auto-configures itself for most compiler environments and for the most typical configuration choices. Since it is fairly highly warning-free, you won't likely need to modify your compiler warning settings, even if they're pretty strict. EASTL has a few .cpp files which need to be compiled if you want to use the modules associated with those files. You can just compile those files with your regular compiler settings. Alternatively, you can use one of the EASTL project files.
+
+In its default configuration, the only thing you need to provide to make EASTL work is to define implementations of the following operator new functions:

+
#include <new>
+void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line); +void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+The flags and debugFlags arguments correspond to PPMalloc/RenderWare GeneralAllocator/GeneralAllocatorDebug Malloc equivalents.
+

Info.16 +Should I use EASTL instead of std STL or instead of my custom library?

+

There are reasons you may want to use EASTL; there are reasons you may not want to use it. Ditto for std STL or any other library. Here we present a list of reasons (+ and -) for why you might want to use one or another. However, it should be noted that while EASTL contains functionality found in std STL, it has another ~40% of functionality not found in std STL, so EASTL and std STL (and whatever other template library you may have) are not mutually exclusive.
+
+EASTL
+

+
+ Has higher performance than any commercial STL, especially on console platforms.
+ + Has extended functionality tailored for game development.
+ + Is highly configurable, and we own it so it can be amended at will. Std STL is owned by a third party committee.
+ + Is much easier to read and debug than other similar libraries, especially std STL.
+
+ - Is highly unit tested, but does not have the same level of testing as std STL.
+ - Is more complicated than many users' lite template libraries, and may put off some beginners.
+- EASTL  
+

Std STL +

+
+ Is highly portable; your STL code will likely compile and run anywhere.
+ + Works without the need to install or download any package to use it. It just works.
+ + Is highly reliable and supported by the compiler vendor. You can have confidence in it.
+ + Some std STL versions (e.g. STLPort, VC8 STL) have better runtime debug checking than EASTL.
+
+ - Has (sometimes greatly) variable implementations, behavior, and performance between implementations.
+ - Is usually hard to read and debug.
+ - Doesn't support some of the needs of game development, such as aligned allocations, named allocations, intrusive containers, etc.
+- Is not as efficient as EASTL, especially on console platforms.
+

Your own library +

+
(please forgive us for implying there may be weaknesses in your libraries)
+ +

+ + You have control over it and can make it work however you want.
+ + You can fix bugs in it on the spot and have the fix in your codebase immediately.
+ + Your own library can be highly integrated into your application code or development environment.
+
+ - Many custom libraries don't have the same level of testing as libraries such as std STL or EASTL.
+ - Many custom libraries don't have the same breadth or depth as std STL or especially EASTL.
+- Many custom libraries don't have the level of performance tuning that std STL or especially EASTL has.
+

Info.17 +I think I've found a bug. What do I do?

+

Verify that you indeed have a bug
+There are various levels of bugs that can occur, which include the following:

+
    +
  1. Compiler warnings generated by EASTL.
  2. +
  3. Compiler errors generated by EASTL (failure to compile well-formed code).
  4. +
  5. Runtime misbehavior by EASTL (function does the wrong thing).
  6. +
  7. Runtime crash or data corruption by EASTL.
  8. +
  9. Mismatch between EASTL documentation and behavior.
  10. +
  11. Mismatch between EASTL behavior and user's expectations (mis-design).
  12. +
+

Any of the above items can be the fault of EASTL. However, the first four can also be the fault of the user. Your primary goal in verifying a potential bug is to determine if it is an EASTL bug or a user bug. Template errors can sometimes be hard to diagnose. It's probably best if you first show the problem to somebody you know to make sure you are not missing something obvious. Creating a reproducible case may be useful in helping convince yourself, but as is mentioned below, this is not required in order to report the bug.
+
+ Report the bug
+The first place to try is the standard EA centralized tech support site. As of this writing (10/2005), that tech site is http://eatech/. Due to the frequent technology churn that seems to occur within Electronic Arts, the bug reporting system in place when you read this may not be the one that was in place when this FAQ entry was written. If the tech site route fails, consider directly contacting the maintainer of the EASTL package.
+
+In reporting a bug, it is nice if there is a simple reproducible case that can be presented. However, such a case requires time to create, and so you are welcome to initially simply state what you think the bug is without producing a simple reproducible case. It may be that this is a known bug or it may be possible to diagnose the bug without a reproducible case. If more information is needed then the step of trying to produce a reproducible case may be necessary.

+

Info.18 + Can EASTL be used by third party EA developers?

+

EASTL and other core technologies authored by EA (and not licensed from other companies) can be used in source and binary form by designated 3rd parties. The primary case where there is an issue is if the library contains platform specific code for a platform that the 3rd party is not licensed for. In that case the platform-specific code would need to be removed. This doesn’t apply to EASTL, nor many of the other core tech packages.

+

Performance +

+ +

Perf.1 How efficient is EASTL compared to standard C++ STL implementations?

+

With respect to the functionality that is equivalent between EASTL and standard STL, the short answer to this is that EASTL is at least as efficient as other STL implementations and in a number of aspects is more so. EASTL has functionality such as intrusive_list and linked_ptr that don't exist in standard STL but are explicitly present to provide significant optimizations over standard STL.

+

The medium length answer is that EASTL is significantly more efficient than Dinkumware STL, and Microsoft Windows STL. EASTL is generally more efficient than Metrowerks STL, but Metrowerks has a few tricks up its sleeve which EASTL doesn't currently implement. EASTL is roughly equal in efficiency to STLPort and GCC 3.x+ STL, though EASTL has some optimizations that these do not.

+

The long answer requires a breakdown of the functionality between various versions of the STL.

+ +

Perf.2 How efficient is EASTL in general?

+

This question is related to the question, "How efficient are templates?" If you understand the effects of templates then you can more or less see the answer for EASTL. Templates are more efficient than the alternative when they are used appropriately, but can be less efficient than the alternative when used under circumstances that don't call for them. The strength of templates is that the compiler sees all the code and data types at compile time and can often reduce statements to smaller and faster code than with conventional non-templated code. The weakness of templates is that they sometimes produce more code and can result in what is often called "code bloat". However, it's important to note that unused template functions result in no generated nor linked code, so if you have a templated class with 100 functions but you only use one, only that one function will be compiled.

+

EASTL is a rather efficient implementation of a template library and pulls many tricks of the trade in terms of squeezing optimal performance out of the compiler. The only way to beat it is to write custom code for the data types you are working with, and even then people are sometimes surprised to find that their hand-implemented algorithm works no better or even worse than the EASTL equivalent. But certainly there are ways to beat templates, especially if you resort to assembly language programming and some kinds of other non-generic tricks.

+ +

Perf.3 Strings don't appear to use the "copy-on-write" (CoW) optimization. Why not?

+

+Short answer
+CoW provides a benefit for a small percentage of uses but provides a disadvantage for the large majority of uses.
+
+Long answer
+The primary benefit of CoW is that it allows for the sharing of string data between two string objects. Thus if you say this: +

string a("hello");
+string b(a);
+the "hello" will be shared between a and b. If you then say this: +
a = "world";
+then a will release its reference to "hello" and +leave b with the only reference to it. Normally this functionality is accomplished via reference +counting and with atomic operations or mutexes.

+ +

The C++ standard does not say anything about basic_string and CoW. +However, for a basic_string implementation to be standards-conforming, a number of issues arise +which dictate some things about how one would have to implement a CoW string. The discussion of +these issues will not be rehashed here, as you can read the references below for better detail +than can be provided in the space we have here. However, we can say that the C++ standard +is sensible and that anything we try to do here to allow for an efficient CoW implementation +would result in a generally unacceptable string interface.

+

The disadvantages of CoW strings are:

+
    +
  • A reference count needs to exist with the string, which increases string memory usage.
  • +
  • With thread safety, atomic operations and mutex locks are expensive, especially on weaker memory systems such as console gaming platforms.
  • +
  • All non-const string accessor functions need to do a sharing check and the first such check needs to detach the string. Similarly, all string assignments need to do a sharing check as well. If you access the string before doing an assignment, the assignment doesn't result in a shared string, because the string has already been detached.
  • +
  • String sharing doesn't happen the large majority of the time. In some cases, the total sum of the reference count memory can exceed any memory savings gained by the strings that share representations.
  • +
+

The addition of a cow_string class is under consideration for EASTL. There are conceivably some systems which have string usage patterns which would benefit from CoW sharing. Such functionality is best saved for a separate string implementation so that the other string uses aren't penalized.

+

References

+

This is a good starting HTML reference on the topic:
+    http://www.gotw.ca/publications/optimizations.htm

+

Here is a well-known Usenet discussion on the topic:
+    http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d

+ +

Perf.4 Does EASTL cause code bloat, given that it uses templates?

+

The reason that templated functions and classes might cause an increase in code size is that each template instantiation theoretically creates a unique piece of code. For example, when you compile this code:

+
template <typename T>
+const T min(const T a, const T b)
+    { return b < a ? b : a; }
+
+int    i = min<int>(3, 4);
+double d = min<double>(3.0, 4.0);
+

the compiler treats it as if you wrote this:

+
int min(const int a, const int b)
+    { return b < a ? b : a; }
+double min(const double a, const double b) +    { return b < a ? b : a; }
+

Imagine this same effect happening with containers such as list and map and you can see how it is that templates can cause code proliferation.

+

A couple things offset the possibility of code proliferation: inlining and folding. In practice the above 'min' function would be converted to inlined functions by the compiler which occupy only a few CPU instructions. In many of the simplest cases the inlined version actually occupies less code than the code required to push parameters on the stack and execute a function call. And they will execute much faster as well.

+

Code folding (a.k.a. "COMDAT folding", "duplicate stripping", "ICF" / "identical code folding") is a compiler optimization whereby the compiler realizes that two independent functions have compiled to the same code and thus can be reduced to a single function. The Microsoft VC++ compiler (since VS2005), and GCC (v 4.5+) can do these kinds of optimizations on all platforms. This can result, for example, in all templated containers of pointers (e.g. vector<char*>, vector<Widget*>, etc.) to be linked as a single implementation. This folding occurs at a function level and so individual member functions can be folded while other member functions are not. A side effect of this optimization is that you aren't likely to gain much by declaring containers of void* instead of the pointer type actually contained.

+

The above two features reduce the extent of code proliferation, but certainly don't eliminate it. What you need to think about is how much code might be generated vs. what your alternatives are. Containers like vector can often inline completely away, whereas more complicated containers such as map can only partially be inlined. In the case of map, if you need such a container for your Widgets, what alternatives do you have that would be more efficient than instantiating a map? This is up to you to answer.

+

It's important to note that C++ compilers will throw away any templated functions that aren't used, including unused member functions of templated classes. However, some argue that by having many functions available to the user that users will choose to use that larger function set rather than stick with a more restricted set.

+

Also, don't be confused by syntax bloat vs. code bloat. In looking at templated libraries such as EASTL you will notice that there is sometimes a lot of text in the definition of a template implementation. But the actual underlying code is what you need to be concerned about.

+

There is a good Usenet discussion on this topic at: http://groups.google.com/group/comp.lang.c++.moderated/browse_frm/thread/2b00649a935997f5

+

Perf.5 +Don't STL and EASTL containers fragment memory?

+

They only fragment memory if you use them in a way that does so. This is no different from any other type of container used in a dynamic way. There are various solutions to this problem, and EASTL provides additional help as well:

+
    +
  • For vectors, use the reserve function (or the equivalent constructor) to set aside a block of memory for the container. The container will not reallocate memory unless you try to grow beyond the capacity you reserve.
  • +
  • EASTL has "fixed" variations of containers which allow you to specify a fixed block of memory which the container uses for its memory. The container will not allocate any memory with these types of containers and all memory will be cache-friendly due to its locality.
  • +
  • You can assign custom allocators to containers instead of using the default global allocator. You would typically use an allocator that has its own private pool of memory.
  • +
  • Where possible, add all a container's elements to it at once up front instead of adding them over time. This avoids memory fragmentation and increases cache coherency.
  • +
+ +

Perf.6 I don't see container optimizations for equivalent scalar types such as pointer types. Why?

+

Metrowerks (and no other, as of this writing) STL has some container specializations for type +T* which maps them to type void*. The idea is that a user who declares a list of Widget* and a list of Gadget* +will generate only one container: a list of void*. As a result, code generation will be smaller. Often this is +done only in optimized builds, as such containers are harder to view in debug builds due to type information being lost.
+
+The addition of this optimization is under consideration for EASTL, though it might be noted that optimizing +compilers such as VC++ are already capable of recognizing duplicate generated code and folding it automatically +as part of link-time code generation (LTCG) (a.k.a. "whole program optimization"). This has been verified +with VC++, as the following code and resulting disassembly demonstrate:

+
eastl::list<int*>        intPtrList;
+eastl::list<TestObject*> toPtrList;
+
+eastl_size_t n1 = intPtrList.size();
+eastl_size_t n2 = toPtrList.size();
+
+0042D288  lea         edx,[esp+14h]
+0042D28C  call        eastl::list<TestObject>::size (414180h)
+0042D291  push        eax 
+0042D292  lea         edx,[esp+24h]
+0042D296  call        eastl::list<TestObject>::size (414180h)
+

Note that in the above case the compiler folded the two implementations of size() into a single implementation.

+ +

Perf.7 +I've seen some STL's provide a default quick "node allocator" as the default allocator. Why doesn't EASTL do this?

+

Short answer
+
This is a bad, misguided idea.

+

Long answer
+These node allocators implement a heap for all of STL with buckets for various sizes of allocations and implemented fixed-size pools for each of these buckets. These pools are attractive at first because they do well in STL comparison benchmarks, especially when thread safety is disabled. Such benchmarks make it impossible to truly compare STL implementations because you have two different allocators in use and in some cases allocator performance can dominate the benchmark. However, the real problem with these node allocators is that they badly fragment and waste memory. The technical discussion of this topic is outside the scope of this FAQ, but you can learn more about it by researching memory management on the Internet. Unfortunately, the people who implement STL libraries are generally not experts on the topic of memory management. A better approach, especially for game development, is for the user to decide when fixed-size pools are appropriate and use them via custom allocator assignment to containers.

+

Perf.8 Templates sometimes seem to take a long time to compile. What do I do about that? +

+

C++ compilers are generally slower than C compilers, and C++ templates are generally slower to compile than regular C++ code. EASTL has some extra functionality (such as type_traits and algorithm specializations) that is not found in most other template libraries and significantly improves performance and usefulness but adds to the amount of code that needs to be compiled. Ironically, we have a case where more source code generates faster and smaller object code.

+

The best solution to the problem is to use pre-compiled headers, which are available on all modern (~2002+) compilers, such as VC6.0+, GCC 3.2+, and Metrowerks 7.0+. In terms of platforms this means all 2002+ platforms.

+

Some users have been speeding up build times by creating project files that put all the source code in one large .cpp file. This has an effect similar to pre-compiled headers. It can go even faster than pre-compiled headers but has downsides in the way of convenience and portability.

+

Perf.10 +How well does EASTL inline?

+

EASTL is written in such a way as to be easier to inline than typical templated libraries such as STL. How is this so? It is so because EASTL reduces the inlining depth of many functions, particularly the simple ones. In doing so it makes the implementation less "academic" but entirely correct. An example of this is the vector operator[] function, which is implemented like so with Microsoft STL:

+
reference operator[](size_type n) {
+   return *(begin() + n);
+}
+EASTL implements the function directly, like so: +
reference operator[](size_type n) {
+    return *(mpBegin + n);
+}
+Both implementations are correct, but the EASTL implementation will run faster in debug builds, be easier to debug, and will be more likely to be inlined when the usage of this function is within a hierarchy of other functions being inlined. It is not so simple to say that the Microsoft version will always inline in an optimized build, as it could be part of a chain and cause the max depth to be exceeded.
+
+That being said, EASTL appears to inline fairly well under most circumstances, including with GCC, which is the poorest of the compilers in its ability to inline well.
+

Perf.11 +How do I control function inlining?

+

Inlining is an important topic for templated code, as such code often relies on the compiler being able to do good function inlining for maximum performance. GCC, VC++, and Metrowerks are discussed here. We discuss compilation-level inlining and function-level inlining here, though the latter is likely to be of more use to the user of EASTL, as it can externally control how EASTL is inlined. A related topic is GCC's template expansion depth, discussed elsewhere in this FAQ. We provide descriptions of inlining options here but don't currently have any advice on how to best use these with EASTL.

+

Compilation-Level Inlining -- VC++

+

VC++ has some basic functionality to control inlining, and the compiler is pretty good at doing aggressive inlining when optimizing, on all platforms.

+
+

#pragma inline_depth( [0... 255] )

+

Controls the number of times inline expansion can occur by controlling the number of times that a series of function calls can be expanded (from 0 to 255 times). This pragma controls the inlining of functions marked inline and or inlined automatically under the /Ob2 option. The inline_depth pragma controls the number of times a series of function calls can be expanded. For example, if the inline depth is 4, and if A calls B and B then calls C, all three calls will be expanded inline. However, if the inline depth is 2, only A and B are expanded, and C remains as a function call.

+

#pragma inline_recursion( [{on | off}] )

+

Controls the inline expansion of direct or mutually recursive function calls. Use this pragma to control functions marked as inline and or functions that the compiler automatically expands under the /Ob2 option. Use of this pragma requires an /Ob compiler option setting of either 1 or 2. The default state for inline_recursion is off. The inline_recursion pragma controls how recursive functions are expanded. If inline_recursion is off, and if an inline function calls itself (either directly or indirectly), the function is expanded only once. If inline_recursion is on, the function is expanded multiple times until it reaches the value set by inline_depth, the default value of 8, or a capacity limit.

+
+

Compilation-Level Inlining -- GCC

+

GCC has a large set of options to control function inlining. Some options are available only  in GCC 3.0 and later and thus not present on older platforms.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-fno-default-inlineDo not make member functions inline by default merely because they are defined inside the class scope (C++ only). Otherwise, when you specify -O, member functions defined inside class scope are compiled inline by default; i.e., you don't need to add `inline' in front of the member function name.
-fno-inlineDon't pay attention to the inline keyword. Normally this option is used to keep the compiler from expanding any functions inline. Note that if you are not optimizing, no functions can be expanded inline.
-finline-functionsIntegrate all simple functions into their callers. The compiler heuristically decides which functions are simple enough to be worth integrating in this way. If all calls to a given function are integrated, and the function is declared static, then the function is normally not output as assembler code in its own right. Enabled at level -O3.
-finline-limit=nBy default, GCC limits the size of functions that can be inlined. This flag allows the control of this limit for functions that are explicitly marked as inline (i.e., marked with the inline keyword or defined within the class definition in c++). n is the size of functions that can be inlined in number of pseudo instructions (not counting parameter handling). pseudo-instructions are an internal representation of function size. The default value of n is 600. Increasing this value can result in more inlined code at the cost of compilation time and memory consumption. Decreasing usually makes the compilation faster and less code will be inlined (which presumably means slower programs). This option is particularly useful for programs that use inlining heavily such as those based on recursive templates with C++.
+
+ Inlining is actually controlled by a number of parameters, which may be specified individually by using --param name=value. The -finline-limit=n option sets some of these parameters as follows:
+
+ max-inline-insns-single
+    is set to n/2.
+ max-inline-insns-auto
+    is set to n/2.
+ min-inline-insns
+    is set to 130 or n/4, whichever is smaller.
+ max-inline-insns-rtl
+    is set to n.
+
+ See --param below for a documentation of the individual parameters controlling inlining.
-fkeep-inline-functionsEmit all inline functions into the object file, even if they are inlined where used.
--param name=valueIn some places, GCC uses various constants to control the amount of optimization that is done. For example, GCC will not inline functions that contain more than a certain number of instructions. You can control some of these constants on the command-line using the --param option. 
+
+ max-inline-insns-single
+ Several parameters control the tree inliner used in gcc. This number sets the maximum number of instructions (counted in GCC's internal representation) in a single function that the tree inliner will consider for inlining. This only affects functions declared inline and methods implemented in a class declaration (C++). The default value is 450.
+
+ max-inline-insns-auto
+ When you use -finline-functions (included in -O3), a lot of functions that would otherwise not be considered for inlining by the compiler will be investigated. To those functions, a different (more restrictive) limit compared to functions declared inline can be applied. The default value is 90.
+
+ large-function-insns
+ The limit specifying really large functions. For functions larger than this limit after inlining inlining is constrained by --param large-function-growth. This parameter is useful primarily to avoid extreme compilation time caused by non-linear algorithms used by the backend. This parameter is ignored when -funit-at-a-time is not used. The default value is 2700.
+
+ large-function-growth
+ Specifies maximal growth of large function caused by inlining in percents. This parameter is ignored when -funit-at-a-time is not used. The default value is 100 which limits large function growth to 2.0 times the original size.
+
+ inline-unit-growth
+ Specifies maximal overall growth of the compilation unit caused by inlining. This parameter is ignored when -funit-at-a-time is not used. The default value is 50 which limits unit growth to 1.5 times the original size.
+
+ max-inline-insns-recursive
+ max-inline-insns-recursive-auto
+ Specifies maximum number of instructions out-of-line copy of self recursive inline function can grow into by performing recursive inlining. For functions declared inline --param max-inline-insns-recursive is taken into account. For function not declared inline, recursive inlining happens only when -finline-functions (included in -O3) is enabled and --param max-inline-insns-recursive-auto is used. The default value is 450.
+
+ max-inline-recursive-depth
+ max-inline-recursive-depth-auto
+ Specifies maximum recursion depth used by the recursive inlining. For functions declared inline --param max-inline-recursive-depth is taken into account. For function not declared inline, recursive inlining happens only when -finline-functions (included in -O3) is enabled and --param max-inline-recursive-depth-auto is used. The default value is 450.
+
+ inline-call-cost
+ Specify cost of call instruction relative to simple arithmetics operations (having cost of 1). Increasing this cost disqualify inlining of non-leaf functions and at same time increase size of leaf function that is believed to reduce function size by being inlined. In effect it increase amount of inlining for code having large abstraction penalty (many functions that just pass the arguments to other functions) and decrease inlining for code with low abstraction penalty. Default value is 16.
-finline-limit=n By default, GCC limits the size of functions that can be inlined. This flag allows the control of this limit for functions that are explicitly marked as inline (i.e., marked with the inline keyword or defined within the class definition in c++). n is the size of functions that can be inlined in number of pseudo instructions (not counting parameter handling). The default value of n is 600. Increasing this value can result in more inlined code at the cost of compilation time and memory consumption. Decreasing usually makes the compilation faster and less code will be inlined (which presumably means slower programs). This option is particularly useful for programs that use inlining heavily such as those based on recursive templates with C++.
+
+

Inlining is actually controlled by a number of parameters, which may be specified individually by using --param name=value. The -finline-limit=n option sets some of these parameters as follows:

+
+
+
+
max-inline-insns-single
+
is set to n/2.
+
+
max-inline-insns-auto
+
is set to n/2.
+
+
min-inline-insns
+
is set to 130 or n/4, whichever is smaller.
+
+
max-inline-insns-rtl
+
is set to n.
+
+
+
+

See below for a documentation of the individual parameters controlling inlining.

+

Note: pseudo instruction represents, in this particular context, an abstract measurement of function's size. In no way, it represents a count of assembly instructions and as such its exact meaning might change from one release to an another.

+

GCC additionally has the -Winline compiler warning, which emits a warning whenever a function declared as inline was not inlined.

+

Compilation-Level Inlining -- Metrowerks

+

Metrowerks has a number of pragmas (and corresponding compiler settings) to control inlining. These include always_inline, inline_depth, inline_max_size, and inline max_total_size.

+
+

#pragma always_inline on | off | reset

+

Controls the use of inlined functions. If you enable this pragma, the compiler ignores all inlining limits and attempts to inline all functions where it is legal to do so. This pragma is deprecated. Use the inline_depth pragma instead.
+
+ #pragma inline_depth(n)
+ #pragma inline_depth(smart)

+

Controls how many passes are used to expand inline function. Sets the number of passes used to expand inline function calls. The number n is an integer from 0 to 1024 or the smart specifier. It also represents the distance allowed in the call chain from the last function up. For example, if d is the total depth of a call chain, then functions below (d-n) are inlined if they do not exceed the inline_max_size and inline_max_total_size settings which are discussed directly below.
+
+ #pragma inline_max_size(n);
+ #pragma inline_max_total_size(n);

+

The first pragma sets the maximum function size to be considered for inlining; the second sets the maximum size to which a function is allowed to grow after the functions it calls are inlined. Here, n is the number of statements, operands, and operators in the function, which
+ turns out to be roughly twice the number of instructions generated by the function. However, this number can vary from function to function. For the inline_max_size pragma, the default value of n is 256; for the inline_max_total_size pragma, the default value of n is 10000. The smart specifier is the default mode, with four passes where the passes 2-4 are limited to small inline functions. All inlineable functions are expanded if inline_depth is set to 1-1024.

+
+

Function-Level Inlining -- VC++

+
+

To force inline usage under VC++, you use this:

+

    __forceinline void foo(){ ... }

+

It should be noted that __forceinline has no effect if the compiler is set to disable inlining. It merely tells the compiler that when inlining is enabled that it shouldn't use its judgment to decide if the function should be inlined but instead to always inline it.
+
+ To disable inline usage under VC++, you need to use this:

+

    #pragma inline_depth(0) // Disable inlining.
+     void foo() { ... }
+     #pragma inline_depth()  // Restore default.

+

The above is essentially specifying compiler-level inlining control within the code for a specific function.

+
+

Function-Level Inlining -- GCC / Metrowerks

+
+

To force inline usage under GCC 3.1+, you use this:

+

    inline void foo() __attribute__((always_inline)) { ... }
+        
or
+     inline __attribute__((always_inline)) void foo() { ... }

+

To disable inline usage under GCC 3+, you use this:

+

    void foo() __attribute__((noinline)) { ... }
+
        or
+     inline __attribute__((noinline)) void foo() { ... }

+

EABase has some wrappers for this, such as EA_FORCE_INLINE.

+
+

Perf.12 + C++ / EASTL seems to bloat my .obj files much more than C does. +

+

There is no need to worry. The way most C++ compilers compile templates, they compile all seen template code into the current .obj module, which results in larger .obj files and duplicated template code in multiple .obj files. However, the linker will (and in fact must) select only a single version of any given function for the application, and these linked functions will usually be located contiguously.

+

Additionally, the debug information for template definitions is usually larger than that for non-templated C++ definitions, which itself is sometimes larger than C definitions due to name decoration.

+

Perf.13 +What are the best compiler settings for EASTL?

+

We will discuss various aspects of this topic here. As of this writing, more EASTL research on this topic has been done on Microsoft compiler platforms (e.g. Win32) than GCC platforms. Thus currently this discussion focuses on VC++ optimization. Some of the concepts are applicable to GCC, though. EASTL has been successfully compiled and tested (the EASTL unit test) on our major development platforms with the highest optimization settings enabled, including GCC's infamous -O3 level.
+
+Optimization Topics

+
    +
  • Function inlining.
  • +
  • Optimization for speed vs. optimization for size.
  • +
  • Link-time code generation (LTCG).
  • +
  • Profile-guided optimization (PGO).
  • +
+

Function inlining
+ EASTL is a template library and inlining is important for optimal speed. Compilers have various options for enabling inlining and those options are discussed in this FAQ in detail. Most users will want to enable some form of inlining when compiling EASTL and other templated libraries. For users that are most concerned about the compiler's inlining increasing code size may want to try the 'inline only functions marked as inline' compiler option. Here is a table of normalized results from the benchmark project (Win32 platform):
+

+ + + + + + + + + + + + + + + + + + + + + +
Inlining DisabledInline only 'inline'Inline any
Application size100K86K86K
Execution time1007575
+


+The above execution times are highly simplified versions of the actual benchmark data but convey a sense of the general average behaviour that can be expected. In practice, simple functions such as vector::operator[] will execute much faster with inlining enabled but complex functions such as map::insert may execute no faster with inlining enabled.

+

Optimization for Speed / Size
+ Optimization for speed results in the compiler inlining more code than it would otherwise. This results in the inlined code executing faster than if it was not inlined. As mentioned above, basic function inlining can result in smaller code as well as faster code, but after a certain point highly inlined code becomes greater in size than less inlined code and the performance advantages of inlining start to lessen. The EASTL Benchmark project is a medium sized application that is about 80% templated and thus acts as a decent measure of the practical tradeoff between speed and size. Here is a table of normalized results from the benchmark project (Windows platform):
+

+ + + + + + + + + + + + + + + + + + + + + + + + +
SizeSpeedSpeed + LTCGSpeed + LTCG + PGO
Application size80K100K98K98K
Execution time100908375
+


+What the above table is saying is that if you are willing to have your EASTL code be 20% larger, it will be 10% faster. Note that it doesn't mean that your app will be 20% larger, only the templated code in it like EASTL will be 20% larger.

+

Link-time code generation (LTCG)
+ LTCG is a mechanism whereby the compiler compiles the application as if it was all in one big .cpp file instead of separate .cpp files that don't see each other. Enabling LTCG optimizations is done by simply setting some compiler and linker settings and results in slower link times. The benchmark results are presented above and for the EASTL Benchmark project show some worthwhile improvement.

+

Profile-guided optimization (PGO)
+ PGO is a mechanism whereby the compiler uses profiling information from one or more runs to optimize the compilation and linking of an application. Enabling PGO optimizations is done by setting some linker settings and doing some test runs of the application, then linking the app with the test run results. Doing PGO optimizations is a somewhat time-consuming task but the benchmark results above demonstrate that for the EASTL Benchmark project that PGO is worth the effort.

+

Problems

+

Prob.1 +I'm getting screwy behavior in sorting algorithms or sorted containers. What's wrong?

+

It may be possible that you are seeing floating point roundoff problems. Many STL algorithms require object comparisons to act consistently. However, floating point values sometimes compare differently between uses because in one situation a value might be in 32 bit form in system memory, whereas in another situation that value might be in an FPU register with a different precision. These are difficult problems to track down and aren't the fault of EASTL or whatever similar library you might be using. There are various solutions to the problem, but the important thing is to find a way to force the comparisons to be consistent.

+

The code below was an example of this happening, whereby the object pA->mPos was stored in system memory while pB->mPos was stored in a register and comparisons were inconsistent and a crash ensued.
+

+
class SortByDistance : public binary_function<WorldTreeObject*, WorldTreeObject*, bool>
+{
+private:
+    Vector3 mOrigin;
+
+public:
+    SortByDistance(Vector3 origin) {
+        mOrigin = origin;
+    }
+
+    bool operator()(WorldTreeObject* pA, WorldTreeObject* pB) const {
+         return (((WorldObject*)pA)->mPos - mOrigin).GetLength()
+              < (((WorldObject*)pB)->mPos - mOrigin).GetLength();
+    }
+};
+

Another thing to watch out for is the following mistake:
+

+
struct ValuePair
+{
+    uint32_t a;
+    uint32_t b;
+};
+
+// Improve speed by casting the struct to uint64_t
+bool operator<(const ValuePair& vp1, const ValuePair& vp2)
+    { return *(uint64_t*)&vp1 < *(uint64_t*)&vp2; }
+

The problem is that the ValuePair struct has 32 bit alignment but the comparison assumes 64 bit alignment. The code above has been observed to crash on the PowerPC 64-based machines. The resolution is to declare ValuePair as having 64 bit alignment.
+ +

+

Prob.2 I am getting compiler warnings (e.g. C4244, C4242 or C4267) that make no sense. Why?

+One cause of this occurs with VC++ when you have code compiled with the /Wp64 (detect 64 bit portability issues) option. This causes pointer types to have a hidden flag called __w64 attached to them by the compiler. So 'ptrdiff_t' is actually known by the compiler as '__w64 int', while 'int' is known by the compilers as simply 'int'. A problem occurs here when you use templates. For example, let's say we have this templated function +
template <typename T>
+T min(const T a, const T b) {
+    return b < a ? b : a;
+}
+If you compile this code: +
ptrdiff_t a = min(ptrdiff_t(0), ptrdiff_t(1));
+int       b = min((int)0, (int)1);
+You will get the following warning for the second line, which is somewhat nonsensical: +
warning C4244: 'initializing' : conversion from 'const ptrdiff_t' to 'int', possible loss of data
+

This could probably be considered a VC++ bug, but in the meantime you have little choice but to ignore the warning or disable it.

+ +

Prob.3 +I am getting compiler warning C4530, which complains about exception handling and "unwind semantics." What gives?

+

VC++ has a compiler option (/EHsc) that allows you to enable/disable exception handling stack unwinding but still enable try/catch. This is useful because it can save a lot in the way of code generation for your application. Disabling stack unwinding will decrease the size of your executable on at least the Win32 platform by 10-12%.
+
+If you have stack unwinding disabled, but you have try/catch statements, VC++ will generate the following warning:

+
warning C4530: C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+

As of EASTL v1.0, this warning has been disabled within EASTL for EASTL code. However, non-EASTL code such as std STL code may still cause this warning to be triggered. In this case there is not much you can do about this other than to disable the warning.

+

Prob.4 + Why are tree-based EASTL containers hard to read with a +debugger?

+

Short answer
+
Maximum performance and design mandates.

+

Long answer
+You may notice that when you have a tree-based container (e.g. set, map)  in the debugger that it isn't automatically able to recognize the tree nodes as containing instances of your contained object. You can get the debugger to do what you want with casting statements in the debug watch window, but this is not an ideal solution. The reason this is happening is that node-based containers always use an anonymous node type as the base class for container nodes. This is primarily done for performance, as it allows the node manipulation code to exist as a single non-templated library of functions and it saves memory because containers will have one or two base nodes as container 'anchors' and you don't want to allocate a node of the size of the user data when you can just use a base node. See list.h for an example of this and some additional in-code documentation on this.

+

Additionally, EASTL has the design mandate that an empty container constructs no user objects. This is both for performance reasons and because it doing so would skew the user's tracking of object counts and might possibly break some expectation the user has about object lifetimes.

+

Currently this debug issue exists only with tree-based containers. Other node-based containers such as list and slist use a trick to get around this problem in debug builds.

+

See Debug.2 for more. +

Prob.5 +The EASTL source code is sometimes rather complicated looking. Why is that?

+

Short answer
+Maximum performance.

+

Long answer
+ EASTL uses templates, type_traits, iterator categories, redundancy reduction, and branch reduction in order to achieve optimal performance. A side effect of this is that there are sometimes a lot of template parameters and multiple levels of function calls due to template specialization. The ironic thing about this is that this makes the code (an optimized build, at least) go faster, not slower. In an optimized build the compiler will see through the calls and template parameters and generate a direct optimized inline version.

+

As an example of this, take a look at the implementation of the copy algorithm in algorithm.h. If you are copying an array of scalar values or other trivially copyable values, the compiler will see how the code directs this to the memcpy function and will generate nothing but a memcpy in the final code. For non-memcpyable data types the compiler will automatically understand this and do the right thing.

+

EASTL's primary objective is maximal performance, and it has been deemed worthwhile to make the code a little less obvious in order to achieve this goal. Every case where EASTL does something in an indirect way is by design and usually this is for the purpose of achieving the highest possible performance.

+

Prob.6 +When I get compilation errors, they are very long and complicated looking. What do I do?

+

Assuming the bugs are all worked out of EASTL, these errors really do indicate that you have something wrong. EASTL is intentionally very strict about types, as it tries to minimize the chance of user errors. Unfortunately, there is no simple resolution to the problem of long compiler errors other than to deal with them. On the other hand, once you've dealt with them a few times, you tend to realize that most of the time they are the same kinds of errors and they become easier to decipher.

+

Top five approaches to dealing with long compilation errors:

+
    +
  1. Look at the line where the compilation error occurred and ignore the text of the error and just look at obvious things that might be wrong.
  2. +
  3. Consider the most common typical causes of templated compilation errors and consider if any of these might be your problem. Usually one of them is.
  4. +
  5. Either read through the error (it's not as hard as it may look on the surface) or copy the error to a text file and remove the extraneous parts.
  6. +
  7. Compile the code under GCC instead of MSVC, as GCC warnings and errors tend to be more helpful than MSVC's. Possibly also consider compiling an isolated version under Comeau C++'s free online compiler at www.comeaucomputing.com or the Dinkumware online compiler at http://dinkumware.com/exam/. 
  8. +
  9. Try using an STL filter (http://www.bdsoft.com/tools/stlfilt.html) which automatically boils down template errors to simpler forms. We haven't tried this yet with EASTL. Also there is the more generic TextFilt (http://textfilt.sourceforge.net/).
  10. +
+

Top five causes of EASTL compilation errors:

+
    +
  1. const-correctness. Perhaps a quarter of container template errors are due to the user not specifying const correctly.
  2. +
  3. Missing hash function. hash_map, hash_set, etc. require that you either specify a hash function or one exists for your class. See functional.h for examples of declarations of hash functions for common data types.
  4. +
  5. Missing operators. Various containers and algorithms require that certain operators exist for your contained classes. For example, list requires that you can test contained objects for equivalence (i.e. operator==), while map requires that you can test contained objects for "less-ness" (operator <). If you define a Widget class and don't have a way to compare two Widgets, you will get errors when trying to put them into a map.
  6. +
  7. Specifying the wrong data type. For example, it is a common mistake to forget that when you insert into a map, you need to insert a pair of objects and not just your key or value type.
  8. +
  9. Incorrect template parameters. When declaring a template instantiation (e.g. map<int, int, less<int> >) you simply need to get the template parameters correct. Also note that when you have ">>" next to each other that you need to separate them by one space (e.g. "> >").
  10. +
+

Prob.7 + Templates sometimes seem to take a long time to compile. What do I do about that? +

+

C++ compilers are generally slower than C compilers, and C++ templates are generally slower to compile than regular C++ code. EASTL has some extra functionality (such as type_traits and algorithm specializations) that is not found in most other template libraries and significantly improves performance and usefulness but adds to the amount of code that needs to be compiled. Ironically, we have a case where more source code generates faster and smaller object code.

+

The best solution to the problem is to use pre-compiled headers, which are available on all modern (~2002+) compilers, such as VC6.0+, GCC 3.2+, and Metrowerks 7.0+. In terms of platforms this means all 2002+ platforms.

+

Some users have been speeding up build times by creating project files that put all the source code in one large .cpp file. This has an effect similar to pre-compiled headers. It can go even faster than pre-compiled headers but has downsides in the way of convenience and portability.

+

Prob.8 +I get the compiler error: "template instantiation depth exceeds maximum of 17. use -ftemplate-depth-NN to increase the maximum". 

+

This is a GCC error that occurs when a templated function calls a templated function which calls a templated function, etc. past a depth of 17. You can use the GCC command line argument -ftemplate-depth-40 (or some other high number) to get around this. As note below, the syntax starting with GCC 4.5 has changed slightly.

+

The primary reason you would encounter this with EASTL is type traits that are used by algorithms. The type traits library is a (necessarily) highly templated set of types and functions which adds at most about nine levels of inlining. The copy and copy_backward algorithms have optimized pathways that add about four levels of inlining. If you have just a few more layers on top of that in container or user code then the default limit of 17 can be exceeded. We are investigating ways to reduce the template depth in the type traits library, but only so much can be done, as most compilers don't support type traits natively. Metrowerks is the current exception.

+

From the GCC documentation:

+
-ftemplate-depth-n
+
+Set the maximum instantiation depth for template classes to n. 
+A limit on the template instantiation depth is needed to detect 
+endless recursions during template class instantiation. ANSI/ISO 
+C++ conforming programs must not rely on a maximum depth greater than 17.
+
+ +

Note that starting with GCC 4.5 the syntax is -ftemplate-depth=N instead of -ftemplate-depth-n.

+

Prob.9 + I'm getting errors about min and max while compiling.

+

You need to define NOMINMAX under VC++ when this occurs, as it otherwise defines min and max macros that interfere. There may be equivalent issues with other compilers. Also, VC++ has a specific <minmax.h> header file which defines min and max macros but which doesn't pay attention to NOMINMAX and so in that case there is nothing to do but not include that file or to undefine min and max. minmax.h is not a standard file and its min and max macros are not standard C or C++ macros or functions.

+

Prob.10 +C++ / EASTL seems to bloat my .obj files much more than C does.

+

There is no need to worry. The way most C++ compilers compile templates, they compile all +seen template code into the current .obj module, which results in larger .obj files and duplicated template code in +multiple .obj files. However, the linker will (and must) select only a single version of any given function for the +application, and these linked functions will usually be located contiguously.

+

Prob.11 + I'm getting compiler errors regarding placement operator new +being previously defined.

+

This can happen if you are attempting to define your own versions of placement new/delete. The C++ language standard does not allow the user to override these functions. Section 18.4.3 of the standard states:

+

     Placement forms
+     1. These functions are reserved, a C++ program may not define functions that displace the versions in the Standard C++ library.

+

You may find that #defining __PLACEMENT_NEW_INLINE seems to fix your problems under VC++, but it can fail under some circumstances and is not portable and fails with other compilers, which don't have an equivalent workaround.

+

Prob.12 +I'm getting errors related to wchar_t string  functions such as wcslen().

+

EASTL requires that the following EABase-related items be so. If not, then EASTL gets confused about what types it can pass to wchar_t related functions.

+
    +
  • The #define EA_WCHAR_SIZE is equal to sizeof(wchar_t).
  • +
  • If sizeof(wchar_t) == 2, then char16_t is typedef'd to wchar_t.
  • +
  • If sizeof(wchar_t) == 4, then char32_t is typedef'd to wchar_t.
  • +
+

EABase v2.08 and later automatically does this for most current generation and all next generation platforms. With GCC 2.x, the user may need to predefine EA_WCHAR_SIZE to the appropriate value, due to limitations with the GCC compiler. Note that GCC defaults to sizeof(wchar_t) ==4, but it can be changed to 2 with the -fshort_wchar compiler command line argument. If you are using EASTL without EABase, you will need to make sure the above items are correctly defined.

+

Prob.13 + I'm getting compiler warning C4619: there is no warning number Cxxxx +(e.g. C4217).

+

Compiler warning C4619 is a VC++ warning which is saying that the user is attempting to enable or disable a warning which the compiler doesn't recognize. This warning only occurs if the user has the compiler set to enable warnings that are normally disabled, regardless of the warning level. The problem, however, is that there is no easy way for user code to tell what compiler warnings any given compiler version will recognize. That's why Microsoft normally disables this warning.

+

The only practical solution we have for this is for the user to disable warning 4619 globally or on a case-by-case basis. EA build systems such as nant/framework 2's eaconfig will usually disable 4619. In general, global enabling of 'warnings that are disabled by default' often results in quandaries such as this.

+

Prob.14 +My stack-based fixed_vector is not respecting the object alignment requirements.

+

EASTL fixed_* containers rely on the compiler-supplied alignment directives, such as that implemented by EA_PREFIX_ALIGN. This is normally a good thing because it allows the memory to be local with the container. However, as documented by Microsoft at http://msdn2.microsoft.com/en-us/library/83ythb65(VS.71).aspx, this doesn't work for stack variables. The two primary means of working around this are:

+
    +
  • Use something like AlignedObject<> from the EAStdC package's EAAllocator.h file.
  • +
  • Use eastl::vector with a custom allocator and have it provide aligned memory. EASTL automatically recognizes that the objects are aligned and will call the aligned version of your allocator allocate() function. You can get this aligned memory from the stack, if you need it, somewhat like how AlignedObject<> works.
  • +
+

Prob.15 I am getting compiler errors when using GCC under XCode (Macintosh/iphone).

+

The XCode environment has a compiler option which causes it to evaluate include directories recursively. So if you specify /a/b/c as an include directory, it will consider all directories underneath c to also be include directories. This option is enabled by default, though many XCode users disable it, as it is a somewhat dangerous option. The result of enabling this option with EASTL is that <EASTL/string.h> is used by the compiler when you say #include <string.h>. The solution is to disable this compiler option. It's probably a good idea to disable this option anyway, as it typically causes problems for users yet provides minimal benefits.

+

Prob.16 I am getting linker errors about Vsnprintf8 or Vsnprintf16.

+

EASTL requires the user to provide a function called Vsnprintf8 if the string::sprintf function is used. vsnprintf is not a standard C function, but most C standard libraries provide some form of it, though in some ways their implementations differ, especially in what the return value means. Also, most implementations of vsnprintf are slow, mostly due to mutexes related to locale functionality. And you can't really use vendor vsnprintf on an SPU due to the heavy standard library size. EASTL is stuck because it doesn't want to depend on something with these problems. EAStdC provides a single, consistent, fast, lightweight, yet standards-conforming, implementation in the form of Vsnprintf(char8_t*, ...), but EASTL can't have a dependency on EAStdC. So the user must provide an implementation, even if all it does is call EAStdC's Vsnprintf or the vendor vsnprintf for that matter.

+

Example of providing Vsnprintf8 via EAStdC:

+
#include <EAStdC/EASprintf.h>
+   
+int Vsnprintf8(char8_t* pDestination, size_t n, const char8_t* pFormat, va_list arguments)
+{
+    return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+}
+
+int Vsnprintf16(char16_t* pDestination, size_t n, const char16_t* pFormat, va_list arguments)
+{
+    return EA::StdC::Vsnprintf(pDestination, n, pFormat, arguments);
+}
+

Example of providing Vsnprintf8 via C libraries:

+
#include <stdio.h>
+   
+int Vsnprintf8(char8_t* p, size_t n, const char8_t* pFormat, va_list arguments)
+{
+    #ifdef _MSC_VER
+        return vsnprintf_s(p, n, _TRUNCATE, pFormat, arguments);
+    #else
+        return vsnprintf(p, n, pFormat, arguments);
+    #endif
+}
+
+int Vsnprintf16(char16_t* p, size_t n, const char16_t* pFormat, va_list arguments)
+{
+    #ifdef _MSC_VER
+        return vsnwprintf_s(p, n, _TRUNCATE, pFormat, arguments);
+    #else
+        return vsnwprintf(p, n, pFormat, arguments); // Won't work on Unix because its libraries implement wchar_t as int32_t.
+    #endif
+}
+

Prob.17 I am getting compiler errors about UINT64_C or UINT32_C.

+

This is usually an order-of-include problem that comes about due to the implementation of __STDC_CONSTANT_MACROS in C++ Standard libraries. The C++ <stdint.h> header file defines UINT64_C only if __STDC_CONSTANT_MACROS has been defined by the user or the build system; the compiler doesn't automatically define it. The failure you are seeing occurs because user code is #including a system header before #including EABase and without defining __STDC_CONSTANT_MACROS itself or globally. EABase defines __STDC_CONSTANT_MACROS and #includes the appropriate system header. But if the system header was already previously #included and __STDC_CONSTANT_MACROS was not defined, then UINT64_C doesn't get defined by anybody.

+

The real solution that the C++ compiler and standard library wants is for the app to globally define __STDC_CONSTANT_MACROS itself in the build.

+

Prob.18 I am getting a crash with a global EASTL container.

+

This is usually due to the compiler's lack of support for global (and static) C++ class instances. The crash is happening because the global variable exists but its constructor was not called on application startup and its member data is zeroed bytes. To handle this you need to manually initialize such variables. There are two primary ways:

+

Failing code:

+
eastl::list<int> gIntList; // Global variable.
+   
+void DoSomething()
+{
+    gIntList.push_back(1); // Crash. gIntList was never constructed.
+}
+

Declaring a pointer solution:

+
eastl::list<int>* gIntList = NULL;
+   
+void DoSomething()
+{
+    if(!gIntList) // Or move this to an init function.
+        gIntList = new eastl::list<int>;
+
+    gIntList->push_back(1); // Success
+}
+

Manual constructor call solution:

+
eastl::list<int> gIntList;
+   
+void InitSystem()
+{
+    new(&gIntList) eastl::list<int>;
+}
+
+void DoSomething()
+{
+    gIntList.push_back(1); // Success
+}
+

Prob.19 Why doesn't EASTL support passing NULL string functions?

+

+

The primary argument is to make functions safer for use. Why crash on NULL pointer access when you can make the code safe? That's a good argument. The counter argument, which EASTL currently makes, is:

+
    +
  • It breaks consistency with the C++ STL library and C libraries, which require strings to be valid.
  • +
  • It makes the code slower and bigger for all users, though few need NULL checks.
  • +
  • The specification for how to handle NULL is simple for some cases but not simple for others. Operator < below is a case where the proper handling of it in a consistent way is not simple, as all comparison code (<, >, ==, !=, >=, <=) in EASTL must universally and consistently handle the case where either or both sides are NULL. A NULL string seems similar to an empty string, but doesn't always work out so simply.
  • +
  • What about other invalid string pointers? NULL is merely one invalid value of many, with its only distinction being that sometimes it's intentionally NULL (as opposed to being NULL due to not being initialized).
  • +
  • How and where to implement the NULL checks in such a way as to do it efficiently is not always simple, given that public functions call public functions.
  • +
  • It's arguable (and in fact the intent of the C++ standard library) that using pointers that are NULL is a user/app mistake. If we really want to be safe then we should be using string objects for everything. You may not entirely buy this argument in practice, but on the other hand one might ask why is the caller of EASTL using a NULL pointer in the first place? The answer of course is that somebody gave it to him.
  • +
+

Debug

+

Debug.1 +How do I set the VC++ debugger to display EASTL container data with tooltips?

+

See Cont.9

+

Debug.2 +How do I view containers if the visualizer/tooltip support is not present?

+ +

Here is a table of answers about how to manually inspect containers in the debugger.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 ContainerApproach
slist
+ fixed_slist
slist is a singly-linked list. Look at the slist mNode variable. You can walk the list by looking at mNode.mpNext, etc.
list
+ fixed_list
list is a doubly-linked list. Look at the list mNode variable. You can walk the list forward by looking at mNode.mpNext, etc. and backward by looking at mpPrev, etc.
intrusive_list
+ intrusive_slist
Look at the list mAnchor node. This lets you walk forward and backward in the list via mpNext and mpPrev.
arrayView the array mValue member in the debugger. It's simply a C style array.
vector
+ fixed_vector
View the vector mpBegin value in the debugger. If the string is long, use ", N" to limit the view length, as with someVector.mpBegin, 32
vector_set
+ vector_multiset
+ vector_map
+ vector_multimap
These are containers that are implemented as a sorted vector, deque, or array. They are searched via a standard binary search. You can view them the same way you view a vector or deque.
deque
deque is implemented as an array of arrays, where the arrays implement successive equally-sized segments of the deque. The mItBegin deque member points to the deque begin() position.
bitvectorLook at the bitvector mContainer variable. If it's a vector, then see vector above.
bitsetLook at the bitset mWord variable. The bitset is nothing but one or more uint32_t mWord items.
set
+ multiset
+ fixed_set
+ fixed_multiset
The set containers are implemented as a tree of elements. The set mAnchor.mpNodeParent points to the top of the tree; the mAnchor.mpNodeLeft points to the far left node of the tree (set begin()); the mAnchor.mpNodeRight points to the right of the tree (set end()).
map
+ multimap
+ fixed_map
+ fixed_multimap
The map containers are implemented as a tree of pairs, where pair.first is the map key and pair.second is the map value. The map mAnchor.mpNodeParent points to the top of the tree; the mAnchor.mpNodeLeft points to the far left node of the tree (map begin()); the mAnchor.mpNodeRight points to the right of the tree (map end()).
hash_map
+ hash_multimap
+ fixed_hash_map
+ fixed_hash_multimap
hash tables in EASTL are implemented as an array of singly-linked lists. The array is the mpBucketArray member. Each element in the list is a pair, where the first element of the pair is the map key and the second is the map value.
intrusive_hash_map
+ intrusive_hash_multimap
+ intrusive_hash_set
+ intrusive_hash_multiset
intrusive hash tables in EASTL are implemented very similarly to regular hash tables. See the hash_map and hash_set entries for more info.
hash_set
+ hash_multiset
+ fixed_hash_set
+ fixed_hash_map
hash tables in EASTL are implemented as an array of singly-linked lists. The array is the mpBucketArray member.
basic_string
+ fixed_string
+ fixed_substring
View the string mpBegin value in the debugger. If the string is long, use ", N" to limit the view length, as with someString.mpBegin, 32
heap
A heap is an array of data (e.g. EASTL vector) which is organized in a tree whereby the highest priority item is array[0], The next two highest priority items are array[1] and [2]. Underneath [1] in priority are items [3] and [4], and underneath item [2] in priority are items [5] and [6]. etc.
stack
View the stack member c value in the debugger. That member will typically be a list or deque.
queue
View the queue member c value in the debugger. That member will typically be a list or deque.
priority_queue
View the priority_queue member c value in the debugger. That member will typically be a vector or deque which is organized as a heap. See the heap section above for how to view a heap.
smart_ptrView the mpValue member.
+
+

Debug.3 +The EASTL source code is sometimes rather complicated looking. Why is that?

+

Short answer
+Maximum performance.

+

Long answer
+ EASTL uses templates, type_traits, iterator categories, redundancy reduction, and branch reduction in order to achieve optimal performance. A side effect of this is that there are sometimes a lot of template parameters and multiple levels of function calls due to template specialization. The ironic thing about this is that this makes the code (an optimized build, at least) go faster, not slower. In an optimized build the compiler will see through the calls and template parameters and generate a direct optimized inline version.

+

As an example of this, take a look at the implementation of the copy implementation in algorithm.h. If you are copying an array of scalar values or other trivially copyable values, the compiler will see how the code directs this to the memcpy function and will generate nothing but a memcpy in the final code. For non-memcpyable data types the compiler will automatically understand how to do the right thing.

+

EASTL's primary objective is maximal performance, and it has been deemed worthwhile to make the code a little less obvious in order to achieve this goal. Every case where EASTL does something in an indirect way is by design and usually this is for the purpose of achieving the highest possible performance.

+

Debug.4 +When I get compilation errors, they are very long and complicated looking. What do I do?

+

Assuming the bugs are all worked out of EASTL, these errors really do indicate that you have something wrong. EASTL is intentionally very strict about types, as it tries to minimize the chance of user errors. Unfortunately, there is no simple resolution to the problem of long compiler errors other than to deal with them. On the other hand, once you've dealt with them a few times, you tend to realize that most of the time they are the same kinds of errors and you can find and fix them quickly.
+
+Top five approaches to dealing with long compilation errors:

+
    +
  1. Look at the line where the compilation error occurred and ignore the text of the error and just look at obvious things that might be wrong.
  2. +
  3. Consider the most common typical causes of templated compilation errors and consider if any of these might be your problem. Usually one of them are.
  4. +
  5. Either read through the error (it's not as hard as it may look on the surface) or copy the error to a text file and remove the extraneous text until the core error is revealed.
  6. +
  7. Compile the code under GCC instead of MSVC, as GCC warnings and errors tend to be more helpful than MSVC's. Possibly also consider compiling an isolated version under Comeau C++'s free online compiler at www.comeaucomputing.com or the Dinkumware online compiler at http://dinkumware.com/exam/. 
  8. +
  9. Try using an STL filter (http://www.bdsoft.com/tools/stlfilt.html) which automatically boils down template errors to simpler forms. We haven't tried this yet with EASTL. Also there is the more generic TextFilt (http://textfilt.sourceforge.net/).
  10. +
+

Top five causes of EASTL compilation errors:

+
    +
  1. const-correctness. Perhaps a quarter of container template errors are due to the user not specifying const correctly.
  2. +
  3. Missing hash function. hash_map, hash_set, etc. require that you either specify a hash function or one exists for your class. See functional.h for examples of declarations of hash functions for common data types.
  4. +
  5. Missing operators. Various containers and algorithms require that certain operators exist for your contained classes. For example, list requires that you can test contained objects for equivalence (i.e. operator==), while map requires that you can test contained objects for "less-ness" (operator <). If you define a Widget class and don't have a way to compare two Widgets, you will get errors when trying to put them into a map.
  6. +
  7. Specifying the wrong data type. For example, it is a common mistake to forget that when you insert into a map, you need to insert a pair of objects and not just your key or value type.
  8. +
  9. Incorrect template parameters. When declaring a template instantiation (e.g. map<int, int, less<int> >) you simply need to get the template parameters correct. Also note that when you have ">>" next to each other that you need to separate them by one space (e.g. "> >").
  10. +
+

Debug.5 +How do I measure hash table balancing?

+

The following functionality lets you spelunk hash container layout.

+
    +
  • There is the load_factor function which tells you the overall hashtable load, but doesn't tell you if a load is unevenly distributed.
  • +
  • You can control the load factor and thus the automated bucket redistribution with set_load_factor.
  • +
  • The local_iterator begin(size_type n) and local_iterator end(size_type) functions lets you iterate each bucket individually. You can use this to examine the elements in a bucket.
  • +
  • You can use the above to get the size of any bucket, but there is also simply the bucket_size(size_type n) function.
  • +
  • The bucket_count function tells you the count of buckets. So with this you can completely visualize the layout of the hash table.
  • +
  • There is also iterator find_by_hash(hash_code_t c), for what it's worth.
  • +
+

The following function draws an ASCII bar graph of the hash table for easy visualization of bucket distribution:

+
+

#include <EASTL/hash_map.h>
+ #include <EASTL/algorithm.h>
+ #include <stdio.h>
+
+ template <typename HashTable>
+ void VisualizeHashTableBuckets(const HashTable& h)
+ {
+    eastl_size_t bucketCount       = h.bucket_count();
+    eastl_size_t largestBucketSize = 0;
+
+    for(eastl_size_t i = 0; i < bucketCount; i++)
+        largestBucketSize = eastl::max_alt(largestBucketSize, h.bucket_size(i));
+
+    YourPrintFunction("\n --------------------------------------------------------------------------------\n");
+
+    for(eastl_size_t i = 0; i < bucketCount; i++)
+    {
+        const eastl_size_t k = h.bucket_size(i) * 80 / largestBucketSize;
+
+        char buffer[16];
+        sprintf(buffer, "%3u|", (unsigned)i);
+        YourPrintFunction(buffer);
+
+        for(eastl_size_t j = 0; j < k; j++)
+            YourPrintFunction("*");
+
+        YourPrintFunction("\n");
+    }
+
+    YourPrintFunction(" --------------------------------------------------------------------------------\n");
+ }

+
+

This results in a graph that looks like the following (with one horizontal bar per bucket). This hashtable has a large number of collisions in each of its 10 buckets. +

+

   ------------------------------------------------------
+ 0|********************************************
+ 1|************************************************
+ 2|***************************************
+ 3|********************************************
+ 4|*****************************************************
+ 5|*************************************************
+ 6|****************************************
+ 7|***********************************************
+ 8|********************************************
+ 9|**************************************
+ 10|********************************************
+   -----------------------------------------------------
+

+

Containers

+

Cont.1 +Why do some containers have "fixed" versions (e.g. fixed_list) but others(e.g. deque) don't have fixed versions?

+

Recall that fixed containers are those that are implemented via a single contiguous block of memory and don't use a general purpose heap to allocate memory from. For example, fixed_list is a list container that implements its list by a user-configurable fixed block of memory. Such containers have an upper limit to how many items they can hold, but have the advantage of being more efficient with memory use and memory access coherency.

+

The reason why some containers don't have fixed versions is that such functionality doesn't make sense with these containers. Containers which don't have fixed versions include:

+
array, deque, bitset, stack, queue, priority_queue,
+intrusive_list, intrusive_hash_map, intrusive_hash_set,
+intrusive_hash_multimap, intrusive_hash_multiset,
+vector_map, vector_multimap, vector_set, vector_multiset.
+

Some of these containers are adapters which wrap other containers and thus there is no need for a fixed version because you can just wrap a fixed container. In the case of intrusive containers, the user is doing the allocation and so there are no memory allocations. In the case of array, the container is a primitive type which doesn't allocate memory. In the case of deque, its primary purpose for being is to dynamically resize and thus the user would likely be better off using a fixed_vector.

+

Cont.2 +Can I mix EASTL with standard C++ STL?

+

This is possible to some degree, though the extent depends on the implementation of C++ STL. One of the things that limits interoperability is something called iterator categories. Containers and algorithms recognize iterator types via their category, and STL iterator categories are not recognized by EASTL and vice versa.

+

Things that you definitely can do:

+
    +
  • #include both EASTL and standard STL headers from the same .cpp file.
  • +
  • Use EASTL containers to hold STL containers.
  • +
  • Construct an STL reverse_iterator from an EASTL iterator.
  • +
  • Construct an EASTL reverse_iterator from an STL iterator.
  • +
+

Things that you probably will be able to do, though a given std STL implementation may prevent it: +

+
    +
  • Use STL containers in EASTL algorithms.
  • +
  • Use EASTL containers in STL algorithms.
  • +
  • Construct or assign to an STL container via iterators into an EASTL container.
  • +
  • Construct or assign to an EASTL container via iterators into an STL container.
  • +
+

Things that you would be able to do if the given std STL implementation is bug-free: +

+
    +
  • Use STL containers to hold EASTL containers. Unfortunately, VC7.x STL has a confirmed bug that prevents this. Similarly, STLPort versions prior to v5 have a similar bug.
  • +
+

Things that you definitely can't do: +

+
    +
  • Use an STL allocator directly with an EASTL container (though you can use one indirectly).
  • +
  • Use an EASTL allocator directly with an STL container (though you can use one indirectly).
  • +
+

Cont.3 +Why are there so many containers?

+

EASTL has a large number of container types (e.g. vector, list, set) and often has a number of variations of given types (list, slist, intrusive_list, fixed_list). The reason for this is that each container is tuned to a specific need and there is no single container that works for all needs. The more the user is concerned about squeezing the most performance out of their system, the more the individual container variations become significant. It's important to note that having additional container types generally does not mean generating additional code or code bloat. Templates result in generated code regardless of what templated class they come from, and so for the most part you get optimal performance by choosing the optimal container for your needs.

+

Cont.4 +Don't STL and EASTL containers fragment memory?

+

They only fragment memory if you use them in a way that does so. This is no different from any other type of container used in a dynamic way. There are various solutions to this problem, and EASTL provides additional help as well:

+
    +
  • For vectors, use the reserve function (or the equivalent constructor) to set aside a block of memory for the container. The container will not reallocate memory unless you try to grow beyond the capacity you reserve.
  • +
  • EASTL has "fixed" variations of containers which allow you to specify a fixed block of memory which the container uses for its memory. The container will not allocate any memory with these types of containers and all memory will be cache-friendly due to its locality.
  • +
  • You can assign custom allocators to containers instead of using the default global allocator. You would typically use an allocator that has its own private pool of memory.
  • +
  • Where possible, add all of a container's elements to it at once up front instead of adding them over time. This avoids memory fragmentation and increases cache coherency.
  • +
+

Cont.5 + I don't see container optimizations for equivalent scalar types such +as pointer types. Why?

+

Metrowerks (and no other, as of this writing) STL has some container specializations for type T* which maps them to type void*. The idea is that a user who declares a list of Widget* and a list of Gadget* will generate only one container: a list of void*. As a result, code generation will be smaller. Often this is done only in optimized builds, as such containers are harder to view in debug builds due to type information being lost.
+
+The addition of this optimization is under consideration for EASTL, though it might be noted that optimizing compilers such as VC++ are already capable of recognizing duplicate generated code and folding it automatically as part of link-time code generation (LTCG) (a.k.a. "whole program optimization"). This has been verified with VC++, as the following code and resulting disassembly demonstrate:

+
eastl::list<int*>        intPtrList;
+eastl::list<TestObject*> toPtrList;
+
+eastl_size_t n1 = intPtrList.size();
+eastl_size_t n2 = toPtrList.size();
+
+0042D288  lea         edx,[esp+14h]
+0042D28C  call        eastl::list<TestObject>::size (414180h)
+0042D291  push        eax 
+0042D292  lea         edx,[esp+24h]
+0042D296  call        eastl::list<TestObject>::size (414180h)
+Note that in the above case the compiler folded the two implementations of size() into a single implementation.
+

Cont.6 +What about alternative container and algorithm implementations (e.g. treaps, skip lists, avl trees)?

+

EASTL chooses to implement some alternative containers and algorithms and not others. It's a matter of whether or not the alternative provides truly complementary or improved functionality over existing containers. The following is a list of some implemented and non-implemented alternatives and the rationale behind each:

+

Implemented:

+
    +
  • intrusive_list, etc. -- Saves memory and improves cache locality.
  • +
  • vector_map, etc. -- Saves memory and improves cache locality.
  • +
  • ring_buffer -- Useful for some types of operations and has no alternative.
  • +
  • shell_sort -- Useful sorting algorithm.
  • +
  • sparse_matrix -- Useful for some types of operations and has no alternative.
  • +
+

Not implemented: +

+
    +
  • skip lists (alternative to red-black tree) -- These use more memory and usually perform worse than rbtrees.
  • +
  • treap (alternative to red-black tree) -- These are easier and smaller than rbtrees, but perform worse.
  • +
  • avl tree (alternative to red-black tree) -- These have slightly better search performance than rbtrees, but significantly worse insert/remove performance.
  • +
  • btree (alternative to red-black tree) --  These are no better than rbtrees.
  • +
+

If you have an idea of something that should be implemented, please suggest it or even provide at least a prototypical implementation.

+

Cont.7 +Why are tree-based EASTL containers hard to read with a debugger?

+

Short answer
+
Maximum performance and design mandates.

+

Long answer
+You may notice that when you have a tree-based container (e.g. set, map)  in the debugger that it isn't automatically able to recognize the tree nodes as containing instances of your contained object. You can get the debugger to do what you want with casting statements in the debug watch window, but this is not an ideal solution. The reason this is happening is that node-based containers always use an anonymous node type as the base class for container nodes. This is primarily done for performance, as it allows the node manipulation code to exist as a single non-templated library of functions and it saves memory because containers will have one or two base nodes as container 'anchors' and you don't want to allocate a node of the size of the user data when you can just use a base node. See list.h for an example of this and some additional in-code documentation on this.

+

Additionally, EASTL has the design mandate that an empty container constructs no user objects. This is both for performance reasons and because it doing so would skew the user's tracking of object counts and might possibly break some expectation the user has about object lifetimes.

+

Currently this debug issue exists only with tree-based containers. Other node-based containers such as list and slist use a trick to get around this problem in debug builds.

+

Cont.8 +How do I assign a custom allocator to an EASTL container?

+

There are two ways of doing this:

+
    +
  1. Use the set_allocator function that is present in each container.
  2. +
  3. Specify a new allocator type via the Allocator template parameter that is present in each container.
  4. +
+

For item #1, EASTL expects that you provide an instance of an allocator of the type that EASTL recognizes. This is simple but has the disadvantage that all such allocators must be of the same class. The class would need to have C++ virtual functions in order to allow a given instance to act differently from another instance.

+

For item #2, you specify that the container use your own allocator class. The advantage of this is that your class can be implemented any way you want and doesn't require virtual functions for differentiation from other instances. Due to the way C++ works your class would necessarily have to use the same member function names as the default allocator class type. In order to make things easier, we provide a skeleton allocator here which you can copy and fill in with your own implementation.

+
class custom_allocator
+{
+public:
+    custom_allocator(const char* pName = EASTL_NAME_VAL("custom allocator"))
+    {
+        #if EASTL_NAME_ENABLED
+            mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+        #endif
+
+        // Possibly do something here.
+    }
+
+    custom_allocator(const custom_allocator& x, const char* pName = EASTL_NAME_VAL("custom allocator"))
+    {
+        #if EASTL_NAME_ENABLED
+            mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+        #endif
+
+        // Possibly copy from x here.
+    }
+
+    ~custom_allocator()
+    {
+        // Possibly do something here.
+    }
+
+    custom_allocator& operator=(const custom_allocator& x)
+    {
+        // Possibly copy from x here.
+        return *this;
+    }
+
+    void* allocate(size_t n, int flags = 0)
+    {
+        // Implement the allocation here.
+    }
+
+    void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
+    {
+        // Implement the allocation here.
+    }
+
+    void deallocate(void* p, size_t n)
+    {
+        // Implement the deallocation here.
+    }
+
+    const char* get_name() const
+    {
+        #if EASTL_NAME_ENABLED
+            return mpName;
+        #else
+            return "custom allocator";
+        #endif
+    }
+
+    void set_name(const char* pName)
+    {
+        #if EASTL_NAME_ENABLED
+            mpName = pName;
+        #endif
+    }
+
+protected:
+    // Possibly place instance data here.
+
+    #if EASTL_NAME_ENABLED
+        const char* mpName; // Debug name, used to track memory.
+    #endif
+};
+
+
+inline bool operator==(const custom_allocator& a, const custom_allocator& b)
+{
+    // Provide a comparison here.
+}
+
+inline bool operator!=(const custom_allocator& a, const custom_allocator& b)
+{
+    // Provide a negative comparison here.
+}
+

Here's an example of how to use the above custom allocator:

+
// Declare a Widget list and have it default construct.
+list<Widget, custom_allocator> widgetList;
+
+// Declare a Widget list and have it construct with a copy of some global allocator.
+list<Widget, custom_allocator> widgetList2(gSomeGlobalAllocator);
+
+// Declare a Widget list and have it default construct, but assign
+// an underlying implementation after construction.
+list<Widget, custom_allocator> widgetList;
+widgetList.get_allocator().mpIAllocator = new WidgetAllocatorImpl;
+ +

Cont.9 How do I set the VC++ debugger to display EASTL container data with tooltips?

+

Visual Studio supports this via the AutoExp.dat file, an example of which is present with this documentation.

+

Sometimes the AutoExp.dat doesn't seem to work. Avery Lee's explanation:

+
+

If I had to take a guess, the problem is most likely in the cast to the concrete node type. These are always tricky because, for some strange reason, the debugger is whitespace sensitive with regard to specifying template types. You might try manually checking one of the routines of the specific map instantiation and checking that the placement of whitespace and const within the template expression still matches exactly. In some cases the compiler uses different whitespace rules depending on the value type which makes it impossible to correctly specify a single visualizer – this was the case for eastl::list<>, for which I was forced to include sections for both cases. The downside is that you have a bunch of (error) entries either way.

+
+

Cont.10 +How do I use a memory pool with a container?

+

Using custom memory pools is a common technique for decreasing memory fragmentation and increasing memory cache locality. EASTL gives you the flexibility of defining your own memory pool systems for containers. There are two primary ways of doing this:

+
    +
  • Assign a custom allocator to a container. eastl::fixed_pool provides an implementation.
  • +
  • Use one of the EASTL fixed containers, such as fixed_list.
  • +
+

Custom Allocator
+In the custom allocator case, you will want to create a memory pool and assign it to the container. For purely node-based containers such as list, slist, map, set, multimap, and multiset, your pool simply needs to be able to allocate list nodes. Each of these containers has a member typedef called node_type which defines the type of node allocated by the container. So if you have a memory pool that has a constructor that takes the size of pool items and the count of pool items, you would do this (assuming that MemoryPool implements the Allocator interface):

+
typedef list<Widget, MemoryPool> WidgetList;           // Declare your WidgetList type.
+
+MemoryPool myPool(sizeof(WidgetList::node_type), 100); // Make a pool of 100 Widget nodes.
+WidgetList myList(&myPool);                            // Create a list that uses the pool.
+

In the case of containers that are array-based, such as vector and basic_string, memory pools don't work very well as these containers work on a realloc-basis instead of by adding incremental nodes. What we want to do with these containers is assign a sufficient block of memory to them and reserve() the container's capacity to the size of the memory.

+

In the case of mixed containers which are partly array-based and partly node-based, such as hash containers and deque, you can use a memory pool for the nodes but will need a single array block to supply for the buckets (hash containers and deque both use a bucket-like system).

+

You might consider using eastl::fixed_pool as such an allocator, as it provides such functionality and allows the user to provide the actual memory used for the pool. Here is some example code:

+
char buffer[256];
+
+list<Widget, fixed_pool> myList;
+myList.get_allocator().init(buffer, 256);
+

Fixed Container
+In the fixed container case, the container does all the work for you. To use a list which implements a private pool of memory, just declare it like so:

+
fixed_list<Widget, 100> fixedList; // Declare a fixed_list that can hold 100 Widgets
+

Cont.11 +How do I write a comparison (operator<()) for a struct that contains two or more members? 

+

See Algo.2

+

Cont.12 +Why doesn't container X have member function Y?

+

Why don't the list or vector containers have a find() function? Why doesn't the vector container have a sort() function? Why doesn't the string container have a mid() function? These are common examples of such questions.

+

The answer usually boils down to two reasons:

+
    +
  • The functionality exists in a more centralized location elsewhere, such as the algorithms.
  • +
  • The functionality can be had by using other member functions.
  • +
+

In the case of find and sort functions not being part of containers, the find algorithm and sort algorithm are centralized versions that apply to any container. Additionally, the algorithms allow you to specify a sub-range of the container on which to apply the algorithm. So in order to find an element in a list, you would do this:
+

+
list<int>::iterator i = find(list.begin(), list.end(), 3);
+

And in order to sort a vector, you would do this:
+

+
quick_sort(v.begin(), v.end());   // Sort the entire array. +
+quick_sort(&v[3], &v[8]);         // Sort the items at the indexes in the range of [3, 8).
+

In the case of functionality that can be had by using other member functions, +note that EASTL follows the philosophy that duplicated functionality should not exist in a container, +with exceptions being made for cases where mistakes and unsafe practices commonly happen if the given +function isn't present. In the case of string not having a mid function, this is because there is a +string constructor that takes a sub-range of another string. So to make a string out of the middle of +another, you would do this:

+
string strMid(str, 3, 5); // Make a new string of the characters from the source range of [3, 3+5).
+

It might be noted that the EASTL string class is unique among EASTL containers in that it sometimes violates the minimum functionality rule. This is so because the std C++ string class similarly does so and EASTL aims to be compatible.

+

Cont.13 +How do I search a hash_map of strings via a char pointer efficiently? If I use map.find("hello") it creates a temporary string, which is inefficient.

+

The problem is illustrated with this example:

+
map<string, Widget> swMap;
+  ...
+map<string, Widget>::iterator it = swMap.find("blue"); // A temporary string object is created here.
+

In this example, the find function expects a string object and not a string literal and so (silently!) creates a temporary string object for the duration of the find. There are two solutions to this problem: +

+
    +
  • Make the map a map of char pointers instead of string objects. Don't forget to write a custom compare or else the default comparison function will compare pointer values instead of string contents.
  • +
  • Use the EASTL hash_map::find_as function, which allows you to find an item in a hash container via an alternative key than the one the hash table uses.
  • +
+

Cont.14 +Why are set and hash_set iterators const (i.e. const_iterator)?

+

The situation is illustrated with this example:

+
set<int> intSet;
+
+intSet.insert(1);
+set<int>::iterator i = intSet.begin();
+*i = 2; // Error: iterator i is const.
+

In this example, the iterator is a regular iterator and not a const_iterator, yet the compiler gives an error when trying to change the iterator value. The reason this is so is that a set is an ordered container and changing the value would make it out of order. Thus, set and multiset iterators are always const_iterators. If you need to change the value and are sure the change will not alter the container order, use const_cast or declare mutable member variables for your contained object. This resolution is the one blessed by the C++ standardization committee.

+ +

Cont.15 +How do I prevent my hash container from re-hashing?

+

If you want to make a hashtable never re-hash (i.e. increase/reallocate its bucket count), +call set_max_load_factor with a very high value such as 100000.f.

+

Similarly, you can control the bucket growth factor with the rehash_policy function. +By default, when buckets reallocate, they reallocate to about twice their previous count. +You can control that value as with the example code here:

+
hash_set<int> hashSet;
+hashSet.rehash_policy().mfGrowthFactor = 1.5f;
+ +

+ Cont.16 +Which uses less memory, a map or a hash_map? +

+

A hash_map will virtually always use less memory. A hash_map will use an average of two pointers per stored element, while a map uses three pointers per stored element.

+

Cont.17 +How do I write a custom hash function?

+

You can look at the existing hash functions in functional.h, but we provide a couple examples here.

+

To write a specific hash function for a Widget class, you would do this:

+
struct WidgetHash {
+    size_t operator()(const Widget& w) const
+        { return w.id; }
+};
+
+hash_set<Widget, WidgetHash> widgetHashSet;
+

To write a generic (templated) hash function for a set of similar classes (in this case that have an id member), you would do this:
+

+
template <typename T>
+struct GeneralHash {
+    size_t operator()(const T& t) const
+        { return t.id; }
+};
+
+hash_set<Widget, GeneralHash<Widget> > widgetHashSet;
+hash_set<Dogget, GeneralHash<Dogget> > doggetHashSet;
+ +

Cont.18 +How do I write a custom compare function for a map or set?

+

The sorted containers require that an operator< exists for the stored values or that the user provide a suitable custom comparison function. A custom comparison function can be implemented like so:
+

+
struct WidgetLess {
+    bool operator()(const Widget& w1, const Widget& w2) const
+        { return w1.id < w2.id; }
+};
+
+set<Widget, WidgetLess> wSet;
+

It's important that your comparison function must be consistent in its behaviour, else the container will either be unsorted or a crash will occur. This concept is called "strict weak ordering."

+

Cont.19 +How do I force my vector or string capacity down to the size of the container?

+

You can simply use the set_capacity() member function which is present in both vector and string. This is a function that is not present in std STL vector and string functions.

+
eastl::vector<Widget> x;
+x.set_capacity();   // Shrink x's capacity to be equal to its size.
+
+eastl::vector<Widget> x;
+x.set_capacity(0);  // Completely clear x.
+

To compact your vector or string in a way that would also work with std STL you need to do the following.

+

How to shrink a vector's capacity to be equal to its size:

+
std::vector<Widget> x;
+std::vector<Widget>(x).swap(x); // Shrink x's capacity.
+How to completely clear a std::vector (size = 0, capacity = 0, no allocation):
+
std::vector<Widget> x;
+std::vector<Widget>().swap(x); // Completely clear x.
+
+

Cont.20 +How do I iterate a container while (selectively) removing items from it?

+

All EASTL containers have an erase function which takes an iterator as an argument and returns an iterator to the next item. Thus, you can erase items from a container while iterating it like so:

+
set<int> intSet;
+set<int>::iterator i = intSet.begin();
+while(i != intSet.end()) +{ + if(*i & 1)  // Erase all odd integers from the container. +        i = intSet.erase(i); +    else +        ++i; +}
+

Cont.21 +How do I store a pointer in a container?

+

The problem with storing pointers in containers is that clearing the container will not +free the pointers automatically. There are two conventional resolutions to this problem:

+
    +
  • Manually free pointers when removing them from containers. 
  • +
  • Store the pointer as a smart pointer instead of a "raw" pointer.
  • +
+

The advantage of the former is that it makes the user's intent obvious and prevents the possibility of smart pointer "thrashing" with some containers. The disadvantage of the former is that it is more tedious and error-prone.

+

The advantage of the latter is that your code will be cleaner and will always be error-free. The disadvantage is that it is perhaps slightly obfuscating and with some uses of some containers it can cause smart pointer thrashing, whereby a resize of a linear container (e.g. vector) can cause shared pointers to be repeatedly incremented and decremented with no net effect.

+

It's important that you use a shared smart pointer and not an unshared one such as C++ auto_ptr, as the latter will result in crashes upon linear container resizes. Here we provide an example of how to create a list of smart pointers:

+
list< shared_ptr<Widget> > wList;
+
+wList.push_back(shared_ptr<Widget>(new Widget));
+wList.pop_back(); // The Widget will be freed.
+

Cont.22 +How do I make a union of two containers? difference? intersection?

+

The best way to accomplish this is to sort your container (or use a sorted container such as set) and then apply the set_union, set_difference, or set_intersection algorithms.

+

Cont.23 +How do I override the default global allocator? 

+

There are multiple ways to accomplish this. The allocation mechanism is defined in EASTL/internal/config.h and in allocator.h/cpp. Overriding the default global allocator means overriding these files, overriding what these files refer to, or changing these files outright. Here is a list of things you can do, starting with the simplest:

+
    +
  • Simply provide the following versions of operator new (which EASTL requires, actually):
    +     void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
    +     void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
  • +
  • Predefine the config.h macros for EASTLAlloc, EASTLFree, etc. See config.h for this.
  • +
  • Override config.h entirely via EASTL_USER_CONFIG_HEADER. See config.h for this.
  • +
  • Provide your own version of allocator.h/cpp
  • +
  • Provide your own version of config.h. 
  • +
+

If you redefine the allocator class, you can make it work however you want.

+

Note that config.h defines EASTLAllocatorDefault, which returns the default allocator instance. As documented in config.h, this is not a global allocator which implements all container allocations but is the allocator that is used when EASTL needs to allocate memory internally. There are very few cases where EASTL allocates memory internally, and in each of these it is for a sensible reason that is documented to behave as such.

+

Cont.24 +How do I do trick X with the string container?

+

There seem to be many things users want to do with strings. Perhaps the most commonly requested EASTL container extensions are string class shortcut functions. While some of these requests are being considered, we provide some shortcut functions here.
+
+find_and_replace

+
template <typename String>
+void find_and_replace(String& s, const typename String::value_type* pFind, const typename String::value_type* pReplace)    
+{
+    for(size_t i = 0; (i = s.find(pFind)) != String::npos; )
+        s.replace(i, eastl::CharStrlen(pFind), pReplace);
+}
+
+Example:
+    find_and_replace(s, "hello", "hola");
+

trim front (multiple chars)

+
template <typename String>
+void trim_front(String& s, const typename String::value_type* pValues)
+{
+    s.erase(0, s.find_first_not_of(pValues));
+}
+
+Example:
+    trim_front(s, " \t\n\r");
+

trim back (multiple chars)

+
template <typename String>
+void trim_back(String& s, const typename String::value_type* pValues)
+{
+    s.resize(s.find_last_not_of(pValues) + 1);
+}
+
+Example:
+    trim_back(s, " \t\n\r");
+

prepend

+
template <typename String>
+void prepend(String& s, const typename String::value_type* p)
+{
+    s.insert(0, p);
+}
+
+Example:
+    prepend(s, "log: ");
+

begins_with +

+
template <typename String>
+bool begins_with(const String& s, const typename String::value_type* p)
+{
+    return s.compare(0, eastl::CharStrlen(p), p) == 0;
+}
+
+Example:
+    if(begins_with(s, "log: ")) ...
+

ends_with

+
template <typename String>
+bool ends_with(const String& s, const typename String::value_type* p)
+{
+    const typename String::size_type n1 = s.size();
+    const typename String::size_type n2 = eastl::CharStrlen(p);
+    return ((n1 >= n2) && s.compare(n1 - n2, n2, p) == 0);
+}
+
+Example:
+    if(ends_with(s, "test.")) ...
+

tokenize
+Here is a simple tokenization function that acts very much like the C strtok function. 

+
template <typename String>
+size_t tokenize(const String& s, const typename String::value_type* pDelimiters,
+                String* resultArray, size_t resultArraySize)
+{
+    size_t n = 0;
+    typename String::size_type lastPos = s.find_first_not_of(pDelimiters, 0);
+    typename String::size_type pos     = s.find_first_of(pDelimiters, lastPos);
+
+    while((n < resultArraySize) && ((pos != String::npos) || (lastPos != String::npos)))
+    {
+        resultArray[n++].assign(s, lastPos, pos - lastPos);
+        lastPos = s.find_first_not_of(pDelimiters, pos);
+        pos     = s.find_first_of(pDelimiters, lastPos);
+    }
+
+    return n;
+}
+
+Example:
+   string resultArray[32];
+tokenize(s, " \t", resultArray, 32);
+ +

Cont.25 How do EASTL smart pointers compare to Boost smart pointers? 

+

EASTL's smart pointers are nearly identical to Boost (including all that crazy member template and dynamic cast functionality in shared_ptr), but are not using the Boost source code. EA legal has already stated that it is fine to have smart pointer classes with the same names and functionality as those present in Boost. EA legal specifically looked at the smart pointer classes in EASTL for this. There are two differences between EASTL smart pointers and Boost smart pointers:

+
    +
  • EASTL smart pointers don't have thread safety built-in. It was deemed that this is too much overhead and that thread safety is something best done at a higher level. By coincidence the C++ library proposal to add shared_ptr also omits the thread safety feature. FWIW, I put a thread-safe shared_ptr in EAThread, though it doesn't attempt to do all the fancy member template things that Boost shared_ptr does. Maybe I'll add that some day if people care.
  • +
+
    +
  • EASTL shared_ptr object deletion goes through a deletion object instead of through a virtual function interface. 95% of the time this makes no difference (aside from being more efficient), but the primary case where it matters is when you have shared_ptr<void> and assign to it something like "new Widget". The problem is that shared_ptr<void> doesn't know what destructor to call and so doesn't call a destructor unless you specify a custom destructor object as part of the template specification. I don't know what to say about this one, as it is less safe, but forcing everybody to have the overhead of additional templated classes and virtual destruction functions doesn't seem to be in the spirit of high performance or lean game development.
  • +
+

There is the possibility of making a shared_ptr_boost which is completely identical to Boost shared_ptr. So perhaps that will be done some day.

+

Cont.26 +How do you forward-declare an EASTL container?

+

Here are some examples of how to do this:

+
namespace eastl
+{
+    template <typename T, typename Allocator> class basic_string;
+    typedef basic_string<char, allocator> string8;   // Forward declare EASTL's string8 type.
+
+    template <typename T, typename Allocator> class vector;
+    typedef vector<char, allocator> CharArray;
+
+    template <typename Value, typename Hash, typename Predicate, typename Allocator, bool bCacheHashCode> class hash_set;
+
+    template <typename Key, typename T, typename Compare, typename Allocator> class map;
+}
+

The forward declaration can be used to declare a pointer or reference to such a class. It cannot be used to declare an instance of a class or refer to class data, static or otherwise. Nevertheless, forward declarations for pointers and references are useful for reducing the number of header files a header file needs to include.

+

Cont.27 +How do I make two containers share a memory pool?

+

EASTL (and std STL) allocators are specified by value semantics and not reference semantics. Value semantics is more powerful (because a value can also be a reference, but not the other way around), but is not always what people expect if they're used to writing things the other way.

+

Here is some example code:

+
struct fixed_pool_reference
{
public:
fixed_pool_reference()
{
mpFixedPool = NULL;
}

fixed_pool_reference(eastl::fixed_pool& fixedPool)
{
mpFixedPool = &fixedPool;
}

fixed_pool_reference(const fixed_pool_reference& x)
{
mpFixedPool = x.mpFixedPool;
}

fixed_pool_reference& operator=(const fixed_pool_reference& x)
{
mpFixedPool = x.mpFixedPool;
return *this;
}

void* allocate(size_t /*n*/, int /*flags*/ = 0)
{
return mpFixedPool->allocate();
}

void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
{
return mpFixedPool->allocate();
}

void deallocate(void* p, size_t /*n*/)
{
return mpFixedPool->deallocate(p);
}

const char* get_name() const
{
return "fixed_pool_reference";
}

void set_name(const char* /*pName*/)
{
}

protected:
friend bool operator==(const fixed_pool_reference& a, const fixed_pool_reference& b);
friend bool operator!=(const fixed_pool_reference& a, const fixed_pool_reference& b);

eastl::fixed_pool* mpFixedPool;
}; + +inline bool operator==(const fixed_pool_reference& a, const fixed_pool_reference& b) +{ + return (a.mpFixedPool == b.mpFixedPool);
} + +inline bool operator!=(const fixed_pool_reference& a, const fixed_pool_reference& b) +{ + return (a.mpFixedPool != b.mpFixedPool); +}
+

Example usage of the above:

+
typedef eastl::list<int, fixed_pool_reference> IntList;
+
+IntList::node_type buffer[2];
+eastl::fixed_pool  myPool(buffer, sizeof(buffer), sizeof(IntList::node_type), 2);
+
+IntList myList1(myPool);
+IntList myList2(myPool);
+           
+myList1.push_back(37);
+myList2.push_back(39);
+

Cont.28 +Can I use a std (STL) allocator with EASTL?

+

No. EASTL allocators are similar in interface to std STL allocators, but not 100% compatible. If it was possible to make them compatible with std STL allocators but also match the design of EASTL then compatibility would exist. The primary reasons for lack of compatibility are:

+
    +
  • EASTL allocators have a different allocate function signature.
  • +
  • EASTL allocators have as many as four extra required functions: ctor(name), get_name(), set_name(), allocate(size, align, offset).
  • +
  • EASTL allocators have an additional allocate function specifically for aligned allocations, as listed directly above.
  • +
+

What are the requirements of classes stored in containers?

+

Class types stored in containers must have:

+
    +
  • a public copy constructor
  • +
  • a public assignment operator
  • +
  • a public destructor
  • +
  • an operator < that compares two such classes (sorted containers only).
  • +
  • an operator == that compares two such classes (hash containers only).
  • +
+

Recall that the compiler generates basic versions of these functions for you when you don't implement them yourself, so you can omit any of the above if the compiler-generated version is sufficient.

+

For example, the following code will act incorrectly, because the user forgot to implement an assignment operator. The compiler-generated assignment operator will assign the refCount value, which the user doesn't want, and which will be called by the vector during resizing.

+
struct NotAPod
+{
+   NotAPod(const NotAPod&) {} // Intentionally don't copy the refCount 
+  int refCount; // refCounts should not be copied between NotAPod instances. +}; + +eastl::vector<NotAPod> v;
+

Algorithms

+

Algo.1 + I'm getting screwy behavior in sorting algorithms or sorted +containers. What's wrong?

+

It is possible that you are seeing floating point roundoff problems. Many STL algorithms require object comparisons to act consistently. However, floating point values sometimes compare differently between uses because in one situation a value might be in 32 bit form in system memory, whereas in another situation that value might be in an FPU register with a different precision. These are difficult problems to track down and aren't the fault of EASTL or whatever similar library you might be using. There are various solutions to the problem, but the important thing is to find a way to force the comparisons to be consistent.

+

The code below was an example of this happening, whereby the object pA->mPos was stored in system memory while pB->mPos was stored in a register and comparisons were inconsistent and a crash ensued.

+
class SortByDistance : public binary_function<WorldTreeObject*, WorldTreeObject*, bool>
+{
+private:
+    Vector3 mOrigin;
+
+public:
+    SortByDistance(Vector3 origin) {
+        mOrigin = origin;
+    }
+
+    bool operator()(WorldTreeObject* pA, WorldTreeObject* pB) const {
+        return (((WorldObject*)pA)->mPos - mOrigin).GetLength()
+             < (((WorldObject*)pB)->mPos - mOrigin).GetLength();
+    }
+};
+ +

Algo.2 +How do I write a comparison (operator<()) for a struct that contains two or more members? 

+

For a struct with two members such as the following:

+
struct X {
+    Blah m1;
+    Blah m2;
+};
+

You would write the comparison function like this:

+
bool operator<(const X& a, const X& b) {
+    return (a.m1 == b.m1) ? (a.m2 < b.m2) : (a.m1 < b.m1);
+}
+

or, using only operator < but more instructions:

+
bool operator<(const X& a, const X& b) {
+    return (a.m1 < b.m1) || (!(b.m1 < a.m1) && (a.m2 < b.m2));
+}
+

For a struct with three members, you would have:

+
bool operator<(const X& a, const X& b) {
+    if(a.m1 != b.m1)
+        return (a.m1 < b.m1);
+    if(a.m2 != b.m2)
+        return (a.m2 < b.m2);
+    return (a.mType < b.mType);
+}
+

And a somewhat messy implementation if you wanted to use only operator <.

+

Note also that you can use the above technique to implement operator < for spatial types such as vectors, points, and rectangles. You would simply treat the members of the struct as an array of values and ignore the fact that they have spatial meaning. All operator < cares about is that things order consistently.

+
bool operator<(const Point2D& a, const Point2D& b) {
+    return (a.x == b.x) ? (a.y < b.y) : (a.x < b.x);
+}
+

Algo.3 +How do I sort something in reverse order?

+

Normally sorting puts the lowest value items first in the sorted range. You can change this by simply reversing the comparison. For example:
+

+
sort(intVector.begin(), intVector.end(), greater<int>());
+

It's important that you use operator > instead of >=. The comparison function must return false for every case where values are equal.

+

Algo.4 +I'm getting errors about min and max while compiling.

+

You need to define NOMINMAX under VC++ when this occurs, as it otherwise defines min and max macros that interfere. There may be equivalent issues with other compilers. Also, VC++ has a specific <minmax.h> header file which defines min and max macros but which doesn't pay attention to NOMINMAX and so in that case there is nothing to do but not include that file or to undefine min and max. minmax.h is not a standard file and its min and max macros are not standard C or C++ macros or functions.

+

Algo.5 +Why don't algorithms take a container as an argument instead of iterators? A container would be more convenient.

+

Having algorithms that use containers instead of iterators would reduce functionality with no increase in performance. This is because the use of iterators allows for the application of algorithms to sub-ranges of containers and allows for the application of algorithms to containers that aren't formal C++ objects, such as C-style arrays.

+

Providing additional algorithms that use containers would introduce redundancy with respect to the existing algorithms that use iterators.

+

Algo.6 +Given a container of pointers, how do I find an element by value (instead of by pointer)?

+

Functions such as find_if help you find a T element in a container of Ts. But if you have a container of pointers such as vector<Widget*>, these functions will enable you to find an element that matches a given Widget* pointer, but they don't let you find an element that matches a given Widget object.

+

You can write your own iterating 'for' loop and compare values, or you can use a generic function object to do the work if this is a common task:

+
template<typename T>
+struct dereferenced_equal
+{
+    const T& mValue;
+
+    dereferenced_equal(const T& value) : mValue(value) { }     
+    bool operator==(const T* pValue) const { return *pValue == mValue; }
+};
+
+...
+
+find_if(container.begin(), container.end(), dereferenced_equal<Widget>(someWidget));
+ +

Algo.7 +When do stored objects need to support operator < vs. when do they need to support operator ==?

+

Any object which is sorted needs to have operator < defined for it, implicitly via operator < or explicitly via a user-supplied Compare function. Sets and map containers require operator <, while sort, binary search, and min/max algorithms require operator <.

+

Any object which is compared for equality needs to have operator == defined for it, implicitly via operator == or explicitly via a user-supplied BinaryPredicate function. Hash containers require operator ==, while many of the algorithms other than those mentioned above for operator < require operator ==.

+

Some algorithms and containers require neither < nor ==. Interestingly, no algorithm or container requires both < and ==.

+

Algo.8 How do I sort via pointers or array indexes instead of objects directly?

+

Pointers

+
vector<TestObject>  toArray;
+vector<TestObject*> topArray;
+
+for(eastl_size_t i = 0; i < 32; i++)
+   toArray.push_back(TestObject(rng.RandLimit(20)));
+for(eastl_size_t i = 0; i < 32; i++) // This needs to be a second loop because the addresses might change in the first loop due to container resizing.
+   topArray.push_back(&toArray[i]);
+
+struct TestObjectPtrCompare
+{
+    bool operator()(TestObject* a, TestObject* b)
+        { return a->mX < b->mX; }
+};
+
+quick_sort(topArray.begin(), topArray.end(), TestObjectPtrCompare());
+

Array indexes

+
vector<TestObject>   toArray;
+vector<eastl_size_t> toiArray;
+
+for(eastl_size_t i = 0; i < 32; i++)
+{
+    toArray.push_back(TestObject(rng.RandLimit(20)));
+    toiArray.push_back(i);
+}
+
+struct TestObjectIndexCompare
+{
+    vector<TestObject>* mpArray;
+
+    TestObjectIndexCompare(vector<TestObject>* pArray) : mpArray(pArray) { }
+    TestObjectIndexCompare(const TestObjectIndexCompare& x) : mpArray(x.mpArray){ }
+    TestObjectIndexCompare& operator=(const TestObjectIndexCompare& x) { mpArray = x.mpArray; return *this; }
+
+    bool operator()(eastl_size_t a, eastl_size_t b)
+       { return (*mpArray)[a] < (*mpArray)[b]; }
+};
+
+quick_sort(toiArray.begin(), toiArray.end(), TestObjectIndexCompare(&toArray));
+
+

Array indexes (simpler version using toArray as a global variable)

+
vector<TestObject>   toArray;
+vector<eastl_size_t> toiArray;
+
+for(eastl_size_t i = 0; i < 32; i++)
+{
+    toArray.push_back(TestObject(rng.RandLimit(20)));
+    toiArray.push_back(i);
+}
+
+struct TestObjectIndexCompare
+{
+    bool operator()(eastl_size_t a, eastl_size_t b)
+       { return toArray[a] < toArray[b]; }
+};
+
+quick_sort(toiArray.begin(), toiArray.end(), TestObjectIndexCompare());
+

Iterators

+

Iter.1 +What's the difference between iterator, const iterator, and const_iterator?

+

An iterator can be modified and the item it points to can be modified.
+A const iterator cannot be modified, but the items it points to can be modified.
+A const_iterator can be modified, but the items it points to cannot be modified.
+A const const_iterator cannot be modified, nor can the items it points to.

+

This situation is much like with char pointers:

+
+ + + + + + + + + + + + + + + + + + + + + + + +
Iterator typePointer equivalent
iteratorchar*
const iteratorchar* const
const_iteratorconst char*
const const_iteratorconst char* const
+
+

Iter.2 How do I tell from an iterator what type of thing it is iterating?

+

Use the value_type typedef from iterator_traits, as in this example

+
template <typename Iterator>
+void DoSomething(Iterator first, Iterator last)
+{
+    typedef typename iterator_traits<Iterator>::value_type;
+
+    // use value_type
+}
+

Iter.3 +How do I iterate a container while (selectively) removing items from it?

+

All EASTL containers have an erase function which takes an iterator as an +argument and returns an iterator to the next item. Thus, you can erase items from a container +while iterating it like so:

+
set<int> intSet;
+set<int>::iterator i = intSet.begin();
+
+while(i != intSet.end())
+{
+    if(*i & 1) // Erase all odd integers from the container.
+        i = intSet.erase(i);
+    else
+        ++i;
+}
+

Iter.4 +What is an insert_iterator?

+

An insert_iterator is a utility class which is like an iterator except that when you assign a value to it, the insert_iterator inserts the value into the container (via insert()) and increments the iterator. Similarly, there are front_insert_iterator and back_insert_iterator, which are similar to insert_iterator except that assigning a value to them causes them to call push_front and push_back, respectively, on the container. These utilities may seem slightly abstract, but they have uses in generic programming.
+

+
+End of document
+
+
+
+
+ + diff --git a/doc/html/EASTL Glossary.html b/doc/html/EASTL Glossary.html new file mode 100644 index 0000000..bd4b865 --- /dev/null +++ b/doc/html/EASTL Glossary.html @@ -0,0 +1,490 @@ + + + + EASTL Glossary + + + + + + +

EASTL Glossary

+

This document provides definitions to various terms related to EASTL. Items that are capitalized are items that are +used as template parameters.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
adapterAn adapter is something that encapsulates a component to provide another interface, such as a C++ class which makes +a stack out of a list.
algorithm
Algorithms are standalone functions which manipulate data which usually but not +necessarily comes from a container. Some algorithms change the data while others don't. Examples are reverse, sort, +find, and remove.
associative containerAn associative container is a variable-sized container that supports efficient retrieval of elements (values) based +on keys. It supports insertion and removal of elements, but differs from a sequence in that it does not provide a +mechanism for inserting an element at a specific position. Associative containers include map, multimap, set, multiset, +hash_map, hash_multimap, hash_set, hash_multiset.
arrayAn array is a C++ container which directly implements a C-style fixed array but which adds STL container semantics +to it.
basic_stringA templated string class which is usually used to store char or wchar_t strings.
beginThe function used by all conventional containers to return the first item in the container.
BidirectionalIteratorAn input iterator which is like ForwardIterator except it can be read in a backward direction as well.
BinaryOperation A function which takes two arguments and returns a value (which will usually be assigned to a third object).
BinaryPredicateA function which takes two arguments and returns true if some criteria is met (e.g. they are equal).
binder1st, binder2ndThese are function objects which convert one function object into another.  In particular, they implement a +binary function whereby you can specify one of the arguments. This is a somewhat abstract concept but has its uses.
bit vectorA specialized container that acts like vector<bool> but is implemented via one bit per entry. STL +vector<bool> is usually implemented as a bit vector but EASTL avoids this in favor of a specific bit vector +container.
bitsetAn extensible yet efficient implementation of bit flags. Not strictly a conventional STL container and not the same +thing as vector<bool> or a bit_vector, both of which are formal iterate-able containers.
capacityRefers to the amount of total storage available in an array-based container such as vector, string, and array. +Capacity is always >= container size and is > size in order to provide extra space for a container to grow +into.
const_iteratorAn iterator whose iterated items cannot be modified. A const_iterator is akin to a const pointer such as 'const +char*'.
containerA container is an object that stores other objects (its elements), and that has methods for accessing its elements. +In particular, every type that is a model of container has an associated iterator type that can be used to iterate +through the container's elements.
copy constructorA constructor for a type which takes another object of that type as its argument. For a hypothetical Widget class, +the copy constructor is of the form Widget(const Widget& src);
CompareA function which takes two arguments and returns the lesser of the two.
dequeThe name deque is pronounced "deck" and stands for "double-ended queue."
+
+A deque is very much like a vector: like vector, it is a sequence that supports random access to elements, constant +time insertion and removal of elements at the end of the sequence, and linear time insertion and removal of elements in +the middle.
+
+The main way in which deque differs from vector is that deque also supports constant time insertion and removal of +elements at the beginning of the sequence. Additionally, deque does not have any member functions analogous to vector's +capacity() and reserve(), and does not provide the guarantees on iterator validity that are associated with those +member functions.
difference_typeThe typedef'd type used by all conventional containers and iterators to define the distance between two iterators. +It is usually the same thing as the C/C++ ptrdiff_t data type.
emptyThe function used by all conventional containers to tell if a container has a size of zero. In many cases empty is +more efficient than checking for size() == 0.
elementAn element refers to a member of a container.
endThe function used by all conventional containers to return one-past the last item in the container.
equal_rangeequal_range is a version of binary search: it attempts to find the element value in an ordered range [first, last). +The value returned by equal_range is essentially a combination of the values returned by lower_bound and upper_bound: +it returns a pair of iterators i and j such that i is the first position where value could be inserted without +violating the ordering and j is the last position where value could be inserted without violating the ordering. It +follows that every element in the range [i, j) is equivalent to value, and that [i, j) is the largest subrange of +[first, last) that has this property.
explicit instantiationExplicit instantiation lets you create an instantiation of a templated class or function without actually using it +in your code. Since this is useful when you are creating library files that use templates for distribution, +uninstantiated template definitions are not put into object files. An example of the syntax for explicit +instantiation is:
+    template class vector<char>;
+    template void min<int>(int, int);
+    template void min(int, int);
ForwardIteratorAn input iterator which is like InputIterator except it can be reset back to the beginning.
FunctionA function which takes one argument and applies some operation to the target.
function object, functorA function object or functor is a class that has the function-call operator (operator()) +defined.
GeneratorA function which takes no arguments and returns a value (which will usually be assigned to an object).
hash_map, hash_multimap, hash_set, hash_multisetThe hash containers are implementations of map, multimap, set, and multiset via a hashtable instead of via a tree. +Searches are O(1) (fast) but the container is not sorted.
heapA heap is a data structure which is not necessarily sorted but is organized such that the highest priority item is +at the top. A heap is synonymous with a priority queue and has numerous applications in computer science.
InputIteratorAn input iterator (iterator you read from) which allows reading each element only once and only in a forward +direction.
intrusive_list, intrusive_hash_map, etc.Intrusive containers are containers which don't allocate memory but instead use their contained object to manage +the container's memory. While list allocates nodes (with mpPrev/mpNext pointers) that contain the list items, +intrusive_list doesn't allocate nodes but instead the container items have the mpPrev/mpNext pointers.
intrusive_ptrintrusive_ptr is a smart pointer which doesn't allocate memory but instead uses the contained object to manage +lifetime via addref and release functions.
iteratorAn iterator is the fundamental entity of reading and enumerating values in a container. Much like a pointer +can be used to walk through a character array, an iterator is used to walk through a linked list.
iterator categoryAn iterator category defines the functionality the iterator provides. The conventional iterator categories are +InputIterator, ForwardIterator, BidirectionalIterator, RandomAccessIterator, and OutputIterator. See the definitions of +each of these for more information. Iterator category is synonymous with iterator_tag.
iterator_tagSee iterator category.
key_type, KeyA Key or key_type is the identifier used by associative (a.k.a. dictionary) containers (e.g. map, hash_map) to +identify the type used to index the mapped_type. If you have a dictionary of strings that you access by an integer id, +the ids are the keys and the strings are the mapped types.
lexicographical compareA lexicographical compare is a comparison of two containers that compares them element by element, much like the C +strcmp function compares two strings.
linked_ptrA linked_ptr is a shared smart pointer which implements object lifetime via a linked list of all linked_ptrs that +are referencing the object. linked_ptr, like intrusive_ptr, is a non-memory-allocating alternative to shared_ptr.
listA list is a doubly linked list. It is a sequence that supports both forward and backward traversal, and (amortized) +constant time insertion and removal of elements at the beginning or the end, or in the middle. Lists have the important +property that insertion and splicing do not invalidate iterators to list elements, and that even removal invalidates +only the iterators that point to the elements that are removed. The ordering of iterators may be changed (that is, +list<T>::iterator might have a different predecessor or successor after a list operation than it did before), but +the iterators themselves will not be invalidated or made to point to different elements unless that invalidation or +mutation is explicit.
lower_boundlower_bound is a version of binary search: it attempts to find the element value in an ordered range [first, last). +Specifically, it returns the first position where value could be inserted without violating the ordering.
mapMap is a sorted associative container that associates objects of type Key with objects of type T. Map is a pair +associative container, meaning that its value type is pair<const Key, T>. It is also a unique associative +container, meaning that no two elements have the same key. It is implemented with a tree structure.
mapped_typeA mapped_type is a typedef used by associative containers to identify the container object which is accessed by a +key. If you have a dictionary of strings that you access by an integer id, the ids are the keys and the strings are the +mapped types.
member templateA member template is a templated function of a templated class. Thus with a member template function there are two +levels of templating -- the class and the function.
multimapMultimap is a sorted associative container that associates objects of type Key with objects of type T. +multimap is a pair associative container, meaning that its value type is pair<const Key, T>. It is also a +multiple associative container, meaning that there is no limit on the number of elements with the same key. It is +implemented with a tree structure.
multisetMultiset is a sorted associative container that stores objects of type Key. Its value type, as well as its key +type, is Key. It is also a multiple associative container, meaning that two or more elements may be identical. It +is implemented with a tree structure.
nodeA node is a little holder class used by many containers to hold the contained items. A linked-list, for example, +defines a node which has three members: mpPrev, mpNext, and T (the contained object).
nposnpos is used by the string class to identify a non-existent index. Some string functions return npos to indicate +that the function failed.
rel_opsrel_ops refers to "relational operators" and is a set of templated functions which provide operator!= for classes +that  have only operator== and provide operator > for classes that have only operator <, etc. Unfortunately, +rel_ops have a habit of polluting the global operator space and creating conflicts. They must be used with +discretion.
reverse_iteratorA reverse_iterator is an iterator which wraps a bidirectional or random access iterator and allows the iterator to +be read in reverse direction. The difference between using reverse_iterators and just decrementing regular iterators is +that reverse_iterators use operator++ to move backwards and thus work in any algorithm that calls ++ to move through a +container.
OutputIteratorAn output iterator (iterator you write to) which allows writing each element only once and only in a forward +direction.
PODPOD means Plain Old Data. It refers to C++ classes which act like built-in types and C structs. These are useful to +distinguish because some algorithms can be made more efficient when they can detect that they are working with PODs +instead of regular classes. 
PredicateA function which takes one argument and returns true if the argument meets some criteria.
priority_queueA priority_queue is an adapter container which implements a heap via a random access container such as vector or +deque.
queueA queue is an adapter container which implements a FIFO (first-in, first-out) container with which you can add +items to the back and get items from the front.
RandomAccessIteratorAn input iterator which can be addressed like an array. It is a superset of all other input iterators.
red-black treeA red-black tree is a binary tree which has the property of being always balanced. The colors red and black are +somewhat arbitrarily named monikers for nodes used to measure the balance of the tree. Red-black trees are considered +the best all-around data structure for sorted containers.
scalarA scalar is a data type which is implemented via a numerical value. In C++ this means integers, floating point +values, enumerations, and pointers. 
scoped_ptrA scoped_ptr is a smart pointer which is the same as C++ auto_ptr except that it cannot be copied.
setSet is a sorted associative container that stores objects of type Key. Its value type, as well as its key type, is +Key. It is also a unique associative container, meaning that no two elements are the same. It is implemented with a tree +structure.
sequenceA sequence is a variable-sized container whose elements are arranged in a strict linear (though not necessarily +contiguous) order. It supports insertion and removal of elements. Sequence containers include vector, deque, array, +list, slist.
sizeAll conventional containers have a size member function which returns the count of elements in the container. The +efficiency of the size function differs between containers.
size_typeThe type that a container uses to define its size and counts. This is similar to the C/C++ size_t type but may be +specialized for the container. It defaults to size_t, but it is possible to force it to be 4 bytes for 64 bit machines by defining EASTL_SIZE_T_32BIT.
skip listA skip-list is a type of container which is an alternative to a binary tree for finding data.
shared_ptrA shared_ptr is a smart pointer which allows multiple references (via multiple shared_ptrs) to the same object. +When the last shared_ptr goes away, the pointer is freed. shared_ptr is implemented via a shared count between all +instances.
slistAn slist is like a list but is singly-linked instead of doubly-linked. It can only be iterated in a +forward-direction.
smart pointerSmart pointer is a term that identifies a family of utility classes which store pointers and free them when the +class instance goes out of scope. Examples of smart pointers are shared_ptr, linked_ptr, intrusive_ptr, and +scoped_ptr.
spliceSplicing refers to the moving of a subsequence of one Sequence into another Sequence.
stackA stack is an adapter container which implements LIFO (last-in, first-out) access via another container such as a +list or deque.
STLStandard Template Library. 
StrictWeakOrderingA BinaryPredicate that compares two objects, returning true if the first precedes the second. Like Compare but has +additional requirements. Used for sorting routines.
+
+This predicate must satisfy the standard mathematical definition of a strict weak ordering. A StrictWeakOrdering has to +behave the way that "less than" behaves: if a is less than b then b is not less than a, if a is less than b and b is +less than c then a is less than c, and so on.
stringSee basic_string.
TT is the template parameter name used by most containers to identify the contained element type. 
template parameterA template parameter is the templated type used to define a template function or class. In the declaration +'template <typename T> class vector{ },'  T is a template parameter.
template specializationA template specialization is a custom version of a template which overrides the default version and provides +alternative functionality, often for the purpose of providing improved or specialized functionality.
treapA tree-like structure implemented via a heap. This is an alternative to a binary tree (e.g. red-black tree), +skip-list, and sorted array as a mechanism for a fast-access sorted container.
type traitsType traits are properties of types. If you have a templated type T and you want to know if it is a pointer, you +would use the is_pointer type trait. If you want to know if the type is a POD, you would use the is_pod type trait. +Type traits are very useful for allowing the implementation of optimized generic algorithms and for asserting that +types have properties expected by the function or class contract. For example, you can use type_traits to tell if a +type can be copied via memcpy instead of a slower element-by-element copy.
typenameTypename is a C++ keyword used in templated function implementations which identifies to the compiler that the +following expression is a type and not a value. It is used extensively in EASTL, particularly in the algorithms.
UnaryOperationA function which takes one argument and returns a value (which will usually be assigned to a second object).
upper_boundupper_bound is a version of binary search: it attempts to find the element value in an ordered range [first, last). +Specifically, it returns the last position where value could be inserted without violating the ordering.
value_type, ValueA value_type is a typedef used by all containers to identify the elements they contain. In most cases value_type is +simply the same thing as the user-supplied T template parameter. The primary exception is the associative containers +whereby value_type is the pair of key_type and mapped_type.
vectorA vector is a Sequence that supports random access to elements, constant time insertion and removal of elements at +the end, and linear time insertion and removal of elements at the beginning or in the middle. The number of elements in +a vector may vary dynamically; memory management is automatic. Vector is the simplest of the container classes, and in +many cases the most efficient.
vector_map, vector_multimap, vector_set, vector_multisetThese are containers that implement the functionality of map, multimap, set, and multiset via a vector or deque +instead of a tree. They use less memory and find items faster, but are slower to modify and modification invalidates +iterators.
weak_ptrA weak_ptr is an adjunct to shared_ptr which doesn't increment the reference on the contained object but can safely +tell you if the object still exists and access it if so. It has uses in preventing circular references in +shared_ptrs.
+
+ +
+End of document
+
+
+
+
+
+
+
+
+ + diff --git a/doc/html/EASTL Gotchas.html b/doc/html/EASTL Gotchas.html new file mode 100644 index 0000000..daa8f7a --- /dev/null +++ b/doc/html/EASTL Gotchas.html @@ -0,0 +1,175 @@ + + + + EASTL Gotchas + + + + + + + +

EASTL Gotchas

+

There are some cases where the EASTL design results in "gotchas" or behavior that isn't necessarily what the new user + would expect. These are all situations in which this behavior may be undesirable. One might ask, "Why not change EASTL + to make these gotchas go away?" The answer is that in each case making the gotchas go away would either be impossible + or would compromise the functionality of the library.

+

Summary

+

The descriptions here are intentionally terse; this is to make them easier to visually scan.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
1map::operator[] can create elements.
2char* converts to string silently.
3char* is compared by ptr and not by contents.
4Iterators can be invalidated by container mutations.
5Vector resizing may cause ctor/dtor cascades.
6Vector and string insert/push_back/resize can reallocate.
7Deriving from containers may not work.
8set::iterator is const_iterator.
9Inserting elements means copying by value.
10Containers of pointers can leak if you aren't careful.
11Containers of auto_ptrs can crash.
12Remove algorithms don't actually remove elements.
13list::size() is O(n).
14vector and deque::size() may incur integer division.
15Be careful making custom Compare functions.
16Comparisons involving floating point are dangerous.
17Writing beyond string::size and vector::size is dangerous.
18Container operator=() doesn't copy allocators.
+

Detail

+

1 +map::operator[] can create elements.

+

By design, map operator[] creates a value for you if it isn't already present. The reason for this is that the alternative behavior would be to throw an exception, and such behavior isn't desirable. The resolution is to simply use the map::find function instead of operator[].

+

2 +char* converts to string silently.

+

The string class has a non-explicit constructor that takes char* as an argument. Thus if you pass char* to a function that takes a string object, a temporary string will be created. In some cases this is undesirable behavior but the user may not notice it right away, as the compiler gives no warnings. The reason that the string constructor from char* is not declared explicit is that doing so would prevent the user from expressions such as: string s = "hello". In this example, no temporary string object is created, but the syntax is not possible if the char* constructor is declared explicit. Thus a decision to make the string char* constructor explicit involves tradeoffs.

+

There is an EASTL configuration option called EASTL_STRING_EXPLICIT which makes the string char* ctor explicit and avoids the behaviour described above.

+

3 +char* is compared by ptr and not by contents.

+

If you have a set of strings declared as set<char*>, the find function will compare via the pointer value and not the string contents. The workaround is to make a set of string objects or, better, to supply a custom string comparison function to the set. The workaround is not to declare a global operator< for type char*, as that could cause other systems to break.

+

4 Iterators can be invalidated by container mutations

+

With some containers, modifications of them may invalidate iterators into them. With other containers, modifications of them only invalidate an iterator if the modification involves the element that iterator refers to. Containers in the former category include vector, deque, basic_string (string), vector_map, vector_multimap, vector_set, and vector_multiset. Containers in the latter category include list, slist, map, multimap, multiset, all hash containers, and all intrusive containers.

+

5 Vector resizing may cause ctor/dtor cascades.

+

If elements are inserted into a vector in middle of the sequence, the elements from the insertion point to the end will be copied upward. This will necessarily cause a series of element constructions and destructions as the elements are copied upward. Similarly, if an element is appended to a vector but the vector capacity is exhausted and needs to be reallocated, the entire vector will undergo a construction and destruction pass as the values are copied to the new storage. This issue exists for deque as well, though to a lesser degree. For vector, the resolution is to reserve enough space in your vector to prevent such reallocation. For deque the resolution is to set its subarray size to enough to prevent such reallocation. Another solution that can often be used is to take advantage of the has_trivial_relocate type trait, which can cause such moves to happen via memcpy instead of via ctor/dtor calls. If your class can be safely memcpy'd, you can use EASTL_DECLARE_TRIVIAL_RELOCATE to tell the compiler it can be memcpy'd. Note that built-in scalars (e.g. int) already are automatically memcpy'd by EASTL.

+

6 +Vector and string insert/push_back/resize can reallocate.

+

If you create an empty vector and use push_back to insert 100 elements, the vector will reallocate itself at least three or four times during the operation. This can be an undesirable thing. The best thing to do if possible is to reserve the size you will need up front in the vector constructor or before you add any elements.

+

7 +Deriving from containers may not work.

+

EASTL containers are not designed with the guarantee that they can be arbitrarily subclassed. This is by design and is done for performance reasons, as such guarantees would likely involve making containers use virtual functions. However, some types of subclassing can be successful and EASTL does such subclassing internally to its advantage. The primary problem with subclassing results when a parent class function calls a function that the user wants to override. The parent class cannot see the overridden function and silent unpredictable behavior will likely occur. If your derived container acts strictly as a wrapper for the container then you will likely be able to successfully subclass it.

+

8 +set::iterator is const_iterator.

+

The reason this is so is that a set is an ordered container and changing the value referred to by an iterator could make the set be out of order. Thus, set and multiset iterators are always const_iterators. If you need to change the value and are sure the change will not alter the container order, use const_cast or declare mutable member variables for your contained object. This resolution is the one blessed by the C++ standardization committee. This issue is addressed in more detail in the EASTL FAQ.

+

9 +Inserting elements means copying by value.

+

When you insert an element into a (non-intrusive) container, the container makes a copy of the element. There is no provision to take over ownership of an object from the user. The exception to this is of course when you use a container of pointers instead of a container of values. See the entry below regarding containers of pointers. Intrusive containers (e.g. intrusive_list) do in fact take over the user-provided value, and thus provide another advantage over regular containers in addition to avoiding memory allocation.

+

10 + Containers of pointers can leak if you aren't careful.

+

Containers of pointers don't know or care about the possibility that the pointer may have been allocated and needs to be freed. Thus if you erase such elements from a container they are not freed. The resolution is to manually free the pointers when removing them or to instead use a container of smart pointers (shared smart pointers, in particular). This issue is addressed in more detail in the EASTL FAQ and the auto_ptr-related entry below.

+

11 +Containers of auto_ptrs can crash

+

We suggested above that the user can use a container of smart pointers to automatically manage contained pointers. However, you don't want to use auto_ptr, as auto_ptrs cannot be safely assigned to each other; doing so results in a stale pointer and most likely a crash.

+

12 +Remove algorithms don't actually remove elements.

+

Algorithms such as remove, remove_if, remove_heap, and unique do not erase elements from the sequences they work on. Instead, they return an iterator to the new end of the sequence and the user must call erase with that iterator in order to actually remove the elements from the container. This behavior exists because algorithms work on sequences via iterators and don't know how to work with containers. Only the container can know how to best erase its own elements. In each case, the documentation for the algorithm reminds the user of this behavior. Similarly, the copy algorithm copies elements from one sequence to another and doesn't modify the size of the destination sequence. So the destination must hold at least as many items as the source, and if it holds more items, you may want to erase the items at the end after the copy.

+

13 +list::size() is O(n).

+

By this we mean that calling size() on a list will iterate the list and add the size as it goes. Thus, getting the size of a list is not a fast operation, as it requires traversing the list and counting the nodes. We could make list::size() be fast by having a member mSize variable. There are reasons for having such functionality and reasons for not having such functionality. We currently choose to not have a member mSize variable as it would add four bytes to the class, add processing to functions such as insert and erase, and would only serve to improve the size function, but no other function. The alternative argument is that the C++ standard states that std::list should be an O(1) operation (i.e. have a member size variable), most C++ standard library list implementations do so, the size is but an integer which is quick to update, and many users expect to have a fast size function. All of this applies to slist and intrusive_list as well.

+

Note that EASTL's config.h file has an option in it to cause list and slist to cache their size with an mSize variable and thus make size() O(1). This option is disabled by default.

+

14 + vector and deque::size() may incur integer division.

+

Some containers (vector and deque in particular) calculate their size by pointer subtraction. For example, the implementation of vector::size() is 'return mpEnd - mpBegin'. This looks like a harmless subtraction, but if the size of the contained object is not an even power of two then the compiler will likely need to do an integer division to calculate the value of the subtracted pointers. One might suggest that vector use mpBegin and mnSize as member variables instead of mpBegin and mpEnd, but that would incur costs in other vector operations. The suggested workaround is to iterate a vector instead of using a for loop and operator[] and for those cases where you do use a for loop and operator[], get the size once at the beginning of the loop instead of repeatedly during the condition test.

+

15 + Be careful making custom Compare functions. +

+

A Compare function compares two values and returns true if the first is less than the second. This is easy to understand for integers and strings, but harder to get right for more complex structures. Many a time have people decided to come up with a fancy mechanism for comparing values and made mistakes. The FAQ has a couple entries related to this. See http://blogs.msdn.com/oldnewthing/archive/2003/10/23/55408.aspx for a story about how this can go wrong by being overly clever.

+

16 + Comparisons involving floating point are dangerous.

+

Floating point comparisons between two values that are very nearly equal can result in inconsistent results. Similarly, floating point comparisons between NaN values will always generate inconsistent results, as NaNs by definition always compare as non-equal. You thus need to be careful when using comparison functions that work with floating point values. Conversions to integral values may help the problem, but not necessarily.

+

17 Writing beyond string::size and vector::size is dangerous.

+

A trick that often comes to mind when working with strings is to set the string capacity to some maximum value, strcpy data into it, and then resize the string when done. This can be done with EASTL, but only if you resize the string to the maximum value and not reserve the string to the maximum value. The reason is that when you resize a string from size (n) to size (n + count), the count characters are zeroed and overwrite the characters that you strcpy'd.

+

The following code is broken:

+

string mDataDir;
+
+ mDataDir.reserve(kMaxPathLength);
+ strcpy(&mDataDir[0], "blah/blah/blah");
+mDataDir.resize(strlen(&mDataDir[0])); // Overwrites your blah/... with 00000...

+

This following code is OK:

+

string mDataDir;
+
+ mDataDir.resize(kMaxPathLength);
+ strcpy(&mDataDir[0], "blah/blah/blah");
+mDataDir.resize(strlen(&mDataDir[0]));

+

18 Container operator=() doesn't copy allocators. +

+

EASTL container assignment (e.g. vector::operator=(const vector&)) doesn't copy the allocator. There are good and bad reasons for doing this, but that's how it acts. So you need to be aware that you need to assign the allocator separately or make a container subclass which overrides operator=() and does this.

+
+
+End of document
+
+
+
+
+ + diff --git a/doc/html/EASTL Introduction.html b/doc/html/EASTL Introduction.html new file mode 100644 index 0000000..0e9b23c --- /dev/null +++ b/doc/html/EASTL Introduction.html @@ -0,0 +1,47 @@ + + + + EASTL Introduction + + + + + + +

EASTL Introduction

+

EASTL stands for Electronic Arts Standard Template Library. It is a C++ template library of containers, algorithms, and + iterators useful for runtime and tool development across multiple platforms. It is a fairly extensive and robust + implementation of such a library and has an emphasis on high performance above all other considerations.

+

Intended Audience

+

This is a short document intended to provide a basic introduction to EASTL for + those new to the concept of EASTL or STL. If you are familiar with the C++ STL + or have worked with other templated container/algorithm libraries, you probably + don't need to read this. If you have no familiarity with C++ templates at all, + then you probably will need more than this document to get you up to speed. In + this case you need to understand that templates, when used properly, are powerful + vehicles for the ease of creation of optimized C++ code. A description of C++ + templates is outside the scope of this documentation, but there is plenty of such + documentation on the Internet. See the EASTL FAQ.html + document for links to information related to learning templates and STL.

+

EASTL Modules

+

EASTL consists primarily of containers, algorithms, and iterators. An example of a container is a linked list, while an + example of an algorithm is a sort function; iterators are the entities of traversal for containers and algorithms. + EASTL contains a fairly large number of containers and algorithms, each of which is a very clean, efficient, and + unit-tested implementation. We can say with some confidence that you are not likely to find better implementations of + these (commercial or otherwise), as these are the result of years of wisdom and diligent work. For a detailed list of + EASTL modules, see EASTL Modules.html.

+

EASTL Suitability

+

What uses are EASTL suitable for? Essentially any situation in tools and shipping applications where the functionality + of EASTL is useful. Modern compilers are capable of producing good code with templates and many people are using them + in both current generation and future generation applications on multiple platforms from embedded systems to servers + and mainframes.

+
+End of document
+
+
+
+
+
+
+ + diff --git a/doc/html/EASTL Maintenance.html b/doc/html/EASTL Maintenance.html new file mode 100644 index 0000000..aaca955 --- /dev/null +++ b/doc/html/EASTL Maintenance.html @@ -0,0 +1,292 @@ + + + + EASTL Maintenance + + + + + + + +

EASTL Maintenance

+

Introduction

+

The purpose of this document is to provide some necessary background for anybody who might do work on EASTL. Writing + generic templated systems like EASTL can be surprisingly tricky. There are numerous details of the C++ language that + you need to understand which don't usually come into play during the day-to-day C++ coding that many people do. It is + easy to make a change to some function that seems proper and works for your test case but either violates the design + expectations or simply breaks under other circumstances.
+
+ It may be useful to start with an example. Here we provide an implementation of the count algorithm which seems +simple enough. Except it is wrong and while it will compile in some cases it won't compile in others:

+
template <class InputIterator, class T>
+int count(InputIterator first, InputIterator last, const T& value)
+{
+     int result = 0;
+ 
+     for(; first < last; ++first){
+         if(*first == value)
+             ++result;
+     }
+ 
+     return result;
+ } 
+

The problem is with the comparison 'first < last'. The count algorithm takes an InputIterator and operator< is +not guaranteed to exist for any given InputIterator (and indeed while operator< exists for vector::iterator, it +doesn't exist for list::iterator). The comparison in the above algorithm must instead be implemented as 'first != +last'. If we were working with a RandomAccessIterator then 'first < last' would be valid.

+

In the following sections we cover various topics of interest regarding the development and maintenance of EASTL. + Unfortunately, this document can't cover every aspect of EASTL maintenance issues, but at least it should give you a +sense of the kinds of issues.

+ +

C++ Language Standard

+

First and foremost, you need to be familiar with the C++ standard. In particular, the sections of the standard related + to containers, algorithms, and iterators are of prime significance. We'll talk about some of this in more detail below. + Similarly, a strong understanding of the basic data types is required. What is the difference between ptrdiff_t and +intptr_t; unsigned int and size_t; char and signed char?

+

In addition to the C++ language standard, you'll want to be familiar with the C++ Defect Report. This is a continuously + updated document which lists flaws in the original C++ language specification and the current thinking as the +resolutions of those flaws. You will notice various references to the Defect Report in EASTL source code.

+

Additionally, you will want to be familiar with the C++ Technical Report 1 (as of this writing there is only one). This + document is the evolving addendum to the C++ standard based on both the Defect Report and based on desired additions to +the C++ language and standard library.

+

Additionally, you will probably want to have some familiarity with Boost. It also helps to keep an eye on + comp.std.c++ Usenet discussions. However, watch out for what people say on Usenet. They tend to defend GCC, Unix, std + STL, and C++ to a sometimes unreasonable degree. Many discussions ignore performance implications and +concentrate only on correctness and sometimes academic correctness above usability.

+

Language Use

+

Macros are (almost) not allowed in EASTL. A prime directive of EASTL is to be easier to read by users and most of + the time macros are an impediment to this. So we avoid macros at all costs, even if it ends up making our development + and maintenance more difficult. That being said, you will notice that the EASTL config.h file uses macros to control + various options. This is an exception to the rule; when we talk about not using macros, we mean with the EASTL +implementation itself.

+

EASTL assumes a compliant and intelligent C++ compiler, and thus all language facilities are usable. However, we +nevertheless choose to stay away from some language functionality. The primary language features we avoid are:

+
    +
  • RTTI (run-time-type-identification) (this is deemed too costly)
  • +
  • Template export (few compilers support this)
  • +
  • Exception specifications (most compilers ignore them)
  • +
+

Use of per-platform or per-compiler code should be avoided when possible but where there is a significant advantage to + be gained it can and indeed should be used. An example of this is the GCC __builtin_expect feature, which allows the + user to give the compiler a hint about whether an expression is true or false. This allows for the generation of code +that executes faster due to more intelligent branch prediction.

+

Prime Directives

+

The +implementation of EASTL is guided foremost by the following directives which are listed in order of importance.

+
    +
  1. Efficiency (speed and memory usage)
  2. +
  3. Correctness (doesn't have bugs)
  4. +
  5. Portability (works on all required platforms with minimal specialized code)
  6. +
  7. Readability (code is legible and comments are present and useful)
  8. +
+

Note that unlike commercial STL implementations which must put correctness above all, we put a higher value on + efficiency. As a result, some functionality may have some usage limitation that is not present in other similar systems +but which allows for more efficient operation, especially on the platforms of significance to us.

+

Portability is significant, but not critical. Yes, EASTL must compile and run on all platforms that we will ship games + for. But we don't take that to mean under all compilers that could be conceivably used for such platforms. For example, + Microsoft VC6 can be used to compile Windows programs, but VC6's C++ support is too weak for EASTL and so you simply +cannot use EASTL under VC6.

+

Readability is something that EASTL achieves better than many other templated libraries, particularly Microsoft STL and + STLPort. We make every attempt to make EASTL code clean and sensible. Sometimes our need to provide optimizations + (particularly related to type_traits and iterator types) results in less simple code, but efficiency happens to be our +prime directive and so it overrides all other considerations.

+ +

Coding Conventions

+

Here we provide a list of coding conventions to follow when maintaining or adding to EASTL, starting with the three +language use items from above:

+
    +
  • No RTTI use.
  • +
  • No use of exception specifications (e.g. appending the 'throw' declarator to a function).
  • +
  • No use of exception handling itself except where explicitly required by the implementation (e.g. vector::at).
  • +
  • Exception use needs to be savvy to EASTL_EXCEPTIONS_ENABLED.
  • +
  • No use of macros (outside of config.h). Macros make things more difficult for the user.
  • +
  • No use of static or global variables.
  • +
  • No use of global new, delete, malloc, or free. All memory must be user-specifiable via an Allocator parameter +(default-specified or explicitly specified).
  • +
  • Containers use protected member data and functions as opposed to private. This is because doing so allows +subclasses to extend the container without the creation of intermediary functions. Recall from our prime directives above that performance and simplicity overrule all.
  • +
  • No use of multithreading primitives. 
  • +
  • No use of the export keyword.
  • +
  • We don't have a rule about C-style casts vs. C++ static_cast<>, etc. We would always use static_cast except +that debuggers can't evaluate them and so in practice they can get in the way of debugging and tracing. However, if the +cast is one that users don't tend to need to view in a debugger, C++ casts are preferred.
  • +
  • No external library dependencies whatsoever, including standard STL. EASTL is dependent on only EABase and the +C++ compiler. 
  • +
  • All code must be const-correct. This isn't just for readability -- compilation can fail unless const-ness is used +correctly everywhere. 
  • +
  • Algorithms do not refer to containers; they refer only to iterators.
  • +
  • Algorithms in general do not allocate memory. If such a situation arises, there should be a version of the +algorithm which allows the user to provide the allocator.
  • +
  • No inferior implementations. No facility should be added to EASTL unless it is of professional +quality.
  • +
  • The maintainer should emulate the EASTL style of code layout, regardless of the maintainer's personal preferences. +When in Rome, do as the Romans do. EASTL uses 4 spaces for indents, which is how the large majority of code within EA +is written.
  • +
  • No major changes should be done without consulting a peer group.
  • +
+ +

Compiler Issues

+

Historically, templates are the feature of C++ that has given C++ compilers the most fits. We are still working with + compilers that don't completely and properly support templates. Luckily, most compilers are now good enough to handle +what EASTL requires. Nevertheless, there are precautions we must take.

+

It turns out that the biggest problem in writing portable EASTL code is that VC++ allows you to make illegal statements + which are not allowed by other compilers. For example, VC++ will allow you to neglect using the typename keyword in +template references, whereas GCC (especially 3.4+) requires it.

+

In order to feel comfortable that your EASTL code is C++ correct and is portable, you must do at least these two +things:

+
    +
  • Test under at least VS2005, GCC 3.4+, GCC 4.4+, EDG, and clang.
  • +
  • Test all functions that you write, as compilers will often skip the compilation of a template function if it isn't +used.
  • +
+

The two biggest issues to watch out for are 'typename' and a concept called "dependent names". In both cases VC++ will + accept non-conforming syntax whereas most other compilers will not. Whenever you reference a templated type (and not a templated + value) in a template, you need to prefix it by 'typename'. Whenever your class function refers to a base class member (data or + function), you need to refer to it by "this->", "base_type::", or by placing a "using" statement in your class to +declare that you will be referencing the given base class member.

+ +

Iterator Issues

+

The most important thing to understand about iterators is the concept of iterator types and their designated + properties. In particular, we need to understand the difference between InputIterator, ForwardIterator, + BidirectionalIterator, RandomAccessIterator, and OutputIterator. These differences dictate both how we implement our + algorithms and how we implement our optimizations. Please read the C++ standard for a reasonably well-implemented + description of these iterator types.

+

Here's an example from EASTL/algorithm.h which demonstrates how we use iterator types to optimize the reverse algorithm +based on the kind of iterator passed to it:

+
template <class BidirectionalIterator>
+inline void reverse_impl(BidirectionalIterator first, BidirectionalIterator last, bidirectional_iterator_tag)
{ +    for(; (first != last) && (first != --last); ++first) // We are not allowed to use operator <, <=, >, >= with +        iter_swap(first, last);                          // a generic (bidirectional or otherwise) iterator. +}
+ +template <typename RandomAccessIterator> +inline void reverse_impl(RandomAccessIterator first, RandomAccessIterator last, random_access_iterator_tag) +{ +    for(; first < --last; ++first) // With a random access iterator, we can use operator < to more efficiently implement +        iter_swap(first, last);    // this algorithm. A generic iterator doesn't necessarily have an operator < defined. +}

+template <class BidirectionalIterator> +inline void reverse(BidirectionalIterator first, BidirectionalIterator last) +{ +    typedef typename iterator_traits<BidirectionalIterator>::iterator_category IC; +    reverse_impl(first, last, IC()); +}
+ +

Exception Handling

+

You will notice that EASTL uses try/catch in some places (particularly in containers) and uses + the EASTL_EXCEPTIONS_ENABLED define. For starters, any EASTL code that uses try/catch should always be wrapped + within #if EASTL_EXCEPTIONS_ENABLED (note: #if, not #ifdef).

+

This is simple enough, but what you may be wondering is how it is that EASTL decides to use try/catch for some sections + of code and not for others. EASTL follows the C++ standard library conventions with respect to exception handling, and + you will see similar exception handling in standard STL. The code that you need to wrap in try/catch is code that can + throw a C++ exception (not to be confused with CPU exception) and needs to have something unwound (or fixed) as a + result. The important thing is that the container be in a valid state after encountering such exceptions. In general +the kinds of things that require such try/catch are:

+
    +
  • Memory allocation failures (which throw exceptions)
  • +
  • Constructor exceptions
  • +
+

Take a look at the cases in EASTL where try/catch is used and see what it is doing.

+

Type Traits

+

EASTL provides a facility called type_traits which is very similar to the type_traits being proposed by the C++ TR1 + (see above). type_traits are useful because they tell you about properties of types at compile time. This allows you to + do things such as assert that a data type is scalar or that a data type is const. The way we put them to use in EASTL + is to take advantage of them to implement different pathways for functions based on types. For example, we can copy a + contiguous array of scalars much faster via memcpy than we can via a for loop, though we could not safely employ the + for loop for a non-trivial C++ class.

+

As mentioned in the General Optimizations section below, EASTL should take advantage of type_traits information to the +extent possible to achieve maximum efficiency.

+

General +Optimizations

+

One of the primary goals of EASTL is to achieve the highest possible efficiency. In cases where EASTL functionality + overlaps standard C++ STL functionality, standard STL implementations provided by compiler vendors are a benchmark upon + which EASTL strives to improve. Indeed EASTL is more efficient than all other current STL implementations (with some + exception in the case of some Metrowerks STL facilities). Here we list some of the things to look for when considering + optimization of EASTL code. These items can be considered general optimization suggestions for any code, but this +particular list applies to EASTL:

+
    +
  • Take advantage of type_traits to the extent possible (e.g. to use memcpy to move data instead of a for loop when +possible).
  • +
  • Take advantage of iterator types to the extent possible.
  • +
  • Take advantage of the compiler's expectation that if statements are expected to evaluate as true and for loop +conditions are expected to evaluate as false.
  • +
  • Make inline-friendly code. This often means avoiding temporaries to the extent possible.
  • +
  • Minimize branching (i.e. minimize 'if' statements). Where branching is used, make it so that 'if' statements +execute as true.
  • +
  • Use EASTL_LIKELY/EASTL_UNLIKELY to give branch hints to the compiler when you are confident it will be +beneficial.
  • +
  • Use restricted pointers (EABase's EA_RESTRICT or various compiler-specific versions of __restrict).
  • +
  • Compare unsigned values to < max instead of comparing signed values to >= 0 && < max.
  • +
  • Employ power of 2 integer math instead of math with any kind of integer.
  • +
  • Use template specialization where possible to implement improved functionality.
  • +
  • Avoid function calls when the call does something trivial. This improves debug build speed (which matters) and +sometimes release build speed as well, though sometimes makes the code intent less clear. A comment next to the code +saying what call it is replacing makes the intent clear without sacrificing performance.
  • +
+

Unit Tests

+

Writing robust templated containers and algorithms is difficult or impossible without a heavy unit test suite in place. + EASTL has a pretty extensive set of unit tests for all containers and algorithms. While the successful automated unit + testing of shipping application programs may be a difficult thing to pull off, unit testing of libraries such as this + is of huge importance and cannot be understated.

+
    +
  • When making a new unit test, start by copying one of the existing unit tests and follow its conventions.
  • +
  • Test containers of both scalars and classes.
  • +
  • Test algorithms on both container iterators (e.g. vector.begin()) and pointer iterators (e.g. int*).
  • +
  • Make sure that algorithm or container member functions which take iterators work with the type of iterator they +claim to (InputIterator, ForwardIterator, BidirectionalIterator, RandomAccessIterator). 
  • +
  • Test for const-correctness. If a user is allowed to modify something that is supposed to be const, silent errors +can go undetected.
  • +
  • Make sure that unit tests cover all functions and all pathways of the tested code. This means that in writing the +unit test you need to look at the source code to understand all the pathways.
  • +
  • Consider using a random number generator (one is provided in the test library) to do 'monkey' testing whereby +unexpected input is given to a module being tested. When doing so, make sure you seed the generator in a way that +problems can be reproduced.
  • +
  • While we avoid macros in EASTL user code, macros to assist in unit tests aren't considered a problem. However, +consider that a number of macros could be replaced by templated functions and thus be easier to work with.
  • +
  • Unit tests don't need to be efficient; feel free to take up all the CPU power and time you need to test a module +sufficiently.
  • +
  • EASTL containers are not thread-safe, by design. Thus there is no need to do multithreading tests as long as you +stay away from the usage of static and global variables.
  • +
  • Unit tests must succeed with no memory leaks and of course no memory corruption. The heap system should be +configured to test for this, and heap validation functions are available to the unit tests while in the middle of +runs.
  • +
+ +

Things to Keep in Mind

+
    +
  • When referring to EASTL functions and types from EASTL code, make sure to preface the type with the EASTL +namespace. If you don't do this you can get collisions due to the compiler not knowing if it should use the EASTL +namespace or the namespace of the templated type for the function or type.
  • +
  • Newly constructed empty containers do no memory allocation. Some STL and other container libraries allocate an +initial node from the class memory allocator. EASTL containers by design never do this. If a container needs an +initial node, that node should be made part of the container itself or be a static empty node object.
  • +
  • Empty containers (new or otherwise) contain no constructed objects, including those that might be in an 'end' node. +Similarly, no user object (e.g. of type T) should be constructed unless required by the design and unless documented in +the container/algorithm contract. 
  • +
  • When creating a new container class, it's best to copy from an existing similar class to the extent possible. This +helps keep the library consistent and resolves subtle problems that can happen in the construction of containers.
  • +
  • Be very careful about tweaking the code. It's easy to think (for example) that a > could be switched to a >= +where instead it is a big deal. Just about every line of code in EASTL has been thought through and has a purpose. Unit +tests may or may not currently test every bit of EASTL, so you can't necessarily rely on them to give you 100% +confidence in changes. If you are not sure about something, contact the original author and he will tell you for +sure.
  • +
  • Algorithm templates always work with iterators and not containers. A given container may of course implement an +optimized form or an algorithm itself.
  • +
  • Make sure everything is heavily unit tested. If somebody finds a bug, fix the bug and make a unit test to make sure +the bug doesn't happen again.
  • +
  • It's easy to get iterator categories confused or forgotten while implementing algorithms and containers.
  • +
  • Watch out for the strictness of GCC 3.4+. There is a bit of syntax — especially related to templates — that other +compilers accept but GCC 3.4+ will not.
  • +
  • Don't forget to update the config.h EASTL_VERSION define before publishing.
  • +
  • The vector and string classes define iterator to be T*. We want to always leave this so — at least in release +builds — as this gives some algorithms an advantage that optimizers cannot get around.
  • +
+
+
+
+
+
+
+ + diff --git a/doc/html/EASTL Modules.html b/doc/html/EASTL Modules.html new file mode 100644 index 0000000..620937e --- /dev/null +++ b/doc/html/EASTL Modules.html @@ -0,0 +1,666 @@ + + + + EASTL Modules + + + + + + + +

EASTL Modules

+

Introduction

+

We provide here a list of all top-level modules present or planned for future presence in EASTL. In some cases (e.g. + algorithm), the module consists of many smaller submodules which are not described in detail here. In those cases you + should consult the source code for those modules or consult the detailed documentation for those modules. This document +is a high level overview and not a detailed document.

+

Module List

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 ModuleDescription
configConfiguration header. Allows for changing some compile-time options.
slist
+fixed_slist
Singly-linked list.
+fixed_slist is a version which is implemented via a fixed block of contiguous memory.
list
+fixed_list
Doubly-linked list.
intrusive_list
+intrusive_slist
List whereby the contained item provides the node implementation.
arrayWrapper for a C-style array which extends it to act like an STL container.
vector
+fixed_vector
Resizable array container.
vector_set
+vector_multiset
Set implemented via a vector instead of a tree. Speed and memory use is improved but resizing is slower.
vector_map
+vector_multimap
Map implemented via a vector instead of a tree. Speed and memory use is improved but resizing is slower.
deque
Double-ended queue, but also with random access. Acts like a vector but insertions and +removals are efficient.
bit_vectorImplements a vector of bool, but the actual storage is done with one bit per bool. Not the same thing as a +bitset.
bitsetImplements an efficient arbitrarily-sized bitfield. Note that this is not strictly the same thing as a vector of +bool (bit_vector), as it is optimized to act like an arbitrary set of flags and not to be a generic container which can +be iterated, inserted, removed, etc.
set
+multiset
+fixed_set
+fixed_multiset
A set is a sorted unique collection, multiset is sorted but non-unique collection.
map
+multimap
+fixed_map
+fixed_multimap
A map is a sorted associative collection implemented via a tree. It is also known as dictionary.
hash_map
+hash_multimap
+fixed_hash_map
+fixed_hash_multimap
Map implemented via a hash table.
intrusive_hash_map
+intrusive_hash_multimap
+intrusive_hash_set
+intrusive_hash_multiset
hash_map whereby the contained item provides the node implementation, much like intrusive_list.
hash_set
+hash_multiset
+fixed_hash_set
+fixed_hash_map
Set implemented via a hash table.
basic_string
+fixed_string
+fixed_substring
basic_string is a character string/array.
+fixed_substring is a string which is a reference to a range within another string or character array.
+cow_string is a string which implements copy-on-write.
algorithmmin/max, find, binary_search, random_shuffle, reverse, etc. 
sort
Sorting functionality, including functionality not in STL. quick_sort, heap_sort, +merge_sort, shell_sort, insertion_sort, etc.
numericNumeric algorithms: accumulate, inner_product, partial_sum, adjacent_difference, etc.
heap
Heap structure functionality: make_heap, push_heap, pop_heap, sort_heap, is_heap, +remove_heap, etc.
stack
Adapts any container into a stack.
queue
Adapts any container into a queue.
priority_queue
Implements a conventional priority queue via a heap structure.
type_traitsType information, useful for writing optimized and robust code. Also used for implementing optimized containers and +algorithms.
utility
pair, make_pair, rel_ops, etc.
functional
Function objects.
iterator
Iteration for containers and algorithms.
smart_ptrSmart pointers: shared_ptr, shared_array, weak_ptr, scoped_ptr, scoped_array, linked_ptr, linked_array, +intrusive_ptr.
+

 

+

Module Behaviour

+

The overhead sizes listed here refer to an optimized release build; debug builds may add some additional overhead. Some + of the overhead sizes may be off by a little bit (usually at most 4 bytes). This is because the values reported here + are those that refer to when EASTL's container optimizations have been complete. These optimizations may not have been + completed as you are reading this.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

Container

+
+

Stores

+
Container Overhead (32 bit)Container Overhead (64 bit) +

Node Overhead (32 bit)

+
Node Overhead (64 bit) +

Iterator category

+
size() efficiencyoperator[] efficiency +

Insert efficiency

+
+

Erase via Iterator efficiency

+
+

Find efficiency

+
+

Sort efficiency

+
slistT81648fn-11nn+
+

list

+
+

T

+
1224 +

8

+
16 +

b

+
n- +

1

+
+

1

+
+

n

+
+

n log(n)

+
intrusive_slistT4848fn-11nn+
intrusive_listT816816bn-11nn log(n)
arrayT0000r11--nn log(n)
vectorT163200r111 at end, else n1 at end, else nnn log(n)
vector_setT163200r111 at end, else n1 at end, else nlog(n)1
vector_multisetT163200r111 at end, else n1 at end, else nlog(n)1
vector_mapKey, T163200r111 at end, else n1 at end, else nlog(n)1
vector_multimapKey, T163200r111 at end, else n1 at end, else nlog(n)1
dequeT448400r111 at begin or end,
+else n / 2
1 at begin or end,
+else n / 2
nn log(n)
bit_vectorbool81600r111 at end, else n1 at end, else nnn log(n)
string (all types)T163200r111 at end, else n1 at end, else nnn log(n)
setT24441628b1-log(n)log(n)log(n)1
multisetT24441628b1-log(n)log(n)log(n)1
mapKey, T24441628b1log(n)log(n)log(n)log(n)1
multimapKey, T24441628b1-log(n)log(n)log(n)1
hash_setT162048b1-111-
hash_multisetT162048b1-1
11-
hash_mapKey, T162048b1-111-
hash_multimapKey, T162048b1-111-
intrusive_hash_setT162048b1-111-
intrusive_hash_multisetT162048b1-111-
intrusive_hash_mapT (Key == T)162048b1-111-
intrusive_hash_multimapT (Key == T) 162048b1-111-
+
    +
  • - means that the operation does not exist.
  • +
  • 1 means amortized constant time. Also known as O(1)
  • +
  • n means time proportional to the container size. Also known as O(n)
  • +
  • log(n) means time proportional to the natural logarithm of the container size. Also known as O(log(n))
  • +
  • n log(n) means time proportional to log(n) times the size of the container. Also known as O(n log(n))
  • +
  • n+ means that the time is at least n, and possibly higher.
  • +
  • Iterator meanings are: f = forward iterator; b = bidirectional iterator, r = random iterator.
  • +
  • Overhead indicates approximate per-element overhead memory required in bytes. Overhead doesn't include possible +additional overhead that may be imposed by the memory heap used to allocate nodes. General heaps tend to have between 4 +and 16 bytes of overhead per allocation, depending on the heap.
  • +
  • Some overhead values are dependent on the structure alignment characteristics in effect. The values reported here +are those that would be in effect for a system that requires pointers to be aligned on boundaries of their size and +allocations with a minimum of 4 bytes (thus one byte values get rounded up to 4).
  • +
  • Some overhead values are dependent on the size_type used by containers. size_type defaults to size_t, but it is possible to force it to be 4 bytes for 64 bit machines by defining EASTL_SIZE_T_32BIT.
  • +
  • Inserting at the end of a vector may cause the vector to be resized; resizing a vector is O(n). However, the +amortized time complexity for vector insertions at the end is constant.
  • +
  • Sort assumes the usage of the best possible sort for a large container of random data. Some sort algorithms (e.g. +quick_sort) require random access iterators and so the sorting of some containers requires a different sort algorithm. +We do not include bucket or radix sorts, as they are always O(n).
  • +
  • Some containers (e.g. deque, hash*) have unusual data structures that make per-container and per-node overhead +calculations not quite account for all memory.
  • +
+
+End of document
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ + diff --git a/doc/html/EASTLDoc.css b/doc/html/EASTLDoc.css new file mode 100644 index 0000000..b2656d8 --- /dev/null +++ b/doc/html/EASTLDoc.css @@ -0,0 +1,86 @@ +body +{ + font-family: Georgia, "Times New Roman", Times, serif; + font-size: 12pt; +} + +h1 +{ + font-family: Verdana, Arial, Helvetica, sans-serif; + display: block; + background-color: #BBCCDD; + border: 2px solid #000000; + font-size: 16pt; + font-weight: bold; + padding: 6px; +} + +h2 +{ + font-size: 14pt; + font-family: Verdana; + border-bottom: 2px solid black; +} + +h3 +{ + font-family: Verdana; + font-size: 13pt; + font-weight: bold; +} + +.code-example +{ + display: block; + background-color: #D1DDE9; + margin-left: 3em; + margin-right: 3em; + margin-top: 1em; + margin-bottom: 1em; + padding: 8px; + border: 2px solid #7993C8; + font-family: "Courier New", Courier, mono; + font-size: 10pt; + white-space: pre; +} + +.code-example-span +{ + font-family: "Courier New", Courier, mono; + font-size: 10pt; + white-space: pre; +} + +.code-example-comment +{ + background-color: #e0e0f0; + padding: 0px 0px; + font-family: "Courier New", Courier, mono; + font-size: 10pt; + white-space: pre; + color: #999999; + margin: auto auto; +} + + +.faq-question +{ + background-color: #D9E2EC; + font-size: 12pt; + font-weight: bold; + margin-top: 0em; + padding-left:5px; + padding-right:8px; + padding-top:2px; + padding-bottom:3px; + margin-bottom: 0.5em; +} + +.faq-answer +{ + display: block; + margin: 4pt 1em 0.8em; +} +.indented { + margin-left: 50px; +} diff --git a/doc/quick-reference.pdf b/doc/quick-reference.pdf new file mode 100644 index 0000000..b62ff9d Binary files /dev/null and b/doc/quick-reference.pdf differ diff --git a/include/EASTL/algorithm.h b/include/EASTL/algorithm.h new file mode 100644 index 0000000..0e0522a --- /dev/null +++ b/include/EASTL/algorithm.h @@ -0,0 +1,4098 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. 
All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements some of the primary algorithms from the C++ STL +// algorithm library. These versions are just like that STL versions and so +// are redundant. They are provided solely for the purpose of projects that +// either cannot use standard C++ STL or want algorithms that have guaranteed +// identical behaviour across platforms. +/////////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// Definitions +// +// You will notice that we are very particular about the templated typenames +// we use here. You will notice that we follow the C++ standard closely in +// these respects. Each of these typenames have a specific meaning; +// this is why we don't just label templated arguments with just letters +// such as T, U, V, A, B. Here we provide a quick reference for the typenames +// we use. See the C++ standard, section 25-8 for more details. +// -------------------------------------------------------------- +// typename Meaning +// -------------------------------------------------------------- +// T The value type. +// Compare A function which takes two arguments and returns the lesser of the two. +// Predicate A function which takes one argument returns true if the argument meets some criteria. +// BinaryPredicate A function which takes two arguments and returns true if some criteria is met (e.g. they are equal). +// StrickWeakOrdering A BinaryPredicate that compares two objects, returning true if the first precedes the second. Like Compare but has additional requirements. Used for sorting routines. +// Function A function which takes one argument and applies some operation to the target. +// Size A count or size. 
+// Generator A function which takes no arguments and returns a value (which will usually be assigned to an object). +// UnaryOperation A function which takes one argument and returns a value (which will usually be assigned to second object). +// BinaryOperation A function which takes two arguments and returns a value (which will usually be assigned to a third object). +// InputIterator An input iterator (iterator you read from) which allows reading each element only once and only in a forward direction. +// ForwardIterator An input iterator which is like InputIterator except it can be reset back to the beginning. +// BidirectionalIterator An input iterator which is like ForwardIterator except it can be read in a backward direction as well. +// RandomAccessIterator An input iterator which can be addressed like an array. It is a superset of all other input iterators. +// OutputIterator An output iterator (iterator you write to) which allows writing each element only once in only in a forward direction. +// +// Note that with iterators that a function which takes an InputIterator will +// also work with a ForwardIterator, BidirectionalIterator, or RandomAccessIterator. +// The given iterator type is merely the -minimum- supported functionality the +// iterator must support. +/////////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// Optimizations +// +// There are a number of opportunities for opptimizations that we take here +// in this library. The most obvious kinds are those that subsitute memcpy +// in the place of a conventional loop for data types with which this is +// possible. The algorithms here are optimized to a higher level than currently +// available C++ STL algorithms from vendors such as Microsoft. 
This is especially +// so for game programming on console devices, as we do things such as reduce +// branching relative to other STL algorithm implementations. However, the +// proper implementation of these algorithm optimizations is a fairly tricky +// thing. +// +// The various things we look to take advantage of in order to implement +// optimizations include: +// - Taking advantage of random access iterators. +// - Taking advantage of POD (plain old data) data types. +// - Taking advantage of type_traits in general. +// - Reducing branching and taking advantage of likely branch predictions. +// - Taking advantage of issues related to pointer and reference aliasing. +// - Improving cache coherency during memory accesses. +// - Making code more likely to be inlinable by the compiler. +// +/////////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// Supported Algorithms +// +// Algorithms that we implement are listed here. Note that these items are not +// all within this header file, as we split up the header files in order to +// improve compilation performance. Items marked with '+' are items that are +// extensions which don't exist in the C++ standard. 
+// +// ------------------------------------------------------------------------------- +// Algorithm Notes +// ------------------------------------------------------------------------------- +// adjacent_find +// adjacent_find +// all_of C++11 +// any_of C++11 +// none_of C++11 +// binary_search +// binary_search +// +binary_search_i +// +binary_search_i +// +change_heap Found in heap.h +// +change_heap Found in heap.h +// copy +// copy_if C++11 +// copy_n C++11 +// copy_backward +// count +// count_if +// equal +// equal +// equal_range +// equal_range +// fill +// fill_n +// find +// find_end +// find_end +// find_first_of +// find_first_of +// +find_first_not_of +// +find_first_not_of +// +find_last_of +// +find_last_of +// +find_last_not_of +// +find_last_not_of +// find_if +// find_if_not +// for_each +// generate +// generate_n +// +identical +// +identical +// iter_swap +// lexicographical_compare +// lexicographical_compare +// lower_bound +// lower_bound +// make_heap Found in heap.h +// make_heap Found in heap.h +// min +// min +// max +// max +// +min_alt Exists to work around the problem of conflicts with min/max #defines on some systems. 
+// +min_alt +// +max_alt +// +max_alt +// +median +// +median +// merge Found in sort.h +// merge Found in sort.h +// min_element +// min_element +// max_element +// max_element +// mismatch +// mismatch +// move +// move_backward +// nth_element Found in sort.h +// nth_element Found in sort.h +// partial_sort Found in sort.h +// partial_sort Found in sort.h +// push_heap Found in heap.h +// push_heap Found in heap.h +// pop_heap Found in heap.h +// pop_heap Found in heap.h +// random_shuffle +// remove +// remove_if +// remove_copy +// remove_copy_if +// +remove_heap Found in heap.h +// +remove_heap Found in heap.h +// replace +// replace_if +// replace_copy +// replace_copy_if +// reverse_copy +// reverse +// rotate +// rotate_copy +// search +// search +// search_n +// set_difference +// set_difference +// set_intersection +// set_intersection +// set_symmetric_difference +// set_symmetric_difference +// sort Found in sort.h +// sort Found in sort.h +// sort_heap Found in heap.h +// sort_heap Found in heap.h +// stable_sort Found in sort.h +// stable_sort Found in sort.h +// swap +// swap_ranges +// transform +// transform +// unique +// unique +// upper_bound +// upper_bound +// is_permutation +// is_permutation +// next_permutation +// next_permutation +// +// Algorithms from the C++ standard that we don't implement are listed here. +// Most of these items are absent because they aren't used very often. +// They also happen to be the more complicated than other algorithms. +// However, we can implement any of these functions for users that might +// need them. 
+// partition +// prev_permutation
+ /// + /// Returns: The first iterator i in the range [first, last) such that + /// for any iterator j in the range [first, last) the following corresponding + /// condition holds: !(*j < *i). + /// + /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the + /// corresponding comparisons. + /// + template + ForwardIterator min_element(ForwardIterator first, ForwardIterator last) + { + if(first != last) + { + ForwardIterator currentMin = first; + + while(++first != last) + { + if(*first < *currentMin) + currentMin = first; + } + return currentMin; + } + return first; + } + + + /// min_element + /// + /// min_element finds the smallest element in the range [first, last). + /// It returns the first iterator i in [first, last) such that no other + /// iterator in [first, last) points to a value smaller than *i. + /// The return value is last if and only if [first, last) is an empty range. + /// + /// Returns: The first iterator i in the range [first, last) such that + /// for any iterator j in the range [first, last) the following corresponding + /// conditions hold: compare(*j, *i) == false. + /// + /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the + /// corresponding comparisons. + /// + template + ForwardIterator min_element(ForwardIterator first, ForwardIterator last, Compare compare) + { + if(first != last) + { + ForwardIterator currentMin = first; + + while(++first != last) + { + if(compare(*first, *currentMin)) + currentMin = first; + } + return currentMin; + } + return first; + } + + + /// max_element + /// + /// max_element finds the largest element in the range [first, last). + /// It returns the first iterator i in [first, last) such that no other + /// iterator in [first, last) points to a value greater than *i. + /// The return value is last if and only if [first, last) is an empty range. 
+ /// + /// Returns: The first iterator i in the range [first, last) such that + /// for any iterator j in the range [first, last) the following corresponding + /// condition holds: !(*i < *j). + /// + /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the + /// corresponding comparisons. + /// + template + ForwardIterator max_element(ForwardIterator first, ForwardIterator last) + { + if(first != last) + { + ForwardIterator currentMax = first; + + while(++first != last) + { + if(*currentMax < *first) + currentMax = first; + } + return currentMax; + } + return first; + } + + + /// max_element + /// + /// max_element finds the largest element in the range [first, last). + /// It returns the first iterator i in [first, last) such that no other + /// iterator in [first, last) points to a value greater than *i. + /// The return value is last if and only if [first, last) is an empty range. + /// + /// Returns: The first iterator i in the range [first, last) such that + /// for any iterator j in the range [first, last) the following corresponding + /// condition holds: compare(*i, *j) == false. + /// + /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the + /// corresponding comparisons. + /// + template + ForwardIterator max_element(ForwardIterator first, ForwardIterator last, Compare compare) + { + if(first != last) + { + ForwardIterator currentMax = first; + + while(++first != last) + { + if(compare(*currentMax, *first)) + currentMax = first; + } + return currentMax; + } + return first; + } + + + #if EASTL_MINMAX_ENABLED + + /// min + /// + /// Min returns the lesser of its two arguments; it returns the first + /// argument if neither is less than the other. The two arguments are + /// compared with operator <. + /// + /// This min and our other min implementations are defined as returning: + /// b < a ? b : a + /// which for example may in practice result in something different than: + /// b <= a ? 
/// The specialization returns T by value instead of reference, which is + /// not what the Standard specifies.
/// This max and our other max implementations are defined as returning: + /// a < b ? b : a
/// Max returns the greater of its two arguments; it returns the first + /// argument if neither is greater than the other. The two arguments are + /// compared with the Compare function (or function object), which + /// takes two arguments and returns true if the first is less than + /// the second.
b : a; + } + #endif + + + /// max_alt + /// + /// This is an alternative version of max that avoids any possible + /// collisions with Microsoft #defines of min and max. + /// + template + inline const T& + max_alt(const T& a, const T& b, Compare compare) + { + return compare(a, b) ? b : a; + } + + + /// min(std::initializer_list) + /// + template + T min(std::initializer_list ilist) + { + return *eastl::min_element(ilist.begin(), ilist.end()); + } + + /// min(std::initializer_list, Compare) + /// + template + T min(std::initializer_list ilist, Compare compare) + { + return *eastl::min_element(ilist.begin(), ilist.end(), compare); + } + + + /// max(std::initializer_list) + /// + template + T max(std::initializer_list ilist) + { + return *eastl::max_element(ilist.begin(), ilist.end()); + } + + /// max(std::initializer_list, Compare) + /// + template + T max(std::initializer_list ilist, Compare compare) + { + return *eastl::max_element(ilist.begin(), ilist.end(), compare); + } + + + /// minmax_element + /// + /// Returns: make_pair(first, first) if [first, last) is empty, otherwise make_pair(m, M), + /// where m is the first iterator in [first,last) such that no iterator in the range + /// refers to a smaller element, and where M is the last iterator in [first,last) such + /// that no iterator in the range refers to a larger element. + /// + /// Complexity: At most max([(3/2)*(N - 1)], 0) applications of the corresponding predicate, + /// where N is distance(first, last). 
+ /// + template + eastl::pair + minmax_element(ForwardIterator first, ForwardIterator last, Compare compare) + { + eastl::pair result(first, first); + + if(!(first == last) && !(++first == last)) + { + if(compare(*first, *result.first)) + { + result.second = result.first; + result.first = first; + } + else + result.second = first; + + while(++first != last) + { + ForwardIterator i = first; + + if(++first == last) + { + if(compare(*i, *result.first)) + result.first = i; + else if(!compare(*i, *result.second)) + result.second = i; + break; + } + else + { + if(compare(*first, *i)) + { + if(compare(*first, *result.first)) + result.first = first; + + if(!compare(*i, *result.second)) + result.second = i; + } + else + { + if(compare(*i, *result.first)) + result.first = i; + + if(!compare(*first, *result.second)) + result.second = first; + } + } + } + } + + return result; + } + + + template + eastl::pair + minmax_element(ForwardIterator first, ForwardIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + + return eastl::minmax_element(first, last, eastl::less()); + } + + + + /// minmax + /// + /// Requires: Type T shall be LessThanComparable. + /// Returns: pair(b, a) if b is smaller than a, and pair(a, b) otherwise. + /// Remarks: Returns pair(a, b) when the arguments are equivalent. + /// Complexity: Exactly one comparison. + /// + + // The following optimization is a problem because it changes the return value in a way that would break + // users unless they used auto (e.g. auto result = minmax(17, 33); ) + // + // template + // inline EA_CONSTEXPR typename eastl::enable_if::value, eastl::pair >::type + // minmax(T a, T b) + // { + // return (b < a) ? eastl::make_pair(b, a) : eastl::make_pair(a, b); + // } + // + // template + // inline typename eastl::enable_if::value, eastl::pair >::type + // minmax(const T& a, const T& b) + // { + // return (b < a) ? 
/// median finds which element of three (a, b, c) is in-between the other two. + /// If two or more elements are equal, the first (e.g. a before b) is chosen.
/// median finds which element of three (a, b, c) is in-between the other two.
+ /// + template + inline T&& median(T&& a, T&& b, T&& c, Compare compare) + { + return eastl::forward(median_impl(eastl::forward(a), eastl::forward(b), eastl::forward(c), compare)); + } + + + + + /// all_of + /// + /// Returns: true if the unary predicate p returns true for all elements in the range [first, last) + /// + template + inline bool all_of(InputIterator first, InputIterator last, Predicate p) + { + for(; first != last; ++first) + { + if(!p(*first)) + return false; + } + return true; + } + + + /// any_of + /// + /// Returns: true if the unary predicate p returns true for any of the elements in the range [first, last) + /// + template + inline bool any_of(InputIterator first, InputIterator last, Predicate p) + { + for(; first != last; ++first) + { + if(p(*first)) + return true; + } + return false; + } + + + /// none_of + /// + /// Returns: true if the unary predicate p returns true for none of the elements in the range [first, last) + /// + template + inline bool none_of(InputIterator first, InputIterator last, Predicate p) + { + for(; first != last; ++first) + { + if(p(*first)) + return false; + } + return true; + } + + + /// adjacent_find + /// + /// Returns: The first iterator i such that both i and i + 1 are in the range + /// [first, last) for which the following corresponding conditions hold: *i == *(i + 1). + /// Returns last if no such iterator is found. + /// + /// Complexity: Exactly 'find(first, last, value) - first' applications of the corresponding predicate. + /// + template + inline ForwardIterator + adjacent_find(ForwardIterator first, ForwardIterator last) + { + if(first != last) + { + ForwardIterator i = first; + + for(++i; i != last; ++i) + { + if(*first == *i) + return first; + first = i; + } + } + return last; + } + + + + /// adjacent_find + /// + /// Returns: The first iterator i such that both i and i + 1 are in the range + /// [first, last) for which the following corresponding conditions hold: predicate(*i, *(i + 1)) != false. 
+ /// Returns last if no such iterator is found. + /// + /// Complexity: Exactly 'find(first, last, value) - first' applications of the corresponding predicate. + /// + template + inline ForwardIterator + adjacent_find(ForwardIterator first, ForwardIterator last, BinaryPredicate predicate) + { + if(first != last) + { + ForwardIterator i = first; + + for(++i; i != last; ++i) + { + if(predicate(*first, *i)) + return first; + first = i; + } + } + return last; + } + + + /// shuffle + /// + /// New for C++11 + /// Randomizes a sequence of values via a user-supplied UniformRandomNumberGenerator. + /// The difference between this and the original random_shuffle function is that this uses the more + /// advanced and flexible UniformRandomNumberGenerator interface as opposed to the more + /// limited RandomNumberGenerator interface of random_shuffle. + /// + /// Effects: Shuffles the elements in the range [first, last) with uniform distribution. + /// + /// Complexity: Exactly '(last - first) - 1' swaps. + /// + /// Example usage: + /// struct Rand{ eastl_size_t operator()(eastl_size_t n) { return (eastl_size_t)(rand() % n); } }; // Note: The C rand function is poor and slow. + /// Rand randInstance; + /// shuffle(pArrayBegin, pArrayEnd, randInstance); + /// + // See the C++11 Standard, 26.5.1.3, Uniform random number generator requirements. 
+ // Also http://en.cppreference.com/w/cpp/numeric/random/uniform_int_distribution + + template + void shuffle(RandomAccessIterator first, RandomAccessIterator last, UniformRandomNumberGenerator&& urng) + { + if(first != last) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::make_unsigned::type unsigned_difference_type; + typedef typename eastl::uniform_int_distribution uniform_int_distribution; + typedef typename uniform_int_distribution::param_type uniform_int_distribution_param_type; + + uniform_int_distribution uid; + + for(RandomAccessIterator i = first + 1; i != last; ++i) + iter_swap(i, first + uid(urng, uniform_int_distribution_param_type(0, i - first))); + } + } + + + /// random_shuffle + /// + /// Randomizes a sequence of values. + /// + /// Effects: Shuffles the elements in the range [first, last) with uniform distribution. + /// + /// Complexity: Exactly '(last - first) - 1' swaps. + /// + /// Example usage: + /// eastl_size_t Rand(eastl_size_t n) { return (eastl_size_t)(rand() % n); } // Note: The C rand function is poor and slow. + /// pointer_to_unary_function randInstance(Rand); + /// random_shuffle(pArrayBegin, pArrayEnd, randInstance); + /// + /// Example usage: + /// struct Rand{ eastl_size_t operator()(eastl_size_t n) { return (eastl_size_t)(rand() % n); } }; // Note: The C rand function is poor and slow. + /// Rand randInstance; + /// random_shuffle(pArrayBegin, pArrayEnd, randInstance); + /// + template + inline void random_shuffle(RandomAccessIterator first, RandomAccessIterator last, RandomNumberGenerator&& rng) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + + // We must do 'rand((i - first) + 1)' here and cannot do 'rand(last - first)', + // as it turns out that the latter results in unequal distribution probabilities. 
+ // http://www.cigital.com/papers/download/developer_gambling.php + + for(RandomAccessIterator i = first + 1; i < last; ++i) + iter_swap(i, first + (difference_type)rng((eastl_size_t)((i - first) + 1))); + } + + + /// random_shuffle + /// + /// Randomizes a sequence of values. + /// + /// Effects: Shuffles the elements in the range [first, last) with uniform distribution. + /// + /// Complexity: Exactly '(last - first) - 1' swaps. + /// + /// Example usage: + /// random_shuffle(pArrayBegin, pArrayEnd); + /// + /// *** Disabled until we decide if we want to get into the business of writing random number generators. *** + /// + /// template + /// inline void random_shuffle(RandomAccessIterator first, RandomAccessIterator last) + /// { + /// for(RandomAccessIterator i = first + 1; i < last; ++i) + /// iter_swap(i, first + SomeRangedRandomNumberGenerator((i - first) + 1)); + /// } + + + + + + + /// move_n + /// + /// Same as move(InputIterator, InputIterator, OutputIterator) except based on count instead of iterator range. + /// + template + inline OutputIterator + move_n_impl(InputIterator first, Size n, OutputIterator result, EASTL_ITC_NS::input_iterator_tag) + { + for(; n > 0; --n) + *result++ = eastl::move(*first++); + return result; + } + + template + inline OutputIterator + move_n_impl(RandomAccessIterator first, Size n, OutputIterator result, EASTL_ITC_NS::random_access_iterator_tag) + { + return eastl::move(first, first + n, result); // Take advantage of the optimizations present in the move algorithm. + } + + + template + inline OutputIterator + move_n(InputIterator first, Size n, OutputIterator result) + { + typedef typename eastl::iterator_traits::iterator_category IC; + return eastl::move_n_impl(first, n, result, IC()); + } + + + + /// copy_n + /// + /// Same as copy(InputIterator, InputIterator, OutputIterator) except based on count instead of iterator range. 
// Implementation for moving or copying both trivial and non-trivial data via a lesser iterator than random-access.
// Specialization for moving non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when it's a compile-time const.
There's simple way for us to take advantage of a random access output iterator, + // as the range is specified by the input instead of the output, and distance(first, last) for a non-random-access iterator is potentially slow. + template <> + struct move_and_copy_backward_helper + { + template + static BidirectionalIterator2 move_or_copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + + for(difference_type n = (last - first); n > 0; --n) + *--resultEnd = *--last; + return resultEnd; // resultEnd now points to the beginning of the destination sequence instead of the end. + } + }; + + // Specialization for when we can use memmove/memcpy. See the notes above for what conditions allow this. + template + struct move_and_copy_backward_helper + { + template + static T* move_or_copy_backward(const T* first, const T* last, T* resultEnd) + { + return (T*)memmove(resultEnd - (last - first), first, (size_t)((uintptr_t)last - (uintptr_t)first)); + // We could use memcpy here if there's no range overlap, but memcpy is rarely much faster than memmove. 
+ } + }; + + template + inline BidirectionalIterator2 move_and_copy_backward_chooser(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + typedef typename eastl::iterator_traits::iterator_category IIC; + typedef typename eastl::iterator_traits::iterator_category OIC; + typedef typename eastl::iterator_traits::value_type value_type_input; + typedef typename eastl::iterator_traits::value_type value_type_output; + + const bool canBeMemmoved = eastl::is_trivially_copyable::value && + eastl::is_same::value && + (eastl::is_pointer::value || eastl::is_same::value) && + (eastl::is_pointer::value || eastl::is_same::value); + + return eastl::move_and_copy_backward_helper::move_or_copy_backward(first, last, resultEnd); // Need to chose based on the input iterator tag and not the output iterator tag, because containers accept input ranges of iterator types different than self. + } + + + // We have a second layer of unwrap_iterator calls because the original iterator might be something like move_iterator > (i.e. doubly-wrapped). + template + inline BidirectionalIterator2 move_and_copy_backward_unwrapper(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + return BidirectionalIterator2(eastl::move_and_copy_backward_chooser(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), eastl::unwrap_iterator(resultEnd))); // Have to convert to BidirectionalIterator2 because result.base() could be a T* + } + + + /// move_backward + /// + /// The elements are moved in reverse order (the last element is moved first), but their relative order is preserved. + /// After this operation the elements in the moved-from range will still contain valid values of the + /// appropriate type, but not necessarily the same values as before the move. + /// Returns the beginning of the result range. 
+ /// Note: When moving between containers, the dest range must be valid; this function doesn't resize containers. + /// Note: If result is within [first, last), move must be used instead of move_backward. + /// + /// Example usage: + /// eastl::move_backward(myArray.begin(), myArray.end(), myDestArray.end()); + /// + /// Reference implementation: + /// template + /// BidirectionalIterator2 move_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + /// { + /// while(last != first) + /// *--resultEnd = eastl::move(*--last); + /// return resultEnd; + /// } + /// + template + inline BidirectionalIterator2 move_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + return eastl::move_and_copy_backward_unwrapper(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), resultEnd); + } + + + /// copy_backward + /// + /// copies memory in the range of [first, last) to the range *ending* with result. + /// + /// Effects: Copies elements in the range [first, last) into the range + /// [result - (last - first), result) starting from last 1 and proceeding to first. + /// For each positive integer n <= (last - first), performs *(result n) = *(last - n). + /// + /// Requires: result shall not be in the range [first, last). + /// + /// Returns: result - (last - first). That is, returns the beginning of the result range. + /// + /// Complexity: Exactly 'last - first' assignments. + /// + template + inline BidirectionalIterator2 copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 resultEnd) + { + const bool isMove = eastl::is_move_iterator::value; EA_UNUSED(isMove); + + return eastl::move_and_copy_backward_unwrapper(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), resultEnd); + } + + + /// count + /// + /// Counts the number of items in the range of [first, last) which equal the input value. 
+ /// + /// Effects: Returns the number of iterators i in the range [first, last) for which the + /// following corresponding conditions hold: *i == value. + /// + /// Complexity: At most 'last - first' applications of the corresponding predicate. + /// + /// Note: The predicate version of count is count_if and not another variation of count. + /// This is because both versions would have three parameters and there could be ambiguity. + /// + template + inline typename eastl::iterator_traits::difference_type + count(InputIterator first, InputIterator last, const T& value) + { + typename eastl::iterator_traits::difference_type result = 0; + + for(; first != last; ++first) + { + if(*first == value) + ++result; + } + return result; + } + + + // C++ doesn't define a count with predicate, as it can effectively be synthesized via count_if + // with an appropriate predicate. However, it's often simpler to just have count with a predicate. + template + inline typename eastl::iterator_traits::difference_type + count(InputIterator first, InputIterator last, const T& value, Predicate predicate) + { + typename eastl::iterator_traits::difference_type result = 0; + + for(; first != last; ++first) + { + if(predicate(*first, value)) + ++result; + } + return result; + } + + + /// count_if + /// + /// Counts the number of items in the range of [first, last) which match + /// the input value as defined by the input predicate function. + /// + /// Effects: Returns the number of iterators i in the range [first, last) for which the + /// following corresponding conditions hold: predicate(*i) != false. + /// + /// Complexity: At most 'last - first' applications of the corresponding predicate. + /// + /// Note: The non-predicate version of count_if is count and not another variation of count_if. + /// This is because both versions would have three parameters and there could be ambiguity. 
+ /// + template + inline typename eastl::iterator_traits::difference_type + count_if(InputIterator first, InputIterator last, Predicate predicate) + { + typename eastl::iterator_traits::difference_type result = 0; + + for(; first != last; ++first) + { + if(predicate(*first)) + ++result; + } + return result; + } + + + /// find + /// + /// finds the value within the unsorted range of [first, last). + /// + /// Returns: The first iterator i in the range [first, last) for which + /// the following corresponding conditions hold: *i == value. + /// Returns last if no such iterator is found. + /// + /// Complexity: At most 'last - first' applications of the corresponding predicate. + /// This is a linear search and not a binary one. + /// + /// Note: The predicate version of find is find_if and not another variation of find. + /// This is because both versions would have three parameters and there could be ambiguity. + /// + template + inline InputIterator + find(InputIterator first, InputIterator last, const T& value) + { + while((first != last) && !(*first == value)) // Note that we always express value comparisons in terms of < or ==. + ++first; + return first; + } + + + // C++ doesn't define a find with predicate, as it can effectively be synthesized via find_if + // with an appropriate predicate. However, it's often simpler to just have find with a predicate. + template + inline InputIterator + find(InputIterator first, InputIterator last, const T& value, Predicate predicate) + { + while((first != last) && !predicate(*first, value)) + ++first; + return first; + } + + + + /// find_if + /// + /// finds the value within the unsorted range of [first, last). + /// + /// Returns: The first iterator i in the range [first, last) for which + /// the following corresponding conditions hold: pred(*i) != false. + /// Returns last if no such iterator is found. + /// If the sequence of elements to search for (i.e. 
first2 - last2) is empty, + /// the find always fails and last1 will be returned. + /// + /// Complexity: At most 'last - first' applications of the corresponding predicate. + /// + /// Note: The non-predicate version of find_if is find and not another variation of find_if. + /// This is because both versions would have three parameters and there could be ambiguity. + /// + template + inline InputIterator + find_if(InputIterator first, InputIterator last, Predicate predicate) + { + while((first != last) && !predicate(*first)) + ++first; + return first; + } + + + + /// find_if_not + /// + /// find_if_not works the same as find_if except it tests for if the predicate + /// returns false for the elements instead of true. + /// + template + inline InputIterator + find_if_not(InputIterator first, InputIterator last, Predicate predicate) + { + for(; first != last; ++first) + { + if(!predicate(*first)) + return first; + } + return last; + } + + + + + /// find_first_of + /// + /// find_first_of is similar to find in that it performs linear search through + /// a range of ForwardIterators. The difference is that while find searches + /// for one particular value, find_first_of searches for any of several values. + /// Specifically, find_first_of searches for the first occurrance in the + /// range [first1, last1) of any of the elements in [first2, last2). + /// This function is thus similar to the strpbrk standard C string function. + /// If the sequence of elements to search for (i.e. first2-last2) is empty, + /// the find always fails and last1 will be returned. + /// + /// Effects: Finds an element that matches one of a set of values. + /// + /// Returns: The first iterator i in the range [first1, last1) such that for some + /// integer j in the range [first2, last2) the following conditions hold: *i == *j. + /// Returns last1 if no such iterator is found. 
+ /// + /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the + /// corresponding predicate. + /// + template + ForwardIterator1 + find_first_of(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2) + { + for(; first1 != last1; ++first1) + { + for(ForwardIterator2 i = first2; i != last2; ++i) + { + if(*first1 == *i) + return first1; + } + } + return last1; + } + + + /// find_first_of + /// + /// find_first_of is similar to find in that it performs linear search through + /// a range of ForwardIterators. The difference is that while find searches + /// for one particular value, find_first_of searches for any of several values. + /// Specifically, find_first_of searches for the first occurrance in the + /// range [first1, last1) of any of the elements in [first2, last2). + /// This function is thus similar to the strpbrk standard C string function. + /// + /// Effects: Finds an element that matches one of a set of values. + /// + /// Returns: The first iterator i in the range [first1, last1) such that for some + /// integer j in the range [first2, last2) the following conditions hold: pred(*i, *j) != false. + /// Returns last1 if no such iterator is found. + /// + /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the + /// corresponding predicate. + /// + template + ForwardIterator1 + find_first_of(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + BinaryPredicate predicate) + { + for(; first1 != last1; ++first1) + { + for(ForwardIterator2 i = first2; i != last2; ++i) + { + if(predicate(*first1, *i)) + return first1; + } + } + return last1; + } + + + /// find_first_not_of + /// + /// Searches through first range for the first element that does not belong the second input range. + /// This is very much like the C++ string find_first_not_of function. 
+ /// + /// Returns: The first iterator i in the range [first1, last1) such that for some + /// integer j in the range [first2, last2) the following conditions hold: !(*i == *j). + /// Returns last1 if no such iterator is found. + /// + /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the + /// corresponding predicate. + /// + template + ForwardIterator1 + find_first_not_of(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2) + { + for(; first1 != last1; ++first1) + { + if(eastl::find(first2, last2, *first1) == last2) + break; + } + + return first1; + } + + + + /// find_first_not_of + /// + /// Searches through first range for the first element that does not belong the second input range. + /// This is very much like the C++ string find_first_not_of function. + /// + /// Returns: The first iterator i in the range [first1, last1) such that for some + /// integer j in the range [first2, last2) the following conditions hold: pred(*i, *j) == false. + /// Returns last1 if no such iterator is found. + /// + /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the + /// corresponding predicate. 
+ /// + template + inline ForwardIterator1 + find_first_not_of(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + BinaryPredicate predicate) + { + typedef typename eastl::iterator_traits::value_type value_type; + + for(; first1 != last1; ++first1) + { + if(eastl::find_if(first2, last2, eastl::bind1st(predicate, *first1)) == last2) + break; + } + + return first1; + } + + + template + inline BidirectionalIterator1 + find_last_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2) + { + if((first1 != last1) && (first2 != last2)) + { + BidirectionalIterator1 it1(last1); + + while((--it1 != first1) && (eastl::find(first2, last2, *it1) == last2)) + ; // Do nothing + + if((it1 != first1) || (eastl::find(first2, last2, *it1) != last2)) + return it1; + } + + return last1; + } + + + template + BidirectionalIterator1 + find_last_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + BinaryPredicate predicate) + { + typedef typename eastl::iterator_traits::value_type value_type; + + if((first1 != last1) && (first2 != last2)) + { + BidirectionalIterator1 it1(last1); + + while((--it1 != first1) && (eastl::find_if(first2, last2, eastl::bind1st(predicate, *it1)) == last2)) + ; // Do nothing + + if((it1 != first1) || (eastl::find_if(first2, last2, eastl::bind1st(predicate, *it1)) != last2)) + return it1; + } + + return last1; + } + + + template + inline BidirectionalIterator1 + find_last_not_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2) + { + if((first1 != last1) && (first2 != last2)) + { + BidirectionalIterator1 it1(last1); + + while((--it1 != first1) && (eastl::find(first2, last2, *it1) != last2)) + ; // Do nothing + + if((it1 != first1) || (eastl::find( first2, last2, *it1) == last2)) + return it1; + } + + return last1; + } + + + template + 
inline BidirectionalIterator1 + find_last_not_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + BinaryPredicate predicate) + { + typedef typename eastl::iterator_traits::value_type value_type; + + if((first1 != last1) && (first2 != last2)) + { + BidirectionalIterator1 it1(last1); + + while((--it1 != first1) && (eastl::find_if(first2, last2, eastl::bind1st(predicate, *it1)) != last2)) + ; // Do nothing + + if((it1 != first1) || (eastl::find_if(first2, last2, eastl::bind1st(predicate, *it1))) != last2) + return it1; + } + + return last1; + } + + + + + /// for_each + /// + /// Calls the Function function for each value in the range [first, last). + /// Function takes a single parameter: the current value. + /// + /// Effects: Applies function to the result of dereferencing every iterator in + /// the range [first, last), starting from first and proceeding to last 1. + /// + /// Returns: function. + /// + /// Complexity: Applies function exactly 'last - first' times. + /// + /// Note: If function returns a result, the result is ignored. + /// + template + inline Function + for_each(InputIterator first, InputIterator last, Function function) + { + for(; first != last; ++first) + function(*first); + return function; + } + + /// for_each_n + /// + /// Calls the Function function for each value in the range [first, first + n). + /// Function takes a single parameter: the current value. + /// + /// Effects: Applies function to the result of dereferencing every iterator in + /// the range [first, first + n), starting from first and proceeding to last 1. + /// + /// Returns: first + n. + /// + /// Complexity: Applies function exactly 'first + n' times. + /// + /// Note: + //// * If function returns a result, the result is ignored. + //// * If n < 0, behaviour is undefined. 
+ /// + template + EA_CPP14_CONSTEXPR inline InputIterator + for_each_n(InputIterator first, Size n, Function function) + { + for (Size i = 0; i < n; ++first, i++) + function(*first); + return first; + } + + + /// generate + /// + /// Iterates the range of [first, last) and assigns to each element the + /// result of the function generator. Generator is a function which takes + /// no arguments. + /// + /// Complexity: Exactly 'last - first' invocations of generator and assignments. + /// + template + inline void + generate(ForwardIterator first, ForwardIterator last, Generator generator) + { + for(; first != last; ++first) // We cannot call generate_n(first, last-first, generator) + *first = generator(); // because the 'last-first' might not be supported by the + } // given iterator. + + + /// generate_n + /// + /// Iterates an interator n times and assigns the result of generator + /// to each succeeding element. Generator is a function which takes + /// no arguments. + /// + /// Complexity: Exactly n invocations of generator and assignments. + /// + template + inline OutputIterator + generate_n(OutputIterator first, Size n, Generator generator) + { + for(; n > 0; --n, ++first) + *first = generator(); + return first; + } + + + /// transform + /// + /// Iterates the input range of [first, last) and the output iterator result + /// and assigns the result of unaryOperation(input) to result. + /// + /// Effects: Assigns through every iterator i in the range [result, result + (last1 - first1)) + /// a new corresponding value equal to unaryOperation(*(first1 + (i - result)). + /// + /// Requires: op shall not have any side effects. + /// + /// Returns: result + (last1 - first1). That is, returns the end of the output range. + /// + /// Complexity: Exactly 'last1 - first1' applications of unaryOperation. + /// + /// Note: result may be equal to first. 
+ /// + template + inline OutputIterator + transform(InputIterator first, InputIterator last, OutputIterator result, UnaryOperation unaryOperation) + { + for(; first != last; ++first, ++result) + *result = unaryOperation(*first); + return result; + } + + + /// transform + /// + /// Iterates the input range of [first, last) and the output iterator result + /// and assigns the result of binaryOperation(input1, input2) to result. + /// + /// Effects: Assigns through every iterator i in the range [result, result + (last1 - first1)) + /// a new corresponding value equal to binaryOperation(*(first1 + (i - result), *(first2 + (i - result))). + /// + /// Requires: binaryOperation shall not have any side effects. + /// + /// Returns: result + (last1 - first1). That is, returns the end of the output range. + /// + /// Complexity: Exactly 'last1 - first1' applications of binaryOperation. + /// + /// Note: result may be equal to first1 or first2. + /// + template + inline OutputIterator + transform(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, OutputIterator result, BinaryOperation binaryOperation) + { + for(; first1 != last1; ++first1, ++first2, ++result) + *result = binaryOperation(*first1, *first2); + return result; + } + + + /// equal + /// + /// Returns: true if for every iterator i in the range [first1, last1) the + /// following corresponding conditions hold: predicate(*i, *(first2 + (i - first1))) != false. + /// Otherwise, returns false. + /// + /// Complexity: At most last1 first1 applications of the corresponding predicate. + /// + /// To consider: Make specializations of this for scalar types and random access + /// iterators that uses memcmp or some trick memory comparison function. + /// We should verify that such a thing results in an improvement. 
+ /// + template + EA_CPP14_CONSTEXPR inline bool equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2) + { + for(; first1 != last1; ++first1, ++first2) + { + if(!(*first1 == *first2)) // Note that we always express value comparisons in terms of < or ==. + return false; + } + return true; + } + + /* Enable the following if there was shown to be some benefit. A glance and Microsoft VC++ memcmp + shows that it is not optimized in any way, much less one that would benefit us here. + + inline bool equal(const bool* first1, const bool* last1, const bool* first2) + { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); } + + inline bool equal(const char* first1, const char* last1, const char* first2) + { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); } + + inline bool equal(const unsigned char* first1, const unsigned char* last1, const unsigned char* first2) + { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); } + + inline bool equal(const signed char* first1, const signed char* last1, const signed char* first2) + { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); } + + #ifndef EA_WCHAR_T_NON_NATIVE + inline bool equal(const wchar_t* first1, const wchar_t* last1, const wchar_t* first2) + { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); } + #endif + + inline bool equal(const int16_t* first1, const int16_t* last1, const int16_t* first2) + { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); } + + inline bool equal(const uint16_t* first1, const uint16_t* last1, const uint16_t* first2) + { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); } + + inline bool equal(const int32_t* first1, const int32_t* last1, const int32_t* first2) + { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - 
(uintptr_t)first1)) == 0); } + + inline bool equal(const uint32_t* first1, const uint32_t* last1, const uint32_t* first2) + { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); } + + inline bool equal(const int64_t* first1, const int64_t* last1, const int64_t* first2) + { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); } + + inline bool equal(const uint64_t* first1, const uint64_t* last1, const uint64_t* first2) + { return (memcmp(first1, first2, (size_t)((uintptr_t)last1 - (uintptr_t)first1)) == 0); } + */ + + + + /// equal + /// + /// Returns: true if for every iterator i in the range [first1, last1) the + /// following corresponding conditions hold: pred(*i, *(first2 + (i first1))) != false. + /// Otherwise, returns false. + /// + /// Complexity: At most last1 first1 applications of the corresponding predicate. + /// + template + inline bool + equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, BinaryPredicate predicate) + { + for(; first1 != last1; ++first1, ++first2) + { + if(!predicate(*first1, *first2)) + return false; + } + return true; + } + + + + /// identical + /// + /// Returns true if the two input ranges are equivalent. + /// There is a subtle difference between this algorithm and + /// the 'equal' algorithm. The equal algorithm assumes the + /// two ranges are of equal length. This algorithm efficiently + /// compares two ranges for both length equality and for + /// element equality. There is no other standard algorithm + /// that can do this. + /// + /// Returns: true if the sequence of elements defined by the range + /// [first1, last1) is of the same length as the sequence of + /// elements defined by the range of [first2, last2) and if + /// the elements in these ranges are equal as per the + /// equal algorithm. + /// + /// Complexity: At most 'min((last1 - first1), (last2 - first2))' applications + /// of the corresponding comparison. 
+ /// + template + bool identical(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, InputIterator2 last2) + { + while((first1 != last1) && (first2 != last2) && (*first1 == *first2)) + { + ++first1; + ++first2; + } + return (first1 == last1) && (first2 == last2); + } + + + /// identical + /// + template + bool identical(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, InputIterator2 last2, BinaryPredicate predicate) + { + while((first1 != last1) && (first2 != last2) && predicate(*first1, *first2)) + { + ++first1; + ++first2; + } + return (first1 == last1) && (first2 == last2); + } + + + + /// lexicographical_compare + /// + /// Returns: true if the sequence of elements defined by the range + /// [first1, last1) is lexicographically less than the sequence of + /// elements defined by the range [first2, last2). Returns false otherwise. + /// + /// Complexity: At most 'min((last1 - first1), (last2 - first2))' applications + /// of the corresponding comparison. + /// + /// Note: If two sequences have the same number of elements and their + /// corresponding elements are equivalent, then neither sequence is + /// lexicographically less than the other. If one sequence is a prefix + /// of the other, then the shorter sequence is lexicographically less + /// than the longer sequence. Otherwise, the lexicographical comparison + /// of the sequences yields the same result as the comparison of the first + /// corresponding pair of elements that are not equivalent. + /// + template + inline bool + lexicographical_compare(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2) + { + for(; (first1 != last1) && (first2 != last2); ++first1, ++first2) + { + if(*first1 < *first2) + return true; + if(*first2 < *first1) + return false; + } + return (first1 == last1) && (first2 != last2); + } + + inline bool // Specialization for const char*. 
+ lexicographical_compare(const char* first1, const char* last1, const char* first2, const char* last2) + { + const ptrdiff_t n1(last1 - first1), n2(last2 - first2); + const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2)); + return result ? (result < 0) : (n1 < n2); + } + + inline bool // Specialization for char*. + lexicographical_compare(char* first1, char* last1, char* first2, char* last2) + { + const ptrdiff_t n1(last1 - first1), n2(last2 - first2); + const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2)); + return result ? (result < 0) : (n1 < n2); + } + + inline bool // Specialization for const unsigned char*. + lexicographical_compare(const unsigned char* first1, const unsigned char* last1, const unsigned char* first2, const unsigned char* last2) + { + const ptrdiff_t n1(last1 - first1), n2(last2 - first2); + const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2)); + return result ? (result < 0) : (n1 < n2); + } + + inline bool // Specialization for unsigned char*. + lexicographical_compare(unsigned char* first1, unsigned char* last1, unsigned char* first2, unsigned char* last2) + { + const ptrdiff_t n1(last1 - first1), n2(last2 - first2); + const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2)); + return result ? (result < 0) : (n1 < n2); + } + + inline bool // Specialization for const signed char*. + lexicographical_compare(const signed char* first1, const signed char* last1, const signed char* first2, const signed char* last2) + { + const ptrdiff_t n1(last1 - first1), n2(last2 - first2); + const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2)); + return result ? (result < 0) : (n1 < n2); + } + + inline bool // Specialization for signed char*. 
+ lexicographical_compare(signed char* first1, signed char* last1, signed char* first2, signed char* last2) + { + const ptrdiff_t n1(last1 - first1), n2(last2 - first2); + const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2)); + return result ? (result < 0) : (n1 < n2); + } + + #if defined(_MSC_VER) // If using the VC++ compiler (and thus bool is known to be a single byte)... + //Not sure if this is a good idea. + //inline bool // Specialization for const bool*. + //lexicographical_compare(const bool* first1, const bool* last1, const bool* first2, const bool* last2) + //{ + // const ptrdiff_t n1(last1 - first1), n2(last2 - first2); + // const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2)); + // return result ? (result < 0) : (n1 < n2); + //} + // + //inline bool // Specialization for bool*. + //lexicographical_compare(bool* first1, bool* last1, bool* first2, bool* last2) + //{ + // const ptrdiff_t n1(last1 - first1), n2(last2 - first2); + // const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2)); + // return result ? (result < 0) : (n1 < n2); + //} + #endif + + + + /// lexicographical_compare + /// + /// Returns: true if the sequence of elements defined by the range + /// [first1, last1) is lexicographically less than the sequence of + /// elements defined by the range [first2, last2). Returns false otherwise. + /// + /// Complexity: At most 'min((last1 -first1), (last2 - first2))' applications + /// of the corresponding comparison. + /// + /// Note: If two sequences have the same number of elements and their + /// corresponding elements are equivalent, then neither sequence is + /// lexicographically less than the other. If one sequence is a prefix + /// of the other, then the shorter sequence is lexicographically less + /// than the longer sequence. 
Otherwise, the lexicographical comparison + /// of the sequences yields the same result as the comparison of the first + /// corresponding pair of elements that are not equivalent. + /// + /// Note: False is always returned if range 1 is exhausted before range 2. + /// The result of this is that you can't do a successful reverse compare + /// (e.g. use greater<> as the comparison instead of less<>) unless the + /// two sequences are of identical length. What you want to do is reverse + /// the order of the arguments in order to get the desired effect. + /// + template + inline bool + lexicographical_compare(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, InputIterator2 last2, Compare compare) + { + for(; (first1 != last1) && (first2 != last2); ++first1, ++first2) + { + if(compare(*first1, *first2)) + return true; + if(compare(*first2, *first1)) + return false; + } + return (first1 == last1) && (first2 != last2); + } + + + /// mismatch + /// + /// Finds the first position where the two ranges [first1, last1) and + /// [first2, first2 + (last1 - first1)) differ. The two versions of + /// mismatch use different tests for whether elements differ. + /// + /// Returns: A pair of iterators i and j such that j == first2 + (i - first1) + /// and i is the first iterator in the range [first1, last1) for which the + /// following corresponding condition holds: !(*i == *(first2 + (i - first1))). + /// Returns the pair last1 and first2 + (last1 - first1) if such an iterator + /// i is not found. + /// + /// Complexity: At most last1 first1 applications of the corresponding predicate. + /// + template + inline eastl::pair + mismatch(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2) // , InputIterator2 last2) + { + while((first1 != last1) && (*first1 == *first2)) // && (first2 != last2) <- C++ standard mismatch function doesn't check first2/last2. 
+ { + ++first1; + ++first2; + } + + return eastl::pair(first1, first2); + } + + + /// mismatch + /// + /// Finds the first position where the two ranges [first1, last1) and + /// [first2, first2 + (last1 - first1)) differ. The two versions of + /// mismatch use different tests for whether elements differ. + /// + /// Returns: A pair of iterators i and j such that j == first2 + (i - first1) + /// and i is the first iterator in the range [first1, last1) for which the + /// following corresponding condition holds: pred(*i, *(first2 + (i - first1))) == false. + /// Returns the pair last1 and first2 + (last1 - first1) if such an iterator + /// i is not found. + /// + /// Complexity: At most last1 first1 applications of the corresponding predicate. + /// + template + inline eastl::pair + mismatch(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, // InputIterator2 last2, + BinaryPredicate predicate) + { + while((first1 != last1) && predicate(*first1, *first2)) // && (first2 != last2) <- C++ standard mismatch function doesn't check first2/last2. + { + ++first1; + ++first2; + } + + return eastl::pair(first1, first2); + } + + + /// lower_bound + /// + /// Finds the position of the first element in a sorted range that has a value + /// greater than or equivalent to a specified value. + /// + /// Effects: Finds the first position into which value can be inserted without + /// violating the ordering. + /// + /// Returns: The furthermost iterator i in the range [first, last) such that + /// for any iterator j in the range [first, i) the following corresponding + /// condition holds: *j < value. + /// + /// Complexity: At most 'log(last - first) + 1' comparisons. + /// + /// Optimizations: We have no need to specialize this implementation for random + /// access iterators (e.g. contiguous array), as the code below will already + /// take advantage of them. 
+ /// + template + ForwardIterator + lower_bound(ForwardIterator first, ForwardIterator last, const T& value) + { + typedef typename eastl::iterator_traits::difference_type DifferenceType; + + DifferenceType d = eastl::distance(first, last); // This will be efficient for a random access iterator such as an array. + + while(d > 0) + { + ForwardIterator i = first; + DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure. + + eastl::advance(i, d2); // This will be efficient for a random access iterator such as an array. + + if(*i < value) + { + // Disabled because std::lower_bound doesn't specify (23.3.3.3, p3) this can be done: EASTL_VALIDATE_COMPARE(!(value < *i)); // Validate that the compare function is sane. + first = ++i; + d -= d2 + 1; + } + else + d = d2; + } + return first; + } + + + /// lower_bound + /// + /// Finds the position of the first element in a sorted range that has a value + /// greater than or equivalent to a specified value. The input Compare function + /// takes two arguments and returns true if the first argument is less than + /// the second argument. + /// + /// Effects: Finds the first position into which value can be inserted without + /// violating the ordering. + /// + /// Returns: The furthermost iterator i in the range [first, last) such that + /// for any iterator j in the range [first, i) the following corresponding + /// condition holds: compare(*j, value) != false. + /// + /// Complexity: At most 'log(last - first) + 1' comparisons. + /// + /// Optimizations: We have no need to specialize this implementation for random + /// access iterators (e.g. contiguous array), as the code below will already + /// take advantage of them. 
+ /// + template + ForwardIterator + lower_bound(ForwardIterator first, ForwardIterator last, const T& value, Compare compare) + { + typedef typename eastl::iterator_traits::difference_type DifferenceType; + + DifferenceType d = eastl::distance(first, last); // This will be efficient for a random access iterator such as an array. + + while(d > 0) + { + ForwardIterator i = first; + DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure. + + eastl::advance(i, d2); // This will be efficient for a random access iterator such as an array. + + if(compare(*i, value)) + { + // Disabled because std::lower_bound doesn't specify (23.3.3.1, p3) this can be done: EASTL_VALIDATE_COMPARE(!compare(value, *i)); // Validate that the compare function is sane. + first = ++i; + d -= d2 + 1; + } + else + d = d2; + } + return first; + } + + + + /// upper_bound + /// + /// Finds the position of the first element in a sorted range that has a + /// value that is greater than a specified value. + /// + /// Effects: Finds the furthermost position into which value can be inserted + /// without violating the ordering. + /// + /// Returns: The furthermost iterator i in the range [first, last) such that + /// for any iterator j in the range [first, i) the following corresponding + /// condition holds: !(value < *j). + /// + /// Complexity: At most 'log(last - first) + 1' comparisons. + /// + template + ForwardIterator + upper_bound(ForwardIterator first, ForwardIterator last, const T& value) + { + typedef typename eastl::iterator_traits::difference_type DifferenceType; + + DifferenceType len = eastl::distance(first, last); + + while(len > 0) + { + ForwardIterator i = first; + DifferenceType len2 = len >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure. 
+ + eastl::advance(i, len2); + + if(!(value < *i)) // Note that we always express value comparisons in terms of < or ==. + { + first = ++i; + len -= len2 + 1; + } + else + { + // Disabled because std::upper_bound doesn't specify (23.3.3.2, p3) this can be done: EASTL_VALIDATE_COMPARE(!(*i < value)); // Validate that the compare function is sane. + len = len2; + } + } + return first; + } + + + /// upper_bound + /// + /// Finds the position of the first element in a sorted range that has a + /// value that is greater than a specified value. The input Compare function + /// takes two arguments and returns true if the first argument is less than + /// the second argument. + /// + /// Effects: Finds the furthermost position into which value can be inserted + /// without violating the ordering. + /// + /// Returns: The furthermost iterator i in the range [first, last) such that + /// for any iterator j in the range [first, i) the following corresponding + /// condition holds: compare(value, *j) == false. + /// + /// Complexity: At most 'log(last - first) + 1' comparisons. + /// + template + ForwardIterator + upper_bound(ForwardIterator first, ForwardIterator last, const T& value, Compare compare) + { + typedef typename eastl::iterator_traits::difference_type DifferenceType; + + DifferenceType len = eastl::distance(first, last); + + while(len > 0) + { + ForwardIterator i = first; + DifferenceType len2 = len >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure. + + eastl::advance(i, len2); + + if(!compare(value, *i)) + { + first = ++i; + len -= len2 + 1; + } + else + { + // Disabled because std::upper_bound doesn't specify (23.3.3.2, p3) this can be done: EASTL_VALIDATE_COMPARE(!compare(*i, value)); // Validate that the compare function is sane. 
+ len = len2; + } + } + return first; + } + + + /// equal_range + /// + /// Effects: Finds the largest subrange [i, j) such that the value can be inserted + /// at any iterator k in it without violating the ordering. k satisfies the + /// corresponding conditions: !(*k < value) && !(value < *k). + /// + /// Complexity: At most '2 * log(last - first) + 1' comparisons. + /// + template + pair + equal_range(ForwardIterator first, ForwardIterator last, const T& value) + { + typedef pair ResultType; + typedef typename eastl::iterator_traits::difference_type DifferenceType; + + DifferenceType d = eastl::distance(first, last); + + while(d > 0) + { + ForwardIterator i(first); + DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure. + + eastl::advance(i, d2); + + if(*i < value) + { + EASTL_VALIDATE_COMPARE(!(value < *i)); // Validate that the compare function is sane. + first = ++i; + d -= d2 + 1; + } + else if(value < *i) + { + EASTL_VALIDATE_COMPARE(!(*i < value)); // Validate that the compare function is sane. + d = d2; + last = i; + } + else + { + ForwardIterator j(i); + + return ResultType(eastl::lower_bound(first, i, value), + eastl::upper_bound(++j, last, value)); + } + } + return ResultType(first, first); + } + + + /// equal_range + /// + /// Effects: Finds the largest subrange [i, j) such that the value can be inserted + /// at any iterator k in it without violating the ordering. k satisfies the + /// corresponding conditions: compare(*k, value) == false && compare(value, *k) == false. + /// + /// Complexity: At most '2 * log(last - first) + 1' comparisons. 
+ /// + template + pair + equal_range(ForwardIterator first, ForwardIterator last, const T& value, Compare compare) + { + typedef pair ResultType; + typedef typename eastl::iterator_traits::difference_type DifferenceType; + + DifferenceType d = eastl::distance(first, last); + + while(d > 0) + { + ForwardIterator i(first); + DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure. + + eastl::advance(i, d2); + + if(compare(*i, value)) + { + EASTL_VALIDATE_COMPARE(!compare(value, *i)); // Validate that the compare function is sane. + first = ++i; + d -= d2 + 1; + } + else if(compare(value, *i)) + { + EASTL_VALIDATE_COMPARE(!compare(*i, value)); // Validate that the compare function is sane. + d = d2; + last = i; + } + else + { + ForwardIterator j(i); + + return ResultType(eastl::lower_bound(first, i, value, compare), + eastl::upper_bound(++j, last, value, compare)); + } + } + return ResultType(first, first); + } + + + /// replace + /// + /// Effects: Substitutes elements referred by the iterator i in the range [first, last) + /// with new_value, when the following corresponding conditions hold: *i == old_value. + /// + /// Complexity: Exactly 'last - first' applications of the corresponding predicate. + /// + /// Note: The predicate version of replace is replace_if and not another variation of replace. + /// This is because both versions would have the same parameter count and there could be ambiguity. + /// + template + inline void + replace(ForwardIterator first, ForwardIterator last, const T& old_value, const T& new_value) + { + for(; first != last; ++first) + { + if(*first == old_value) + *first = new_value; + } + } + + + /// replace_if + /// + /// Effects: Substitutes elements referred by the iterator i in the range [first, last) + /// with new_value, when the following corresponding conditions hold: predicate(*i) != false. 
+ /// + /// Complexity: Exactly 'last - first' applications of the corresponding predicate. + /// + /// Note: The predicate version of replace_if is replace and not another variation of replace_if. + /// This is because both versions would have the same parameter count and there could be ambiguity. + /// + template + inline void + replace_if(ForwardIterator first, ForwardIterator last, Predicate predicate, const T& new_value) + { + for(; first != last; ++first) + { + if(predicate(*first)) + *first = new_value; + } + } + + + /// remove_copy + /// + /// Effects: Copies all the elements referred to by the iterator i in the range + /// [first, last) for which the following corresponding condition does not hold: + /// *i == value. + /// + /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap. + /// + /// Returns: The end of the resulting range. + /// + /// Complexity: Exactly 'last - first' applications of the corresponding predicate. + /// + template + inline OutputIterator + remove_copy(InputIterator first, InputIterator last, OutputIterator result, const T& value) + { + for(; first != last; ++first) + { + if(!(*first == value)) // Note that we always express value comparisons in terms of < or ==. + { + *result = move(*first); + ++result; + } + } + return result; + } + + + /// remove_copy_if + /// + /// Effects: Copies all the elements referred to by the iterator i in the range + /// [first, last) for which the following corresponding condition does not hold: + /// predicate(*i) != false. + /// + /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap. + /// + /// Returns: The end of the resulting range. + /// + /// Complexity: Exactly 'last - first' applications of the corresponding predicate. 
+ /// + template + inline OutputIterator + remove_copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate) + { + for(; first != last; ++first) + { + if(!predicate(*first)) + { + *result = eastl::move(*first); + ++result; + } + } + return result; + } + + + /// remove + /// + /// Effects: Eliminates all the elements referred to by iterator i in the + /// range [first, last) for which the following corresponding condition + /// holds: *i == value. + /// + /// Returns: The end of the resulting range. + /// + /// Complexity: Exactly 'last - first' applications of the corresponding predicate. + /// + /// Note: The predicate version of remove is remove_if and not another variation of remove. + /// This is because both versions would have the same parameter count and there could be ambiguity. + /// + /// Note: Since this function moves the element to the back of the heap and + /// doesn't actually remove it from the given container, the user must call + /// the container erase function if the user wants to erase the element + /// from the container. + /// + /// Example usage: + /// vector intArray; + /// ... + /// intArray.erase(remove(intArray.begin(), intArray.end(), 4), intArray.end()); // Erase all elements of value 4. + /// + template + inline ForwardIterator + remove(ForwardIterator first, ForwardIterator last, const T& value) + { + first = eastl::find(first, last, value); + if(first != last) + { + ForwardIterator i(first); + return eastl::remove_copy(++i, last, first, value); + } + return first; + } + + + /// remove_if + /// + /// Effects: Eliminates all the elements referred to by iterator i in the + /// range [first, last) for which the following corresponding condition + /// holds: predicate(*i) != false. + /// + /// Returns: The end of the resulting range. + /// + /// Complexity: Exactly 'last - first' applications of the corresponding predicate. 
+ /// + /// Note: The predicate version of remove_if is remove and not another variation of remove_if. + /// This is because both versions would have the same parameter count and there could be ambiguity. + /// + /// Note: Since this function moves the element to the back of the heap and + /// doesn't actually remove it from the given container, the user must call + /// the container erase function if the user wants to erase the element + /// from the container. + /// + /// Example usage: + /// vector intArray; + /// ... + /// intArray.erase(remove(intArray.begin(), intArray.end(), bind2nd(less(), (int)3)), intArray.end()); // Erase all elements less than 3. + /// + template + inline ForwardIterator + remove_if(ForwardIterator first, ForwardIterator last, Predicate predicate) + { + first = eastl::find_if(first, last, predicate); + if(first != last) + { + ForwardIterator i(first); + return eastl::remove_copy_if(++i, last, first, predicate); + } + return first; + } + + + /// replace_copy + /// + /// Effects: Assigns to every iterator i in the range [result, result + (last - first)) + /// either new_value or *(first + (i - result)) depending on whether the following + /// corresponding conditions hold: *(first + (i - result)) == old_value. + /// + /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap. + /// + /// Returns: result + (last - first). + /// + /// Complexity: Exactly 'last - first' applications of the corresponding predicate. + /// + /// Note: The predicate version of replace_copy is replace_copy_if and not another variation of replace_copy. + /// This is because both versions would have the same parameter count and there could be ambiguity. + /// + template + inline OutputIterator + replace_copy(InputIterator first, InputIterator last, OutputIterator result, const T& old_value, const T& new_value) + { + for(; first != last; ++first, ++result) + *result = (*first == old_value) ? 
new_value : *first; + return result; + } + + + /// replace_copy_if + /// + /// Effects: Assigns to every iterator i in the range [result, result + (last - first)) + /// either new_value or *(first + (i - result)) depending on whether the following + /// corresponding conditions hold: predicate(*(first + (i - result))) != false. + /// + /// Requires: The ranges [first, last) and [result, result+(lastfirst)) shall not overlap. + /// + /// Returns: result + (last - first). + /// + /// Complexity: Exactly 'last - first' applications of the corresponding predicate. + /// + /// Note: The predicate version of replace_copy_if is replace_copy and not another variation of replace_copy_if. + /// This is because both versions would have the same parameter count and there could be ambiguity. + /// + template + inline OutputIterator + replace_copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate, const T& new_value) + { + for(; first != last; ++first, ++result) + *result = predicate(*first) ? new_value : *first; + return result; + } + + + + + // reverse + // + // We provide helper functions which allow reverse to be implemented more + // efficiently for some types of iterators and types. + // + template + inline void reverse_impl(BidirectionalIterator first, BidirectionalIterator last, EASTL_ITC_NS::bidirectional_iterator_tag) + { + for(; (first != last) && (first != --last); ++first) // We are not allowed to use operator <, <=, >, >= with a + eastl::iter_swap(first, last); // generic (bidirectional or otherwise) iterator. + } + + template + inline void reverse_impl(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag) + { + if(first != last) + { + for(; first < --last; ++first) // With a random access iterator, we can use operator < to more efficiently implement + eastl::iter_swap(first, last); // this algorithm. A generic iterator doesn't necessarily have an operator < defined. 
+ } + } + + /// reverse + /// + /// Reverses the values within the range [first, last). + /// + /// Effects: For each nonnegative integer i <= (last - first) / 2, + /// applies swap to all pairs of iterators first + i, (last i) - 1. + /// + /// Complexity: Exactly '(last - first) / 2' swaps. + /// + template + inline void reverse(BidirectionalIterator first, BidirectionalIterator last) + { + typedef typename eastl::iterator_traits::iterator_category IC; + eastl::reverse_impl(first, last, IC()); + } + + + + /// reverse_copy + /// + /// Copies the range [first, last) in reverse order to the result. + /// + /// Effects: Copies the range [first, last) to the range + /// [result, result + (last - first)) such that for any nonnegative + /// integer i < (last - first) the following assignment takes place: + /// *(result + (last - first) - i) = *(first + i) + /// + /// Requires: The ranges [first, last) and [result, result + (last - first)) + /// shall not overlap. + /// + /// Returns: result + (last - first). That is, returns the end of the output range. + /// + /// Complexity: Exactly 'last - first' assignments. + /// + template + inline OutputIterator + reverse_copy(BidirectionalIterator first, BidirectionalIterator last, OutputIterator result) + { + for(; first != last; ++result) + *result = *--last; + return result; + } + + + + /// search + /// + /// Search finds a subsequence within the range [first1, last1) that is identical to [first2, last2) + /// when compared element-by-element. It returns an iterator pointing to the beginning of that + /// subsequence, or else last1 if no such subsequence exists. As such, it is very much like + /// the C strstr function, with the primary difference being that strstr uses 0-terminated strings + /// whereas search uses an end iterator to specify the end of a string. 
+ /// + /// Returns: The first iterator i in the range [first1, last1 - (last2 - first2)) such that for + /// any nonnegative integer n less than 'last2 - first2' the following corresponding condition holds: + /// *(i + n) == *(first2 + n). Returns last1 if no such iterator is found. + /// + /// Complexity: At most (last1 first1) * (last2 first2) applications of the corresponding predicate. + /// + template + ForwardIterator1 + search(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2) + { + if(first2 != last2) // If there is anything to search for... + { + // We need to make a special case for a pattern of one element, + // as the logic below prevents one element patterns from working. + ForwardIterator2 temp2(first2); + ++temp2; + + if(temp2 != last2) // If what we are searching for has a length > 1... + { + ForwardIterator1 cur1(first1); + ForwardIterator2 p2; + + while(first1 != last1) + { + // The following loop is the equivalent of eastl::find(first1, last1, *first2) + while((first1 != last1) && !(*first1 == *first2)) + ++first1; + + if(first1 != last1) + { + p2 = temp2; + cur1 = first1; + + if(++cur1 != last1) + { + while(*cur1 == *p2) + { + if(++p2 == last2) + return first1; + + if(++cur1 == last1) + return last1; + } + + ++first1; + continue; + } + } + return last1; + } + + // Fall through to the end. + } + else + return eastl::find(first1, last1, *first2); + } + + return first1; + + + #if 0 + /* Another implementation which is a little more simpler but executes a little slower on average. 
+ typedef typename eastl::iterator_traits::difference_type difference_type_1; + typedef typename eastl::iterator_traits::difference_type difference_type_2; + + const difference_type_2 d2 = eastl::distance(first2, last2); + + for(difference_type_1 d1 = eastl::distance(first1, last1); d1 >= d2; ++first1, --d1) + { + ForwardIterator1 temp1 = first1; + + for(ForwardIterator2 temp2 = first2; ; ++temp1, ++temp2) + { + if(temp2 == last2) + return first1; + if(!(*temp1 == *temp2)) + break; + } + } + + return last1; + */ + #endif + } + + + /// search + /// + /// Search finds a subsequence within the range [first1, last1) that is identical to [first2, last2) + /// when compared element-by-element. It returns an iterator pointing to the beginning of that + /// subsequence, or else last1 if no such subsequence exists. As such, it is very much like + /// the C strstr function, with the only difference being that strstr uses 0-terminated strings + /// whereas search uses an end iterator to specify the end of a string. + /// + /// Returns: The first iterator i in the range [first1, last1 - (last2 - first2)) such that for + /// any nonnegative integer n less than 'last2 - first2' the following corresponding condition holds: + /// predicate(*(i + n), *(first2 + n)) != false. Returns last1 if no such iterator is found. + /// + /// Complexity: At most (last1 first1) * (last2 first2) applications of the corresponding predicate. 
+ /// + template + ForwardIterator1 + search(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + BinaryPredicate predicate) + { + typedef typename eastl::iterator_traits::difference_type difference_type_1; + typedef typename eastl::iterator_traits::difference_type difference_type_2; + + difference_type_2 d2 = eastl::distance(first2, last2); + + if(d2 != 0) + { + ForwardIterator1 i(first1); + eastl::advance(i, d2); + + for(difference_type_1 d1 = eastl::distance(first1, last1); d1 >= d2; --d1) + { + if(eastl::equal(first1, i, first2, predicate)) + return first1; + if(d1 > d2) // To do: Find a way to make the algorithm more elegant. + { + ++first1; + ++i; + } + } + return last1; + } + return first1; // Just like with strstr, we return first1 if the match string is empty. + } + + + + // search_n helper functions + // + template + ForwardIterator // Generic implementation. + search_n_impl(ForwardIterator first, ForwardIterator last, Size count, const T& value, EASTL_ITC_NS::forward_iterator_tag) + { + if(count <= 0) + return first; + + Size d1 = (Size)eastl::distance(first, last); // Should d1 be of type Size, ptrdiff_t, or iterator_traits::difference_type? + // The problem with using iterator_traits::difference_type is that + if(count > d1) // ForwardIterator may not be a true iterator but instead something like a pointer. + return last; + + for(; d1 >= count; ++first, --d1) + { + ForwardIterator i(first); + + for(Size n = 0; n < count; ++n, ++i, --d1) + { + if(!(*i == value)) // Note that we always express value comparisons in terms of < or ==. + goto not_found; + } + return first; + + not_found: + first = i; + } + return last; + } + + template inline + RandomAccessIterator // Random access iterator implementation. Much faster than generic implementation. 
+ search_n_impl(RandomAccessIterator first, RandomAccessIterator last, Size count, const T& value, EASTL_ITC_NS::random_access_iterator_tag) + { + if(count <= 0) + return first; + else if(count == 1) + return find(first, last, value); + else if(last > first) + { + RandomAccessIterator lookAhead; + RandomAccessIterator backTrack; + + Size skipOffset = (count - 1); + Size tailSize = (Size)(last - first); + Size remainder; + Size prevRemainder; + + for(lookAhead = first + skipOffset; tailSize >= count; lookAhead += count) + { + tailSize -= count; + + if(*lookAhead == value) + { + remainder = skipOffset; + + for(backTrack = lookAhead - 1; *backTrack == value; --backTrack) + { + if(--remainder == 0) + return (lookAhead - skipOffset); // success + } + + if(remainder <= tailSize) + { + prevRemainder = remainder; + + while(*(++lookAhead) == value) + { + if(--remainder == 0) + return (backTrack + 1); // success + } + tailSize -= (prevRemainder - remainder); + } + else + return last; // failure + } + + // lookAhead here is always pointing to the element of the last mismatch. + } + } + + return last; // failure + } + + + /// search_n + /// + /// Returns: The first iterator i in the range [first, last count) such that + /// for any nonnegative integer n less than count the following corresponding + /// conditions hold: *(i + n) == value, pred(*(i + n),value) != false. + /// Returns last if no such iterator is found. + /// + /// Complexity: At most '(last1 - first1) * count' applications of the corresponding predicate. + /// + template + ForwardIterator + search_n(ForwardIterator first, ForwardIterator last, Size count, const T& value) + { + typedef typename eastl::iterator_traits::iterator_category IC; + return eastl::search_n_impl(first, last, count, value, IC()); + } + + + /// binary_search + /// + /// Returns: true if there is an iterator i in the range [first last) that + /// satisfies the corresponding conditions: !(*i < value) && !(value < *i). 
+ /// + /// Complexity: At most 'log(last - first) + 2' comparisons. + /// + /// Note: The reason binary_search returns bool instead of an iterator is + /// that search_n, lower_bound, or equal_range already return an iterator. + /// However, there are arguments that binary_search should return an iterator. + /// Note that we provide binary_search_i (STL extension) to return an iterator. + /// + /// To use search_n to find an item, do this: + /// iterator i = search_n(begin, end, 1, value); + /// To use lower_bound to find an item, do this: + /// iterator i = lower_bound(begin, end, value); + /// if((i != last) && !(value < *i)) + /// + /// It turns out that the above lower_bound method is as fast as binary_search + /// would be if it returned an iterator. + /// + template + inline bool + binary_search(ForwardIterator first, ForwardIterator last, const T& value) + { + // To do: This can be made slightly faster by not using lower_bound. + ForwardIterator i(eastl::lower_bound(first, last, value)); + return ((i != last) && !(value < *i)); // Note that we always express value comparisons in terms of < or ==. + } + + + /// binary_search + /// + /// Returns: true if there is an iterator i in the range [first last) that + /// satisfies the corresponding conditions: compare(*i, value) == false && + /// compare(value, *i) == false. + /// + /// Complexity: At most 'log(last - first) + 2' comparisons. + /// + /// Note: See comments above regarding the bool return value of binary_search. + /// + template + inline bool + binary_search(ForwardIterator first, ForwardIterator last, const T& value, Compare compare) + { + // To do: This can be made slightly faster by not using lower_bound. 
+ ForwardIterator i(eastl::lower_bound(first, last, value, compare)); + return ((i != last) && !compare(value, *i)); + } + + + /// binary_search_i + /// + /// Returns: iterator if there is an iterator i in the range [first last) that + /// satisfies the corresponding conditions: !(*i < value) && !(value < *i). + /// Returns last if the value is not found. + /// + /// Complexity: At most 'log(last - first) + 2' comparisons. + /// + template + inline ForwardIterator + binary_search_i(ForwardIterator first, ForwardIterator last, const T& value) + { + // To do: This can be made slightly faster by not using lower_bound. + ForwardIterator i(eastl::lower_bound(first, last, value)); + if((i != last) && !(value < *i)) // Note that we always express value comparisons in terms of < or ==. + return i; + return last; + } + + + /// binary_search_i + /// + /// Returns: iterator if there is an iterator i in the range [first last) that + /// satisfies the corresponding conditions: !(*i < value) && !(value < *i). + /// Returns last if the value is not found. + /// + /// Complexity: At most 'log(last - first) + 2' comparisons. + /// + template + inline ForwardIterator + binary_search_i(ForwardIterator first, ForwardIterator last, const T& value, Compare compare) + { + // To do: This can be made slightly faster by not using lower_bound. + ForwardIterator i(eastl::lower_bound(first, last, value, compare)); + if((i != last) && !compare(value, *i)) + return i; + return last; + } + + + /// unique + /// + /// Given a sorted range, this function removes duplicated items. + /// Note that if you have a container then you will probably want + /// to call erase on the container with the return value if your + /// goal is to remove the duplicated items from the container. 
+ /// + /// Effects: Eliminates all but the first element from every consecutive + /// group of equal elements referred to by the iterator i in the range + /// [first, last) for which the following corresponding condition holds: + /// *i == *(i - 1). + /// + /// Returns: The end of the resulting range. + /// + /// Complexity: If the range (last - first) is not empty, exactly (last - first) + /// applications of the corresponding predicate, otherwise no applications of the predicate. + /// + /// Example usage: + /// vector intArray; + /// ... + /// intArray.erase(unique(intArray.begin(), intArray.end()), intArray.end()); + /// + template + ForwardIterator unique(ForwardIterator first, ForwardIterator last) + { + first = eastl::adjacent_find(first, last); + + if(first != last) // We expect that there are duplicated items, else the user wouldn't be calling this function. + { + ForwardIterator dest(first); + + for(++first; first != last; ++first) + { + if(!(*dest == *first)) // Note that we always express value comparisons in terms of < or ==. + *++dest = *first; + } + return ++dest; + } + return last; + } + + + /// unique + /// + /// Given a sorted range, this function removes duplicated items. + /// Note that if you have a container then you will probably want + /// to call erase on the container with the return value if your + /// goal is to remove the duplicated items from the container. + /// + /// Effects: Eliminates all but the first element from every consecutive + /// group of equal elements referred to by the iterator i in the range + /// [first, last) for which the following corresponding condition holds: + /// predicate(*i, *(i - 1)) != false. + /// + /// Returns: The end of the resulting range. + /// + /// Complexity: If the range (last - first) is not empty, exactly (last - first) + /// applications of the corresponding predicate, otherwise no applications of the predicate. 
+ /// + template + ForwardIterator unique(ForwardIterator first, ForwardIterator last, BinaryPredicate predicate) + { + first = eastl::adjacent_find(first, last, predicate); + + if(first != last) // We expect that there are duplicated items, else the user wouldn't be calling this function. + { + ForwardIterator dest(first); + + for(++first; first != last; ++first) + { + if(!predicate(*dest, *first)) + *++dest = *first; + } + return ++dest; + } + return last; + } + + + + // find_end + // + // We provide two versions here, one for a bidirectional iterators and one for + // regular forward iterators. Given that we are searching backward, it's a bit + // more efficient if we can use backwards iteration to implement our search, + // though this requires an iterator that can be reversed. + // + template + ForwardIterator1 + find_end_impl(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + EASTL_ITC_NS::forward_iterator_tag, EASTL_ITC_NS::forward_iterator_tag) + { + if(first2 != last2) // We have to do this check because the search algorithm below will return first1 (and not last1) if the first2/last2 range is empty. + { + for(ForwardIterator1 result(last1); ; ) + { + const ForwardIterator1 resultNext(eastl::search(first1, last1, first2, last2)); + + if(resultNext != last1) // If another sequence was found... 
+ { + first1 = result = resultNext; + ++first1; + } + else + return result; + } + } + return last1; + } + + template + BidirectionalIterator1 + find_end_impl(BidirectionalIterator1 first1, BidirectionalIterator1 last1, + BidirectionalIterator2 first2, BidirectionalIterator2 last2, + EASTL_ITC_NS::bidirectional_iterator_tag, EASTL_ITC_NS::bidirectional_iterator_tag) + { + typedef eastl::reverse_iterator reverse_iterator1; + typedef eastl::reverse_iterator reverse_iterator2; + + reverse_iterator1 rresult(eastl::search(reverse_iterator1(last1), reverse_iterator1(first1), + reverse_iterator2(last2), reverse_iterator2(first2))); + if(rresult.base() != first1) // If we found something... + { + BidirectionalIterator1 result(rresult.base()); + + eastl::advance(result, -eastl::distance(first2, last2)); // We have an opportunity to optimize this, as the + return result; // search function already calculates this distance. + } + return last1; + } + + /// find_end + /// + /// Finds the last occurrence of the second sequence in the first sequence. + /// As such, this function is much like the C string function strrstr and it + /// is also the same as a reversed version of 'search'. It is called find_end + /// instead of the possibly more consistent search_end simply because the C++ + /// standard algorithms have such naming. + /// + /// Returns an iterator between first1 and last1 if the sequence is found. + /// returns last1 (the end of the first seqence) if the sequence is not found. + /// + template + inline ForwardIterator1 + find_end(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2) + { + typedef typename eastl::iterator_traits::iterator_category IC1; + typedef typename eastl::iterator_traits::iterator_category IC2; + + return eastl::find_end_impl(first1, last1, first2, last2, IC1(), IC2()); + } + + + + + // To consider: Fold the predicate and non-predicate versions of + // this algorithm into a single function. 
+ template + ForwardIterator1 + find_end_impl(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + BinaryPredicate predicate, + EASTL_ITC_NS::forward_iterator_tag, EASTL_ITC_NS::forward_iterator_tag) + { + if(first2 != last2) // We have to do this check because the search algorithm below will return first1 (and not last1) if the first2/last2 range is empty. + { + for(ForwardIterator1 result = last1; ; ) + { + const ForwardIterator1 resultNext(eastl::search(first1, last1, first2, last2, predicate)); + + if(resultNext != last1) // If another sequence was found... + { + first1 = result = resultNext; + ++first1; + } + else + return result; + } + } + return last1; + } + + template + BidirectionalIterator1 + find_end_impl(BidirectionalIterator1 first1, BidirectionalIterator1 last1, + BidirectionalIterator2 first2, BidirectionalIterator2 last2, + BinaryPredicate predicate, + EASTL_ITC_NS::bidirectional_iterator_tag, EASTL_ITC_NS::bidirectional_iterator_tag) + { + typedef eastl::reverse_iterator reverse_iterator1; + typedef eastl::reverse_iterator reverse_iterator2; + + reverse_iterator1 rresult(eastl::search + (reverse_iterator1(last1), reverse_iterator1(first1), + reverse_iterator2(last2), reverse_iterator2(first2), + predicate)); + if(rresult.base() != first1) // If we found something... + { + BidirectionalIterator1 result(rresult.base()); + eastl::advance(result, -eastl::distance(first2, last2)); + return result; + } + return last1; + } + + + /// find_end + /// + /// Effects: Finds a subsequence of equal values in a sequence. + /// + /// Returns: The last iterator i in the range [first1, last1 - (last2 - first2)) + /// such that for any nonnegative integer n < (last2 - first2), the following + /// corresponding conditions hold: pred(*(i+n),*(first2+n)) != false. Returns + /// last1 if no such iterator is found. 
+ /// + /// Complexity: At most (last2 - first2) * (last1 - first1 - (last2 - first2) + 1) + /// applications of the corresponding predicate. + /// + template + inline ForwardIterator1 + find_end(ForwardIterator1 first1, ForwardIterator1 last1, + ForwardIterator2 first2, ForwardIterator2 last2, + BinaryPredicate predicate) + { + typedef typename eastl::iterator_traits::iterator_category IC1; + typedef typename eastl::iterator_traits::iterator_category IC2; + + return eastl::find_end_impl + (first1, last1, first2, last2, predicate, IC1(), IC2()); + } + + + + /// set_difference + /// + /// set_difference iterates over both input ranges and copies elements present + /// in the first range but not the second to the output range. + /// + /// Effects: Copies the elements of the range [first1, last1) which are not + /// present in the range [first2, last2) to the range beginning at result. + /// The elements in the constructed range are sorted. + /// + /// Requires: The input ranges must be sorted. + /// Requires: The output range shall not overlap with either of the original ranges. + /// + /// Returns: The end of the output range. + /// + /// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons. 
+ /// + template + OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, InputIterator2 last2, + OutputIterator result) + { + while((first1 != last1) && (first2 != last2)) + { + if(*first1 < *first2) + { + *result = *first1; + ++first1; + ++result; + } + else if(*first2 < *first1) + ++first2; + else + { + ++first1; + ++first2; + } + } + + return eastl::copy(first1, last1, result); + } + + + template + OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, InputIterator2 last2, + OutputIterator result, Compare compare) + { + while((first1 != last1) && (first2 != last2)) + { + if(compare(*first1, *first2)) + { + EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane. + *result = *first1; + ++first1; + ++result; + } + else if(compare(*first2, *first1)) + { + EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane. + ++first2; + } + else + { + ++first1; + ++first2; + } + } + + return eastl::copy(first1, last1, result); + } + + + + /// set_symmetric_difference + /// + /// set_difference iterates over both input ranges and copies elements present + /// in the either range but not the other to the output range. + /// + /// Effects: Copies the elements of the range [first1, last1) which are not + /// present in the range [first2, last2), and the elements of the range [first2, last2) + /// which are not present in the range [first1, last1) to the range beginning at result. + /// The elements in the constructed range are sorted. + /// + /// Requires: The input ranges must be sorted. + /// Requires: The resulting range shall not overlap with either of the original ranges. + /// + /// Returns: The end of the constructed range. + /// + /// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons. 
+ /// + template + OutputIterator set_symmetric_difference(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, InputIterator2 last2, + OutputIterator result) + { + while((first1 != last1) && (first2 != last2)) + { + if(*first1 < *first2) + { + *result = *first1; + ++first1; + ++result; + } + else if(*first2 < *first1) + { + *result = *first2; + ++first2; + ++result; + } + else + { + ++first1; + ++first2; + } + } + + return eastl::copy(first2, last2, eastl::copy(first1, last1, result)); + } + + + template + OutputIterator set_symmetric_difference(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, InputIterator2 last2, + OutputIterator result, Compare compare) + { + while((first1 != last1) && (first2 != last2)) + { + if(compare(*first1, *first2)) + { + EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane. + *result = *first1; + ++first1; + ++result; + } + else if(compare(*first2, *first1)) + { + EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane. + *result = *first2; + ++first2; + ++result; + } + else + { + ++first1; + ++first2; + } + } + + return eastl::copy(first2, last2, eastl::copy(first1, last1, result)); + } + + + + + /// set_intersection + /// + /// set_intersection over both ranges and copies elements present in + /// both ranges to the output range. + /// + /// Effects: Constructs a sorted intersection of the elements from the + /// two ranges; that is, the set of elements that are present in both of the ranges. + /// + /// Requires: The input ranges must be sorted. + /// Requires: The resulting range shall not overlap with either of the original ranges. + /// + /// Returns: The end of the constructed range. + /// + /// Complexity: At most 2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons. 
///
/// Note: The copying operation is stable; if an element is present in both ranges,
/// the one from the first range is copied.
///
template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
OutputIterator set_intersection(InputIterator1 first1, InputIterator1 last1,
                                InputIterator2 first2, InputIterator2 last2,
                                OutputIterator result)
{
	while((first1 != last1) && (first2 != last2))
	{
		if(*first1 < *first2)
			++first1;
		else if(*first2 < *first1)
			++first2;
		else
		{
			*result = *first1;
			++first1;
			++first2;
			++result;
		}
	}

	return result;
}


template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
OutputIterator set_intersection(InputIterator1 first1, InputIterator1 last1,
                                InputIterator2 first2, InputIterator2 last2,
                                OutputIterator result, Compare compare)
{
	while((first1 != last1) && (first2 != last2))
	{
		if(compare(*first1, *first2))
		{
			EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
			++first1;
		}
		else if(compare(*first2, *first1))
		{
			EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
			++first2;
		}
		else
		{
			*result = *first1;
			++first1;
			++first2;
			++result;
		}
	}

	return result;
}



/// set_union
///
/// set_union iterates over both ranges and copies elements present in
/// either range to the output range.
///
/// Effects: Constructs a sorted union of the elements from the two ranges;
/// that is, the set of elements that are present in one or both of the ranges.
///
/// Requires: The input ranges must be sorted.
/// Requires: The resulting range shall not overlap with either of the original ranges.
///
/// Returns: The end of the constructed range.
///
/// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
///
/// Note: The copying operation is stable; if an element is present in both ranges,
/// the one from the first range is copied.
+ /// + template + OutputIterator set_union(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, InputIterator2 last2, + OutputIterator result) + { + while((first1 != last1) && (first2 != last2)) + { + if(*first1 < *first2) + { + *result = *first1; + ++first1; + } + else if(*first2 < *first1) + { + *result = *first2; + ++first2; + } + else + { + *result = *first1; + ++first1; + ++first2; + } + ++result; + } + + return eastl::copy(first2, last2, eastl::copy(first1, last1, result)); + } + + + template + OutputIterator set_union(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, InputIterator2 last2, + OutputIterator result, Compare compare) + { + while((first1 != last1) && (first2 != last2)) + { + if(compare(*first1, *first2)) + { + EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane. + *result = *first1; + ++first1; + } + else if(compare(*first2, *first1)) + { + EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane. + *result = *first2; + ++first2; + } + else + { + *result = *first1; + ++first1; + ++first2; + } + ++result; + } + + return eastl::copy(first2, last2, eastl::copy(first1, last1, result)); + } + + + /// is_permutation + /// + template + bool is_permutation(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + + // Skip past any equivalent initial elements. 
+ while((first1 != last1) && (*first1 == *first2)) + { + ++first1; + ++first2; + } + + if(first1 != last1) + { + const difference_type first1Size = eastl::distance(first1, last1); + ForwardIterator2 last2 = first2; + eastl::advance(last2, first1Size); + + for(ForwardIterator1 i = first1; i != last1; ++i) + { + if(i == eastl::find(first1, i, *i)) + { + const difference_type c = eastl::count(first2, last2, *i); + + if((c == 0) || (c != eastl::count(i, last1, *i))) + return false; + } + } + } + + return true; + } + + /// is_permutation + /// + template + bool is_permutation(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, BinaryPredicate predicate) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + + // Skip past any equivalent initial elements. + while((first1 != last1) && predicate(*first1, *first2)) + { + ++first1; + ++first2; + } + + if(first1 != last1) + { + const difference_type first1Size = eastl::distance(first1, last1); + ForwardIterator2 last2 = first2; + eastl::advance(last2, first1Size); + + for(ForwardIterator1 i = first1; i != last1; ++i) + { + if(i == eastl::find(first1, i, *i, predicate)) + { + const difference_type c = eastl::count(first2, last2, *i, predicate); + + if((c == 0) || (c != eastl::count(i, last1, *i, predicate))) + return false; + } + } + } + + return true; + } + + + /// next_permutation + /// + /// mutates the range [first, last) to the next permutation. Returns true if the + /// new range is not the final permutation (sorted like the starting permutation). + /// Permutations start with a sorted range, and false is returned when next_permutation + /// results in the initial sorted range, or if the range has <= 1 element. + /// Note that elements are compared by operator < (as usual) and that elements deemed + /// equal via this are not rearranged. 
+ /// + /// http://marknelson.us/2002/03/01/next-permutation/ + /// Basically we start with an ordered range and reverse it's order one specifically + /// chosen swap and reverse at a time. It happens that this require going through every + /// permutation of the range. We use the same variable names as the document above. + /// + /// To consider: Significantly improved permutation/combination functionality: + /// http://home.roadrunner.com/~hinnant/combinations.html + /// + /// Example usage: + /// vector intArray; + /// // + /// sort(intArray.begin(), intArray.end()); + /// do { + /// // + /// } while(next_permutation(intArray.begin(), intArray.end())); + /// + + template + bool next_permutation(BidirectionalIterator first, BidirectionalIterator last, Compare compare) + { + if(first != last) // If there is anything in the range... + { + BidirectionalIterator i = last; + + if(first != --i) // If the range has more than one item... + { + for(;;) + { + BidirectionalIterator ii(i), j; + + if(compare(*--i, *ii)) // Find two consecutive values where the first is less than the second. + { + j = last; + while(!compare(*i, *--j)) // Find the final value that's greater than the first (it may be equal to the second). + {} + eastl::iter_swap(i, j); // Swap the first and the final. + eastl::reverse(ii, last); // Reverse the ranget from second to last. + return true; + } + + if(i == first) // There are no two consecutive values where the first is less than the second, meaning the range is in reverse order. The reverse ordered range is always the last permutation. + { + eastl::reverse(first, last); + break; // We are done. 
+ } + } + } + } + + return false; + } + + template + bool next_permutation(BidirectionalIterator first, BidirectionalIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + + return next_permutation(first, last, eastl::less()); + } + + + + /// rotate + /// + /// Effects: For each non-negative integer i < (last - first), places the element from the + /// position first + i into position first + (i + (last - middle)) % (last - first). + /// + /// Returns: first + (last - middle). That is, returns where first went to. + /// + /// Remarks: This is a left rotate. + /// + /// Requires: [first,middle) and [middle,last) shall be valid ranges. ForwardIterator shall + /// satisfy the requirements of ValueSwappable (17.6.3.2). The type of *first shall satisfy + /// the requirements of MoveConstructible (Table 20) and the requirements of MoveAssignable. + /// + /// Complexity: At most last - first swaps. + /// + /// Note: While rotate works on ForwardIterators (e.g. slist) and BidirectionalIterators (e.g. list), + /// you can get much better performance (O(1) instead of O(n)) with slist and list rotation by + /// doing splice operations on those lists instead of calling this rotate function. + /// + /// http://www.cs.bell-labs.com/cm/cs/pearls/s02b.pdf / http://books.google.com/books?id=kse_7qbWbjsC&pg=PA14&lpg=PA14&dq=Programming+Pearls+flipping+hands + /// http://books.google.com/books?id=tjOlkl7ecVQC&pg=PA189&lpg=PA189&dq=stepanov+Elements+of+Programming+rotate + /// http://stackoverflow.com/questions/21160875/why-is-stdrotate-so-fast + /// + /// Strategy: + /// - We handle the special case of (middle == first) and (middle == last) no-ops + /// up front in the main rotate entry point. + /// - There's a basic ForwardIterator implementation (rotate_general_impl) which is + /// a fallback implementation that's not as fast as others but works for all cases. + /// - There's a slightly better BidirectionalIterator implementation. 
+ /// - We have specialized versions for rotating elements that are is_trivially_move_assignable. + /// These versions will use memmove for when we have a RandomAccessIterator. + /// - We have a specialized version for rotating by only a single position, as that allows us + /// (with any iterator type) to avoid a lot of logic involved with algorithms like "flipping hands" + /// and achieve near optimal O(n) behavior. it turns out that rotate-by-one is a common use + /// case in practice. + /// + namespace Internal + { + template + ForwardIterator rotate_general_impl(ForwardIterator first, ForwardIterator middle, ForwardIterator last) + { + using eastl::swap; + + ForwardIterator current = middle; + + do { + swap(*first++, *current++); + + if(first == middle) + middle = current; + } while(current != last); + + ForwardIterator result = first; + current = middle; + + while(current != last) + { + swap(*first++, *current++); + + if(first == middle) + middle = current; + else if(current == last) + current = middle; + } + + return result; // result points to first + (last - middle). + } + + + template + ForwardIterator move_rotate_left_by_one(ForwardIterator first, ForwardIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + + value_type temp(eastl::move(*first)); + ForwardIterator result = eastl::move(eastl::next(first), last, first); // Note that while our template type is BidirectionalIterator, if the actual + *result = eastl::move(temp); // iterator is a RandomAccessIterator then this move will be a memmove for trivial types. + + return result; // result points to the final element in the range. 
+ } + + + template + BidirectionalIterator move_rotate_right_by_one(BidirectionalIterator first, BidirectionalIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + + BidirectionalIterator beforeLast = eastl::prev(last); + value_type temp(eastl::move(*beforeLast)); + BidirectionalIterator result = eastl::move_backward(first, beforeLast, last); // Note that while our template type is BidirectionalIterator, if the actual + *first = eastl::move(temp); // iterator is a RandomAccessIterator then this move will be a memmove for trivial types. + + return result; // result points to the first element in the range. + } + + template + struct rotate_helper + { + template + static ForwardIterator rotate_impl(ForwardIterator first, ForwardIterator middle, ForwardIterator last) + { return Internal::rotate_general_impl(first, middle, last); } + }; + + template <> + struct rotate_helper + { + template + static ForwardIterator rotate_impl(ForwardIterator first, ForwardIterator middle, ForwardIterator last) + { + if(eastl::next(first) == middle) // If moving trivial types by a single element, memcpy is fast for that case. + return Internal::move_rotate_left_by_one(first, last); + return Internal::rotate_general_impl(first, middle, last); + } + }; + + template <> + struct rotate_helper + { + template + static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last) + { return Internal::rotate_general_impl(first, middle, last); } // rotate_general_impl outperforms the flipping hands algorithm. + + /* + // Simplest "flipping hands" implementation. Disabled because it's slower on average than rotate_general_impl. 
+ template + static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last) + { + eastl::reverse(first, middle); + eastl::reverse(middle, last); + eastl::reverse(first, last); + return first + (last - middle); // This can be slow for large ranges because operator + and - are O(n). + } + + // Smarter "flipping hands" implementation, but still disabled because benchmarks are showing it to be slower than rotate_general_impl. + template + static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last) + { + // This is the "flipping hands" algorithm. + eastl::reverse_impl(first, middle, EASTL_ITC_NS::bidirectional_iterator_tag()); // Reverse the left side. + eastl::reverse_impl(middle, last, EASTL_ITC_NS::bidirectional_iterator_tag()); // Reverse the right side. + + // Reverse the entire range. + while((first != middle) && (middle != last)) + { + eastl::iter_swap(first, --last); + ++first; + } + + if(first == middle) // Finish reversing the entire range. + { + eastl::reverse_impl(middle, last, bidirectional_iterator_tag()); + return last; + } + else + { + eastl::reverse_impl(first, middle, bidirectional_iterator_tag()); + return first; + } + } + */ + }; + + template <> + struct rotate_helper + { + template + static BidirectionalIterator rotate_impl(BidirectionalIterator first, BidirectionalIterator middle, BidirectionalIterator last) + { + if(eastl::next(first) == middle) // If moving trivial types by a single element, memcpy is fast for that case. 
+ return Internal::move_rotate_left_by_one(first, last); + if(eastl::next(middle) == last) + return Internal::move_rotate_right_by_one(first, last); + return Internal::rotate_general_impl(first, middle, last); + } + }; + + template + inline Integer greatest_common_divisor(Integer x, Integer y) + { + do { + Integer t = (x % y); + x = y; + y = t; + } while(y); + + return x; + } + + template <> + struct rotate_helper + { + // This is the juggling algorithm, using move operations. + // In practice this implementation is about 25% faster than rotate_general_impl. We may want to + // consider sticking with just rotate_general_impl and avoid the code generation of this function. + template + static RandomAccessIterator rotate_impl(RandomAccessIterator first, RandomAccessIterator middle, RandomAccessIterator last) + { + typedef typename iterator_traits::difference_type difference_type; + typedef typename iterator_traits::value_type value_type; + + const difference_type m1 = (middle - first); + const difference_type m2 = (last - middle); + const difference_type g = Internal::greatest_common_divisor(m1, m2); + value_type temp; + + for(RandomAccessIterator p = first + g; p != first;) + { + temp = eastl::move(*--p); + RandomAccessIterator p1 = p; + RandomAccessIterator p2 = p + m1; + do + { + *p1 = eastl::move(*p2); + p1 = p2; + const difference_type d = (last - p2); + + if(m1 < d) + p2 += m1; + else + p2 = first + (m1 - d); + } while(p2 != p); + + *p1 = eastl::move(temp); + } + + return first + m2; + } + }; + + template <> + struct rotate_helper + { + // Experiments were done which tested the performance of using an intermediate buffer + // to do memcpy's to as opposed to executing a swapping algorithm. It turns out this is + // actually slower than even rotate_general_impl, partly because the average case involves + // memcpy'ing a quarter of the element range twice. Experiments were done with various kinds + // of PODs with various element counts. 
+ + template + static RandomAccessIterator rotate_impl(RandomAccessIterator first, RandomAccessIterator middle, RandomAccessIterator last) + { + if(eastl::next(first) == middle) // If moving trivial types by a single element, memcpy is fast for that case. + return Internal::move_rotate_left_by_one(first, last); + if(eastl::next(middle) == last) + return Internal::move_rotate_right_by_one(first, last); + if((last - first) < 32) // For small ranges rotate_general_impl is faster. + return Internal::rotate_general_impl(first, middle, last); + return Internal::rotate_helper::rotate_impl(first, middle, last); + } + }; + + } // namespace Internal + + + template + ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, ForwardIterator last) + { + if(middle != first) + { + if(middle != last) + { + typedef typename eastl::iterator_traits::iterator_category IC; + typedef typename eastl::iterator_traits::value_type value_type; + + return Internal::rotate_helper::value || // This is the best way of telling if we can move types via memmove, but without a conforming C++11 compiler it usually returns false. + eastl::is_pod::value || // This is a more conservative way of telling if we can move types via memmove, and most compilers support it, but it doesn't have as full of coverage as is_trivially_move_assignable. + eastl::is_scalar::value> // This is the most conservative means and works with all compilers, but works only for scalars. + ::rotate_impl(first, middle, last); + } + + return first; + } + + return last; + } + + + + /// rotate_copy + /// + /// Similar to rotate except writes the output to the OutputIterator and + /// returns an OutputIterator to the element past the last element copied + /// (i.e. 
result + (last - first)) + /// + template + OutputIterator rotate_copy(ForwardIterator first, ForwardIterator middle, ForwardIterator last, OutputIterator result) + { + return eastl::copy(first, middle, eastl::copy(middle, last, result)); + } + + + + /// clamp + /// + /// Returns a reference to a clamped value within the range of [lo, hi]. + /// + /// http://en.cppreference.com/w/cpp/algorithm/clamp + /// + template + EA_CONSTEXPR const T& clamp(const T& v, const T& lo, const T& hi) + { + return clamp(v, lo, hi, eastl::less<>()); + } + + template + EA_CONSTEXPR const T& clamp(const T& v, const T& lo, const T& hi, Compare comp) + { + // code collapsed to a single line due to constexpr requirements + return [&] { EASTL_ASSERT(!comp(hi, lo)); }(), + comp(v, lo) ? lo : comp(hi, v) ? hi : v; + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + diff --git a/include/EASTL/allocator.h b/include/EASTL/allocator.h new file mode 100644 index 0000000..ad20e4d --- /dev/null +++ b/include/EASTL/allocator.h @@ -0,0 +1,395 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ALLOCATOR_H +#define EASTL_ALLOCATOR_H + + +#include +#include +#include + + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// alloc_flags + /// + /// Defines allocation flags. + /// + enum alloc_flags + { + MEM_TEMP = 0, // Low memory, not necessarily actually temporary. + MEM_PERM = 1 // High memory, for things that won't be unloaded. 
+ }; + + + /// allocator + /// + /// In this allocator class, note that it is not templated on any type and + /// instead it simply allocates blocks of memory much like the C malloc and + /// free functions. It can be thought of as similar to C++ std::allocator. + /// The flags parameter has meaning that is specific to the allocation + /// + /// C++11's std::allocator (20.6.9) doesn't have a move constructor or assignment + /// operator. This is possibly because std::allocators are associated with types + /// instead of as instances. The potential non-equivalance of C++ std::allocator + /// instances has been a source of some acknowledged design problems. + /// We don't implement support for move construction or assignment in eastl::allocator, + /// but users can define their own allocators which do have move functions and + /// the eastl containers are compatible with such allocators (i.e. nothing unexpected + /// will happen). + /// + class EASTL_API allocator + { + public: + EASTL_ALLOCATOR_EXPLICIT allocator(const char* pName = EASTL_NAME_VAL(EASTL_ALLOCATOR_DEFAULT_NAME)); + allocator(const allocator& x); + allocator(const allocator& x, const char* pName); + + allocator& operator=(const allocator& x); + + void* allocate(size_t n, int flags = 0); + void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0); + void deallocate(void* p, size_t n); + + const char* get_name() const; + void set_name(const char* pName); + + protected: + #if EASTL_NAME_ENABLED + const char* mpName; // Debug name, used to track memory. + #endif + }; + + bool operator==(const allocator& a, const allocator& b); + bool operator!=(const allocator& a, const allocator& b); + + + + /// dummy_allocator + /// + /// Defines an allocator which does nothing. It returns NULL from allocate calls. 
+ /// + class EASTL_API dummy_allocator + { + public: + EASTL_ALLOCATOR_EXPLICIT dummy_allocator(const char* = NULL) { } + dummy_allocator(const dummy_allocator&) { } + dummy_allocator(const dummy_allocator&, const char*) { } + + dummy_allocator& operator=(const dummy_allocator&) { return *this; } + + void* allocate(size_t, int = 0) { return NULL; } + void* allocate(size_t, size_t, size_t, int = 0) { return NULL; } + void deallocate(void*, size_t) { } + + const char* get_name() const { return ""; } + void set_name(const char*) { } + }; + + inline bool operator==(const dummy_allocator&, const dummy_allocator&) { return true; } + inline bool operator!=(const dummy_allocator&, const dummy_allocator&) { return false; } + + + + /// Defines a static default allocator which is constant across all types. + /// This is different from get_default_allocator, which is is bound at + /// compile-time and expected to differ per allocator type. + /// Currently this Default Allocator applies only to CoreAllocatorAdapter. + /// To consider: This naming of this function is too similar to get_default_allocator + /// and instead should be named something like GetStaticDefaultAllocator. + EASTL_API allocator* GetDefaultAllocator(); + EASTL_API allocator* SetDefaultAllocator(allocator* pAllocator); + + + /// get_default_allocator + /// + /// This templated function allows the user to implement a default allocator + /// retrieval function that any part of EASTL can use. EASTL containers take + /// an Allocator parameter which identifies an Allocator class to use. But + /// different kinds of allocators have different mechanisms for retrieving + /// a default allocator instance, and some don't even intrinsically support + /// such functionality. The user can override this get_default_allocator + /// function in order to provide the glue between EASTL and whatever their + /// system's default allocator happens to be. 
+ /// + /// Example usage: + /// MyAllocatorType* gpSystemAllocator; + /// + /// MyAllocatorType* get_default_allocator(const MyAllocatorType*) + /// { return gpSystemAllocator; } + /// + template + Allocator* get_default_allocator(const Allocator*); + + EASTLAllocatorType* get_default_allocator(const EASTLAllocatorType*); + + + /// default_allocfreemethod + /// + /// Implements a default allocfreemethod which uses the default global allocator. + /// This version supports only default alignment. + /// + void* default_allocfreemethod(size_t n, void* pBuffer, void* /*pContext*/); + + + /// allocate_memory + /// + /// This is a memory allocation dispatching function. + /// To do: Make aligned and unaligned specializations. + /// Note that to do this we will need to use a class with a static + /// function instead of a standalone function like below. + /// + template + void* allocate_memory(Allocator& a, size_t n, size_t alignment, size_t alignmentOffset); + + +} // namespace eastl + + + + + + +#ifndef EASTL_USER_DEFINED_ALLOCATOR // If the user hasn't declared that he has defined a different allocator implementation elsewhere... + + EA_DISABLE_ALL_VC_WARNINGS() + #include + EA_RESTORE_ALL_VC_WARNINGS() + + #if !EASTL_DLL // If building a regular library and not building EASTL as a DLL... + // It is expected that the application define the following + // versions of operator new for the application. Either that or the + // user needs to override the implementation of the allocator class. + void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line); + void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line); + #endif + + namespace eastl + { + inline allocator::allocator(const char* EASTL_NAME(pName)) + { + #if EASTL_NAME_ENABLED + mpName = pName ? 
pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + + inline allocator::allocator(const allocator& EASTL_NAME(alloc)) + { + #if EASTL_NAME_ENABLED + mpName = alloc.mpName; + #endif + } + + + inline allocator::allocator(const allocator&, const char* EASTL_NAME(pName)) + { + #if EASTL_NAME_ENABLED + mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + + inline allocator& allocator::operator=(const allocator& EASTL_NAME(alloc)) + { + #if EASTL_NAME_ENABLED + mpName = alloc.mpName; + #endif + return *this; + } + + + inline const char* allocator::get_name() const + { + #if EASTL_NAME_ENABLED + return mpName; + #else + return EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + + inline void allocator::set_name(const char* EASTL_NAME(pName)) + { + #if EASTL_NAME_ENABLED + mpName = pName; + #endif + } + + + inline void* allocator::allocate(size_t n, int flags) + { + #if EASTL_NAME_ENABLED + #define pName mpName + #else + #define pName EASTL_ALLOCATOR_DEFAULT_NAME + #endif + + #if EASTL_DLL + return allocate(n, EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT, 0, flags); + #elif (EASTL_DEBUGPARAMS_LEVEL <= 0) + return ::new((char*)0, flags, 0, (char*)0, 0) char[n]; + #elif (EASTL_DEBUGPARAMS_LEVEL == 1) + return ::new( pName, flags, 0, (char*)0, 0) char[n]; + #else + return ::new( pName, flags, 0, __FILE__, __LINE__) char[n]; + #endif + } + + + inline void* allocator::allocate(size_t n, size_t alignment, size_t offset, int flags) + { + #if EASTL_DLL + // We currently have no support for implementing flags when + // using the C runtime library operator new function. The user + // can use SetDefaultAllocator to override the default allocator. + EA_UNUSED(offset); EA_UNUSED(flags); + + size_t adjustedAlignment = (alignment > EA_PLATFORM_PTR_SIZE) ? 
alignment : EA_PLATFORM_PTR_SIZE; + + void* p = new char[n + adjustedAlignment + EA_PLATFORM_PTR_SIZE]; + void* pPlusPointerSize = (void*)((uintptr_t)p + EA_PLATFORM_PTR_SIZE); + void* pAligned = (void*)(((uintptr_t)pPlusPointerSize + adjustedAlignment - 1) & ~(adjustedAlignment - 1)); + + void** pStoredPtr = (void**)pAligned - 1; + EASTL_ASSERT(pStoredPtr >= p); + *(pStoredPtr) = p; + + EASTL_ASSERT(((size_t)pAligned & ~(alignment - 1)) == (size_t)pAligned); + + return pAligned; + #elif (EASTL_DEBUGPARAMS_LEVEL <= 0) + return ::new(alignment, offset, (char*)0, flags, 0, (char*)0, 0) char[n]; + #elif (EASTL_DEBUGPARAMS_LEVEL == 1) + return ::new(alignment, offset, pName, flags, 0, (char*)0, 0) char[n]; + #else + return ::new(alignment, offset, pName, flags, 0, __FILE__, __LINE__) char[n]; + #endif + + #undef pName // See above for the definition of this. + } + + + inline void allocator::deallocate(void* p, size_t) + { + #if EASTL_DLL + if (p != nullptr) + { + void* pOriginalAllocation = *((void**)p - 1); + delete[](char*)pOriginalAllocation; + } + #else + delete[](char*)p; + #endif + } + + + inline bool operator==(const allocator&, const allocator&) + { + return true; // All allocators are considered equal, as they merely use global new/delete. + } + + + inline bool operator!=(const allocator&, const allocator&) + { + return false; // All allocators are considered equal, as they merely use global new/delete. + } + + + } // namespace eastl + + +#endif // EASTL_USER_DEFINED_ALLOCATOR + + + +namespace eastl +{ + + template + inline Allocator* get_default_allocator(const Allocator*) + { + return NULL; // By default we return NULL; the user must make specialization of this function in order to provide their own implementation. 
+ } + + + inline EASTLAllocatorType* get_default_allocator(const EASTLAllocatorType*) + { + return EASTLAllocatorDefault(); // For the built-in allocator EASTLAllocatorType, we happen to already have a function for returning the default allocator instance, so we provide it. + } + + + inline void* default_allocfreemethod(size_t n, void* pBuffer, void* /*pContext*/) + { + EASTLAllocatorType* const pAllocator = EASTLAllocatorDefault(); + + if(pBuffer) // If freeing... + { + EASTLFree(*pAllocator, pBuffer, n); + return NULL; // The return value is meaningless for the free. + } + else // allocating + return EASTLAlloc(*pAllocator, n); + } + + + /// allocate_memory + /// + /// This is a memory allocation dispatching function. + /// To do: Make aligned and unaligned specializations. + /// Note that to do this we will need to use a class with a static + /// function instead of a standalone function like below. + /// + template + inline void* allocate_memory(Allocator& a, size_t n, size_t alignment, size_t alignmentOffset) + { + void *result; + if (alignment <= EASTL_ALLOCATOR_MIN_ALIGNMENT) + { + result = EASTLAlloc(a, n); + // Ensure the result is correctly aligned. An assertion likely indicates a mismatch between EASTL_ALLOCATOR_MIN_ALIGNMENT and the minimum alignment + // of EASTLAlloc. If there is a mismatch it may be necessary to define EASTL_ALLOCATOR_MIN_ALIGNMENT to be the minimum alignment of EASTLAlloc, or + // to increase the alignment of EASTLAlloc to match EASTL_ALLOCATOR_MIN_ALIGNMENT. + EASTL_ASSERT((reinterpret_cast(result)& ~(alignment - 1)) == reinterpret_cast(result)); + } + else + { + result = EASTLAllocAligned(a, n, alignment, alignmentOffset); + // Ensure the result is correctly aligned. An assertion here may indicate a bug in the allocator. 
+ auto resultMinusOffset = (char*)result - alignmentOffset; + EA_UNUSED(resultMinusOffset); + EASTL_ASSERT((reinterpret_cast(resultMinusOffset)& ~(alignment - 1)) == reinterpret_cast(resultMinusOffset)); + } + return result; + } + +} + + +#endif // Header include guard + + + + + + + + + + + + + + + + diff --git a/include/EASTL/allocator_malloc.h b/include/EASTL/allocator_malloc.h new file mode 100644 index 0000000..31f8dec --- /dev/null +++ b/include/EASTL/allocator_malloc.h @@ -0,0 +1,130 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ALLOCATOR_MALLOC_H +#define EASTL_ALLOCATOR_MALLOC_H + + +#include +#include +#include + + +// EASTL_ALIGNED_MALLOC_AVAILABLE +// +// Identifies if the standard library provides a built-in aligned version of malloc. +// Defined as 0 or 1, depending on the standard library or platform availability. +// None of the viable C functions provides for an aligned malloc with offset, so we +// don't consider that supported in any case. +// +// Options for aligned allocations: +// C11 aligned_alloc http://linux.die.net/man/3/aligned_alloc +// glibc memalign http://linux.die.net/man/3/posix_memalign +// Posix posix_memalign http://pubs.opengroup.org/onlinepubs/000095399/functions/posix_memalign.html +// VC++ _aligned_malloc http://msdn.microsoft.com/en-us/library/8z34s9c6%28VS.80%29.aspx This is not suitable, since it has a limitation that you need to free via _aligned_free. +// +#if !defined EASTL_ALIGNED_MALLOC_AVAILABLE + #if defined(EA_PLATFORM_POSIX) && !defined(EA_PLATFORM_APPLE) + // memalign is more consistently available than posix_memalign, though its location isn't consistent across + // platforms and compiler libraries. 
Typically it's declared in one of three headers: stdlib.h, malloc.h, or malloc/malloc.h + #include // memalign, posix_memalign. + #define EASTL_ALIGNED_MALLOC_AVAILABLE 1 + + #if EA_HAS_INCLUDE_AVAILABLE + #if EA_HAS_INCLUDE() + #include + #elif EA_HAS_INCLUDE() + #include + #endif + #elif defined(EA_PLATFORM_BSD) + #include + #elif defined(EA_COMPILER_CLANG) + #if __has_include() + #include + #elif __has_include() + #include + #endif + #else + #include + #endif + #else + #define EASTL_ALIGNED_MALLOC_AVAILABLE 0 + #endif +#endif + + +namespace eastl +{ + + /////////////////////////////////////////////////////////////////////////////// + // allocator_malloc + // + // Implements an EASTL allocator that uses malloc/free as opposed to + // new/delete or PPMalloc Malloc/Free. + // + // Example usage: + // vector intVector; + // + class allocator_malloc + { + public: + allocator_malloc(const char* = NULL) + { } + + allocator_malloc(const allocator_malloc&) + { } + + allocator_malloc(const allocator_malloc&, const char*) + { } + + allocator_malloc& operator=(const allocator_malloc&) + { return *this; } + + bool operator==(const allocator_malloc&) + { return true; } + + bool operator!=(const allocator_malloc&) + { return false; } + + void* allocate(size_t n, int /*flags*/ = 0) + { return malloc(n); } + + void* allocate(size_t n, size_t alignment, size_t alignmentOffset, int /*flags*/ = 0) + { + #if EASTL_ALIGNED_MALLOC_AVAILABLE + if((alignmentOffset % alignment) == 0) // We check for (offset % alignmnent == 0) instead of (offset == 0) because any block which is aligned on e.g. 64 also is aligned at an offset of 64 by definition. + return memalign(alignment, n); // memalign is more consistently available than posix_memalign. 
+ #else + if((alignment <= EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT) && ((alignmentOffset % alignment) == 0)) + return malloc(n); + #endif + return NULL; + } + + void deallocate(void* p, size_t /*n*/) + { free(p); } + + const char* get_name() const + { return "allocator_malloc"; } + + void set_name(const char*) + { } + }; + + +} // namespace eastl + + + +#endif // Header include guard + + + + + + + + + diff --git a/include/EASTL/any.h b/include/EASTL/any.h new file mode 100644 index 0000000..ef8a312 --- /dev/null +++ b/include/EASTL/any.h @@ -0,0 +1,652 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// This file implements the eastl::any which is part of the C++ standard STL +// library specification. +// +// eastl::any is a type-safe container for single values of any type. Our +// implementation makes use of the "small local buffer" optimization to avoid +// unnecessary dynamic memory allocation if the specified type is a eligible to +// be stored in its local buffer. The user type must satisfy the size +// requirements and must be no-throw move-constructible to qualify for the local +// buffer optimization. +// +// To consider: Implement a fixed_any variant to allow users to customize +// the size of the "small local buffer" optimization. +// +// http://en.cppreference.com/w/cpp/utility/any +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ANY_H +#define EASTL_ANY_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + +#include +#include +#if EASTL_RTTI_ENABLED + #include +#endif +#if EASTL_EXCEPTIONS_ENABLED + #include +#endif + + +namespace eastl +{ + /////////////////////////////////////////////////////////////////////////////// + // bad_any_cast + // + // The type thrown by any_cast on failure. + // + // http://en.cppreference.com/w/cpp/utility/any/bad_any_cast + // + #if EASTL_EXCEPTIONS_ENABLED + struct bad_cast : std::exception + { + const char* what() const EA_NOEXCEPT EA_OVERRIDE + { return "bad cast"; } + }; + + struct bad_any_cast : public bad_cast + { + const char* what() const EA_NOEXCEPT EA_OVERRIDE + { return "bad_any_cast"; } + }; + #endif + + namespace Internal + { + // utility to switch between exceptions and asserts + inline void DoBadAnyCast() + { + #if EASTL_EXCEPTIONS_ENABLED + throw bad_any_cast(); + #else + EASTL_ASSERT_MSG(false, "bad_any_cast\n"); + + // NOTE(rparolin): CRASH! + // You crashed here because you requested a type that was not contained in the object. + // We choose to intentionally crash here instead of returning invalid data to the calling + // code which could cause hard to track down bugs. + *((volatile int*)0) = 0xDEADC0DE; + #endif + } + + template + void* DefaultConstruct(Args&&... 
args) + { + auto* pMem = EASTLAllocatorDefault()->allocate(sizeof(T), alignof(T), 0); + + return ::new(pMem) T(eastl::forward(args)...); + } + + template + void DefaultDestroy(T* p) + { + p->~T(); + + EASTLAllocatorDefault()->deallocate(static_cast(p), sizeof(T)); + } + } + + + /////////////////////////////////////////////////////////////////////////////// + // 20.7.3, class any + // + class any + { + ////////////////////////////////////////////////////////////////////////////////////////// + // storage_operation + // + // operations supported by the storage handler + // + enum class storage_operation + { + GET, + DESTROY, + COPY, + MOVE, + TYPE_INFO + }; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // storage + // + // the underlying storage type which enables the switching between objects stored in + // the heap and objects stored within the any type. + // + union storage + { + typedef aligned_storage_t<4 * sizeof(void*), alignment_of::value> internal_storage_t; + + void* external_storage = nullptr; + internal_storage_t internal_storage; + }; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // use_internal_storage + // + // determines when the "local buffer optimization" is used + // + template + using use_internal_storage = bool_constant + < + is_nothrow_move_constructible::value + && (sizeof(T) <= sizeof(storage)) && + (alignment_of::value % alignment_of::value == 0) + >; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // non-member friend functions + // + template friend const ValueType* any_cast(const any* pAny) EA_NOEXCEPT; + template friend ValueType* any_cast(any* pAny) EA_NOEXCEPT; + template friend ValueType any_cast(const any& operand); + template friend ValueType any_cast(any& operand); + template friend ValueType any_cast(any&& operand); + + //Adding Unsafe any cast operations + template friend 
const ValueType* unsafe_any_cast(const any* pAny) EA_NOEXCEPT; + template friend ValueType* unsafe_any_cast(any* pAny) EA_NOEXCEPT; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // internal storage handler + // + template + struct storage_handler_internal + { + template + static void construct(storage& s, V&& v) + { + ::new(&s.internal_storage) T(eastl::forward(v)); + } + + template + static void construct_inplace(storage& s, Args... args) + { + ::new(&s.internal_storage) T(eastl::forward(args)...); + } + + template + static void construct_inplace(storage& s, std::initializer_list il, Args&&... args) + { + ::new(&s.internal_storage) NT(il, eastl::forward(args)...); + } + + static inline void destroy(any& refAny) + { + T& t = *static_cast(static_cast(&refAny.m_storage.internal_storage)); + EA_UNUSED(t); + t.~T(); + + refAny.m_handler = nullptr; + } + + static void* handler_func(storage_operation op, const any* pThis, any* pOther) + { + switch (op) + { + case storage_operation::GET: + { + EASTL_ASSERT(pThis); + return (void*)(&pThis->m_storage.internal_storage); + } + break; + + case storage_operation::DESTROY: + { + EASTL_ASSERT(pThis); + destroy(const_cast(*pThis)); + } + break; + + case storage_operation::COPY: + { + EASTL_ASSERT(pThis); + EASTL_ASSERT(pOther); + construct(pOther->m_storage, *(T*)(&pThis->m_storage.internal_storage)); + } + break; + + case storage_operation::MOVE: + { + EASTL_ASSERT(pThis); + EASTL_ASSERT(pOther); + construct(pOther->m_storage, eastl::move(*(T*)(&pThis->m_storage.internal_storage))); + destroy(const_cast(*pThis)); + } + break; + + case storage_operation::TYPE_INFO: + { + #if EASTL_RTTI_ENABLED + return (void*)&typeid(T); + #endif + } + break; + + default: + { + EASTL_ASSERT_MSG(false, "unknown storage operation\n"); + } + break; + }; + + return nullptr; + } + }; + + + + ////////////////////////////////////////////////////////////////////////////////////////// + // external 
storage handler + // + template + struct storage_handler_external + { + template + static inline void construct(storage& s, V&& v) + { + s.external_storage = Internal::DefaultConstruct(eastl::forward(v)); + } + + template + static inline void construct_inplace(storage& s, Args... args) + { + s.external_storage = Internal::DefaultConstruct(eastl::forward(args)...); + } + + template + static inline void construct_inplace(storage& s, std::initializer_list il, Args&&... args) + { + s.external_storage = Internal::DefaultConstruct(il, eastl::forward(args)...); + } + + static inline void destroy(any& refAny) + { + Internal::DefaultDestroy(static_cast(refAny.m_storage.external_storage)); + + refAny.m_handler = nullptr; + } + + static void* handler_func(storage_operation op, const any* pThis, any* pOther) + { + switch (op) + { + case storage_operation::GET: + { + EASTL_ASSERT(pThis); + EASTL_ASSERT(pThis->m_storage.external_storage); + return static_cast(pThis->m_storage.external_storage); + } + break; + + case storage_operation::DESTROY: + { + EASTL_ASSERT(pThis); + destroy(*const_cast(pThis)); + } + break; + + case storage_operation::COPY: + { + EASTL_ASSERT(pThis); + EASTL_ASSERT(pOther); + construct(pOther->m_storage, *static_cast(pThis->m_storage.external_storage)); + } + break; + + case storage_operation::MOVE: + { + EASTL_ASSERT(pThis); + EASTL_ASSERT(pOther); + construct(pOther->m_storage, eastl::move(*(T*)(pThis->m_storage.external_storage))); + destroy(const_cast(*pThis)); + } + break; + + case storage_operation::TYPE_INFO: + { + #if EASTL_RTTI_ENABLED + return (void*)&typeid(T); + #endif + } + break; + + default: + { + EASTL_ASSERT_MSG(false, "unknown storage operation\n"); + } + break; + }; + + return nullptr; + } + }; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // storage_handler_ptr + // + // defines the function signature of the storage handler that both the internal and + // external storage handlers 
must implement to retrieve the underlying type of the any + // object. + // + using storage_handler_ptr = void* (*)(storage_operation, const any*, any*); + + + ////////////////////////////////////////////////////////////////////////////////////////// + // storage_handler + // + // based on the specified type T we select the appropriate underlying storage handler + // based on the 'use_internal_storage' trait. + // + template + using storage_handler = typename conditional::value, + storage_handler_internal, + storage_handler_external>::type; + + + ////////////////////////////////////////////////////////////////////////////////////////// + // data layout + // + storage m_storage; + storage_handler_ptr m_handler; + + public: + #ifndef EA_COMPILER_GNUC + // TODO(rparolin): renable constexpr for GCC + EA_CONSTEXPR + #endif + any() EA_NOEXCEPT + : m_storage(), m_handler(nullptr) {} + + any(const any& other) : m_handler(nullptr) + { + if (other.m_handler) + { + // NOTE(rparolin): You can not simply copy the underlying + // storage because it could hold a pointer to an object on the + // heap which breaks the copy semantics of the language. + other.m_handler(storage_operation::COPY, &other, this); + m_handler = other.m_handler; + } + } + + any(any&& other) EA_NOEXCEPT : m_handler(nullptr) + { + if(other.m_handler) + { + // NOTE(rparolin): You can not simply move the underlying + // storage because because the storage class has effectively + // type erased user type so we have to defer to the handler + // function to get the type back and pass on the move request. 
+ m_handler = eastl::move(other.m_handler); + other.m_handler(storage_operation::MOVE, &other, this); + } + } + + ~any() { reset(); } + + template + any(ValueType&& value, + typename eastl::enable_if::type, any>::value>::type* = 0) + { + typedef decay_t DecayedValueType; + static_assert(is_copy_constructible::value, "ValueType must be copy-constructible"); + storage_handler::construct(m_storage, eastl::forward(value)); + m_handler = &storage_handler::handler_func; + } + + template + explicit any(in_place_type_t, Args&&... args) + { + typedef storage_handler> StorageHandlerT; + static_assert(eastl::is_constructible::value, "T must be constructible with Args..."); + + StorageHandlerT::construct_inplace(m_storage, eastl::forward(args)...); + m_handler = &StorageHandlerT::handler_func; + } + + template + explicit any(in_place_type_t, + std::initializer_list il, + Args&&... args, + typename eastl::enable_if&, Args...>::value, + void>::type* = 0) + { + typedef storage_handler> StorageHandlerT; + + StorageHandlerT::construct_inplace(m_storage, il, eastl::forward(args)...); + m_handler = &StorageHandlerT::handler_func; + } + + // 20.7.3.2, assignments + template + any& operator=(ValueType&& value) + { + static_assert(is_copy_constructible>::value, "ValueType must be copy-constructible"); + any(eastl::forward(value)).swap(*this); + return *this; + } + + any& operator=(const any& other) + { + any(other).swap(*this); + return *this; + } + + any& operator=(any&& other) EA_NOEXCEPT + { + any(eastl::move(other)).swap(*this); + return *this; + } + + // 20.7.3.3, modifiers + #if EASTL_VARIADIC_TEMPLATES_ENABLED + template + void emplace(Args&&... 
args) + { + typedef storage_handler> StorageHandlerT; + static_assert(eastl::is_constructible::value, "T must be constructible with Args..."); + + reset(); + StorageHandlerT::construct_inplace(m_storage, eastl::forward(args)...); + m_handler = &StorageHandlerT::handler_func; + } + + template + typename eastl::enable_if&, Args...>::value, void>::type + emplace(std::initializer_list il, Args&&... args) + { + typedef storage_handler> StorageHandlerT; + + reset(); + StorageHandlerT::construct_inplace(m_storage, il, eastl::forward(args)...); + m_handler = &StorageHandlerT::handler_func; + } + #endif + + void reset() EA_NOEXCEPT + { + if(m_handler) + m_handler(storage_operation::DESTROY, this, nullptr); + } + + void swap(any& other) EA_NOEXCEPT + { + if(this == &other) + return; + + if(m_handler && other.m_handler) + { + any tmp; + tmp.m_handler = other.m_handler; + other.m_handler(storage_operation::MOVE, &other, &tmp); + + other.m_handler = m_handler; + m_handler(storage_operation::MOVE, this, &other); + + m_handler = tmp.m_handler; + tmp.m_handler(storage_operation::MOVE, &tmp, this); + } + else if (m_handler == nullptr && other.m_handler) + { + eastl::swap(m_handler, other.m_handler); + m_handler(storage_operation::MOVE, &other, this); + } + else if(m_handler && other.m_handler == nullptr) + { + eastl::swap(m_handler, other.m_handler); + other.m_handler(storage_operation::MOVE, this, &other); + } + //else if (m_handler == nullptr && other.m_handler == nullptr) + //{ + // // nothing to swap + //} + } + + // 20.7.3.4, observers + bool has_value() const EA_NOEXCEPT { return m_handler != nullptr; } + + #if EASTL_RTTI_ENABLED + inline const std::type_info& type() const EA_NOEXCEPT + { + if(m_handler) + { + auto* pTypeInfo = m_handler(storage_operation::TYPE_INFO, this, nullptr); + return *static_cast(pTypeInfo); + } + else + { + return typeid(void); + } + } + #endif + }; + + + + ////////////////////////////////////////////////////////////////////////////////////////// + 
// 20.7.4, non-member functions + // + inline void swap(any& rhs, any& lhs) EA_NOEXCEPT { rhs.swap(lhs); } + + + ////////////////////////////////////////////////////////////////////////////////////////// + // 20.7.4, The non-member any_cast functions provide type-safe access to the contained object. + // + template + inline ValueType any_cast(const any& operand) + { + static_assert(eastl::is_reference::value || eastl::is_copy_constructible::value, + "ValueType must be a reference or copy constructible"); + + auto* p = any_cast::type>::type>(&operand); + + if(p == nullptr) + Internal::DoBadAnyCast(); + + return *p; + } + + template + inline ValueType any_cast(any& operand) + { + static_assert(eastl::is_reference::value || eastl::is_copy_constructible::value, + "ValueType must be a reference or copy constructible"); + + auto* p = any_cast::type>(&operand); + + if(p == nullptr) + Internal::DoBadAnyCast(); + + return *p; + } + + template + inline ValueType any_cast(any&& operand) + { + static_assert(eastl::is_reference::value || eastl::is_copy_constructible::value, + "ValueType must be a reference or copy constructible"); + + auto* p = any_cast::type>(&operand); + + if (p == nullptr) + Internal::DoBadAnyCast(); + + return *p; + } + + // NOTE(rparolin): The runtime type check was commented out because in DLL builds the templated function pointer + // value will be different -- completely breaking the validation mechanism. Due to the fact that eastl::any uses + // type erasure we can't refesh (on copy/move) the cached function pointer to the internal handler function because + // we don't statically know the type. + template + inline const ValueType* any_cast(const any* pAny) EA_NOEXCEPT + { + return (pAny && pAny->m_handler EASTL_IF_NOT_DLL(== &any::storage_handler>::handler_func) + #if EASTL_RTTI_ENABLED + && pAny->type() == typeid(typename remove_reference::type) + #endif + ) ? 
+ static_cast(pAny->m_handler(any::storage_operation::GET, pAny, nullptr)) : + nullptr; + } + + template + inline ValueType* any_cast(any* pAny) EA_NOEXCEPT + { + return (pAny && pAny->m_handler EASTL_IF_NOT_DLL(== &any::storage_handler>::handler_func) + #if EASTL_RTTI_ENABLED + && pAny->type() == typeid(typename remove_reference::type) + #endif + ) ? + static_cast(pAny->m_handler(any::storage_operation::GET, pAny, nullptr)) : + nullptr; + } + + //Unsafe operations - use with caution + template + inline const ValueType* unsafe_any_cast(const any* pAny) EA_NOEXCEPT + { + return unsafe_any_cast(const_cast(pAny)); + } + + template + inline ValueType* unsafe_any_cast(any* pAny) EA_NOEXCEPT + { + return static_cast(pAny->m_handler(any::storage_operation::GET, pAny, nullptr)); + } + + ////////////////////////////////////////////////////////////////////////////////////////// + // make_any + // + #if EASTL_VARIADIC_TEMPLATES_ENABLED + template + inline any make_any(Args&&... args) + { + return any(eastl::in_place, eastl::forward(args)...); + } + + template + inline any make_any(std::initializer_list il, Args&&... args) + { + return any(eastl::in_place, il, eastl::forward(args)...); + } + #endif + +} // namespace eastl + +#endif // EASTL_ANY_H diff --git a/include/EASTL/array.h b/include/EASTL/array.h new file mode 100644 index 0000000..c871b0b --- /dev/null +++ b/include/EASTL/array.h @@ -0,0 +1,495 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// Implements a templated array class as per the C++ standard TR1 (technical +// report 1, which is a list of proposed C++ library amendments). 
+// The primary distinctions between this array and TR1 array are: +// - array::size_type is defined as eastl_size_t instead of size_t in order +// to save memory and run faster on 64 bit systems. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ARRAY_H +#define EASTL_ARRAY_H + + +#include +#include +#include +#include +#include + +#if EASTL_EXCEPTIONS_ENABLED + EA_DISABLE_ALL_VC_WARNINGS() + #include // std::out_of_range, std::length_error. + EA_RESTORE_ALL_VC_WARNINGS() +#endif + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /////////////////////////////////////////////////////////////////////// + /// array + /// + /// Implements a templated array class as per the C++ standard TR1. + /// This class allows you to use a built-in C style array like an STL vector. + /// It does not let you change its size, as it is just like a C built-in array. + /// Our implementation here strives to remove function call nesting, as that + /// makes it hard for us to profile debug builds due to function call overhead. + /// Note that this is intentionally a struct with public data, as per the + /// C++ standard update proposal requirements. + /// + /// Example usage: + /// array a = { { 0, 1, 2, 3, 4 } }; // Strict compilers such as GCC require the double brackets. 
+ /// a[2] = 4; + /// for(array::iterator i = a.begin(); i < a.end(); ++i) + /// *i = 0; + /// + template + struct array + { + public: + typedef array this_type; + typedef T value_type; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef value_type* iterator; + typedef const value_type* const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + + public: + enum + { + count = N + }; + + // Note that the member data is intentionally public. + // This allows for aggregate initialization of the + // object (e.g. array a = { 0, 3, 2, 4 }; ) + value_type mValue[N ? N : 1]; + + public: + // We intentionally provide no constructor, destructor, or assignment operator. + + void fill(const value_type& value); + + // Unlike the swap function for other containers, array::swap takes linear time, + // may exit via an exception, and does not cause iterators to become associated with the other container. 
+ void swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable::value); + + EA_CPP14_CONSTEXPR iterator begin() EA_NOEXCEPT; + EA_CPP14_CONSTEXPR const_iterator begin() const EA_NOEXCEPT; + EA_CPP14_CONSTEXPR const_iterator cbegin() const EA_NOEXCEPT; + + EA_CPP14_CONSTEXPR iterator end() EA_NOEXCEPT; + EA_CPP14_CONSTEXPR const_iterator end() const EA_NOEXCEPT; + EA_CPP14_CONSTEXPR const_iterator cend() const EA_NOEXCEPT; + + EA_CPP14_CONSTEXPR reverse_iterator rbegin() EA_NOEXCEPT; + EA_CPP14_CONSTEXPR const_reverse_iterator rbegin() const EA_NOEXCEPT; + EA_CPP14_CONSTEXPR const_reverse_iterator crbegin() const EA_NOEXCEPT; + + EA_CPP14_CONSTEXPR reverse_iterator rend() EA_NOEXCEPT; + EA_CPP14_CONSTEXPR const_reverse_iterator rend() const EA_NOEXCEPT; + EA_CPP14_CONSTEXPR const_reverse_iterator crend() const EA_NOEXCEPT; + + EA_CPP14_CONSTEXPR bool empty() const EA_NOEXCEPT; + EA_CPP14_CONSTEXPR size_type size() const EA_NOEXCEPT; + EA_CPP14_CONSTEXPR size_type max_size() const EA_NOEXCEPT; + + EA_CPP14_CONSTEXPR T* data() EA_NOEXCEPT; + EA_CPP14_CONSTEXPR const T* data() const EA_NOEXCEPT; + + EA_CPP14_CONSTEXPR reference operator[](size_type i); + EA_CPP14_CONSTEXPR const_reference operator[](size_type i) const; + EA_CPP14_CONSTEXPR const_reference at(size_type i) const; + EA_CPP14_CONSTEXPR reference at(size_type i); + + EA_CPP14_CONSTEXPR reference front(); + EA_CPP14_CONSTEXPR const_reference front() const; + + EA_CPP14_CONSTEXPR reference back(); + EA_CPP14_CONSTEXPR const_reference back() const; + + bool validate() const; + int validate_iterator(const_iterator i) const; + + }; // class array + + + /////////////////////////////////////////////////////////////////////////// + // template deduction guides + /////////////////////////////////////////////////////////////////////////// + #ifdef __cpp_deduction_guides + template array(T, U...) 
-> array; + #endif + + + /////////////////////////////////////////////////////////////////////// + // array + /////////////////////////////////////////////////////////////////////// + + + template + inline void array::fill(const value_type& value) + { + eastl::fill_n(&mValue[0], N, value); + } + + + template + inline void array::swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable::value) + { + eastl::swap_ranges(&mValue[0], &mValue[N], &x.mValue[0]); + } + + + template + EA_CPP14_CONSTEXPR inline typename array::iterator + array::begin() EA_NOEXCEPT + { + return &mValue[0]; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_iterator + array::begin() const EA_NOEXCEPT + { + return &mValue[0]; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_iterator + array::cbegin() const EA_NOEXCEPT + { + return &mValue[0]; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::iterator + array::end() EA_NOEXCEPT + { + return &mValue[N]; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_iterator + array::end() const EA_NOEXCEPT + { + return &mValue[N]; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_iterator + array::cend() const EA_NOEXCEPT + { + return &mValue[N]; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::reverse_iterator + array::rbegin() EA_NOEXCEPT + { + return reverse_iterator(&mValue[N]); + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_reverse_iterator + array::rbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(&mValue[N]); + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_reverse_iterator + array::crbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(&mValue[N]); + } + + + template + EA_CPP14_CONSTEXPR inline typename array::reverse_iterator + array::rend() EA_NOEXCEPT + { + return reverse_iterator(&mValue[0]); + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_reverse_iterator + 
array::rend() const EA_NOEXCEPT + { + return const_reverse_iterator(static_cast(&mValue[0])); + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_reverse_iterator + array::crend() const EA_NOEXCEPT + { + return const_reverse_iterator(static_cast(&mValue[0])); + } + + + template + EA_CPP14_CONSTEXPR inline typename array::size_type + array::size() const EA_NOEXCEPT + { + return (size_type)N; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::size_type + array::max_size() const EA_NOEXCEPT + { + return (size_type)N; + } + + + template + EA_CPP14_CONSTEXPR inline bool array::empty() const EA_NOEXCEPT + { + return (N == 0); + } + + + template + EA_CPP14_CONSTEXPR inline typename array::reference + array::operator[](size_type i) + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(i >= N)) + EASTL_FAIL_MSG("array::operator[] -- out of range"); + #endif + + EA_ANALYSIS_ASSUME(i < N); + return mValue[i]; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_reference + array::operator[](size_type i) const + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(i >= N)) + EASTL_FAIL_MSG("array::operator[] -- out of range"); + + #endif + + EA_ANALYSIS_ASSUME(i < N); + return mValue[i]; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::reference + array::front() + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(empty())) // We don't allow the user to reference an empty container. + EASTL_FAIL_MSG("array::front -- empty array"); + #endif + + return mValue[0]; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_reference + array::front() const + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(empty())) // We don't allow the user to reference an empty container. 
+ EASTL_FAIL_MSG("array::front -- empty array"); + #endif + + return mValue[0]; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::reference + array::back() + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(empty())) // We don't allow the user to reference an empty container. + EASTL_FAIL_MSG("array::back -- empty array"); + #endif + + return mValue[N - 1]; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_reference + array::back() const + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(empty())) // We don't allow the user to reference an empty container. + EASTL_FAIL_MSG("array::back -- empty array"); + #endif + + return mValue[N - 1]; + } + + + template + EA_CPP14_CONSTEXPR inline T* array::data() EA_NOEXCEPT + { + return mValue; + } + + + template + EA_CPP14_CONSTEXPR inline const T* array::data() const EA_NOEXCEPT + { + return mValue; + } + + + template + EA_CPP14_CONSTEXPR inline typename array::const_reference array::at(size_type i) const + { + #if EASTL_EXCEPTIONS_ENABLED + if(EASTL_UNLIKELY(i >= N)) + throw std::out_of_range("array::at -- out of range"); + #elif EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(i >= N)) + EASTL_FAIL_MSG("array::at -- out of range"); + #endif + + EA_ANALYSIS_ASSUME(i < N); + return static_cast(mValue[i]); + } + + + template + EA_CPP14_CONSTEXPR inline typename array::reference array::at(size_type i) + { + #if EASTL_EXCEPTIONS_ENABLED + if(EASTL_UNLIKELY(i >= N)) + throw std::out_of_range("array::at -- out of range"); + #elif EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(i >= N)) + EASTL_FAIL_MSG("array::at -- out of range"); + #endif + + EA_ANALYSIS_ASSUME(i < N); + return static_cast(mValue[i]); + } + + + template + inline bool array::validate() const + { + return true; // There is nothing to do. 
+ } + + + template + inline int array::validate_iterator(const_iterator i) const + { + if(i >= mValue) + { + if(i < (mValue + N)) + return (isf_valid | isf_current | isf_can_dereference); + + if(i <= (mValue + N)) + return (isf_valid | isf_current); + } + + return isf_none; + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + EA_CPP14_CONSTEXPR inline bool operator==(const array& a, const array& b) + { + return eastl::equal(&a.mValue[0], &a.mValue[N], &b.mValue[0]); + } + + + template + EA_CPP14_CONSTEXPR inline bool operator<(const array& a, const array& b) + { + return eastl::lexicographical_compare(&a.mValue[0], &a.mValue[N], &b.mValue[0], &b.mValue[N]); + } + + + template + EA_CPP14_CONSTEXPR inline bool operator!=(const array& a, const array& b) + { + return !eastl::equal(&a.mValue[0], &a.mValue[N], &b.mValue[0]); + } + + + template + EA_CPP14_CONSTEXPR inline bool operator>(const array& a, const array& b) + { + return eastl::lexicographical_compare(&b.mValue[0], &b.mValue[N], &a.mValue[0], &a.mValue[N]); + } + + + template + EA_CPP14_CONSTEXPR inline bool operator<=(const array& a, const array& b) + { + return !eastl::lexicographical_compare(&b.mValue[0], &b.mValue[N], &a.mValue[0], &a.mValue[N]); + } + + + template + EA_CPP14_CONSTEXPR inline bool operator>=(const array& a, const array& b) + { + return !eastl::lexicographical_compare(&a.mValue[0], &a.mValue[N], &b.mValue[0], &b.mValue[N]); + } + + + template + inline void swap(array& a, array& b) + { + eastl::swap_ranges(&a.mValue[0], &a.mValue[N], &b.mValue[0]); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + diff --git a/include/EASTL/bitset.h b/include/EASTL/bitset.h new file mode 100644 index 0000000..f20feb6 --- /dev/null +++ b/include/EASTL/bitset.h @@ -0,0 +1,2254 @@ 
+///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a bitset much like the C++ std::bitset class. +// The primary distinctions between this list and std::bitset are: +// - bitset is more efficient than some other std::bitset implementations, +// notably the bitset that comes with Microsoft and other 1st party platforms. +// - bitset is savvy to an environment that doesn't have exception handling, +// as is sometimes the case with console or embedded environments. +// - bitset is savvy to environments in which 'unsigned long' is not the +// most efficient integral data type. std::bitset implementations use +// unsigned long, even if it is an inefficient integer type. +// - bitset removes as much function calls as practical, in order to allow +// debug builds to run closer in speed and code footprint to release builds. +// - bitset doesn't support string functionality. We can add this if +// it is deemed useful. +// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_BITSET_H +#define EASTL_BITSET_H + + +#include +#include + +#ifdef _MSC_VER + #pragma warning(push, 0) +#endif +#include +#include +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +#if EASTL_EXCEPTIONS_ENABLED + #ifdef _MSC_VER + #pragma warning(push, 0) + #endif + #include // std::out_of_range, std::length_error. + #ifdef _MSC_VER + #pragma warning(pop) + #endif +#endif + +#if defined(_MSC_VER) + #pragma warning(push) + #pragma warning(disable: 4127) // Conditional expression is constant +#endif + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + + + +namespace eastl +{ + // To consider: Enable this for backwards compatibility with any user code that might be using BitsetWordType: + // #define BitsetWordType EASTL_BITSET_WORD_TYPE_DEFAULT + + + /// BITSET_WORD_COUNT + /// + /// Defines the number of words we use, based on the number of bits. + /// nBitCount refers to the number of bits in a bitset. + /// WordType refers to the type of integer word which stores bitet data. By default it is BitsetWordType. + /// + #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x can't handle the simpler declaration below. + #define BITSET_WORD_COUNT(nBitCount, WordType) (nBitCount == 0 ? 1 : ((nBitCount - 1) / (8 * sizeof(WordType)) + 1)) + #else + #define BITSET_WORD_COUNT(nBitCount, WordType) ((nBitCount - 1) / (8 * sizeof(WordType)) + 1) + #endif + + + /// EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING + /// Before GCC 4.7 the '-Warray-bounds' buggy and was very likely to issue false positives for loops that are + /// difficult to evaluate. + /// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=45978 + /// + #if defined(__GNUC__) && (EA_COMPILER_VERSION > 4007) && defined(EA_PLATFORM_ANDROID) // Earlier than GCC 4.7 + #define EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING 1 + #else + #define EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING 0 + #endif + + + + /// BitsetBase + /// + /// This is a default implementation that works for any number of words. + /// + template // Templated on the number of words used to hold the bitset and the word type. + struct BitsetBase + { + typedef WordType word_type; + typedef BitsetBase this_type; + #if EASTL_BITSET_SIZE_T + typedef size_t size_type; + #else + typedef eastl_size_t size_type; + #endif + + enum { + kBitsPerWord = (8 * sizeof(word_type)), + kBitsPerWordMask = (kBitsPerWord - 1), + kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 
6 : 7))))) + }; + + public: + word_type mWord[NW]; + + public: + BitsetBase(); + BitsetBase(uint32_t value); // This exists only for compatibility with std::bitset, which has a 'long' constructor. + //BitsetBase(uint64_t value); // Disabled because it causes conflicts with the 32 bit version with existing user code. Use from_uint64 to init from a uint64_t instead. + + void operator&=(const this_type& x); + void operator|=(const this_type& x); + void operator^=(const this_type& x); + + void operator<<=(size_type n); + void operator>>=(size_type n); + + void flip(); + void set(); + void set(size_type i, bool value); + void reset(); + + bool operator==(const this_type& x) const; + + bool any() const; + size_type count() const; + + void from_uint32(uint32_t value); + void from_uint64(uint64_t value); + + unsigned long to_ulong() const; + uint32_t to_uint32() const; + uint64_t to_uint64() const; + + word_type& DoGetWord(size_type i); + word_type DoGetWord(size_type i) const; + + size_type DoFindFirst() const; + size_type DoFindNext(size_type last_find) const; + + size_type DoFindLast() const; // Returns NW * kBitsPerWord (the bit count) if no bits are set. + size_type DoFindPrev(size_type last_find) const; // Returns NW * kBitsPerWord (the bit count) if no bits are set. + + }; // class BitsetBase + + + + /// BitsetBase<1, WordType> + /// + /// This is a specialization for a bitset that fits within one word. + /// + template + struct BitsetBase<1, WordType> + { + typedef WordType word_type; + typedef BitsetBase<1, WordType> this_type; + #if EASTL_BITSET_SIZE_T + typedef size_t size_type; + #else + typedef eastl_size_t size_type; + #endif + + enum { + kBitsPerWord = (8 * sizeof(word_type)), + kBitsPerWordMask = (kBitsPerWord - 1), + kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 
6 : 7))))) + }; + + public: + word_type mWord[1]; // Defined as an array of 1 so that bitset can treat this BitsetBase like others. + + public: + BitsetBase(); + BitsetBase(uint32_t value); + //BitsetBase(uint64_t value); // Disabled because it causes conflicts with the 32 bit version with existing user code. Use from_uint64 instead. + + void operator&=(const this_type& x); + void operator|=(const this_type& x); + void operator^=(const this_type& x); + + void operator<<=(size_type n); + void operator>>=(size_type n); + + void flip(); + void set(); + void set(size_type i, bool value); + void reset(); + + bool operator==(const this_type& x) const; + + bool any() const; + size_type count() const; + + void from_uint32(uint32_t value); + void from_uint64(uint64_t value); + + unsigned long to_ulong() const; + uint32_t to_uint32() const; + uint64_t to_uint64() const; + + word_type& DoGetWord(size_type); + word_type DoGetWord(size_type) const; + + size_type DoFindFirst() const; + size_type DoFindNext(size_type last_find) const; + + size_type DoFindLast() const; // Returns 1 * kBitsPerWord (the bit count) if no bits are set. + size_type DoFindPrev(size_type last_find) const; // Returns 1 * kBitsPerWord (the bit count) if no bits are set. + + }; // BitsetBase<1, WordType> + + + + /// BitsetBase<2, WordType> + /// + /// This is a specialization for a bitset that fits within two words. + /// The difference here is that we avoid branching (ifs and loops). + /// + template + struct BitsetBase<2, WordType> + { + typedef WordType word_type; + typedef BitsetBase<2, WordType> this_type; + #if EASTL_BITSET_SIZE_T + typedef size_t size_type; + #else + typedef eastl_size_t size_type; + #endif + + enum { + kBitsPerWord = (8 * sizeof(word_type)), + kBitsPerWordMask = (kBitsPerWord - 1), + kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 
6 : 7))))) + }; + + public: + word_type mWord[2]; + + public: + BitsetBase(); + BitsetBase(uint32_t value); + //BitsetBase(uint64_t value); // Disabled because it causes conflicts with the 32 bit version with existing user code. Use from_uint64 instead. + + void operator&=(const this_type& x); + void operator|=(const this_type& x); + void operator^=(const this_type& x); + + void operator<<=(size_type n); + void operator>>=(size_type n); + + void flip(); + void set(); + void set(size_type i, bool value); + void reset(); + + bool operator==(const this_type& x) const; + + bool any() const; + size_type count() const; + + void from_uint32(uint32_t value); + void from_uint64(uint64_t value); + + unsigned long to_ulong() const; + uint32_t to_uint32() const; + uint64_t to_uint64() const; + + word_type& DoGetWord(size_type); + word_type DoGetWord(size_type) const; + + size_type DoFindFirst() const; + size_type DoFindNext(size_type last_find) const; + + size_type DoFindLast() const; // Returns 2 * kBitsPerWord (the bit count) if no bits are set. + size_type DoFindPrev(size_type last_find) const; // Returns 2 * kBitsPerWord (the bit count) if no bits are set. + + }; // BitsetBase<2, WordType> + + + + + /// bitset + /// + /// Implements a bitset much like the C++ std::bitset. + /// + /// As of this writing we don't implement a specialization of bitset<0>, + /// as it is deemed an academic exercise that nobody would actually + /// use and it would increase code space and provide little practical + /// benefit. Note that this doesn't mean bitset<0> isn't supported; + /// it means that our version of it isn't as efficient as it would be + /// if a specialization was made for it. + /// + /// - N can be any unsigned (non-zero) value, though memory usage is + /// linear with respect to N, so large values of N use large amounts of memory. + /// - WordType must be one of [uint16_t, uint32_t, uint64_t, uint128_t] + /// and the compiler must support the type. 
By default the WordType is + /// the largest native register type that the target platform supports. + /// + template + class bitset : private BitsetBase + { + public: + typedef BitsetBase base_type; + typedef bitset this_type; + typedef WordType word_type; + typedef typename base_type::size_type size_type; + + enum + { + kBitsPerWord = (8 * sizeof(word_type)), + kBitsPerWordMask = (kBitsPerWord - 1), + kBitsPerWordShift = ((kBitsPerWord == 8) ? 3 : ((kBitsPerWord == 16) ? 4 : ((kBitsPerWord == 32) ? 5 : (((kBitsPerWord == 64) ? 6 : 7))))), + kSize = N, // The number of bits the bitset holds + kWordSize = sizeof(word_type), // The size of individual words the bitset uses to hold the bits. + kWordCount = BITSET_WORD_COUNT(N, WordType) // The number of words the bitset uses to hold the bits. sizeof(bitset) == kWordSize * kWordCount. + }; + + using base_type::mWord; + using base_type::DoGetWord; + using base_type::DoFindFirst; + using base_type::DoFindNext; + using base_type::DoFindLast; + using base_type::DoFindPrev; + using base_type::to_ulong; + using base_type::to_uint32; + using base_type::to_uint64; + using base_type::count; + using base_type::any; + + public: + /// reference + /// + /// A reference is a reference to a specific bit in the bitset. + /// The C++ standard specifies that this be a nested class, + /// though it is not clear if a non-nested reference implementation + /// would be non-conforming. + /// + class reference + { + protected: + friend class bitset; + + word_type* mpBitWord; + size_type mnBitIndex; + + reference(){} // The C++ standard specifies that this is private. + + public: + reference(const bitset& x, size_type i); + + reference& operator=(bool value); + reference& operator=(const reference& x); + + bool operator~() const; + operator bool() const // Defined inline because CodeWarrior fails to be able to compile it outside. 
+ { return (*mpBitWord & (static_cast(1) << (mnBitIndex & kBitsPerWordMask))) != 0; } + + reference& flip(); + }; + + public: + friend class reference; + + bitset(); + bitset(uint32_t value); + //bitset(uint64_t value); // Disabled because it causes conflicts with the 32 bit version with existing user code. Use from_uint64 instead. + + // We don't define copy constructor and operator= because + // the compiler-generated versions will suffice. + + this_type& operator&=(const this_type& x); + this_type& operator|=(const this_type& x); + this_type& operator^=(const this_type& x); + + this_type& operator<<=(size_type n); + this_type& operator>>=(size_type n); + + this_type& set(); + this_type& set(size_type i, bool value = true); + + this_type& reset(); + this_type& reset(size_type i); + + this_type& flip(); + this_type& flip(size_type i); + this_type operator~() const; + + reference operator[](size_type i); + bool operator[](size_type i) const; + + const word_type* data() const; + word_type* data(); + + void from_uint32(uint32_t value); + void from_uint64(uint64_t value); + + //unsigned long to_ulong() const; // We inherit this from the base class. + //uint32_t to_uint32() const; + //uint64_t to_uint64() const; + + //size_type count() const; // We inherit this from the base class. + size_type size() const; + + bool operator==(const this_type& x) const; + bool operator!=(const this_type& x) const; + + bool test(size_type i) const; + //bool any() const; // We inherit this from the base class. + bool all() const; + bool none() const; + + this_type operator<<(size_type n) const; + this_type operator>>(size_type n) const; + + // Finds the index of the first "on" bit, returns kSize if none are set. + size_type find_first() const; + + // Finds the index of the next "on" bit after last_find, returns kSize if none are set. + size_type find_next(size_type last_find) const; + + // Finds the index of the last "on" bit, returns kSize if none are set. 
+ size_type find_last() const; + + // Finds the index of the last "on" bit before last_find, returns kSize if none are set. + size_type find_prev(size_type last_find) const; + + }; // bitset + + + + + + + + /// BitsetCountBits + /// + /// This is a fast trick way to count bits without branches nor memory accesses. + /// + inline uint32_t BitsetCountBits(uint64_t x) + { + // GCC 3.x's implementation of UINT64_C is broken and fails to deal with + // the code below correctly. So we make a workaround for it. Earlier and + // later versions of GCC don't have this bug. + + #if defined(__GNUC__) && (__GNUC__ == 3) + x = x - ((x >> 1) & 0x5555555555555555ULL); + x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL); + x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0FULL; + return (uint32_t)((x * 0x0101010101010101ULL) >> 56); + #else + x = x - ((x >> 1) & UINT64_C(0x5555555555555555)); + x = (x & UINT64_C(0x3333333333333333)) + ((x >> 2) & UINT64_C(0x3333333333333333)); + x = (x + (x >> 4)) & UINT64_C(0x0F0F0F0F0F0F0F0F); + return (uint32_t)((x * UINT64_C(0x0101010101010101)) >> 56); + #endif + } + + inline uint32_t BitsetCountBits(uint32_t x) + { + x = x - ((x >> 1) & 0x55555555); + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x + (x >> 4)) & 0x0F0F0F0F; + return (uint32_t)((x * 0x01010101) >> 24); + } + + inline uint32_t BitsetCountBits(uint16_t x) + { + return BitsetCountBits((uint32_t)x); + } + + inline uint32_t BitsetCountBits(uint8_t x) + { + return BitsetCountBits((uint32_t)x); + } + + + // const static char kBitsPerUint16[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 }; + #define EASTL_BITSET_COUNT_STRING "\0\1\1\2\1\2\2\3\1\2\2\3\2\3\3\4" + + + inline uint32_t GetFirstBit(uint8_t x) + { + if(x) + { + uint32_t n = 1; + + if((x & 0x0000000F) == 0) { n += 4; x >>= 4; } + if((x & 0x00000003) == 0) { n += 2; x >>= 2; } + + return (uint32_t)(n - (x & 1)); + } + + return 8; + } + + inline uint32_t GetFirstBit(uint16_t x) // To do: Update this to 
// Returns the index of the lowest set bit in x, or 16 if x is zero.
// To do: Update this to use VC++ _BitScanForward, _BitScanForward64; GCC __builtin_ctz, __builtin_ctzl.
// VC++ __lzcnt16, __lzcnt, __lzcnt64 requires recent CPUs (2013+) and probably can't be used.
// http://en.wikipedia.org/wiki/Haswell_%28microarchitecture%29#New_features
inline uint32_t GetFirstBit(uint16_t x)
{
	if(x)
	{
		uint32_t n = 1;

		if((x & 0x000000FF) == 0) { n += 8; x >>= 8; }
		if((x & 0x0000000F) == 0) { n += 4; x >>= 4; }
		if((x & 0x00000003) == 0) { n += 2; x >>= 2; }

		return (uint32_t)(n - (x & 1));
	}

	return 16;
}

// Returns the index of the lowest set bit in x, or 32 if x is zero.
inline uint32_t GetFirstBit(uint32_t x)
{
	if(x)
	{
		uint32_t n = 1;

		if((x & 0x0000FFFF) == 0) { n += 16; x >>= 16; }
		if((x & 0x000000FF) == 0) { n +=  8; x >>=  8; }
		if((x & 0x0000000F) == 0) { n +=  4; x >>=  4; }
		if((x & 0x00000003) == 0) { n +=  2; x >>=  2; }

		return (n - (x & 1));
	}

	return 32;
}

// Returns the index of the lowest set bit in x, or 64 if x is zero.
inline uint32_t GetFirstBit(uint64_t x)
{
	if(x)
	{
		uint32_t n = 1;

		if((x & 0xFFFFFFFF) == 0) { n += 32; x >>= 32; }
		if((x & 0x0000FFFF) == 0) { n += 16; x >>= 16; }
		if((x & 0x000000FF) == 0) { n +=  8; x >>=  8; }
		if((x & 0x0000000F) == 0) { n +=  4; x >>=  4; }
		if((x & 0x00000003) == 0) { n +=  2; x >>=  2; }

		return (n - ((uint32_t)x & 1));
	}

	return 64;
}


#if EASTL_INT128_SUPPORTED
	// Returns the index of the lowest set bit in x, or 128 if x is zero.
	inline uint32_t GetFirstBit(eastl_uint128_t x)
	{
		if(x)
		{
			uint32_t n = 1;

			if((x & UINT64_C(0xFFFFFFFFFFFFFFFF)) == 0) { n += 64; x >>= 64; }
			if((x & 0xFFFFFFFF) == 0)                   { n += 32; x >>= 32; }
			if((x & 0x0000FFFF) == 0)                   { n += 16; x >>= 16; }
			if((x & 0x000000FF) == 0)                   { n +=  8; x >>=  8; }
			if((x & 0x0000000F) == 0)                   { n +=  4; x >>=  4; }
			if((x & 0x00000003) == 0)                   { n +=  2; x >>=  2; }

			return (n - ((uint32_t)x & 1));
		}

		return 128;
	}
#endif

// Returns the index of the highest set bit in x, or 8 if x is zero.
// Note: the masks intentionally exceed 8 bits; x is promoted to int and its
// high bits are zero, so the extra mask bits have no effect.
inline uint32_t GetLastBit(uint8_t x)
{
	if(x)
	{
		uint32_t n = 0;

		if(x & 0xFFF0) { n += 4; x >>= 4; }
		if(x & 0xFFFC) { n += 2; x >>= 2; }
		if(x & 0xFFFE) { n += 1;          }

		return n;
	}

	return 8;
}

// Returns the index of the highest set bit in x, or 16 if x is zero.
inline uint32_t GetLastBit(uint16_t x)
{
	if(x)
	{
		uint32_t n = 0;

		if(x & 0xFF00) { n += 8; x >>= 8; }
		if(x & 0xFFF0) { n += 4; x >>= 4; }
		if(x & 0xFFFC) { n += 2; x >>= 2; }
		if(x & 0xFFFE) { n += 1;          }

		return n;
	}

	return 16;
}

// Returns the index of the highest set bit in x, or 32 if x is zero.
inline uint32_t GetLastBit(uint32_t x)
{
	if(x)
	{
		uint32_t n = 0;

		if(x & 0xFFFF0000) { n += 16; x >>= 16; }
		if(x & 0xFFFFFF00) { n +=  8; x >>=  8; }
		if(x & 0xFFFFFFF0) { n +=  4; x >>=  4; }
		if(x & 0xFFFFFFFC) { n +=  2; x >>=  2; }
		if(x & 0xFFFFFFFE) { n +=  1;           }

		return n;
	}

	return 32;
}

// Returns the index of the highest set bit in x, or 64 if x is zero.
inline uint32_t GetLastBit(uint64_t x)
{
	if(x)
	{
		uint32_t n = 0;

		if(x & UINT64_C(0xFFFFFFFF00000000)) { n += 32; x >>= 32; }
		if(x & 0xFFFF0000)                   { n += 16; x >>= 16; }
		if(x & 0xFFFFFF00)                   { n +=  8; x >>=  8; }
		if(x & 0xFFFFFFF0)                   { n +=  4; x >>=  4; }
		if(x & 0xFFFFFFFC)                   { n +=  2; x >>=  2; }
		if(x & 0xFFFFFFFE)                   { n +=  1;           }

		return n;
	}

	return 64;
}

#if EASTL_INT128_SUPPORTED
	// Returns the index of the highest set bit in x, or 128 if x is zero.
	inline uint32_t GetLastBit(eastl_uint128_t x)
	{
		if(x)
		{
			uint32_t n = 0;

			// There doesn't seem to exist compiler support for INT128_C() by any
			// compiler. EAStdC's int128_t supports it though.
			eastl_uint128_t mask(UINT64_C(0xFFFFFFFF00000000));
			mask <<= 64;

			if(x & mask)                                { n += 64; x >>= 64; }
			if(x & UINT64_C(0xFFFFFFFF00000000))        { n += 32; x >>= 32; }
			if(x & UINT64_C(0x00000000FFFF0000))        { n += 16; x >>= 16; }
			if(x & UINT64_C(0x00000000FFFFFF00))        { n +=  8; x >>=  8; }
			if(x & UINT64_C(0x00000000FFFFFFF0))        { n +=  4; x >>=  4; }
			if(x & UINT64_C(0x00000000FFFFFFFC))        { n +=  2; x >>=  2; }
			if(x & UINT64_C(0x00000000FFFFFFFE))        { n +=  1;           }

			return n;
		}

		return 128;
	}
#endif




///////////////////////////////////////////////////////////////////////////
// BitsetBase
//
// We tried two forms of array access here:
//     for(word_type *pWord(mWord), *pWordEnd(mWord + NW); pWord < pWordEnd; ++pWord)
//         *pWord = ...
// and
//     for(size_t i = 0; i < NW; i++)
//         mWord[i] = ...
//
// For our tests (~NW < 16), the latter (using []) access resulted in faster code.
///////////////////////////////////////////////////////////////////////////
+ // + // For our tests (~NW < 16), the latter (using []) access resulted in faster code. + /////////////////////////////////////////////////////////////////////////// + + template + inline BitsetBase::BitsetBase() + { + reset(); + } + + + template + inline BitsetBase::BitsetBase(uint32_t value) + { + // This implementation assumes that sizeof(value) <= sizeof(word_type). + //EASTL_CT_ASSERT(sizeof(value) <= sizeof(word_type)); Disabled because we now have support for uint8_t and uint16_t word types. It would be nice to have a runtime assert that tested this. + + reset(); + mWord[0] = static_cast(value); + } + + + /* + template + inline BitsetBase::BitsetBase(uint64_t value) + { + reset(); + + #if(EA_PLATFORM_WORD_SIZE == 4) + mWord[0] = static_cast(value); + + EASTL_CT_ASSERT(NW > 2); // We can assume this because we have specializations of BitsetBase for <1> and <2>. + //if(NW > 1) // NW is a template constant, but it would be a little messy to take advantage of it's const-ness. + mWord[1] = static_cast(value >> 32); + #else + mWord[0] = static_cast(value); + #endif + } + */ + + + template + inline void BitsetBase::operator&=(const this_type& x) + { + for(size_t i = 0; i < NW; i++) + mWord[i] &= x.mWord[i]; + } + + + template + inline void BitsetBase::operator|=(const this_type& x) + { + for(size_t i = 0; i < NW; i++) + mWord[i] |= x.mWord[i]; + } + + + template + inline void BitsetBase::operator^=(const this_type& x) + { + for(size_t i = 0; i < NW; i++) + mWord[i] ^= x.mWord[i]; + } + + + template + inline void BitsetBase::operator<<=(size_type n) + { + const size_type nWordShift = (size_type)(n >> kBitsPerWordShift); + + if(nWordShift) + { + for(int i = (int)(NW - 1); i >= 0; --i) + mWord[i] = (nWordShift <= (size_type)i) ? 
mWord[i - nWordShift] : (word_type)0; + } + + if(n &= kBitsPerWordMask) + { + for(size_t i = (NW - 1); i > 0; --i) + mWord[i] = (word_type)((mWord[i] << n) | (mWord[i - 1] >> (kBitsPerWord - n))); + mWord[0] <<= n; + } + + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase::operator>>=(size_type n) + { + const size_type nWordShift = (size_type)(n >> kBitsPerWordShift); + + if(nWordShift) + { + for(size_t i = 0; i < NW; ++i) + mWord[i] = ((nWordShift < (NW - i)) ? mWord[i + nWordShift] : (word_type)0); + } + + if(n &= kBitsPerWordMask) + { + for(size_t i = 0; i < (NW - 1); ++i) + mWord[i] = (word_type)((mWord[i] >> n) | (mWord[i + 1] << (kBitsPerWord - n))); + mWord[NW - 1] >>= n; + } + } + + + template + inline void BitsetBase::flip() + { + for(size_t i = 0; i < NW; i++) + mWord[i] = ~mWord[i]; + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase::set() + { + for(size_t i = 0; i < NW; i++) + mWord[i] = static_cast(~static_cast(0)); + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase::set(size_type i, bool value) + { + if(value) + mWord[i >> kBitsPerWordShift] |= (static_cast(1) << (i & kBitsPerWordMask)); + else + mWord[i >> kBitsPerWordShift] &= ~(static_cast(1) << (i & kBitsPerWordMask)); + } + + + template + inline void BitsetBase::reset() + { + if(NW > 16) // This is a constant expression and should be optimized away. + { + // This will be fastest if compiler intrinsic function optimizations are enabled. 
+ memset(mWord, 0, sizeof(mWord)); + } + else + { + for(size_t i = 0; i < NW; i++) + mWord[i] = 0; + } + } + + + template + inline bool BitsetBase::operator==(const this_type& x) const + { + for(size_t i = 0; i < NW; i++) + { + if(mWord[i] != x.mWord[i]) + return false; + } + return true; + } + + + template + inline bool BitsetBase::any() const + { + for(size_t i = 0; i < NW; i++) + { + if(mWord[i]) + return true; + } + return false; + } + + + template + inline typename BitsetBase::size_type + BitsetBase::count() const + { + size_type n = 0; + + for(size_t i = 0; i < NW; i++) + { + #if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) && !defined(EA_PLATFORM_ANDROID) // GCC 3.4 or later + #if(EA_PLATFORM_WORD_SIZE == 4) + n += (size_type)__builtin_popcountl(mWord[i]); + #else + n += (size_type)__builtin_popcountll(mWord[i]); + #endif + #elif defined(__GNUC__) && (__GNUC__ < 3) + n += BitsetCountBits(mWord[i]); // GCC 2.x compiler inexplicably blows up on the code below. + #else + // todo: use __popcnt16, __popcnt, __popcnt64 for msvc builds + // https://msdn.microsoft.com/en-us/library/bb385231(v=vs.140).aspx + for(word_type w = mWord[i]; w; w >>= 4) + n += EASTL_BITSET_COUNT_STRING[w & 0xF]; + + // Version which seems to run slower in benchmarks: + // n += BitsetCountBits(mWord[i]); + #endif + + } + return n; + } + + + template + inline void BitsetBase::from_uint32(uint32_t value) + { + reset(); + mWord[0] = static_cast(value); + } + + + template + inline void BitsetBase::from_uint64(uint64_t value) + { + reset(); + + #if(EA_PLATFORM_WORD_SIZE == 4) + mWord[0] = static_cast(value); + + EASTL_CT_ASSERT(NW > 2); // We can assume this because we have specializations of BitsetBase for <1> and <2>. + //if(NW > 1) // NW is a template constant, but it would be a little messy to take advantage of it's const-ness. 
+ mWord[1] = static_cast(value >> 32); + #else + mWord[0] = static_cast(value); + #endif + } + + + template + inline unsigned long BitsetBase::to_ulong() const + { + #if EASTL_EXCEPTIONS_ENABLED + for(size_t i = 1; i < NW; ++i) + { + if(mWord[i]) + throw std::overflow_error("BitsetBase::to_ulong"); + } + #endif + return (unsigned long)mWord[0]; // Todo: We need to deal with the case whereby sizeof(word_type) < sizeof(unsigned long) + } + + + template + inline uint32_t BitsetBase::to_uint32() const + { + #if EASTL_EXCEPTIONS_ENABLED + // Verify that high words or bits are not set and thus that to_uint32 doesn't lose information. + for(size_t i = 1; i < NW; ++i) + { + if(mWord[i]) + throw std::overflow_error("BitsetBase::to_uint32"); + } + + #if(EA_PLATFORM_WORD_SIZE > 4) // if we have 64 bit words... + if(mWord[0] >> 32) + throw std::overflow_error("BitsetBase::to_uint32"); + #endif + #endif + + return (uint32_t)mWord[0]; + } + + + template + inline uint64_t BitsetBase::to_uint64() const + { + #if EASTL_EXCEPTIONS_ENABLED + // Verify that high words are not set and thus that to_uint64 doesn't lose information. + + EASTL_CT_ASSERT(NW > 2); // We can assume this because we have specializations of BitsetBase for <1> and <2>. + for(size_t i = 2; i < NW; ++i) + { + if(mWord[i]) + throw std::overflow_error("BitsetBase::to_uint64"); + } + #endif + + #if(EA_PLATFORM_WORD_SIZE == 4) + EASTL_CT_ASSERT(NW > 2); // We can assume this because we have specializations of BitsetBase for <1> and <2>. 
+ return (mWord[1] << 32) | mWord[0]; + #else + return (uint64_t)mWord[0]; + #endif + } + + + template + inline typename BitsetBase::word_type& + BitsetBase::DoGetWord(size_type i) + { + return mWord[i >> kBitsPerWordShift]; + } + + + template + inline typename BitsetBase::word_type + BitsetBase::DoGetWord(size_type i) const + { + return mWord[i >> kBitsPerWordShift]; + } + + + template + inline typename BitsetBase::size_type + BitsetBase::DoFindFirst() const + { + for(size_type word_index = 0; word_index < NW; ++word_index) + { + const size_type fbiw = GetFirstBit(mWord[word_index]); + + if(fbiw != kBitsPerWord) + return (word_index * kBitsPerWord) + fbiw; + } + + return (size_type)NW * kBitsPerWord; + } + + +#if EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING +EA_DISABLE_GCC_WARNING(-Warray-bounds) +#endif + + template + inline typename BitsetBase::size_type + BitsetBase::DoFindNext(size_type last_find) const + { + // Start looking from the next bit. + ++last_find; + + // Set initial state based on last find. + size_type word_index = static_cast(last_find >> kBitsPerWordShift); + size_type bit_index = static_cast(last_find & kBitsPerWordMask); + + // To do: There probably is a more elegant way to write looping below. + if(word_index < NW) + { + // Mask off previous bits of the word so our search becomes a "find first". 
+ word_type this_word = mWord[word_index] & (~static_cast(0) << bit_index); + + for(;;) + { + const size_type fbiw = GetFirstBit(this_word); + + if(fbiw != kBitsPerWord) + return (word_index * kBitsPerWord) + fbiw; + + if(++word_index < NW) + this_word = mWord[word_index]; + else + break; + } + } + + return (size_type)NW * kBitsPerWord; + } + +#if EASTL_DISABLE_BITSET_ARRAYBOUNDS_WARNING +EA_RESTORE_GCC_WARNING() +#endif + + + + template + inline typename BitsetBase::size_type + BitsetBase::DoFindLast() const + { + for(size_type word_index = (size_type)NW; word_index > 0; --word_index) + { + const size_type lbiw = GetLastBit(mWord[word_index - 1]); + + if(lbiw != kBitsPerWord) + return ((word_index - 1) * kBitsPerWord) + lbiw; + } + + return (size_type)NW * kBitsPerWord; + } + + + template + inline typename BitsetBase::size_type + BitsetBase::DoFindPrev(size_type last_find) const + { + if(last_find > 0) + { + // Set initial state based on last find. + size_type word_index = static_cast(last_find >> kBitsPerWordShift); + size_type bit_index = static_cast(last_find & kBitsPerWordMask); + + // Mask off subsequent bits of the word so our search becomes a "find last". + word_type mask = (~static_cast(0) >> (kBitsPerWord - 1 - bit_index)) >> 1; // We do two shifts here because many CPUs ignore requests to shift 32 bit integers by 32 bits, which could be the case above. 
+ word_type this_word = mWord[word_index] & mask; + + for(;;) + { + const size_type lbiw = GetLastBit(this_word); + + if(lbiw != kBitsPerWord) + return (word_index * kBitsPerWord) + lbiw; + + if(word_index > 0) + this_word = mWord[--word_index]; + else + break; + } + } + + return (size_type)NW * kBitsPerWord; + } + + + + /////////////////////////////////////////////////////////////////////////// + // BitsetBase<1, WordType> + /////////////////////////////////////////////////////////////////////////// + + template + inline BitsetBase<1, WordType>::BitsetBase() + { + mWord[0] = 0; + } + + + template + inline BitsetBase<1, WordType>::BitsetBase(uint32_t value) + { + // This implementation assumes that sizeof(value) <= sizeof(word_type). + //EASTL_CT_ASSERT(sizeof(value) <= sizeof(word_type)); Disabled because we now have support for uint8_t and uint16_t word types. It would be nice to have a runtime assert that tested this. + + mWord[0] = static_cast(value); + } + + + /* + template + inline BitsetBase<1, WordType>::BitsetBase(uint64_t value) + { + #if(EA_PLATFORM_WORD_SIZE == 4) + EASTL_ASSERT(value <= 0xffffffff); + mWord[0] = static_cast(value); // This potentially loses data, but that's what the user is requesting. + #else + mWord[0] = static_cast(value); + #endif + } + */ + + + template + inline void BitsetBase<1, WordType>::operator&=(const this_type& x) + { + mWord[0] &= x.mWord[0]; + } + + + template + inline void BitsetBase<1, WordType>::operator|=(const this_type& x) + { + mWord[0] |= x.mWord[0]; + } + + + template + inline void BitsetBase<1, WordType>::operator^=(const this_type& x) + { + mWord[0] ^= x.mWord[0]; + } + + + template + inline void BitsetBase<1, WordType>::operator<<=(size_type n) + { + mWord[0] <<= n; + // We let the parent class turn off any upper bits. 
+ } + + + template + inline void BitsetBase<1, WordType>::operator>>=(size_type n) + { + mWord[0] >>= n; + } + + + template + inline void BitsetBase<1, WordType>::flip() + { + mWord[0] = ~mWord[0]; + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase<1, WordType>::set() + { + mWord[0] = static_cast(~static_cast(0)); + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase<1, WordType>::set(size_type i, bool value) + { + if(value) + mWord[0] |= (static_cast(1) << i); + else + mWord[0] &= ~(static_cast(1) << i); + } + + + template + inline void BitsetBase<1, WordType>::reset() + { + mWord[0] = 0; + } + + + template + inline bool BitsetBase<1, WordType>::operator==(const this_type& x) const + { + return mWord[0] == x.mWord[0]; + } + + + template + inline bool BitsetBase<1, WordType>::any() const + { + return mWord[0] != 0; + } + + + template + inline typename BitsetBase<1, WordType>::size_type + BitsetBase<1, WordType>::count() const + { + #if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) && !defined(EA_PLATFORM_ANDROID) // GCC 3.4 or later + #if(EA_PLATFORM_WORD_SIZE == 4) + return (size_type)__builtin_popcountl(mWord[0]); + #else + return (size_type)__builtin_popcountll(mWord[0]); + #endif + #elif defined(__GNUC__) && (__GNUC__ < 3) + return BitsetCountBits(mWord[0]); // GCC 2.x compiler inexplicably blows up on the code below. + #else + size_type n = 0; + for(word_type w = mWord[0]; w; w >>= 4) + n += EASTL_BITSET_COUNT_STRING[w & 0xF]; + return n; + #endif + } + + + template + inline void BitsetBase<1, WordType>::from_uint32(uint32_t value) + { + mWord[0] = static_cast(value); + } + + + template + inline void BitsetBase<1, WordType>::from_uint64(uint64_t value) + { + #if(EA_PLATFORM_WORD_SIZE == 4) + EASTL_ASSERT(value <= 0xffffffff); + mWord[0] = static_cast(value); // This potentially loses data, but that's what the user is requesting. 
+ #else + mWord[0] = static_cast(value); + #endif + } + + + template + inline unsigned long BitsetBase<1, WordType>::to_ulong() const + { + #if EASTL_EXCEPTIONS_ENABLED + #if((EA_PLATFORM_WORD_SIZE > 4) && defined(EA_PLATFORM_MICROSOFT)) // If we are using 64 bit words but ulong is less than 64 bits... Microsoft platforms alone use a 32 bit long under 64 bit platforms. + // Verify that high bits are not set and thus that to_ulong doesn't lose information. + if(mWord[0] >> 32) + throw std::overflow_error("BitsetBase::to_ulong"); + #endif + #endif + + return static_cast(mWord[0]); + } + + + template + inline uint32_t BitsetBase<1, WordType>::to_uint32() const + { + #if EASTL_EXCEPTIONS_ENABLED + #if(EA_PLATFORM_WORD_SIZE > 4) // If we are using 64 bit words... + // Verify that high bits are not set and thus that to_uint32 doesn't lose information. + if(mWord[0] >> 32) + throw std::overflow_error("BitsetBase::to_uint32"); + #endif + #endif + + return static_cast(mWord[0]); + } + + + template + inline uint64_t BitsetBase<1, WordType>::to_uint64() const + { + // This implementation is the same regardless of the word size, and there is no possibility of overflow_error. + return static_cast(mWord[0]); + } + + + template + inline typename BitsetBase<1, WordType>::word_type& + BitsetBase<1, WordType>::DoGetWord(size_type) + { + return mWord[0]; + } + + + template + inline typename BitsetBase<1, WordType>::word_type + BitsetBase<1, WordType>::DoGetWord(size_type) const + { + return mWord[0]; + } + + + template + inline typename BitsetBase<1, WordType>::size_type + BitsetBase<1, WordType>::DoFindFirst() const + { + return GetFirstBit(mWord[0]); + } + + + template + inline typename BitsetBase<1, WordType>::size_type + BitsetBase<1, WordType>::DoFindNext(size_type last_find) const + { + if(++last_find < kBitsPerWord) + { + // Mask off previous bits of word so our search becomes a "find first". 
+ const word_type this_word = mWord[0] & ((~static_cast(0)) << last_find); + + return GetFirstBit(this_word); + } + + return kBitsPerWord; + } + + + template + inline typename BitsetBase<1, WordType>::size_type + BitsetBase<1, WordType>::DoFindLast() const + { + return GetLastBit(mWord[0]); + } + + + template + inline typename BitsetBase<1, WordType>::size_type + BitsetBase<1, WordType>::DoFindPrev(size_type last_find) const + { + if(last_find > 0) + { + // Mask off previous bits of word so our search becomes a "find first". + const word_type this_word = mWord[0] & ((~static_cast(0)) >> (kBitsPerWord - last_find)); + + return GetLastBit(this_word); + } + + return kBitsPerWord; + } + + + + + /////////////////////////////////////////////////////////////////////////// + // BitsetBase<2, WordType> + /////////////////////////////////////////////////////////////////////////// + + template + inline BitsetBase<2, WordType>::BitsetBase() + { + mWord[0] = 0; + mWord[1] = 0; + } + + + template + inline BitsetBase<2, WordType>::BitsetBase(uint32_t value) + { + // This implementation assumes that sizeof(value) <= sizeof(word_type). + //EASTL_CT_ASSERT(sizeof(value) <= sizeof(word_type)); Disabled because we now have support for uint8_t and uint16_t word types. It would be nice to have a runtime assert that tested this. 
+ + mWord[0] = static_cast(value); + mWord[1] = 0; + } + + + /* + template + inline BitsetBase<2, WordType>::BitsetBase(uint64_t value) + { + #if(EA_PLATFORM_WORD_SIZE == 4) + mWord[0] = static_cast(value); + mWord[1] = static_cast(value >> 32); + #else + mWord[0] = static_cast(value); + mWord[1] = 0; + #endif + } + */ + + + template + inline void BitsetBase<2, WordType>::operator&=(const this_type& x) + { + mWord[0] &= x.mWord[0]; + mWord[1] &= x.mWord[1]; + } + + + template + inline void BitsetBase<2, WordType>::operator|=(const this_type& x) + { + mWord[0] |= x.mWord[0]; + mWord[1] |= x.mWord[1]; + } + + + template + inline void BitsetBase<2, WordType>::operator^=(const this_type& x) + { + mWord[0] ^= x.mWord[0]; + mWord[1] ^= x.mWord[1]; + } + + + template + inline void BitsetBase<2, WordType>::operator<<=(size_type n) + { + if(n) // to avoid a shift by kBitsPerWord, which is undefined + { + if(EASTL_UNLIKELY(n >= kBitsPerWord)) // parent expected to handle high bits and n >= 64 + { + mWord[1] = mWord[0]; + mWord[0] = 0; + n -= kBitsPerWord; + } + + mWord[1] = (mWord[1] << n) | (mWord[0] >> (kBitsPerWord - n)); // Intentionally use | instead of +. + mWord[0] <<= n; + // We let the parent class turn off any upper bits. + } + } + + + template + inline void BitsetBase<2, WordType>::operator>>=(size_type n) + { + if(n) // to avoid a shift by kBitsPerWord, which is undefined + { + if(EASTL_UNLIKELY(n >= kBitsPerWord)) // parent expected to handle n >= 64 + { + mWord[0] = mWord[1]; + mWord[1] = 0; + n -= kBitsPerWord; + } + + mWord[0] = (mWord[0] >> n) | (mWord[1] << (kBitsPerWord - n)); // Intentionally use | instead of +. + mWord[1] >>= n; + } + } + + + template + inline void BitsetBase<2, WordType>::flip() + { + mWord[0] = ~mWord[0]; + mWord[1] = ~mWord[1]; + // We let the parent class turn off any upper bits. 
+ } + + + template + inline void BitsetBase<2, WordType>::set() + { + mWord[0] = ~static_cast(0); + mWord[1] = ~static_cast(0); + // We let the parent class turn off any upper bits. + } + + + template + inline void BitsetBase<2, WordType>::set(size_type i, bool value) + { + if(value) + mWord[i >> kBitsPerWordShift] |= (static_cast(1) << (i & kBitsPerWordMask)); + else + mWord[i >> kBitsPerWordShift] &= ~(static_cast(1) << (i & kBitsPerWordMask)); + } + + + template + inline void BitsetBase<2, WordType>::reset() + { + mWord[0] = 0; + mWord[1] = 0; + } + + + template + inline bool BitsetBase<2, WordType>::operator==(const this_type& x) const + { + return (mWord[0] == x.mWord[0]) && (mWord[1] == x.mWord[1]); + } + + + template + inline bool BitsetBase<2, WordType>::any() const + { + // Or with two branches: { return (mWord[0] != 0) || (mWord[1] != 0); } + return (mWord[0] | mWord[1]) != 0; + } + + template + inline typename BitsetBase<2, WordType>::size_type + BitsetBase<2, WordType>::count() const + { + #if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) // GCC 3.4 or later + #if(EA_PLATFORM_WORD_SIZE == 4) + return (size_type)__builtin_popcountl(mWord[0]) + (size_type)__builtin_popcountl(mWord[1]); + #else + return (size_type)__builtin_popcountll(mWord[0]) + (size_type)__builtin_popcountll(mWord[1]); + #endif + + #else + return BitsetCountBits(mWord[0]) + BitsetCountBits(mWord[1]); + #endif + } + + + template + inline void BitsetBase<2, WordType>::from_uint32(uint32_t value) + { + mWord[0] = static_cast(value); + mWord[1] = 0; + } + + + template + inline void BitsetBase<2, WordType>::from_uint64(uint64_t value) + { + #if(EA_PLATFORM_WORD_SIZE == 4) + mWord[0] = static_cast(value); + mWord[1] = static_cast(value >> 32); + #else + mWord[0] = static_cast(value); + mWord[1] = 0; + #endif + } + + + template + inline unsigned long BitsetBase<2, WordType>::to_ulong() const + { + #if EASTL_EXCEPTIONS_ENABLED + if(mWord[1]) + throw 
std::overflow_error("BitsetBase::to_ulong"); + #endif + return (unsigned long)mWord[0]; // Todo: We need to deal with the case whereby sizeof(word_type) < sizeof(unsigned long) + } + + + template + inline uint32_t BitsetBase<2, WordType>::to_uint32() const + { + #if EASTL_EXCEPTIONS_ENABLED + // Verify that high words or bits are not set and thus that to_uint32 doesn't lose information. + + #if(EA_PLATFORM_WORD_SIZE == 4) + if(mWord[1]) + throw std::overflow_error("BitsetBase::to_uint32"); + #else + if(mWord[1] || (mWord[0] >> 32)) + throw std::overflow_error("BitsetBase::to_uint32"); + #endif + #endif + + return (uint32_t)mWord[0]; + } + + + template + inline uint64_t BitsetBase<2, WordType>::to_uint64() const + { + #if(EA_PLATFORM_WORD_SIZE == 4) + // There can't possibly be an overflow_error here. + + return ((uint64_t)mWord[1] << 32) | mWord[0]; + #else + #if EASTL_EXCEPTIONS_ENABLED + if(mWord[1]) + throw std::overflow_error("BitsetBase::to_uint64"); + #endif + + return (uint64_t)mWord[0]; + #endif + } + + + template + inline typename BitsetBase<2, WordType>::word_type& + BitsetBase<2, WordType>::DoGetWord(size_type i) + { + return mWord[i >> kBitsPerWordShift]; + } + + + template + inline typename BitsetBase<2, WordType>::word_type + BitsetBase<2, WordType>::DoGetWord(size_type i) const + { + return mWord[i >> kBitsPerWordShift]; + } + + + template + inline typename BitsetBase<2, WordType>::size_type + BitsetBase<2, WordType>::DoFindFirst() const + { + size_type fbiw = GetFirstBit(mWord[0]); + + if(fbiw != kBitsPerWord) + return fbiw; + + fbiw = GetFirstBit(mWord[1]); + + if(fbiw != kBitsPerWord) + return kBitsPerWord + fbiw; + + return 2 * kBitsPerWord; + } + + + template + inline typename BitsetBase<2, WordType>::size_type + BitsetBase<2, WordType>::DoFindNext(size_type last_find) const + { + // If the last find was in the first word, we must check it and then possibly the second. 
+ if(++last_find < (size_type)kBitsPerWord) + { + // Mask off previous bits of word so our search becomes a "find first". + word_type this_word = mWord[0] & ((~static_cast(0)) << last_find); + + // Step through words. + size_type fbiw = GetFirstBit(this_word); + + if(fbiw != kBitsPerWord) + return fbiw; + + fbiw = GetFirstBit(mWord[1]); + + if(fbiw != kBitsPerWord) + return kBitsPerWord + fbiw; + } + else if(last_find < (size_type)(2 * kBitsPerWord)) + { + // The last find was in the second word, remove the bit count of the first word from the find. + last_find -= kBitsPerWord; + + // Mask off previous bits of word so our search becomes a "find first". + word_type this_word = mWord[1] & ((~static_cast(0)) << last_find); + + const size_type fbiw = GetFirstBit(this_word); + + if(fbiw != kBitsPerWord) + return kBitsPerWord + fbiw; + } + + return 2 * kBitsPerWord; + } + + + template + inline typename BitsetBase<2, WordType>::size_type + BitsetBase<2, WordType>::DoFindLast() const + { + size_type lbiw = GetLastBit(mWord[1]); + + if(lbiw != kBitsPerWord) + return kBitsPerWord + lbiw; + + lbiw = GetLastBit(mWord[0]); + + if(lbiw != kBitsPerWord) + return lbiw; + + return 2 * kBitsPerWord; + } + + + template + inline typename BitsetBase<2, WordType>::size_type + BitsetBase<2, WordType>::DoFindPrev(size_type last_find) const + { + // If the last find was in the second word, we must check it and then possibly the first. + if(last_find > (size_type)kBitsPerWord) + { + // This has the same effect as last_find %= kBitsPerWord in our case. + last_find -= kBitsPerWord; + + // Mask off previous bits of word so our search becomes a "find first". + word_type this_word = mWord[1] & ((~static_cast(0)) >> (kBitsPerWord - last_find)); + + // Step through words. 
+ size_type lbiw = GetLastBit(this_word); + + if(lbiw != kBitsPerWord) + return kBitsPerWord + lbiw; + + lbiw = GetLastBit(mWord[0]); + + if(lbiw != kBitsPerWord) + return lbiw; + } + else if(last_find != 0) + { + // Mask off previous bits of word so our search becomes a "find first". + word_type this_word = mWord[0] & ((~static_cast(0)) >> (kBitsPerWord - last_find)); + + const size_type lbiw = GetLastBit(this_word); + + if(lbiw != kBitsPerWord) + return lbiw; + } + + return 2 * kBitsPerWord; + } + + + + /////////////////////////////////////////////////////////////////////////// + // bitset::reference + /////////////////////////////////////////////////////////////////////////// + + template + inline bitset::reference::reference(const bitset& x, size_type i) + : mpBitWord(&const_cast(x).DoGetWord(i)), + mnBitIndex(i & kBitsPerWordMask) + { // We have an issue here because the above is casting away the const-ness of the source bitset. + // Empty + } + + + template + inline typename bitset::reference& + bitset::reference::operator=(bool value) + { + if(value) + *mpBitWord |= (static_cast(1) << (mnBitIndex & kBitsPerWordMask)); + else + *mpBitWord &= ~(static_cast(1) << (mnBitIndex & kBitsPerWordMask)); + return *this; + } + + + template + inline typename bitset::reference& + bitset::reference::operator=(const reference& x) + { + if(*x.mpBitWord & (static_cast(1) << (x.mnBitIndex & kBitsPerWordMask))) + *mpBitWord |= (static_cast(1) << (mnBitIndex & kBitsPerWordMask)); + else + *mpBitWord &= ~(static_cast(1) << (mnBitIndex & kBitsPerWordMask)); + return *this; + } + + + template + inline bool bitset::reference::operator~() const + { + return (*mpBitWord & (static_cast(1) << (mnBitIndex & kBitsPerWordMask))) == 0; + } + + + //Defined inline in the class because Metrowerks fails to be able to compile it here. 
+ //template + //inline bitset::reference::operator bool() const + //{ + // return (*mpBitWord & (static_cast(1) << (mnBitIndex & kBitsPerWordMask))) != 0; + //} + + + template + inline typename bitset::reference& + bitset::reference::flip() + { + *mpBitWord ^= static_cast(1) << (mnBitIndex & kBitsPerWordMask); + return *this; + } + + + + + /////////////////////////////////////////////////////////////////////////// + // bitset + /////////////////////////////////////////////////////////////////////////// + + template + inline bitset::bitset() + : base_type() + { + // Empty. The base class will set all bits to zero. + } + + EA_DISABLE_VC_WARNING(6313) + template + inline bitset::bitset(uint32_t value) + : base_type(value) + { + if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32. + mWord[kWordCount - 1] &= ~(static_cast(~static_cast(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. + } + EA_RESTORE_VC_WARNING() + + /* + template + inline bitset::bitset(uint64_t value) + : base_type(value) + { + if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... + mWord[kWordCount - 1] &= ~(~static_cast(0) << (N & kBitsPerWordMask)); // This clears any high unused bits. 
+ } + */ + + + template + inline typename bitset::this_type& + bitset::operator&=(const this_type& x) + { + base_type::operator&=(x); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::operator|=(const this_type& x) + { + base_type::operator|=(x); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::operator^=(const this_type& x) + { + base_type::operator^=(x); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::operator<<=(size_type n) + { + if(EASTL_LIKELY((intptr_t)n < (intptr_t)N)) + { + EA_DISABLE_VC_WARNING(6313) + base_type::operator<<=(n); + if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32. + mWord[kWordCount - 1] &= ~(static_cast(~static_cast(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly. + EA_RESTORE_VC_WARNING() + } + else + base_type::reset(); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::operator>>=(size_type n) + { + if(EASTL_LIKELY(n < N)) + base_type::operator>>=(n); + else + base_type::reset(); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::set() + { + base_type::set(); // This sets all bits. + if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32. + mWord[kWordCount - 1] &= ~(static_cast(~static_cast(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly. 
+ return *this; + } + + + template + inline typename bitset::this_type& + bitset::set(size_type i, bool value) + { + if(i < N) + base_type::set(i, value); + else + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(i < N))) + EASTL_FAIL_MSG("bitset::set -- out of range"); + #endif + + #if EASTL_EXCEPTIONS_ENABLED + throw std::out_of_range("bitset::set"); + #endif + } + + return *this; + } + + + template + inline typename bitset::this_type& + bitset::reset() + { + base_type::reset(); + return *this; + } + + + template + inline typename bitset::this_type& + bitset::reset(size_type i) + { + if(EASTL_LIKELY(i < N)) + DoGetWord(i) &= ~(static_cast(1) << (i & kBitsPerWordMask)); + else + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(i < N))) + EASTL_FAIL_MSG("bitset::reset -- out of range"); + #endif + + #if EASTL_EXCEPTIONS_ENABLED + throw std::out_of_range("bitset::reset"); + #endif + } + + return *this; + } + + + template + inline typename bitset::this_type& + bitset::flip() + { + EA_DISABLE_VC_WARNING(6313) + base_type::flip(); + if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32. + mWord[kWordCount - 1] &= ~(static_cast(~static_cast(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly. 
+ return *this; + EA_RESTORE_VC_WARNING() + } + + + template + inline typename bitset::this_type& + bitset::flip(size_type i) + { + if(EASTL_LIKELY(i < N)) + DoGetWord(i) ^= (static_cast(1) << (i & kBitsPerWordMask)); + else + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(i < N))) + EASTL_FAIL_MSG("bitset::flip -- out of range"); + #endif + + #if EASTL_EXCEPTIONS_ENABLED + throw std::out_of_range("bitset::flip"); + #endif + } + return *this; + } + + + template + inline typename bitset::this_type + bitset::operator~() const + { + return this_type(*this).flip(); + } + + + template + inline typename bitset::reference + bitset::operator[](size_type i) + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(i < N))) + EASTL_FAIL_MSG("bitset::operator[] -- out of range"); + #endif + + return reference(*this, i); + } + + + template + inline bool bitset::operator[](size_type i) const + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(i < N))) + EASTL_FAIL_MSG("bitset::operator[] -- out of range"); + #endif + + return (DoGetWord(i) & (static_cast(1) << (i & kBitsPerWordMask))) != 0; + } + + + template + inline const typename bitset::word_type* bitset::data() const + { + return base_type::mWord; + } + + + template + inline typename bitset::word_type* bitset::data() + { + return base_type::mWord; + } + + + template + inline void bitset::from_uint32(uint32_t value) + { + base_type::from_uint32(value); + + if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32. + mWord[kWordCount - 1] &= ~(static_cast(~static_cast(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly. + } + + + template + inline void bitset::from_uint64(uint64_t value) + { + base_type::from_uint64(value); + + if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... 
(If we didn't have this check, then the code below would do the wrong thing when N == 32. + mWord[kWordCount - 1] &= ~(static_cast(~static_cast(0)) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly. + } + + + // template + // inline unsigned long bitset::to_ulong() const + // { + // return base_type::to_ulong(); + // } + + + // template + // inline uint32_t bitset::to_uint32() const + // { + // return base_type::to_uint32(); + // } + + + // template + // inline uint64_t bitset::to_uint64() const + // { + // return base_type::to_uint64(); + // } + + + // template + // inline typename bitset::size_type + // bitset::count() const + // { + // return base_type::count(); + // } + + + template + inline typename bitset::size_type + bitset::size() const + { + return (size_type)N; + } + + + template + inline bool bitset::operator==(const this_type& x) const + { + return base_type::operator==(x); + } + + + template + inline bool bitset::operator!=(const this_type& x) const + { + return !base_type::operator==(x); + } + + + template + inline bool bitset::test(size_type i) const + { + if(EASTL_UNLIKELY(i < N)) + return (DoGetWord(i) & (static_cast(1) << (i & kBitsPerWordMask))) != 0; + + #if EASTL_ASSERT_ENABLED + EASTL_FAIL_MSG("bitset::test -- out of range"); + #endif + + #if EASTL_EXCEPTIONS_ENABLED + throw std::out_of_range("bitset::test"); + #else + return false; + #endif + } + + + // template + // inline bool bitset::any() const + // { + // return base_type::any(); + // } + + + template + inline bool bitset::all() const + { + return count() == size(); + } + + + template + inline bool bitset::none() const + { + return !base_type::any(); + } + + + template + inline typename bitset::this_type + bitset::operator<<(size_type n) const + { + return this_type(*this).operator<<=(n); + } + + + template + inline typename bitset::this_type + bitset::operator>>(size_type n) const + { + return 
this_type(*this).operator>>=(n); + } + + + template + inline typename bitset::size_type + bitset::find_first() const + { + const size_type i = base_type::DoFindFirst(); + + if(i < kSize) + return i; + // Else i could be the base type bit count, so we clamp it to our size. + + return kSize; + } + + + template + inline typename bitset::size_type + bitset::find_next(size_type last_find) const + { + const size_type i = base_type::DoFindNext(last_find); + + if(i < kSize) + return i; + // Else i could be the base type bit count, so we clamp it to our size. + + return kSize; + } + + + template + inline typename bitset::size_type + bitset::find_last() const + { + const size_type i = base_type::DoFindLast(); + + if(i < kSize) + return i; + // Else i could be the base type bit count, so we clamp it to our size. + + return kSize; + } + + + template + inline typename bitset::size_type + bitset::find_prev(size_type last_find) const + { + const size_type i = base_type::DoFindPrev(last_find); + + if(i < kSize) + return i; + // Else i could be the base type bit count, so we clamp it to our size. + + return kSize; + } + + + + /////////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////////// + + template + inline bitset operator&(const bitset& a, const bitset& b) + { + // We get betting inlining when we don't declare temporary variables. 
+ return bitset(a).operator&=(b); + } + + + template + inline bitset operator|(const bitset& a, const bitset& b) + { + return bitset(a).operator|=(b); + } + + + template + inline bitset operator^(const bitset& a, const bitset& b) + { + return bitset(a).operator^=(b); + } + + +} // namespace eastl + + +#if defined(_MSC_VER) + #pragma warning(pop) +#endif + + +#endif // Header include guard + + + + + + + + + + + + diff --git a/include/EASTL/bitvector.h b/include/EASTL/bitvector.h new file mode 100644 index 0000000..ec2bdae --- /dev/null +++ b/include/EASTL/bitvector.h @@ -0,0 +1,1492 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// Implements a bit vector, which is essentially a vector of bool but which +// uses bits instead of bytes. It is thus similar to the original std::vector. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// Note: This code is not yet complete: it isn't tested and doesn't yet +// support containers other than vector. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_BITVECTOR_H +#define EASTL_BITVECTOR_H + + +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable: 4480) // nonstandard extension used: specifying underlying type for enum +#endif + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + + + +namespace eastl +{ + + /// EASTL_BITVECTOR_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_BITVECTOR_DEFAULT_NAME + #define EASTL_BITVECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " bitvector" // Unless the user overrides something, this is "EASTL bitvector". + #endif + + /// EASTL_BITVECTOR_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_BITVECTOR_DEFAULT_ALLOCATOR + #define EASTL_BITVECTOR_DEFAULT_ALLOCATOR allocator_type(EASTL_BITVECTOR_DEFAULT_NAME) + #endif + + + + /// BitvectorWordType + /// Defines the integral data type used by bitvector. + typedef EASTL_BITSET_WORD_TYPE_DEFAULT BitvectorWordType; + + + template + class bitvector_const_iterator; + + + template + class bitvector_reference + { + public: + typedef eastl_size_t size_type; + bitvector_reference(Element* ptr, eastl_size_t i); + + bitvector_reference& operator=(bool value); + bitvector_reference& operator=(const bitvector_reference& rhs); + + operator bool() const // Defined here because some compilers fail otherwise. + { return (*mpBitWord & (Element(1) << mnBitIndex)) != 0; } + + protected: + friend class bitvector_const_iterator; + + Element* mpBitWord; + size_type mnBitIndex; + + bitvector_reference() {} + void CopyFrom(const bitvector_reference& rhs); + }; + + + + template + class bitvector_const_iterator + { + public: + typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category; + typedef bitvector_const_iterator this_type; + typedef bool value_type; + typedef bitvector_reference reference_type; + typedef ptrdiff_t difference_type; + typedef Element element_type; + typedef element_type* pointer; // This is wrong. It needs to be someting that acts as a pointer to a bit. + typedef element_type& reference; // This is not right. It needs to be someting that acts as a pointer to a bit. 
+ typedef eastl_size_t size_type; + + protected: + reference_type mReference; + + enum + { + kBitCount = (8 * sizeof(Element)) + }; + + public: + bool operator*() const; + bool operator[](difference_type n) const; + + bitvector_const_iterator(); + bitvector_const_iterator(const element_type* p, eastl_size_t i); + bitvector_const_iterator(const reference_type& referenceType); + + bitvector_const_iterator& operator++(); + bitvector_const_iterator operator++(int); + bitvector_const_iterator& operator--(); + bitvector_const_iterator operator--(int); + + bitvector_const_iterator& operator+=(difference_type dist); + bitvector_const_iterator& operator-=(difference_type dist); + bitvector_const_iterator operator+ (difference_type dist) const; + bitvector_const_iterator operator- (difference_type dist) const; + + difference_type operator-(const this_type& rhs) const; + + bitvector_const_iterator& operator= (const this_type& rhs); + + bool operator==(const this_type& rhs) const; + bool operator!=(const this_type& rhs) const; + + bool operator< (const this_type& rhs) const; + bool operator<=(const this_type& rhs) const; + bool operator> (const this_type& rhs) const; + bool operator>=(const this_type& rhs) const; + + int validate(const element_type* pStart, const element_type* pEnd, eastl_size_t nExtraBits) const; + + protected: + template + friend class bitvector; + + reference_type& get_reference_type() { return mReference; } + }; + + + + template + class bitvector_iterator : public bitvector_const_iterator + { + public: + typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category; + typedef bitvector_iterator this_type; + typedef bitvector_const_iterator base_type; + typedef bool value_type; + typedef bitvector_reference reference_type; + typedef ptrdiff_t difference_type; + typedef Element element_type; + typedef element_type* pointer; // This is wrong. It needs to be someting that acts as a pointer to a bit. + typedef element_type& reference; // This is not right. 
It needs to be someting that acts as a pointer to a bit. + + public: + reference_type operator*() const; + reference_type operator[](difference_type n) const; + + bitvector_iterator(); + bitvector_iterator(element_type* p, eastl_size_t i); + bitvector_iterator(reference_type& referenceType); + + bitvector_iterator& operator++() { base_type::operator++(); return *this; } + bitvector_iterator& operator--() { base_type::operator--(); return *this; } + bitvector_iterator operator++(int); + bitvector_iterator operator--(int); + + bitvector_iterator& operator+=(difference_type dist) { base_type::operator+=(dist); return *this; } + bitvector_iterator& operator-=(difference_type dist) { base_type::operator-=(dist); return *this; } + bitvector_iterator operator+ (difference_type dist) const; + bitvector_iterator operator- (difference_type dist) const; + + // We need this here because we are overloading operator-, so for some reason the + // other overload of the function can't be found unless it's explicitly specified. + difference_type operator-(const base_type& rhs) const { return base_type::operator-(rhs); } + }; + + + + /// bitvector + /// + /// Implements an array of bits treated as boolean values. + /// bitvector is similar to vector but uses bits instead of bytes and + /// allows the user to use other containers such as deque instead of vector. + /// bitvector is different from bitset in that bitset is less flexible but + /// uses less memory and has higher performance. + /// + /// To consider: Rename the Element template parameter to WordType, for + /// consistency with bitset. 
+ /// + template > + class bitvector + { + public: + typedef bitvector this_type; + typedef bool value_type; + typedef bitvector_reference reference; + typedef bool const_reference; + typedef bitvector_iterator iterator; + typedef bitvector_const_iterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + typedef Allocator allocator_type; + typedef Element element_type; + typedef Container container_type; + typedef eastl_size_t size_type; + typedef ptrdiff_t difference_type; + + #if defined(_MSC_VER) && (_MSC_VER >= 1400) && (_MSC_VER <= 1600) && !EASTL_STD_CPP_ONLY // _MSC_VER of 1400 means VS2005, 1600 means VS2010. VS2012 generates errors with usage of enum:size_type. + enum : size_type { // Use Microsoft enum language extension, allowing for smaller debug symbols than using a static const. Users have been affected by this. + npos = container_type::npos, + kMaxSize = container_type::kMaxSize + }; + #else + static const size_type npos = container_type::npos; /// 'npos' means non-valid position or simply non-position. + static const size_type kMaxSize = container_type::kMaxSize; /// -1 is reserved for 'npos'. It also happens to be slightly beneficial that kMaxSize is a value less than -1, as it helps us deal with potential integer wraparound issues. + #endif + + enum + { + kBitCount = 8 * sizeof(Element) + }; + + protected: + container_type mContainer; + size_type mFreeBitCount; // Unused bits in the last word of mContainer. 
+ + public: + bitvector(); + explicit bitvector(const allocator_type& allocator); + explicit bitvector(size_type n, const allocator_type& allocator = EASTL_BITVECTOR_DEFAULT_ALLOCATOR); + bitvector(size_type n, value_type value, const allocator_type& allocator = EASTL_BITVECTOR_DEFAULT_ALLOCATOR); + bitvector(const bitvector& copy); + + template + bitvector(InputIterator first, InputIterator last); + + bitvector& operator=(const bitvector& x); + void swap(this_type& x); + + template + void assign(InputIterator first, InputIterator last); + + iterator begin() EA_NOEXCEPT; + const_iterator begin() const EA_NOEXCEPT; + const_iterator cbegin() const EA_NOEXCEPT; + + iterator end() EA_NOEXCEPT; + const_iterator end() const EA_NOEXCEPT; + const_iterator cend() const EA_NOEXCEPT; + + reverse_iterator rbegin() EA_NOEXCEPT; + const_reverse_iterator rbegin() const EA_NOEXCEPT; + const_reverse_iterator crbegin() const EA_NOEXCEPT; + + reverse_iterator rend() EA_NOEXCEPT; + const_reverse_iterator rend() const EA_NOEXCEPT; + const_reverse_iterator crend() const EA_NOEXCEPT; + + bool empty() const EA_NOEXCEPT; + size_type size() const EA_NOEXCEPT; + size_type capacity() const EA_NOEXCEPT; + + void resize(size_type n, value_type value); + void resize(size_type n); + void reserve(size_type n); + void set_capacity(size_type n = npos); // Revises the capacity to the user-specified value. Resizes the container to match the capacity if the requested capacity n is less than the current size. If n == npos then the capacity is reallocated (if necessary) such that capacity == size. + + void push_back(); + void push_back(value_type value); + void pop_back(); + + reference front(); + const_reference front() const; + reference back(); + const_reference back() const; + + bool test(size_type n, bool defaultValue) const; // Returns true if the bit index is < size() and set. Returns defaultValue if the bit is >= size(). 
+ void set(size_type n, bool value); // Resizes the container to accomodate n if necessary. + + reference at(size_type n); // throws an out_of_range exception if n is invalid. + const_reference at(size_type n) const; + + reference operator[](size_type n); // behavior is undefined if n is invalid. + const_reference operator[](size_type n) const; + + /* + Work in progress: + template iterator find_first(); // Finds the lowest "on" bit. + template iterator find_next(const_iterator it); // Finds the next lowest "on" bit after it. + template iterator find_last(); // Finds the index of the last "on" bit, returns size if none are set. + template iterator find_prev(const_iterator it); // Finds the index of the last "on" bit before last_find, returns size if none are set. + + template const_iterator find_first() const; // Finds the lowest "on" bit. + template const_iterator find_next(const_iterator it) const; // Finds the next lowest "on" bit after it. + template const_iterator find_last() const; // Finds the index of the last "on" bit, returns size if none are set. + template const_iterator find_prev(const_iterator it) const; // Finds the index of the last "on" bit before last_find, returns size if none are set. + */ + + element_type* data() EA_NOEXCEPT; + const element_type* data() const EA_NOEXCEPT; + + iterator insert(const_iterator position, value_type value); + void insert(const_iterator position, size_type n, value_type value); + + // template Not yet implemented. See below for disabled definition. + // void insert(const_iterator position, InputIterator first, InputIterator last); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + + reverse_iterator erase(const_reverse_iterator position); + reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last); + + void clear(); + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. 
No destructors are called, no deallocation occurs. + + container_type& get_container(); + const container_type& get_container() const; + + bool validate() const; + int validate_iterator(const_iterator i) const; + }; + + + + + /////////////////////////////////////////////////////////////////////// + // bitvector_reference + /////////////////////////////////////////////////////////////////////// + + template + bitvector_reference::bitvector_reference(Element* p, eastl_size_t i) + : mpBitWord(p), + mnBitIndex(i) + { + } + + + template + bitvector_reference& + bitvector_reference::operator=(bool value) + { + const Element mask = (Element)(Element(1) << mnBitIndex); + + if(value) + *mpBitWord |= mask; + else + *mpBitWord &= ~mask; + + return *this; + } + + + template + bitvector_reference& + bitvector_reference::operator=(const bitvector_reference& rhs) + { + return (*this = (bool)rhs); + } + + + template + void bitvector_reference::CopyFrom(const bitvector_reference& rhs) + { + mpBitWord = rhs.mpBitWord; + mnBitIndex = rhs.mnBitIndex; + } + + + + + /////////////////////////////////////////////////////////////////////// + // bitvector_const_iterator + /////////////////////////////////////////////////////////////////////// + + template + bitvector_const_iterator::bitvector_const_iterator() + : mReference(0, 0) + { + } + + + template + bitvector_const_iterator::bitvector_const_iterator(const Element* p, eastl_size_t i) + : mReference(const_cast(p), i) // const_cast is safe here because we never let mReference leak and we don't modify it. 
+ { + } + + + template + bitvector_const_iterator::bitvector_const_iterator(const reference_type& reference) + : mReference(reference) + { + } + + + template + bitvector_const_iterator& + bitvector_const_iterator::operator++() + { + ++mReference.mnBitIndex; + + if(mReference.mnBitIndex == kBitCount) + { + ++mReference.mpBitWord; + mReference.mnBitIndex = 0; + } + + return *this; + } + + + template + bitvector_const_iterator& + bitvector_const_iterator::operator--() + { + if(mReference.mnBitIndex == 0) + { + --mReference.mpBitWord; + mReference.mnBitIndex = kBitCount; + } + + --mReference.mnBitIndex; + return *this; + } + + + template + bitvector_const_iterator + bitvector_const_iterator::operator++(int) + { + bitvector_const_iterator copy(*this); + ++*this; + return copy; + } + + + template + bitvector_const_iterator + bitvector_const_iterator::operator--(int) + { + bitvector_const_iterator copy(*this); + --*this; + return copy; + } + + + template + bitvector_const_iterator& + bitvector_const_iterator::operator+=(difference_type n) + { + n += mReference.mnBitIndex; + + if(n >= difference_type(0)) + { + mReference.mpBitWord += n / kBitCount; + mReference.mnBitIndex = (size_type)(n % kBitCount); + } + else + { + // backwards is tricky + // figure out how many full words backwards we need to move + // n = [-1..-32] => 1 + // n = [-33..-64] => 2 + const size_type backwards = (size_type)(-n + kBitCount - 1); + mReference.mpBitWord -= backwards / kBitCount; + + // -1 => 31; backwards = 32; 31 - (backwards % 32) = 31 + // -2 => 30; backwards = 33; 31 - (backwards % 32) = 30 + // -3 => 29; backwards = 34 + // .. 
+ // -32 => 0; backwards = 63; 31 - (backwards % 32) = 0 + // -33 => 31; backwards = 64; 31 - (backwards % 32) = 31 + mReference.mnBitIndex = (kBitCount - 1) - (backwards % kBitCount); + } + + return *this; + } + + + template + bitvector_const_iterator& + bitvector_const_iterator::operator-=(difference_type n) + { + return (*this += -n); + } + + + template + bitvector_const_iterator + bitvector_const_iterator::operator+(difference_type n) const + { + bitvector_const_iterator copy(*this); + copy += n; + return copy; + } + + + template + bitvector_const_iterator + bitvector_const_iterator::operator-(difference_type n) const + { + bitvector_const_iterator copy(*this); + copy -= n; + return copy; + } + + + template + typename bitvector_const_iterator::difference_type + bitvector_const_iterator::operator-(const this_type& rhs) const + { + return ((mReference.mpBitWord - rhs.mReference.mpBitWord) * kBitCount) + mReference.mnBitIndex - rhs.mReference.mnBitIndex; + } + + + template + bool bitvector_const_iterator::operator==(const this_type& rhs) const + { + return (mReference.mpBitWord == rhs.mReference.mpBitWord) && (mReference.mnBitIndex == rhs.mReference.mnBitIndex); + } + + + template + bool bitvector_const_iterator::operator!=(const this_type& rhs) const + { + return !(*this == rhs); + } + + + template + bool bitvector_const_iterator::operator<(const this_type& rhs) const + { + return (mReference.mpBitWord < rhs.mReference.mpBitWord) || + ((mReference.mpBitWord == rhs.mReference.mpBitWord) && (mReference.mnBitIndex < rhs.mReference.mnBitIndex)); + } + + + template + bool bitvector_const_iterator::operator<=(const this_type& rhs) const + { + return (mReference.mpBitWord < rhs.mReference.mpBitWord) || + ((mReference.mpBitWord == rhs.mReference.mpBitWord) && (mReference.mnBitIndex <= rhs.mReference.mnBitIndex)); + } + + + template + bool bitvector_const_iterator::operator>(const this_type& rhs) const + { + return !(*this <= rhs); + } + + + template + bool 
bitvector_const_iterator::operator>=(const this_type& rhs) const + { + return !(*this < rhs); + } + + + template + bool bitvector_const_iterator::operator*() const + { + return mReference; + } + + + template + bool bitvector_const_iterator::operator[](difference_type n) const + { + return *(*this + n); + } + + + template + bitvector_const_iterator& bitvector_const_iterator::operator= (const this_type& rhs) + { + mReference.CopyFrom(rhs.mReference); + return *this; + } + + + template + int bitvector_const_iterator::validate(const Element* pStart, const Element* pEnd, eastl_size_t nExtraBits) const + { + const Element* const pCurrent = mReference.mpBitWord; + + if(pCurrent >= pStart) + { + if(nExtraBits == 0) + { + if(pCurrent == pEnd && mReference) + return eastl::isf_valid | eastl::isf_current; + else if(pCurrent < pEnd) + return eastl::isf_valid | eastl::isf_current | eastl::isf_can_dereference; + } + else if(pCurrent == (pEnd - 1)) + { + const size_type bit = mReference.mnBitIndex; + const size_type lastbit = kBitCount - nExtraBits; + + if(bit == lastbit) + return eastl::isf_valid | eastl::isf_current; + else if(bit < lastbit) + return eastl::isf_valid | eastl::isf_current | eastl::isf_can_dereference; + } + else if(pCurrent < pEnd) + { + return eastl::isf_valid | eastl::isf_current | eastl::isf_can_dereference; + } + } + + return eastl::isf_none; + } + + + + /////////////////////////////////////////////////////////////////////// + // bitvector_iterator + /////////////////////////////////////////////////////////////////////// + + template + bitvector_iterator::bitvector_iterator() + : base_type() + { + } + + template + bitvector_iterator::bitvector_iterator(Element* p, eastl_size_t i) + : base_type(p, i) + { + } + + + template + bitvector_iterator::bitvector_iterator(reference_type& reference) + : base_type(reference) + { + } + + + template + typename bitvector_iterator::reference_type + bitvector_iterator::operator*() const + { + return base_type::mReference; + 
} + + + template + typename bitvector_iterator::reference_type + bitvector_iterator::operator[](difference_type n) const + { + return *(*this + n); + } + + + template + void MoveBits(bitvector_iterator start, + bitvector_iterator end, + bitvector_iterator dest) + { + // Slow implemenation; could optimize by moving a word at a time. + if(dest <= start) + { + while(start != end) + { + *dest = *start; + ++dest; + ++start; + } + } + else + { + // Need to move backwards + dest += (end - start); + + while(start != end) + { + --dest; + --end; + *dest = *end; + } + } + } + + + template + bitvector_iterator + bitvector_iterator::operator++(int) + { + bitvector_iterator copy(*this); + ++*this; + return copy; + } + + + template + bitvector_iterator + bitvector_iterator::operator--(int) + { + bitvector_iterator copy(*this); + --*this; + return copy; + } + + + template + bitvector_iterator + bitvector_iterator::operator+(difference_type n) const + { + bitvector_iterator copy(*this); + copy += n; + return copy; + } + + + template + bitvector_iterator + bitvector_iterator::operator-(difference_type n) const + { + bitvector_iterator copy(*this); + copy -= n; + return copy; + } + + + + + /////////////////////////////////////////////////////////////////////// + // bitvector + /////////////////////////////////////////////////////////////////////// + + template + template + void bitvector::assign(InputIterator first, InputIterator last) + { + // To consider: We can maybe specialize this on bitvector_iterator to do a fast bitwise copy. + // We can also specialize for random access iterators to figure out the size & reserve first. 
+ + clear(); + + while(first != last) + { + push_back(*first); + ++first; + } + } + + + template + typename bitvector::iterator + bitvector::begin() EA_NOEXCEPT + { + return iterator(mContainer.begin(), 0); + } + + + template + typename bitvector::const_iterator + bitvector::begin() const EA_NOEXCEPT + { + return const_iterator(mContainer.begin(), 0); + } + + + template + typename bitvector::const_iterator + bitvector::cbegin() const EA_NOEXCEPT + { + return const_iterator(mContainer.begin(), 0); + } + + + template + typename bitvector::iterator + bitvector::end() EA_NOEXCEPT + { + return iterator(mContainer.end(), 0) - mFreeBitCount; + } + + + template + typename bitvector::const_iterator + bitvector::end() const EA_NOEXCEPT + { + return const_iterator(mContainer.end(), 0) - mFreeBitCount; + } + + + template + typename bitvector::const_iterator + bitvector::cend() const EA_NOEXCEPT + { + return const_iterator(mContainer.end(), 0) - mFreeBitCount; + } + + + template + bool bitvector::empty() const EA_NOEXCEPT + { + return mContainer.empty(); + } + + + template + typename bitvector::size_type + bitvector::size() const EA_NOEXCEPT + { + return (mContainer.size() * kBitCount) - mFreeBitCount; + } + + + template + typename bitvector::size_type + bitvector::capacity() const EA_NOEXCEPT + { + return mContainer.capacity() * kBitCount; + } + + + template + void bitvector::set_capacity(size_type n) + { + if(n == npos) + mContainer.set_capacity(npos); + else + mContainer.set_capacity((n + kBitCount - 1) / kBitCount); + } + + + template + typename bitvector::reverse_iterator + bitvector::rbegin() EA_NOEXCEPT + { + return reverse_iterator(end()); + } + + + template + typename bitvector::const_reverse_iterator + bitvector::rbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(end()); + } + + + template + typename bitvector::const_reverse_iterator + bitvector::crbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(end()); + } + + + template + typename 
bitvector::reverse_iterator + bitvector::rend() EA_NOEXCEPT + { + return reverse_iterator(begin()); + } + + + template + typename bitvector::const_reverse_iterator + bitvector::rend() const EA_NOEXCEPT + { + return const_reverse_iterator(begin()); + } + + + template + typename bitvector::const_reverse_iterator + bitvector::crend() const EA_NOEXCEPT + { + return const_reverse_iterator(begin()); + } + + + template + typename bitvector::reference + bitvector::front() + { + EASTL_ASSERT(!empty()); + return reference(&mContainer[0], 0); + } + + + template + typename bitvector::const_reference + bitvector::front() const + { + EASTL_ASSERT(!empty()); + + // To consider: make a better solution to this than const_cast. + return reference(const_cast(&mContainer[0]), 0); + } + + + template + typename bitvector::reference + bitvector::back() + { + EASTL_ASSERT(!empty()); + return *(--end()); + } + + + template + typename bitvector::const_reference + bitvector::back() const + { + EASTL_ASSERT(!empty()); + return *(--end()); + } + + + template + void bitvector::push_back() + { + if(!mFreeBitCount) + { + mContainer.push_back(); + mFreeBitCount = kBitCount; + } + + --mFreeBitCount; + } + + + template + void bitvector::push_back(value_type value) + { + push_back(); + *--end() = value; + } + + + template + void bitvector::pop_back() + { + EASTL_ASSERT(!empty()); + + if(++mFreeBitCount == kBitCount) + { + mContainer.pop_back(); + mFreeBitCount = 0; + } + } + + + template + void bitvector::reserve(size_type n) + { + const size_type wordCount = (n + kBitCount - 1) / kBitCount; + mContainer.reserve(wordCount); + } + + + template + void bitvector::resize(size_type n) + { + const size_type wordCount = (n + kBitCount - 1) / kBitCount; + const size_type extra = (wordCount * kBitCount) - n; + + mContainer.resize(wordCount); + mFreeBitCount = extra; + } + + + template + void bitvector::resize(size_type n, value_type value) + { + const size_type s = size(); + if(n < s) + resize(n); + + // Fill 
up to the end of a word + size_type newbits = n - s; + + while(mFreeBitCount && newbits) + { + push_back(value); + --newbits; + } + + // Fill the rest a word at a time + if(newbits) + { + element_type element(0); + if(value) + element = ~element; + + const size_type words = (n + kBitCount - 1) / kBitCount; + const size_type extra = words * kBitCount - n; + mContainer.resize(words, element); + mFreeBitCount = extra; + } + } + + + template + bool bitvector::test(size_type n, bool defaultValue) const + { + if(n < size()) + return *(begin() + (difference_type)n); + + return defaultValue; + } + + + template + void bitvector::set(size_type n, bool value) + { + if(EASTL_UNLIKELY(n >= size())) + resize(n + 1); + + *(begin() + (difference_type)n) = value; + } + + + template + typename bitvector::reference + bitvector::at(size_type n) + { + // The difference between at and operator[] is that at signals + // if the requested position is out of range by throwing an + // out_of_range exception. + + #if EASTL_EXCEPTIONS_ENABLED + if(EASTL_UNLIKELY(n >= size())) + throw std::out_of_range("bitvector::at -- out of range"); + #elif EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(n >= size())) + EASTL_FAIL_MSG("bitvector::at -- out of range"); + #endif + + return *(begin() + (difference_type)n); + } + + + template + typename bitvector::const_reference + bitvector::at(size_type n) const + { + #if EASTL_EXCEPTIONS_ENABLED + if(EASTL_UNLIKELY(n >= size())) + throw std::out_of_range("bitvector::at -- out of range"); + #elif EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(n >= size())) + EASTL_FAIL_MSG("bitvector::at -- out of range"); + #endif + + return *(begin() + (difference_type)n); + } + + + template + typename bitvector::reference + bitvector::operator[](size_type n) + { + return *(begin() + (difference_type)n); + } + + + template + typename bitvector::const_reference + bitvector::operator[](size_type n) const + { + return *(begin() + (difference_type)n); + } + + +/* + template + template + 
typename bitvector::iterator + bitvector::find_first() + { + return begin(); + } + + template iterator find_next(const_iterator it); + template iterator find_last(); + template iterator find_prev(const_iterator it); + + template const_iterator find_first() const; + template const_iterator find_next(const_iterator it) const; + template const_iterator find_last() const; + template const_iterator find_prev(const_iterator it) const; +*/ + + + + + template + inline typename bitvector::container_type& + bitvector::get_container() + { + return mContainer; + } + + + template + inline const typename bitvector::container_type& + bitvector::get_container() const + { + return mContainer; + } + + + template + bool bitvector::validate() const + { + if(!mContainer.validate()) + return false; + + if((unsigned)mFreeBitCount >= kBitCount) + return false; + + return true; + } + + + template + int bitvector::validate_iterator(const_iterator i) const + { + return i.validate(mContainer.begin(), mContainer.end(), mFreeBitCount); + } + + + template + typename bitvector::element_type* + bitvector::data() EA_NOEXCEPT + { + return mContainer.data(); + } + + + template + const typename bitvector::element_type* + bitvector::data() const EA_NOEXCEPT + { + return mContainer.data(); + } + + + template + typename bitvector::iterator + bitvector::insert(const_iterator position, value_type value) + { + iterator iPosition(position.get_reference_type()); // This is just a non-const version of position. 
+ + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iPosition) & eastl::isf_valid) == 0) + EASTL_FAIL_MSG("bitvector::insert -- invalid iterator"); + #endif + + // Save because we might reallocate + const typename iterator::difference_type n = iPosition - begin(); + push_back(); + iPosition = begin() + n; + + MoveBits(iPosition, --end(), ++iterator(iPosition)); + *iPosition = value; + + return iPosition; + } + + + template + void bitvector::insert(const_iterator position, size_type n, value_type value) + { + iterator iPosition(position.get_reference_type()); // This is just a non-const version of position. + + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iPosition) & eastl::isf_valid) == 0) + EASTL_FAIL_MSG("bitvector::insert -- invalid iterator"); + #endif + + // Save because we might reallocate. + const typename iterator::difference_type p = iPosition - begin(); + resize(size() + n); + iPosition = begin() + p; + + iterator insert_end = iPosition + n; + MoveBits(iPosition, end() - n, insert_end); + + // To do: Optimize this to word-at-a-time for large inserts + while(iPosition != insert_end) + { + *iPosition = value; + ++iPosition; + } + } + + + /* + The following is a placeholder for a future implementation. It turns out that a correct implementation of + insert(pos, first, last) is a non-trivial exercise that would take a few hours to implement and test. + The reasons why involve primarily the problem of handling the case where insertion source comes from + within the container itself, and the case that first and last (note they are templated) might not refer + to iterators might refer to a value/count pair. The C++ Standard requires you to handle this case and + I (Paul Pedriana) believe that it applies even for a bitvector, given that bool is an integral type. + So you have to set up a compile-time type traits function chooser. See vector, for example. 
+ + template + template + void bitvector::insert(const_iterator position, InputIterator first, InputIterator last) + { + iterator iPosition(position.get_reference_type()); // This is just a non-const version of position. + + // This implementation is probably broken due to not handling insertion into self. + // To do: Make a more efficient version of this. + difference_type distance = (iPosition - begin()); + + while(first != last) + { + insert(iPosition, *first); + iPosition = begin() + ++distance; + ++first; + } + } + */ + + + template + typename bitvector::iterator + bitvector::erase(const_iterator position) + { + iterator iPosition(position.get_reference_type()); // This is just a non-const version of position. + + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iPosition) & eastl::isf_can_dereference) == 0) + EASTL_FAIL_MSG("bitvector::erase -- invalid iterator"); + #endif + + MoveBits(++iterator(iPosition), end(), iPosition); + resize(size() - 1); + + // Verify that no reallocation occurred. + EASTL_ASSERT(validate_iterator(iPosition) & eastl::isf_valid); + return iPosition; + } + + + template + typename bitvector::iterator + bitvector::erase(const_iterator first, const_iterator last) + { + iterator iFirst(first.get_reference_type()); // This is just a non-const version of first. + iterator iLast(last.get_reference_type()); // This is just a non-const version of last. + + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iLast) & eastl::isf_valid) == 0) + EASTL_FAIL_MSG("bitvector::erase -- invalid iterator"); + #endif + + if(!(iFirst == iLast)) + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iFirst) & eastl::isf_can_dereference) == 0) + EASTL_FAIL_MSG("bitvector::erase -- invalid iterator"); + #endif + + const size_type eraseCount = (size_type)(iLast - iFirst); + MoveBits(iLast, end(), iFirst); + resize(size() - eraseCount); + + // Verify that no reallocation occurred. 
+ #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(validate_iterator(iFirst) & eastl::isf_valid) == 0) + EASTL_FAIL_MSG("bitvector::erase -- invalid iterator"); + #endif + } + + return iFirst; + } + + + template + typename bitvector::reverse_iterator + bitvector::erase(const_reverse_iterator position) + { + return reverse_iterator(erase((++position).base())); + } + + + template + typename bitvector::reverse_iterator + bitvector::erase(const_reverse_iterator first, const_reverse_iterator last) + { + // Version which erases in order from first to last. + // difference_type i(first.base() - last.base()); + // while(i--) + // first = erase(first); + // return first; + + // Version which erases in order from last to first, but is slightly more efficient: + return reverse_iterator(erase(last.base(), first.base())); + } + + + template + void bitvector::swap(this_type& rhs) + { + mContainer.swap(rhs.mContainer); + eastl::swap(mFreeBitCount, rhs.mFreeBitCount); + } + + + template + void bitvector::reset_lose_memory() + { + mContainer.reset_lose_memory(); // intentional memory leak. 
+ mFreeBitCount = 0; + } + + + template + void bitvector::clear() + { + mContainer.clear(); + mFreeBitCount = 0; + } + + + template + bitvector& + bitvector::operator=(const bitvector& rhs) + { + // The following is OK if (&rhs == this) + mContainer = rhs.mContainer; + mFreeBitCount = rhs.mFreeBitCount; + + return *this; + } + + + template + bitvector::bitvector() + : mContainer(), + mFreeBitCount(0) + { + } + + + template + bitvector::bitvector(const allocator_type& allocator) + : mContainer(allocator), + mFreeBitCount(0) + { + } + + + template + bitvector::bitvector(size_type n, const allocator_type& allocator) + : mContainer((n + kBitCount - 1) / kBitCount, allocator) + { + mFreeBitCount = kBitCount - (n % kBitCount); + + if(mFreeBitCount == kBitCount) + mFreeBitCount = 0; + } + + + template + bitvector::bitvector(size_type n, value_type value, const allocator_type& allocator) + : mContainer((n + kBitCount - 1) / kBitCount, value ? ~element_type(0) : element_type(0), allocator) + { + mFreeBitCount = kBitCount - (n % kBitCount); + + if(mFreeBitCount == kBitCount) + mFreeBitCount = 0; + } + + + template + bitvector::bitvector(const bitvector& copy) + : mContainer(copy.mContainer), + mFreeBitCount(copy.mFreeBitCount) + { + } + + + template + template + bitvector::bitvector(InputIterator first, InputIterator last) + : mContainer(), + mFreeBitCount(0) + { + assign(first, last); + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const bitvector& a, + const bitvector& b) + { + // To do: Replace this with a smart compare implementation. This is much slower than it needs to be. 
+ return ((a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin())); + } + + + template + inline bool operator!=(const bitvector& a, + const bitvector& b) + { + return !operator==(a, b); + } + + + template + inline bool operator<(const bitvector& a, + const bitvector& b) + { + // To do: Replace this with a smart compare implementation. This is much slower than it needs to be. + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + + template + inline bool operator>(const bitvector& a, + const bitvector& b) + { + return b < a; + } + + + template + inline bool operator<=(const bitvector& a, + const bitvector& b) + { + return !(b < a); + } + + + template + inline bool operator>=(const bitvector& a, + const bitvector& b) + { + return !(a < b); + } + + template + inline void swap(bitvector& a, + bitvector& b) + { + a.swap(b); + } + + +} // namespace eastl + + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +#endif // Header include guard + + + + + + + + + + + + diff --git a/include/EASTL/bonus/adaptors.h b/include/EASTL/bonus/adaptors.h new file mode 100644 index 0000000..94ff2ce --- /dev/null +++ b/include/EASTL/bonus/adaptors.h @@ -0,0 +1,74 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ADAPTORS_H +#define EASTL_ADAPTORS_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + +EA_DISABLE_VC_WARNING(4512 4626) +#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015+ + EA_DISABLE_VC_WARNING(5027) // move assignment operator was implicitly defined as deleted +#endif + + +namespace eastl +{ + /// reverse + /// + /// This adaptor allows reverse iteration of a container in ranged base for-loops. + /// + /// for (auto& i : reverse(c)) { ... } + /// + template + struct reverse_wrapper + { + reverse_wrapper(Container& c) : mContainer(c) {} + Container& mContainer; + }; + + template + auto begin(const reverse_wrapper& w) -> decltype(rbegin(w.mContainer)) + { return rbegin(w.mContainer); } + + template + auto end(const reverse_wrapper& w) -> decltype(rend(w.mContainer)) + { return rend(w.mContainer); } + + template + reverse_wrapper reverse(Container&& c) + { return reverse_wrapper(eastl::forward(c)); } + +} // namespace eastl + +#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015+ + EA_RESTORE_VC_WARNING() +#endif +EA_RESTORE_VC_WARNING() + +#endif // Header include guard + + + + + + + + + + + + + + diff --git a/include/EASTL/bonus/call_traits.h b/include/EASTL/bonus/call_traits.h new file mode 100644 index 0000000..0995d05 --- /dev/null +++ b/include/EASTL/bonus/call_traits.h @@ -0,0 +1,117 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// The design for call_traits here is very similar to that found in template +// metaprogramming libraries such as Boost, GCC, and Metrowerks, given that +// these libraries have established this interface as a defacto standard for +// solving this problem. Also, these are described in various books on the +// topic of template metaprogramming, such as "Modern C++ Design". 
+// +// See http://www.boost.org/libs/utility/call_traits.htm or search for +// call_traits in Google for a description of call_traits. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_CALL_TRAITS_H +#define EASTL_CALL_TRAITS_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + + template + struct ct_imp2 { typedef const T& param_type; }; + + template + struct ct_imp2 { typedef const T param_type; }; + + template + struct ct_imp { typedef const T& param_type; }; + + template + struct ct_imp { typedef typename ct_imp2::param_type param_type; }; + + template + struct ct_imp { typedef T const param_type; }; + + + + template + struct call_traits + { + public: + typedef T value_type; + typedef T& reference; + typedef const T& const_reference; + typedef typename ct_imp::value, is_arithmetic::value>::param_type param_type; + }; + + + template + struct call_traits + { + typedef T& value_type; + typedef T& reference; + typedef const T& const_reference; + typedef T& param_type; + }; + + + template + struct call_traits + { + private: + typedef T array_type[N]; + + public: + typedef const T* value_type; + typedef array_type& reference; + typedef const array_type& const_reference; + typedef const T* const param_type; + }; + + + template + struct call_traits + { + private: + typedef const T array_type[N]; + + public: + typedef const T* value_type; + typedef array_type& reference; + typedef const array_type& const_reference; + typedef const T* const param_type; + }; + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + diff --git a/include/EASTL/bonus/compressed_pair.h b/include/EASTL/bonus/compressed_pair.h new file mode 100644 index 0000000..379642b --- /dev/null +++ 
b/include/EASTL/bonus/compressed_pair.h @@ -0,0 +1,460 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// The compressed pair class is very similar to std::pair, but if either of the +// template arguments are empty classes, then the "empty base-class optimization" +// is applied to compress the size of the pair. +// +// The design for compressed_pair here is very similar to that found in template +// metaprogramming libraries such as Boost, GCC, and Metrowerks, given that +// these libraries have established this interface as a defacto standard for +// solving this problem. Also, these are described in various books on the +// topic of template metaprogramming, such as "Modern C++ Design". +// +// template +// class compressed_pair +// { +// public: +// typedef T1 first_type; +// typedef T2 second_type; +// typedef typename call_traits::param_type first_param_type; +// typedef typename call_traits::param_type second_param_type; +// typedef typename call_traits::reference first_reference; +// typedef typename call_traits::reference second_reference; +// typedef typename call_traits::const_reference first_const_reference; +// typedef typename call_traits::const_reference second_const_reference; +// +// compressed_pair() : base() {} +// compressed_pair(first_param_type x, second_param_type y); +// explicit compressed_pair(first_param_type x); +// explicit compressed_pair(second_param_type y); +// +// compressed_pair& operator=(const compressed_pair&); +// +// first_reference first(); +// first_const_reference first() const; +// +// second_reference second(); +// second_const_reference second() const; +// +// void swap(compressed_pair& y); +// }; +// +// The two members of the pair can be accessed using the member 
functions first() +// and second(). Note that not all member functions can be instantiated for all +// template parameter types. In particular compressed_pair can be instantiated for +// reference and array types, however in these cases the range of constructors that +// can be used are limited. If types T1 and T2 are the same type, then there is +// only one version of the single-argument constructor, and this constructor +// initialises both values in the pair to the passed value. +// +// Note that compressed_pair can not be instantiated if either of the template +// arguments is a union type, unless there is compiler support for is_union, +// or if is_union is specialised for the union type. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_COMPRESSED_PAIR_H +#define EASTL_COMPRESSED_PAIR_H + + +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + +#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later + EA_DISABLE_VC_WARNING(4626 5027) // warning C4626: 'eastl::compressed_pair_imp': assignment operator was implicitly defined as deleted because a base class assignment operator is inaccessible or deleted +#endif + +namespace eastl +{ + + template + class compressed_pair; + + + template + struct compressed_pair_switch; + + template + struct compressed_pair_switch{ static const int value = 0; }; + + template + struct compressed_pair_switch { static const int value = 1; }; + + template + struct compressed_pair_switch { static const int value = 2; }; + + template + struct compressed_pair_switch { static const int value = 3; }; + + template + struct compressed_pair_switch { static const int value = 4; }; + + template + struct compressed_pair_switch { static const int value = 5; }; + + template + class compressed_pair_imp; + + + + template + inline void cp_swap(T& t1, T& t2) + { + T tTemp = t1; + t1 = t2; + t2 = tTemp; + } + + + // Derive from neither + template + class compressed_pair_imp + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type y) + : mFirst(x), mSecond(y) {} + + compressed_pair_imp(first_param_type x) + : mFirst(x) {} + + compressed_pair_imp(second_param_type y) + : mSecond(y) {} + + first_reference first() { return mFirst; } + first_const_reference first() const { return mFirst; } + + second_reference second() { return mSecond; } + second_const_reference second() const { return mSecond; } + + void 
swap(compressed_pair& y) + { + cp_swap(mFirst, y.first()); + cp_swap(mSecond, y.second()); + } + + private: + first_type mFirst; + second_type mSecond; + }; + + + // Derive from T1 + template + class compressed_pair_imp : private T1 + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type y) + : first_type(x), mSecond(y) {} + + compressed_pair_imp(first_param_type x) + : first_type(x) {} + + compressed_pair_imp(second_param_type y) + : mSecond(y) {} + + first_reference first() { return *this; } + first_const_reference first() const { return *this; } + + second_reference second() { return mSecond; } + second_const_reference second() const { return mSecond; } + + void swap(compressed_pair& y) + { + // No need to swap empty base class + cp_swap(mSecond, y.second()); + } + + private: + second_type mSecond; + }; + + + + // Derive from T2 + template + class compressed_pair_imp : private T2 + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type y) + : second_type(y), mFirst(x) {} + + compressed_pair_imp(first_param_type 
x) + : mFirst(x) {} + + compressed_pair_imp(second_param_type y) + : second_type(y) {} + + first_reference first() { return mFirst; } + first_const_reference first() const { return mFirst; } + + second_reference second() { return *this; } + second_const_reference second() const { return *this; } + + void swap(compressed_pair& y) + { + // No need to swap empty base class + cp_swap(mFirst, y.first()); + } + + private: + first_type mFirst; + }; + + + + // Derive from T1 and T2 + template + class compressed_pair_imp : private T1, private T2 + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type y) + : first_type(x), second_type(y) {} + + compressed_pair_imp(first_param_type x) + : first_type(x) {} + + compressed_pair_imp(second_param_type y) + : second_type(y) {} + + first_reference first() { return *this; } + first_const_reference first() const { return *this; } + + second_reference second() { return *this; } + second_const_reference second() const { return *this; } + + // No need to swap empty bases + void swap(compressed_pair&) + { } + }; + + + // T1 == T2, T1 and T2 are both empty + // Note does not actually store an instance of T2 at all; + // but reuses T1 base class for both first() and second(). 
+ template + class compressed_pair_imp : private T1 + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type) + : first_type(x) {} + + compressed_pair_imp(first_param_type x) + : first_type(x) {} + + first_reference first() { return *this; } + first_const_reference first() const { return *this; } + + second_reference second() { return *this; } + second_const_reference second() const { return *this; } + + void swap(compressed_pair&) { } + }; + + + // T1 == T2 and are not empty + template + class compressed_pair_imp + { + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair_imp() {} + + compressed_pair_imp(first_param_type x, second_param_type y) + : mFirst(x), mSecond(y) {} + + compressed_pair_imp(first_param_type x) + : mFirst(x), mSecond(x) {} + + first_reference first() { return mFirst; } + first_const_reference first() const { return mFirst; } + + second_reference second() { return mSecond; } + second_const_reference second() const { return mSecond; } + + void swap(compressed_pair& y) + { + cp_swap(mFirst, y.first()); + cp_swap(mSecond, y.second()); + } + + private: + first_type 
mFirst; + second_type mSecond; + }; + + + + template + class compressed_pair + : private compressed_pair_imp::type, typename remove_cv::type>::value, + is_empty::value, + is_empty::value>::value> + { + private: + typedef compressed_pair_imp::type, typename remove_cv::type>::value, + is_empty::value, + is_empty::value>::value> base; + public: + typedef T1 first_type; + typedef T2 second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair() : base() {} + compressed_pair(first_param_type x, second_param_type y) : base(x, y) {} + explicit compressed_pair(first_param_type x) : base(x) {} + explicit compressed_pair(second_param_type y) : base(y) {} + + first_reference first() { return base::first(); } + first_const_reference first() const { return base::first(); } + + second_reference second() { return base::second(); } + second_const_reference second() const { return base::second(); } + + void swap(compressed_pair& y) { base::swap(y); } + }; + + + // Partial specialisation for case where T1 == T2: + template + class compressed_pair + : private compressed_pair_imp::type, typename remove_cv::type>::value, + is_empty::value, + is_empty::value>::value> + { + private: + typedef compressed_pair_imp::type, typename remove_cv::type>::value, + is_empty::value, + is_empty::value>::value> base; + public: + typedef T first_type; + typedef T second_type; + typedef typename call_traits::param_type first_param_type; + typedef typename call_traits::param_type second_param_type; + typedef typename call_traits::reference first_reference; + typedef typename call_traits::reference second_reference; + typedef typename 
call_traits::const_reference first_const_reference; + typedef typename call_traits::const_reference second_const_reference; + + compressed_pair() : base() {} + compressed_pair(first_param_type x, second_param_type y) : base(x, y) {} + explicit compressed_pair(first_param_type x) : base(x) {} + + first_reference first() { return base::first(); } + first_const_reference first() const { return base::first(); } + + second_reference second() { return base::second(); } + second_const_reference second() const { return base::second(); } + + void swap(compressed_pair& y) { base::swap(y); } + }; + + + template + inline void swap(compressed_pair& x, compressed_pair& y) + { + x.swap(y); + } + + +} // namespace eastl + +#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later + EA_RESTORE_VC_WARNING() +#endif + +#endif // Header include guard + + + diff --git a/include/EASTL/bonus/fixed_ring_buffer.h b/include/EASTL/bonus/fixed_ring_buffer.h new file mode 100644 index 0000000..2bb54e4 --- /dev/null +++ b/include/EASTL/bonus/fixed_ring_buffer.h @@ -0,0 +1,50 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FIXED_RING_BUFFER_H +#define EASTL_FIXED_RING_BUFFER_H + +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + +namespace eastl +{ + + /// fixed_ring_buffer + /// + /// This is a convenience template alias for creating a fixed-sized + /// ring_buffer using eastl::fixed_vector as its storage container. This has + /// been tricky for users to get correct due to the constructor requirements + /// of eastl::ring_buffer leaking the implementation detail of the sentinel + /// value being used internally. 
In addition, it was not obvious what the + /// correct allocator_type template parameter should be used for containers + /// providing both a default allocator type and an overflow allocator type. + /// + /// We are over-allocating the fixed_vector container to accommodate the + /// ring_buffer sentinel to prevent that implementation detail leaking into + /// user code. + /// + /// Example usage: + /// + /// fixed_ring_buffer rb = {0, 1, 2, 3, 4, 5, 6, 7}; + /// or + /// fixed_ring_buffer rb(8); // capacity doesn't need to respect sentinel + /// rb.push_back(0); + /// + /// +#if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + using fixed_ring_buffer = + ring_buffer, typename fixed_vector::overflow_allocator_type>; +#endif + +} // namespace eastl + +#endif // Header include guard + diff --git a/include/EASTL/bonus/fixed_tuple_vector.h b/include/EASTL/bonus/fixed_tuple_vector.h new file mode 100644 index 0000000..e9ce0ec --- /dev/null +++ b/include/EASTL/bonus/fixed_tuple_vector.h @@ -0,0 +1,210 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FIXEDTUPLEVECTOR_H +#define EASTL_FIXEDTUPLEVECTOR_H + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + +namespace eastl +{ + + /// EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. 
+ /// + #ifndef EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME + #define EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_tuple_vector" // Unless the user overrides something, this is "EASTL fixed_vector". + #endif + + + /// EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR + #define EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_TUPLE_VECTOR_DEFAULT_NAME) + #endif + +// External interface of fixed_tuple_vector +template +class fixed_tuple_vector : public TupleVecInternal::TupleVecImpl::GetTotalAllocationSize(nodeCount, 0), 1, + TupleVecInternal::TupleRecurser::GetTotalAlignment(), 0, + bEnableOverflow, EASTLAllocatorType>, make_index_sequence, Ts...> +{ +public: + typedef fixed_vector_allocator< + TupleVecInternal::TupleRecurser::GetTotalAllocationSize(nodeCount, 0), 1, + TupleVecInternal::TupleRecurser::GetTotalAlignment(), 0, + bEnableOverflow, EASTLAllocatorType> fixed_allocator_type; + typedef aligned_buffer aligned_buffer_type; + typedef fixed_tuple_vector this_type; + typedef EASTLAllocatorType overflow_allocator_type; + + typedef TupleVecInternal::TupleVecImpl, Ts...> base_type; + typedef typename base_type::size_type size_type; + +private: + aligned_buffer_type mBuffer; + +public: + fixed_tuple_vector() + : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { } + + fixed_tuple_vector(const overflow_allocator_type& allocator) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { } + + fixed_tuple_vector(this_type&& x) + : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::get_allocator().copy_overflow_allocator(x.get_allocator()); + base_type::DoInitFromIterator(make_move_iterator(x.begin()), make_move_iterator(x.end())); + x.clear(); + } + + 
fixed_tuple_vector(this_type&& x, const overflow_allocator_type& allocator) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromIterator(make_move_iterator(x.begin()), make_move_iterator(x.end())); + x.clear(); + } + + fixed_tuple_vector(const this_type& x) + : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::get_allocator().copy_overflow_allocator(x.get_allocator()); + base_type::DoInitFromIterator(x.begin(), x.end()); + } + + fixed_tuple_vector(const this_type& x, const overflow_allocator_type& allocator) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromIterator(x.begin(), x.end()); + } + + template + fixed_tuple_vector(move_iterator begin, move_iterator end, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromIterator(begin, end); + } + + template + fixed_tuple_vector(Iterator begin, Iterator end, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromIterator(begin, end); + } + + fixed_tuple_vector(size_type n, const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitDefaultFill(n); + } + + fixed_tuple_vector(size_type n, const Ts&... 
args) + : base_type(fixed_allocator_type(mBuffer.buffer), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFillArgs(n, args...); + } + + fixed_tuple_vector(size_type n, const Ts&... args, const overflow_allocator_type& allocator) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFillArgs(n, args...); + } + + fixed_tuple_vector(size_type n, + typename base_type::const_reference_tuple tup, + const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFillTuple(n, tup); + } + + fixed_tuple_vector(const typename base_type::value_tuple* first, const typename base_type::value_tuple* last, + const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromTupleArray(first, last); + } + + fixed_tuple_vector(std::initializer_list iList, + const overflow_allocator_type& allocator = EASTL_FIXED_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : base_type(fixed_allocator_type(mBuffer.buffer, allocator), mBuffer.buffer, nodeCount, fixed_allocator_type::kNodeSize) + { + base_type::DoInitFromTupleArray(iList.begin(), iList.end()); + } + + this_type& operator=(const this_type& other) + { + base_type::operator=(other); + return *this; + } + + this_type& operator=(this_type&& other) + { + base_type::clear(); + // OK to call DoInitFromIterator in a non-ctor scenario because clear() reset everything, more-or-less + base_type::DoInitFromIterator(make_move_iterator(other.begin()), make_move_iterator(other.end())); + other.clear(); + return *this; + } + + this_type& operator=(std::initializer_list iList) + { + 
base_type::operator=(iList); + return *this; + } + + void swap(this_type& x) + { + // If both containers are using the heap instead of local memory + // then we can do a fast pointer swap instead of content swap. + if ((has_overflowed() && x.has_overflowed()) && (get_overflow_allocator() == x.get_overflow_allocator())) + { + base_type::swap(x); + } + else + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + } + + // Returns the max fixed size, which is the user-supplied nodeCount parameter. + size_type max_size() const { return nodeCount; } + // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, + // the container size can be greater than nodeCount but full() could return true because the + // fixed space may have a recently freed slot. + bool full() const { return (base_type::mNumElements >= nodeCount) || ((void*)base_type::mpData != (void*)mBuffer.buffer); } + // Returns true if the allocations spilled over into the overflow allocator. Meaningful + // only if overflow is enabled. + bool has_overflowed() const { return ((void*)base_type::mpData != (void*)mBuffer.buffer); } + // Returns the value of the bEnableOverflow template parameter. + bool can_overflow() const { return bEnableOverflow; } + + const overflow_allocator_type& get_overflow_allocator() const { return base_type::get_allocator().get_overflow_allocator(); } +}; + + +template +inline void swap(fixed_tuple_vector& a, + fixed_tuple_vector& b) +{ + a.swap(b); +} + + +} // namespace eastl + +#endif // EASTL_TUPLEVECTOR_H diff --git a/include/EASTL/bonus/intrusive_sdlist.h b/include/EASTL/bonus/intrusive_sdlist.h new file mode 100644 index 0000000..1b126d4 --- /dev/null +++ b/include/EASTL/bonus/intrusive_sdlist.h @@ -0,0 +1,694 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// intrusive_sdlist is a special kind of intrusive list which we say is +// "singly-doubly" linked. Instead of having a typical intrusive list node +// which looks like this: +// +// struct intrusive_sdlist_node { +// intrusive_sdlist_node *mpNext; +// intrusive_sdlist_node *mpPrev; +// }; +// +// We instead have one that looks like this: +// +// struct intrusive_sdlist_node { +// intrusive_sdlist_node* mpNext; +// intrusive_sdlist_node** mppPrevNext; +// }; +// +// This may seem to be suboptimal, but it has one specific advantage: it allows +// the intrusive_sdlist class to be the size of only one pointer instead of two. +// This may seem like a minor optimization, but some users have wanted to create +// thousands of empty instances of these. +// This is because while an intrusive_list class looks like this: +// +// class intrusive_list { +// intrusive_list_node mBaseNode; +// }; +// +// an intrusive_sdlist class looks like this: +// +// class intrusive_sdlist { +// intrusive_sdlist_node* mpNext; +// }; +// +// So here we make a list of plusses and minuses of intrusive sdlists +// compared to intrusive_lists and intrusive_slists: +// +// | list | slist | sdlist +// --------------------------------------------------------- +// min size | 8 | 4 | 4 +// node size | 8 | 4 | 8 +// anonymous erase | yes | no | yes +// reverse iteration | yes | no | no +// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTRUSIVE_SDLIST_H +#define EASTL_INTRUSIVE_SDLIST_H + + +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + + + +namespace eastl +{ + + + /// intrusive_sdlist_node + /// + struct intrusive_sdlist_node + { + intrusive_sdlist_node* mpNext; + intrusive_sdlist_node** mppPrevNext; + }; + + + /// IntrusiveSDListIterator + /// + template + struct IntrusiveSDListIterator + { + typedef IntrusiveSDListIterator this_type; + typedef IntrusiveSDListIterator iterator; + typedef IntrusiveSDListIterator const_iterator; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef T node_type; + typedef Pointer pointer; + typedef Reference reference; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + pointer mpNode; + + public: + IntrusiveSDListIterator(); + explicit IntrusiveSDListIterator(pointer pNode); // Note that you can also construct an iterator from T via this, since value_type == node_type. + IntrusiveSDListIterator(const iterator& x); + + reference operator*() const; + pointer operator->() const; + + this_type& operator++(); + this_type operator++(int); + + }; // struct IntrusiveSDListIterator + + + + + /// intrusive_sdlist_base + /// + /// Provides a template-less base class for intrusive_sdlist. + /// + class intrusive_sdlist_base + { + public: + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + + protected: + intrusive_sdlist_node* mpNext; + + public: + intrusive_sdlist_base(); + + bool empty() const; ///< Returns true if the container is empty. + size_type size() const; ///< Returns the number of elements in the list; O(n). + + void clear(); ///< Clears the list; O(1). No deallocation occurs. + void pop_front(); ///< Removes an element from the front of the list; O(1). The element must be present, but is not deallocated. + void reverse(); ///< Reverses a list so that front and back are swapped; O(n). 
+ + //bool validate() const; ///< Scans a list for linkage inconsistencies; O(n) time, O(1) space. Returns false if errors are detected, such as loops or branching. + + }; // class intrusive_sdlist_base + + + + /// intrusive_sdlist + /// + template + class intrusive_sdlist : public intrusive_sdlist_base + { + public: + typedef intrusive_sdlist this_type; + typedef intrusive_sdlist_base base_type; + typedef T node_type; + typedef T value_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::difference_type difference_type; + typedef T& reference; + typedef const T& const_reference; + typedef T* pointer; + typedef const T* const_pointer; + typedef IntrusiveSDListIterator iterator; + typedef IntrusiveSDListIterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + + public: + intrusive_sdlist(); ///< Creates an empty list. + intrusive_sdlist(const this_type& x); ///< Creates an empty list; ignores the argument. + this_type& operator=(const this_type& x); ///< Clears the list; ignores the argument. + + iterator begin(); ///< Returns an iterator pointing to the first element in the list. + const_iterator begin() const; ///< Returns a const_iterator pointing to the first element in the list. + const_iterator cbegin() const; ///< Returns a const_iterator pointing to the first element in the list. + + iterator end(); ///< Returns an iterator pointing one-after the last element in the list. + const_iterator end() const; ///< Returns a const_iterator pointing one-after the last element in the list. + const_iterator cend() const; ///< Returns a const_iterator pointing one-after the last element in the list. + + reference front(); ///< Returns a reference to the first element. The list must be empty. + const_reference front() const; ///< Returns a const reference to the first element. The list must be empty. 
+ + void push_front(value_type& value); ///< Adds an element to the front of the list; O(1). The element is not copied. The element must not be in any other list. + void push_back(value_type& value); ///< Adds an element to the back of the list; O(N). The element is not copied. The element must not be in any other list. + void pop_back(); ///< Removes an element from the back of the list; O(N). The element must be present, but is not deallocated. + + bool contains(const value_type& value) const; ///< Returns true if the given element is in the list; O(n). Equivalent to (locate(x) != end()). + + iterator locate(value_type& value); ///< Converts a reference to an object in the list back to an iterator, or returns end() if it is not part of the list. O(n) + const_iterator locate(const value_type& value) const; ///< Converts a const reference to an object in the list back to a const iterator, or returns end() if it is not part of the list. O(n) + + iterator insert(iterator position, value_type& value); ///< Inserts an element before the element pointed to by the iterator. O(1) + iterator erase(iterator position); ///< Erases the element pointed to by the iterator. O(1) + iterator erase(iterator first, iterator last); ///< Erases elements within the iterator range [first, last). O(1). + void swap(intrusive_sdlist& x); ///< Swaps the contents of two intrusive lists; O(1). + + static void remove(value_type& value); ///< Erases an element from a list; O(1). Note that this is static so you don't need to know which list the element, although it must be in some list. + + void splice(iterator position, value_type& value); ///< Moves the given element into this list before the element pointed to by position; O(1). + ///< Required: x must be in some list or have first/next pointers that point it itself. + + void splice(iterator position, this_type& x); ///< Moves the contents of a list into this list before the element pointed to by position; O(1). 
+ ///< Required: &x != this (same as std::list). + + void splice(iterator position, this_type& x, iterator xPosition); ///< Moves the given element pointed to i within the list x into the current list before + ///< the element pointed to by position; O(1). + + void splice(iterator position, this_type& x, iterator first, iterator last); ///< Moves the range of elements [first, last) from list x into the current list before + ///< the element pointed to by position; O(1). + ///< Required: position must not be in [first, last). (same as std::list). + bool validate() const; + int validate_iterator(const_iterator i) const; + + }; // intrusive_sdlist + + + + + /////////////////////////////////////////////////////////////////////// + // IntrusiveSDListIterator functions + /////////////////////////////////////////////////////////////////////// + + template + inline IntrusiveSDListIterator::IntrusiveSDListIterator() + { + #if EASTL_DEBUG + mpNode = NULL; + #endif + } + + template + inline IntrusiveSDListIterator::IntrusiveSDListIterator(pointer pNode) + : mpNode(pNode) + { + } + + template + inline IntrusiveSDListIterator::IntrusiveSDListIterator(const iterator& x) + : mpNode(x.mpNode) + { + } + + template + inline typename IntrusiveSDListIterator::reference + IntrusiveSDListIterator::operator*() const + { + return *mpNode; + } + + template + inline typename IntrusiveSDListIterator::pointer + IntrusiveSDListIterator::operator->() const + { + return mpNode; + } + + template + inline typename IntrusiveSDListIterator::this_type& + IntrusiveSDListIterator::operator++() + { + mpNode = static_cast(mpNode->mpNext); + return *this; + } + + template + inline typename IntrusiveSDListIterator::this_type + IntrusiveSDListIterator::operator++(int) + { + this_type temp = *this; + mpNode = static_cast(mpNode->mpNext); + return temp; + } + + // The C++ defect report #179 requires that we support comparisons between const and non-const iterators. 
+ // Thus we provide additional template paremeters here to support this. The defect report does not + // require us to support comparisons between reverse_iterators and const_reverse_iterators. + template + inline bool operator==(const IntrusiveSDListIterator& a, + const IntrusiveSDListIterator& b) + { + return a.mpNode == b.mpNode; + } + + + template + inline bool operator!=(const IntrusiveSDListIterator& a, + const IntrusiveSDListIterator& b) + { + return a.mpNode != b.mpNode; + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const IntrusiveSDListIterator& a, + const IntrusiveSDListIterator& b) + { + return a.mpNode != b.mpNode; + } + + + + /////////////////////////////////////////////////////////////////////// + // intrusive_sdlist_base + /////////////////////////////////////////////////////////////////////// + + inline intrusive_sdlist_base::intrusive_sdlist_base() + { mpNext = NULL; } + + + inline bool intrusive_sdlist_base::empty() const + { return mpNext == NULL; } + + + inline intrusive_sdlist_base::size_type intrusive_sdlist_base::size() const + { + size_type n = 0; + for(const intrusive_sdlist_node* pCurrent = mpNext; pCurrent; pCurrent = pCurrent->mpNext) + n++; + return n; + } + + + inline void intrusive_sdlist_base::clear() + { mpNext = NULL; } // Note that we don't do anything with the list nodes. + + + inline void intrusive_sdlist_base::pop_front() + { + // To consider: Set mpNext's pointers to NULL in debug builds. 
+ mpNext = mpNext->mpNext; + mpNext->mppPrevNext = &mpNext; + } + + + + /////////////////////////////////////////////////////////////////////// + // intrusive_sdlist + /////////////////////////////////////////////////////////////////////// + + template + inline intrusive_sdlist::intrusive_sdlist() + { + } + + + template + inline intrusive_sdlist::intrusive_sdlist(const this_type& /*x*/) + : intrusive_sdlist_base() + { + // We intentionally ignore argument x. + } + + + template + inline typename intrusive_sdlist::this_type& intrusive_sdlist::operator=(const this_type& /*x*/) + { + return *this; // We intentionally ignore argument x. + } + + + template + inline typename intrusive_sdlist::iterator intrusive_sdlist::begin() + { return iterator(static_cast(mpNext)); } + + + template + inline typename intrusive_sdlist::const_iterator intrusive_sdlist::begin() const + { return const_iterator(static_cast(const_cast(mpNext))); } + + + template + inline typename intrusive_sdlist::const_iterator intrusive_sdlist::cbegin() const + { return const_iterator(static_cast(const_cast(mpNext))); } + + + template + inline typename intrusive_sdlist::iterator intrusive_sdlist::end() + { return iterator(static_cast(NULL)); } + + + template + inline typename intrusive_sdlist::const_iterator intrusive_sdlist::end() const + { return const_iterator(static_cast(NULL)); } + + + template + inline typename intrusive_sdlist::const_iterator intrusive_sdlist::cend() const + { return const_iterator(static_cast(NULL)); } + + + template + inline typename intrusive_sdlist::reference intrusive_sdlist::front() + { return *static_cast(mpNext); } + + + template + inline typename intrusive_sdlist::const_reference intrusive_sdlist::front() const + { return *static_cast(mpNext); } + + + template + inline void intrusive_sdlist::push_front(value_type& value) + { + value.mpNext = mpNext; + value.mppPrevNext = &mpNext; + if(mpNext) + mpNext->mppPrevNext = &value.mpNext; + mpNext = &value; + } + + + template + 
inline void intrusive_sdlist::push_back(value_type& value) + { + intrusive_sdlist_node* pNext = mpNext; + intrusive_sdlist_node** ppPrevNext = &mpNext; + + while(pNext) + { + ppPrevNext = &pNext->mpNext; + pNext = pNext->mpNext; + } + + *ppPrevNext = &value; + value.mppPrevNext = ppPrevNext; + value.mpNext = NULL; + } + + + template + inline void intrusive_sdlist::pop_back() + { + node_type* pCurrent = static_cast(mpNext); + + while(pCurrent->mpNext) + pCurrent = static_cast(pCurrent->mpNext); + + *pCurrent->mppPrevNext = NULL; + } + + template + inline bool intrusive_sdlist::contains(const value_type& value) const + { + const intrusive_sdlist_node* pCurrent; + + for(pCurrent = mpNext; pCurrent; pCurrent = pCurrent->mpNext) + { + if(pCurrent == &value) + break; + } + + return (pCurrent != NULL); + } + + + template + inline typename intrusive_sdlist::iterator intrusive_sdlist::locate(value_type& value) + { + intrusive_sdlist_node* pCurrent; + + for(pCurrent = static_cast(mpNext); pCurrent; pCurrent = pCurrent->mpNext) + { + if(pCurrent == &value) + break; + } + + return iterator(static_cast(pCurrent)); + } + + + template + inline typename intrusive_sdlist::const_iterator intrusive_sdlist::locate(const T& value) const + { + const intrusive_sdlist_node* pCurrent; + + for(pCurrent = static_cast(mpNext); pCurrent; pCurrent = pCurrent->mpNext) + { + if(pCurrent == &value) + break; + } + + return const_iterator(static_cast(const_cast(pCurrent))); + } + + + template + inline typename intrusive_sdlist::iterator + intrusive_sdlist::insert(iterator position, value_type& value) + { + value.mppPrevNext = position.mpNode->mppPrevNext; + value.mpNext = position.mpNode; + *value.mppPrevNext = &value; + position.mpNode->mppPrevNext = &value.mpNext; + + return iterator(&value); + } + + + template + inline typename intrusive_sdlist::iterator + intrusive_sdlist::erase(iterator position) + { + *position.mpNode->mppPrevNext = position.mpNode->mpNext; + 
position.mpNode->mpNext->mppPrevNext = position.mpNode->mppPrevNext; + + return iterator(position.mpNode); + } + + + template + inline typename intrusive_sdlist::iterator + intrusive_sdlist::erase(iterator first, iterator last) + { + if(first.mpNode) // If not erasing the end... + { + *first.mpNode->mppPrevNext = last.mpNode; + + if(last.mpNode) // If not erasing to the end... + last.mpNode->mppPrevNext = first.mpNode->mppPrevNext; + } + + return last; + } + + + template + inline void intrusive_sdlist::remove(value_type& value) + { + *value.mppPrevNext = value.mpNext; + if(value.mpNext) + value.mpNext->mppPrevNext = value.mppPrevNext; + } + + + template + void intrusive_sdlist::swap(intrusive_sdlist& x) + { + // swap anchors + intrusive_sdlist_node* const temp(mpNext); + mpNext = x.mpNext; + x.mpNext = temp; + + if(x.mpNext) + x.mpNext->mppPrevNext = &mpNext; + + if(mpNext) + mpNext->mppPrevNext = &x.mpNext; + } + + + + + + // To do: Complete these splice functions. Might want to look at intrusive_sdlist for help. + + template + void intrusive_sdlist::splice(iterator /*position*/, value_type& /*value*/) + { + EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion. + } + + + template + void intrusive_sdlist::splice(iterator /*position*/, intrusive_sdlist& /*x*/) + { + EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion. + } + + + template + void intrusive_sdlist::splice(iterator /*position*/, intrusive_sdlist& /*x*/, iterator /*xPosition*/) + { + EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion. + } + + + template + void intrusive_sdlist::splice(iterator /*position*/, intrusive_sdlist& /*x*/, iterator /*first*/, iterator /*last*/) + { + EASTL_ASSERT(false); // If you need this working, ask Paul Pedriana or submit a working version for inclusion. 
+ } + + + template + inline bool intrusive_sdlist::validate() const + { + return true; // To do. + } + + + template + inline int intrusive_sdlist::validate_iterator(const_iterator i) const + { + // To do: Come up with a more efficient mechanism of doing this. + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if(temp == i) + return (isf_valid | isf_current | isf_can_dereference); + } + + if(i == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + bool operator==(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + // If we store an mSize member for intrusive_sdlist, we want to take advantage of it here. + typename intrusive_sdlist::const_iterator ia = a.begin(); + typename intrusive_sdlist::const_iterator ib = b.begin(); + typename intrusive_sdlist::const_iterator enda = a.end(); + typename intrusive_sdlist::const_iterator endb = b.end(); + + while((ia != enda) && (ib != endb) && (*ia == *ib)) + { + ++ia; + ++ib; + } + return (ia == enda) && (ib == endb); + } + + template + bool operator<(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + template + bool operator!=(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + return !(a == b); + } + + template + bool operator>(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + return b < a; + } + + template + bool operator<=(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + return !(b < a); + } + + template + bool operator>=(const intrusive_sdlist& a, const intrusive_sdlist& b) + { + return !(a < b); + } + + template + void swap(intrusive_sdlist& a, intrusive_sdlist& b) + { + a.swap(b); + } + + +} // namespace eastl + + +#endif // Header include guard + + + 
+ + + + + + + + + + + + + + + + + + + + + diff --git a/include/EASTL/bonus/intrusive_slist.h b/include/EASTL/bonus/intrusive_slist.h new file mode 100644 index 0000000..28d445d --- /dev/null +++ b/include/EASTL/bonus/intrusive_slist.h @@ -0,0 +1,321 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// *** Note *** +// This implementation is incomplete. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTRUSIVE_SLIST_H +#define EASTL_INTRUSIVE_SLIST_H + + +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// intrusive_slist_node + /// + struct intrusive_slist_node + { + intrusive_slist_node* mpNext; + }; + + + /// IntrusiveSListIterator + /// + template + struct IntrusiveSListIterator + { + typedef IntrusiveSListIterator this_type; + typedef IntrusiveSListIterator iterator; + typedef IntrusiveSListIterator const_iterator; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef T node_type; + typedef Pointer pointer; + typedef Reference reference; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + node_type* mpNode; + + public: + IntrusiveSListIterator(); + explicit IntrusiveSListIterator(pointer pNode); // Note that you can also construct an iterator from T via this, since value_type == node_type. 
+ IntrusiveSListIterator(const iterator& x); + + reference operator*() const; + pointer operator->() const; + + this_type& operator++(); + this_type operator++(int); + + }; // struct IntrusiveSListIterator + + + + /// intrusive_slist_base + /// + /// Provides a template-less base class for intrusive_slist. + /// + class intrusive_slist_base + { + public: + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + + protected: + intrusive_slist_node* mpNext; + + public: + intrusive_slist_base(); + + bool empty() const; ///< Returns true if the container is empty. + size_type size() const; ///< Returns the number of elements in the list; O(n). + + void clear(); ///< Clears the list; O(1). No deallocation occurs. + void pop_front(); ///< Removes an element from the front of the list; O(1). The element must be present, but is not deallocated. + void reverse(); ///< Reverses a list so that front and back are swapped; O(n). + + //bool validate() const; ///< Scans a list for linkage inconsistencies; O(n) time, O(1) space. Returns false if errors are detected, such as loops or branching. + + }; // class intrusive_slist_base + + + + /// intrusive_slist + /// + template + class intrusive_slist : public intrusive_slist_base + { + public: + typedef intrusive_slist this_type; + typedef intrusive_slist_base base_type; + typedef T node_type; + typedef T value_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::difference_type difference_type; + typedef T& reference; + typedef const T& const_reference; + typedef T* pointer; + typedef const T* const_pointer; + typedef IntrusiveSListIterator iterator; + typedef IntrusiveSListIterator const_iterator; + + public: + intrusive_slist(); ///< Creates an empty list. + //intrusive_slist(const this_type& x); ///< Creates an empty list; ignores the argument. To consider: Is this a useful function? 
+ //this_type& operator=(const this_type& x); ///< Clears the list; ignores the argument. To consider: Is this a useful function? + + iterator begin(); ///< Returns an iterator pointing to the first element in the list. O(1). + const_iterator begin() const; ///< Returns a const_iterator pointing to the first element in the list. O(1). + const_iterator cbegin() const; ///< Returns a const_iterator pointing to the first element in the list. O(1). + iterator end(); ///< Returns an iterator pointing one-after the last element in the list. O(1). + const_iterator end() const; ///< Returns a const_iterator pointing one-after the last element in the list. O(1). + const_iterator cend() const; ///< Returns a const_iterator pointing one-after the last element in the list. O(1). + iterator before_begin(); ///< Returns iterator to position before begin. O(1). + const_iterator before_begin() const; ///< Returns iterator to previous position. O(1). + const_iterator cbefore_begin() const; ///< Returns iterator to previous position. O(1). + + iterator previous(const_iterator position); ///< Returns iterator to previous position. O(n). + const_iterator previous(const_iterator position) const; ///< Returns iterator to previous position. O(n). + + reference front(); ///< Returns a reference to the first element. The list must be empty. + const_reference front() const; ///< Returns a const reference to the first element. The list must be empty. + + void push_front(value_type& value); ///< Adds an element to the front of the list; O(1). The element is not copied. The element must not be in any other list. + void pop_front(); ///< Removes an element from the back of the list; O(n). The element must be present, but is not deallocated. + + bool contains(const value_type& value) const; ///< Returns true if the given element is in the list; O(n). Equivalent to (locate(x) != end()). 
+ + iterator locate(value_type& value); ///< Converts a reference to an object in the list back to an iterator, or returns end() if it is not part of the list. O(n) + const_iterator locate(const value_type& value) const; ///< Converts a const reference to an object in the list back to a const iterator, or returns end() if it is not part of the list. O(n) + + iterator insert(iterator position, value_type& value); ///< Inserts an element before the element pointed to by the iterator. O(n) + iterator insert_after(iterator position, value_type& value); ///< Inserts an element after the element pointed to by the iterator. O(1) + + iterator erase(iterator position); ///< Erases the element pointed to by the iterator. O(n) + iterator erase_after(iterator position); ///< Erases the element after the element pointed to by the iterator. O(1) + + iterator erase(iterator first, iterator last); ///< Erases elements within the iterator range [first, last). O(n). + iterator erase_after(iterator before_first, iterator last); ///< Erases elements within the iterator range [before_first, last). O(1). + + void swap(this_type& x); ///< Swaps the contents of two intrusive lists; O(1). + + + void splice(iterator position, value_type& value); ///< Moves the given element into this list before the element pointed to by position; O(n). + ///< Required: x must be in some list or have first/next pointers that point it itself. + + void splice(iterator position, this_type& x); ///< Moves the contents of a list into this list before the element pointed to by position; O(n). + ///< Required: &x != this (same as std::list). + + void splice(iterator position, this_type& x, iterator xPosition); ///< Moves the given element pointed to i within the list x into the current list before + ///< the element pointed to by position; O(n). 
+ + void splice(iterator position, this_type& x, iterator first, iterator last); ///< Moves the range of elements [first, last) from list x into the current list before + ///< the element pointed to by position; O(n). + ///< Required: position must not be in [first, last). (same as std::list). + + void splice_after(iterator position, value_type& value); ///< Moves the given element into this list after the element pointed to by position; O(1). + ///< Required: x must be in some list or have first/next pointers that point it itself. + + void splice_after(iterator position, this_type& x); ///< Moves the contents of a list into this list after the element pointed to by position; O(n). + ///< Required: &x != this (same as std::list). + + void splice_after(iterator position, this_type& x, iterator xPrevious); ///< Moves the element after xPrevious to be after position. O(1). + ///< Required: &x != this (same as std::list). + + void splice_after(iterator position, this_type& x, iterator before_first, iterator before_last); ///< Moves the elements in the range of [before_first+1, before_last+1) to be after position. O(1). + + bool validate() const; + int validate_iterator(const_iterator i) const; + + }; // intrusive_slist + + + + + /////////////////////////////////////////////////////////////////////// + // IntrusiveSListIterator + /////////////////////////////////////////////////////////////////////// + + template + inline IntrusiveSListIterator::IntrusiveSListIterator() + { + #if EASTL_DEBUG + mpNode = NULL; + #endif + } + + template + inline IntrusiveSListIterator::IntrusiveSListIterator(pointer pNode) + : mpNode(pNode) + { + } + + template + inline IntrusiveSListIterator::IntrusiveSListIterator(const iterator& x) + : mpNode(x.mpNode) + { + } + + + /////////////////////////////////////////////////////////////////////// + // intrusive_slist_base + /////////////////////////////////////////////////////////////////////// + + // To do. 
+ + + /////////////////////////////////////////////////////////////////////// + // intrusive_slist + /////////////////////////////////////////////////////////////////////// + + // To do. + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + bool operator==(const intrusive_slist& a, const intrusive_slist& b) + { + // If we store an mSize member for intrusive_slist, we want to take advantage of it here. + typename intrusive_slist::const_iterator ia = a.begin(); + typename intrusive_slist::const_iterator ib = b.begin(); + typename intrusive_slist::const_iterator enda = a.end(); + typename intrusive_slist::const_iterator endb = b.end(); + + while((ia != enda) && (ib != endb) && (*ia == *ib)) + { + ++ia; + ++ib; + } + return (ia == enda) && (ib == endb); + } + + template + bool operator<(const intrusive_slist& a, const intrusive_slist& b) + { + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + template + bool operator!=(const intrusive_slist& a, const intrusive_slist& b) + { + return !(a == b); + } + + template + bool operator>(const intrusive_slist& a, const intrusive_slist& b) + { + return b < a; + } + + template + bool operator<=(const intrusive_slist& a, const intrusive_slist& b) + { + return !(b < a); + } + + template + bool operator>=(const intrusive_slist& a, const intrusive_slist& b) + { + return !(a < b); + } + + template + void swap(intrusive_slist& a, intrusive_slist& b) + { + a.swap(b); + } + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/include/EASTL/bonus/list_map.h b/include/EASTL/bonus/list_map.h new file mode 100644 index 0000000..8a080d6 
--- /dev/null +++ b/include/EASTL/bonus/list_map.h @@ -0,0 +1,932 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_LIST_MAP_H +#define EASTL_LIST_MAP_H + + +#include + + +namespace eastl +{ + + /// EASTL_MAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_LIST_MAP_DEFAULT_NAME + #define EASTL_LIST_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " list_map" // Unless the user overrides something, this is "EASTL list_map". + #endif + + /// EASTL_MAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_LIST_MAP_DEFAULT_ALLOCATOR + #define EASTL_LIST_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_LIST_MAP_DEFAULT_NAME) + #endif + + + /// list_map_data_base + /// + /// We define a list_map_data_base separately from list_map_data (below), because it + /// allows us to have non-templated operations, and it makes it so that the + /// list_map anchor node doesn't carry a T with it, which would waste space and + /// possibly lead to surprising the user due to extra Ts existing that the user + /// didn't explicitly create. The downside to all of this is that it makes debug + /// viewing of an list_map harder, given that the node pointers are of type + /// list_map_data_base and not list_map_data. + /// + struct list_map_data_base + { + list_map_data_base* mpNext; + list_map_data_base* mpPrev; + }; + + + /// list_map_data + /// + template + struct list_map_data : public list_map_data_base + { + typedef Value value_type; + + list_map_data(const value_type& value); + + value_type mValue; // This is a pair of key/value. 
+ }; + + + /// list_map_iterator + /// + template + struct list_map_iterator + { + typedef list_map_iterator this_type; + typedef list_map_iterator iterator; + typedef list_map_iterator const_iterator; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef list_map_data_base base_node_type; + typedef list_map_data node_type; + typedef Pointer pointer; + typedef Reference reference; + typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category; + + public: + node_type* mpNode; + + public: + list_map_iterator(); + list_map_iterator(const base_node_type* pNode); + list_map_iterator(const iterator& x); + + reference operator*() const; + pointer operator->() const; + + this_type& operator++(); + this_type operator++(int); + + this_type& operator--(); + this_type operator--(int); + + }; // list_map_iterator + + + /// use_value_first + /// + /// operator()(x) simply returns x.mValue.first. Used in list_map. + /// This is similar to eastl::use_first, however it assumes that the input type is an object + /// whose mValue is an eastl::pair, and the first value in the pair is the desired return. + /// + template + struct use_value_first + { + typedef Object argument_type; + typedef typename Object::value_type::first_type result_type; + + const result_type& operator()(const Object& x) const + { return x.mValue.first; } + }; + + + /// list_map + /// + /// Implements a map like container, which also provides functionality similar to a list. + /// + /// Note: Like a map, keys must still be unique. As such, push_back() and push_front() operations + /// return a bool indicating success, or failure if the entry's key is already in use. + /// + /// list_map is designed to improve performance for situations commonly implemented as: + /// A map, which must be iterated over to find the oldest entry, or purge expired entries. 
+ /// A list, which must be iterated over to remove a player's record when they sign off. + /// + /// list_map requires a little more memory per node than either a list or map alone, + /// and many of list_map's functions have a higher operational cost (CPU time) than their + /// counterparts in list and map. However, as the node count increases, list_map quickly outperforms + /// either a list or a map when find [by-index] and front/back type operations are required. + /// + /// In essence, list_map avoids O(n) iterations at the expense of additional costs to quick (O(1) and O(log n) operations: + /// push_front(), push_back(), pop_front() and pop_back() have O(log n) operation time, similar to map::insert(), rather than O(1) time like a list, + /// however, front() and back() maintain O(1) operation time. + /// + /// As a canonical example, consider a large backlog of player group invites, which are removed when either: + /// The invitation times out - in main loop: while( !listMap.empty() && listMap.front().IsExpired() ) { listMap.pop_front(); } + /// The player rejects the outstanding invitation - on rejection: iter = listMap.find(playerId); if (iter != listMap.end()) { listMap.erase(iter); } + /// + /// For a similar example, consider a high volume pending request container which must: + /// Time out old requests (similar to invites timing out above) + /// Remove requests once they've been handled (similar to rejecting invites above) + /// + /// For such usage patterns, the performance benefits of list_map become dramatic with + /// common O(n) operations once the node count rises to hundreds or more. + /// + /// When high performance is a priority, Containers with thousands of nodes or more + /// can quickly result in unacceptable performance when executing even infrequenty O(n) operations. + /// + /// In order to maintain strong performance, avoid iterating over list_map whenever possible. 
+ /// + /////////////////////////////////////////////////////////////////////// + /// find_as + /// In order to support the ability to have a tree of strings but + /// be able to do efficiently lookups via char pointers (i.e. so they + /// aren't converted to string objects), we provide the find_as + /// function. This function allows you to do a find with a key of a + /// type other than the tree's key type. See the find_as function + /// for more documentation on this. + /// + /////////////////////////////////////////////////////////////////////// + /// Pool allocation + /// If you want to make a custom memory pool for a list_map container, your pool + /// needs to contain items of type list_map::node_type. So if you have a memory + /// pool that has a constructor that takes the size of pool items and the + /// count of pool items, you would do this (assuming that MemoryPool implements + /// the Allocator interface): + /// typedef list_map, MemoryPool> WidgetMap; // Delare your WidgetMap type. + /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes. + /// WidgetMap myMap(&myPool); // Create a map that uses the pool. + /// + template , typename Allocator = EASTLAllocatorType> + class list_map + : protected rbtree >, Compare, Allocator, eastl::use_value_first > >, true, true> + { + public: + typedef rbtree >, Compare, Allocator, + eastl::use_value_first > >, true, true> base_type; + typedef list_map this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::key_type key_type; + typedef T mapped_type; + typedef typename eastl::pair value_type; // This is intentionally different from base_type::value_type + typedef value_type& reference; + typedef const value_type& const_reference; + typedef typename base_type::node_type node_type; // Despite the internal and external values being different, we're keeping the node type the same as the base + // in order to allow for pool allocation. 
See EASTL/map.h for more information. + typedef typename eastl::list_map_iterator iterator; // This is intentionally different from base_type::iterator + typedef typename eastl::list_map_iterator const_iterator; // This is intentionally different from base_type::const_iterator + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + typedef typename base_type::allocator_type allocator_type; + typedef typename eastl::pair insert_return_type; // This is intentionally removed, as list_map doesn't support insert() functions, in favor of list like push_back and push_front + typedef typename eastl::use_first extract_key; // This is intentionally different from base_type::extract_key + + using base_type::get_allocator; + using base_type::set_allocator; + using base_type::key_comp; + using base_type::empty; + using base_type::size; + + protected: + typedef typename eastl::list_map_data > internal_value_type; + + protected: + // internal base node, acting as the sentinel for list like behaviors + list_map_data_base mNode; + + public: + list_map(const allocator_type& allocator = EASTL_LIST_MAP_DEFAULT_ALLOCATOR); + list_map(const Compare& compare, const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR); + + // To do: Implement the following: + + //list_map(const this_type& x); + //list_map(this_type&& x); + //list_map(this_type&& x, const allocator_type& allocator); + //list_map(std::initializer_list ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_LIST_MAP_DEFAULT_ALLOCATOR); + + //template + //list_map(Iterator itBegin, Iterator itEnd); + + //this_type& operator=(const this_type& x); + //this_type& operator=(std::initializer_list ilist); + //this_type& operator=(this_type&& x); + + //void swap(this_type& x); + + public: + // iterators + iterator begin() EA_NOEXCEPT; + const_iterator begin() const EA_NOEXCEPT; + const_iterator cbegin() const EA_NOEXCEPT; + + iterator end() 
EA_NOEXCEPT; + const_iterator end() const EA_NOEXCEPT; + const_iterator cend() const EA_NOEXCEPT; + + reverse_iterator rbegin() EA_NOEXCEPT; + const_reverse_iterator rbegin() const EA_NOEXCEPT; + const_reverse_iterator crbegin() const EA_NOEXCEPT; + + reverse_iterator rend() EA_NOEXCEPT; + const_reverse_iterator rend() const EA_NOEXCEPT; + const_reverse_iterator crend() const EA_NOEXCEPT; + + public: + // List like methods + reference front(); + const_reference front() const; + + reference back(); + const_reference back() const; + + // push_front and push_back which takes in a key/value pair + bool push_front(const value_type& value); + bool push_back(const value_type& value); + + // push_front and push_back which take key and value separately, for convenience + bool push_front(const key_type& key, const mapped_type& value); + bool push_back(const key_type& key, const mapped_type& value); + + void pop_front(); + void pop_back(); + + public: + // Map like methods + iterator find(const key_type& key); + const_iterator find(const key_type& key) const; + + template + iterator find_as(const U& u, Compare2 compare2); + template + const_iterator find_as(const U& u, Compare2 compare2) const; + + size_type count(const key_type& key) const; + size_type erase(const key_type& key); + + public: + // Shared methods which are common to list and map + iterator erase(const_iterator position); + reverse_iterator erase(const_reverse_iterator position); + + void clear(); + void reset_lose_memory(); + + bool validate() const; + int validate_iterator(const_iterator i) const; + + public: + // list like functionality which is in consideration for implementation: + // iterator insert(const_iterator position, const value_type& value); + // void remove(const mapped_type& x); + + public: + // list like functionality which may be implemented, but is discouraged from implementation: + // due to the liklihood that they would require O(n) time to execute. 
+ // template + // void remove_if(Predicate); + // void reverse(); + // void sort(); + // template + // void sort(Compare compare); + + public: + // map like functionality which list_map does not support, due to abmiguity with list like functionality: + #if !defined(EA_COMPILER_NO_DELETED_FUNCTIONS) + template + list_map(InputIterator first, InputIterator last, const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR) = delete; + + insert_return_type insert(const value_type& value) = delete; + iterator insert(const_iterator position, const value_type& value) = delete; + + template + void insert(InputIterator first, InputIterator last) = delete; + + insert_return_type insert(const key_type& key) = delete; + + iterator erase(const_iterator first, const_iterator last) = delete; + reverse_iterator erase(reverse_iterator first, reverse_iterator last) = delete; + + void erase(const key_type* first, const key_type* last) = delete; + + iterator lower_bound(const key_type& key) = delete; + const_iterator lower_bound(const key_type& key) const = delete; + + iterator upper_bound(const key_type& key) = delete; + const_iterator upper_bound(const key_type& key) const = delete; + + eastl::pair equal_range(const key_type& key) = delete; + eastl::pair equal_range(const key_type& key) const = delete; + + mapped_type& operator[](const key_type& key) = delete; // Of map, multimap, set, and multimap, only map has operator[]. 
+ #endif + + public: + // list like functionality which list_map does not support, due to ambiguity with map like functionality: + #if 0 + reference push_front() = delete; + void* push_front_uninitialized() = delete; + + reference push_back() = delete; + void* push_back_uninitialized() = delete; + + iterator insert(const_iterator position) = delete; + + void insert(const_iterator position, size_type n, const value_type& value) = delete; + + template + void insert(const_iterator position, InputIterator first, InputIterator last) = delete; + + iterator erase(const_iterator first, const_iterator last) = delete; + reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last) = delete; + + void splice(const_iterator position, this_type& x) = delete + void splice(const_iterator position, this_type& x, const_iterator i) = delete; + void splice(const_iterator position, this_type& x, const_iterator first, const_iterator last) = delete; + + void merge(this_type& x) = delete; + + template + void merge(this_type& x, Compare compare) = delete; + + void unique() = delete; // Uniqueness is enforced by map functionality + + template + void unique(BinaryPredicate) = delete; // Uniqueness is enforced by map functionality + #endif + + }; // list_map + + + /////////////////////////////////////////////////////////////////////// + // list_map_data + /////////////////////////////////////////////////////////////////////// + + template + inline list_map_data::list_map_data(const Value& value) + : mValue(value) + { + mpNext = NULL; // GCC 4.8 is generating warnings about referencing these values in list_map::push_front unless we + mpPrev = NULL; // initialize them here. The compiler seems to be mistaken, as our code isn't actually using them unintialized. 
+ } + + + /////////////////////////////////////////////////////////////////////// + // list_map_iterator + /////////////////////////////////////////////////////////////////////// + + template + inline list_map_iterator::list_map_iterator() + : mpNode(NULL) + { + // Empty + } + + + template + inline list_map_iterator::list_map_iterator(const base_node_type* pNode) + : mpNode(static_cast(const_cast(pNode))) + { + // Empty + } + + + template + inline list_map_iterator::list_map_iterator(const iterator& x) + : mpNode(const_cast(x.mpNode)) + { + // Empty + } + + + template + inline typename list_map_iterator::reference + list_map_iterator::operator*() const + { + return mpNode->mValue; + } + + + template + inline typename list_map_iterator::pointer + list_map_iterator::operator->() const + { + return &mpNode->mValue; + } + + + template + inline typename list_map_iterator::this_type& + list_map_iterator::operator++() + { + mpNode = static_cast(mpNode->mpNext); + return *this; + } + + + template + inline typename list_map_iterator::this_type + list_map_iterator::operator++(int) + { + this_type temp(*this); + mpNode = static_cast(mpNode->mpNext); + return temp; + } + + + template + inline typename list_map_iterator::this_type& + list_map_iterator::operator--() + { + mpNode = static_cast(mpNode->mpPrev); + return *this; + } + + + template + inline typename list_map_iterator::this_type + list_map_iterator::operator--(int) + { + this_type temp(*this); + mpNode = static_cast(mpNode->mpPrev); + return temp; + } + + + // We provide additional template paremeters here to support comparisons between const and non-const iterators. + // See C++ defect report #179, or EASTL/list.h for more information. 
+ template + inline bool operator==(const list_map_iterator& a, + const list_map_iterator& b) + { + return a.mpNode == b.mpNode; + } + + + template + inline bool operator!=(const list_map_iterator& a, + const list_map_iterator& b) + { + return a.mpNode != b.mpNode; + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const list_map_iterator& a, + const list_map_iterator& b) + { + return a.mpNode != b.mpNode; + } + + + /////////////////////////////////////////////////////////////////////// + // list_map + /////////////////////////////////////////////////////////////////////// + + template + inline list_map::list_map(const allocator_type& allocator) + : base_type(allocator) + { + mNode.mpNext = &mNode; + mNode.mpPrev = &mNode; + } + + template + inline list_map::list_map(const Compare& compare, const allocator_type& allocator) + : base_type(compare, allocator) + { + mNode.mpNext = &mNode; + mNode.mpPrev = &mNode; + } + + template + inline typename list_map::iterator + list_map::begin() EA_NOEXCEPT + { + return iterator(mNode.mpNext); + } + + template + inline typename list_map::const_iterator + list_map::begin() const EA_NOEXCEPT + { + return const_iterator(mNode.mpNext); + } + + template + inline typename list_map::const_iterator + list_map::cbegin() const EA_NOEXCEPT + { + return const_iterator(mNode.mpNext); + } + + template + inline typename list_map::iterator + list_map::end() EA_NOEXCEPT + { + return iterator(&mNode); + } + + template + inline typename list_map::const_iterator + list_map::end() const EA_NOEXCEPT + { + return const_iterator(&mNode); + } + + template + inline typename list_map::const_iterator + list_map::cend() const EA_NOEXCEPT + { + return const_iterator(&mNode); + } + + template + inline typename list_map::reverse_iterator + list_map::rbegin() EA_NOEXCEPT + { + return 
reverse_iterator(&mNode); + } + + template + inline typename list_map::const_reverse_iterator + list_map::rbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(&mNode); + } + + template + inline typename list_map::const_reverse_iterator + list_map::crbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(&mNode); + } + + template + inline typename list_map::reverse_iterator + list_map::rend() EA_NOEXCEPT + { + return reverse_iterator(mNode.mpNext); + } + + template + inline typename list_map::const_reverse_iterator + list_map::rend() const EA_NOEXCEPT + { + return const_reverse_iterator(mNode.mpNext); + } + + template + inline typename list_map::const_reverse_iterator + list_map::crend() const EA_NOEXCEPT + { + return const_reverse_iterator(mNode.mpNext); + } + + template + inline typename list_map::reference + list_map::front() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(mNode.mpNext) == &mNode)) + EASTL_FAIL_MSG("list_map::front -- empty container"); + #else + // We allow the user to reference an empty container. + #endif + + return static_cast(mNode.mpNext)->mValue; + } + + template + inline typename list_map::const_reference + list_map::front() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(mNode.mpNext) == &mNode)) + EASTL_FAIL_MSG("list_map::front -- empty container"); + #else + // We allow the user to reference an empty container. + #endif + + return static_cast(mNode.mpNext)->mValue; + } + + template + inline typename list_map::reference + list_map::back() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(mNode.mpNext) == &mNode)) + EASTL_FAIL_MSG("list_map::back -- empty container"); + #else + // We allow the user to reference an empty container. 
+ #endif + + return static_cast(mNode.mpPrev)->mValue; + } + + template + inline typename list_map::const_reference + list_map::back() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(mNode.mpNext) == &mNode)) + EASTL_FAIL_MSG("list_map::back -- empty container"); + #else + // We allow the user to reference an empty container. + #endif + + return static_cast(mNode.mpPrev)->mValue; + } + + template + bool list_map::push_front(const value_type& value) + { + internal_value_type tempValue(value); + typename base_type::insert_return_type baseReturn = base_type::insert(tempValue); + + // Did the insert succeed? + if (baseReturn.second) + { + internal_value_type* pNode = &(*baseReturn.first); + + pNode->mpNext = mNode.mpNext; + pNode->mpPrev = &mNode; + + mNode.mpNext->mpPrev = pNode; + mNode.mpNext = pNode; + + return true; + } + else + { + return false; + } + } + + template + bool list_map::push_back(const value_type& value) + { + internal_value_type tempValue(value); + typename base_type::insert_return_type baseReturn = base_type::insert(tempValue); + + // Did the insert succeed? 
+ if (baseReturn.second) + { + internal_value_type* pNode = &(*baseReturn.first); + + pNode->mpPrev = mNode.mpPrev; + pNode->mpNext = &mNode; + + mNode.mpPrev->mpNext = pNode; + mNode.mpPrev = pNode; + + return true; + } + else + { + return false; + } + } + + template + bool list_map::push_front(const key_type& key, const mapped_type& value) + { + return push_front(eastl::make_pair(key, value)); + } + + template + bool list_map::push_back(const key_type& key, const mapped_type& value) + { + return push_back(eastl::make_pair(key, value)); + } + + template + void list_map::pop_front() + { + #if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(empty())) + EASTL_FAIL_MSG("list_map::pop_front -- empty container"); + #endif + + erase(static_cast(mNode.mpNext)->mValue.first); + } + + template + void list_map::pop_back() + { + #if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(empty())) + EASTL_FAIL_MSG("list_map::pop_back -- empty container"); + #endif + + erase(static_cast(mNode.mpPrev)->mValue.first); + } + + template + inline typename list_map::iterator + list_map::find(const key_type& key) + { + typename base_type::iterator baseIter = base_type::find(key); + if (baseIter != base_type::end()) + { + return iterator(&(*baseIter)); + } + else + { + return end(); + } + } + + template + inline typename list_map::const_iterator + list_map::find(const key_type& key) const + { + typename base_type::const_iterator baseIter = base_type::find(key); + if (baseIter != base_type::end()) + { + return const_iterator(&(*baseIter)); + } + else + { + return end(); + } + } + + template + template + inline typename list_map::iterator + list_map::find_as(const U& u, Compare2 compare2) + { + typename base_type::iterator baseIter = base_type::find_as(u, compare2); + if (baseIter != base_type::end()) + { + return iterator(&(*baseIter)); + } + else + { + return end(); + } + } + + template + template + inline typename list_map::const_iterator + list_map::find_as(const U& u, Compare2 compare2) const + { + 
typename base_type::const_iterator baseIter = base_type::find_as(u, compare2); + if (baseIter != base_type::end()) + { + return const_iterator(&(*baseIter)); + } + else + { + return end(); + } + } + + template + inline typename list_map::size_type + list_map::count(const key_type& key) const + { + const typename base_type::const_iterator it = base_type::find(key); + return (it != base_type::end()) ? 1 : 0; + } + + template + inline typename list_map::size_type + list_map::erase(const key_type& key) + { + typename base_type::iterator baseIter = base_type::find(key); + if (baseIter != base_type::end()) + { + internal_value_type* node = &(*baseIter); + + node->mpNext->mpPrev = node->mpPrev; + node->mpPrev->mpNext = node->mpNext; + + base_type::erase(baseIter); + + return 1; + } + return 0; + } + + template + inline typename list_map::iterator + list_map::erase(const_iterator position) + { + iterator posIter(position.mpNode); // Convert from const. + iterator eraseIter(posIter++); + erase(eraseIter->first); + return posIter; + } + + template + inline typename list_map::reverse_iterator + list_map::erase(const_reverse_iterator position) + { + return reverse_iterator(erase((++position).base())); + } + + template + void list_map::clear() + { + base_type::clear(); + + mNode.mpNext = &mNode; + mNode.mpPrev = &mNode; + } + + template + void list_map::reset_lose_memory() + { + base_type::reset_lose_memory(); + + mNode.mpNext = &mNode; + mNode.mpPrev = &mNode; + } + + template + bool list_map::validate() const + { + if (!base_type::validate()) + { + return false; + } + + size_type nodeCount(0); + list_map_data_base* node = mNode.mpNext; + while (node != &mNode) + { + internal_value_type* data = static_cast(node); + if (base_type::find(data->mValue.first) == base_type::end()) + { + return false; + } + node = node->mpNext; + ++nodeCount; + } + if (nodeCount != size()) + { + return false; + } + nodeCount = 0; + node = mNode.mpPrev; + while (node != &mNode) + { + 
internal_value_type* data = static_cast(node); + if (base_type::find(data->mValue.first) == base_type::end()) + { + return false; + } + node = node->mpPrev; + ++nodeCount; + } + if (nodeCount != size()) + { + return false; + } + + return true; + } + + template + int list_map::validate_iterator(const_iterator iter) const + { + for (const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if (temp == iter) + { + return (isf_valid | isf_current | isf_can_dereference); + } + } + + if (iter == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + +} // namespace eastl + + +#endif // Header include guard + + + + diff --git a/include/EASTL/bonus/lru_cache.h b/include/EASTL/bonus/lru_cache.h new file mode 100644 index 0000000..5c1c32e --- /dev/null +++ b/include/EASTL/bonus/lru_cache.h @@ -0,0 +1,407 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// lru_cache is a container that simplifies caching of objects in a map. +// Basically, you give the container a key, like a string, and the data you want. +// The container provides callback mechanisms to generate data if it's missing +// as well as delete data when it's purged from the cache. This container +// uses a least recently used method: whatever the oldest item is will be +// replaced with a new entry. +// +// Algorithmically, the container is a combination of a map and a list. +// The list stores the age of the entries by moving the entry to the head +// of the list on each access, either by a call to get() or to touch(). +// The map is just the map as one would expect. 
+// +// This is useful for caching off data that is expensive to generate, +// for example text to speech wave files that are dynamically generated, +// but that will need to be reused, as is the case in narration of menu +// entries as a user scrolls through the entries. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_LRUCACHE_H +#define EASTL_LRUCACHE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) +#pragma once +#endif + +#include +#include +#include + +namespace eastl +{ + /// EASTL_LRUCACHE_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_LRUCACHE_DEFAULT_NAME + #define EASTL_LRUCACHE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " lru_cache" // Unless the user overrides something, this is "EASTL lru_cache". + #endif + + + /// EASTL_LRUCACHE_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_LRUCACHE_DEFAULT_ALLOCATOR + #define EASTL_LRUCACHE_DEFAULT_ALLOCATOR allocator_type(EASTL_LRUCACHE_DEFAULT_NAME) + #endif + + /// lru_cache + /// + /// Implements a caching map based off of a key and data. + /// LRUList parameter is any container that guarantees the validity of its iterator even after a modification (e.g. list) + /// LRUMap is any mapping container that can map a key to some data. By default, we use unordered_set, but it might be better + /// to use hash_map or some other structure depending on your key/data combination. For example, you may want to swap the + /// map backing if using strings as keys or if the data objects are small. 
In any case, unordered_set is a good default and should + /// work well enough since the purpose of this class is to cache results of expensive, order of milliseconds, operations + /// + /// Algorithmic Performance (default data structures): + /// touch() -> O(1) + /// insert() / update(), get() / operator[] -> equivalent to unordered_set (O(1) on average, O(n) worst) + /// size() -> O(1) + /// + /// All accesses to a given key (insert, update, get) will push that key to most recently used. + /// If the data objects are shared between threads, it would be best to use a smartptr to manage the lifetime of the data. + /// as it could be removed from the cache while in use by another thread. + template , + typename map_type = eastl::unordered_map, eastl::hash, eastl::equal_to, Allocator > > + class lru_cache + { + public: + using key_type = Key; + using value_type = Value; + using allocator_type = Allocator; + using size_type = eastl_size_t; + using list_iterator = typename list_type::iterator; + using map_iterator = typename map_type::iterator; + using data_container_type = eastl::pair; + using iterator = typename map_type::iterator; + using const_iterator = typename map_type::const_iterator; + using this_type = lru_cache ; + using create_callback_type = eastl::function; + using delete_callback_type = eastl::function; + + /// lru_cache constructor + /// + /// Creates a Key / Value map that only stores size Value objects until it deletes them. + /// For complex objects or operations, the creator and deletor callbacks can be used. + /// This works just like a regular map object: on access, the Value will be created if it doesn't exist, returned otherwise. 
+ explicit lru_cache(size_type size, + const allocator_type &allocator = EASTL_LRUCACHE_DEFAULT_ALLOCATOR, + create_callback_type creator = nullptr, + delete_callback_type deletor = nullptr) + : m_list(allocator) + , m_map(allocator) + , m_capacity(size) + , m_create_callback(creator) + , m_delete_callback(deletor) + {} + + /// lru_cache destructor + /// + /// Iterates across every entry in the map and calls the deletor before calling the standard destructors + ~lru_cache() + { + // Destruct everything we have cached + for (auto &iter : m_map) + { + if (m_delete_callback) m_delete_callback(iter.second.first); + } + } + + lru_cache(this_type &) = delete; + this_type &operator=(const this_type&) = delete; + + /// insert + /// + /// insert key k with value v. + /// If key already exists, no change is made and the return value is false. + /// If the key doesn't exist, the data is added to the map and the return value is true. + bool insert(const key_type &k, const value_type &v) + { + if (m_map.find(k) == m_map.end()) + { + make_space(); + + m_list.push_front(k); + m_map[k] = data_container_type(v, m_list.begin()); + + return true; + } + else + { + return false; + } + } + + /// emplace + /// + /// Places a new object in place k created with args + /// If the key already exists, it is replaced. + template + void emplace(const key_type &k, Args&&... 
args) + { + make_space(); + + m_list.push_front(k); + m_map.emplace(k, data_container_type(eastl::forward(args)..., m_list.begin())); + } + + /// insert_or_assign + /// + /// Same as add, but replaces the data at key k, if it exists, with the new entry v + /// Note that the deletor for the old v will be called before it's replaced with the new value of v + void insert_or_assign(const key_type &k, const value_type &v) + { + auto iter = m_map.find(k); + + if (m_map.find(k) != m_map.end()) + { + assign(iter, v); + } + else + { + insert(k, v); + } + } + + /// contains + /// + /// Returns true if key k exists in the cache + bool contains(const key_type &k) const + { + return m_map.find(k) != m_map.end(); + } + + /// at + /// + /// Retrives the data for key k, not valid if k does not exist + eastl::optional at(const key_type &k) + { + auto iter = m_map.find(k); + + if (iter != m_map.end()) + { + return iter->second.first; + } + else + { + return eastl::nullopt; + } + } + + /// get + /// + /// Retrives the data for key k. If no data exists, it will be created by calling the + /// creator. + value_type &get(const key_type &k) + { + auto iter = m_map.find(k); + + // The entry exists in the cache + if (iter != m_map.end()) + { + touch(k); + return iter->second.first; + } + else // The entry doesn't exist in the cache, so create one + { + // Add the entry to the map + insert(k, m_create_callback ? m_create_callback(k) : value_type()); + + // return the new data + return m_map[k].first; + } + } + + /// Equivalent to get(k) + value_type &operator[](const key_type &k) { return get(k); } + + /// erase + /// + /// erases key k from the cache. + /// If k does not exist, returns false. If k exists, returns true. 
+ bool erase(const key_type &k) + { + auto iter = m_map.find(k); + + if (iter != m_map.end()) + { + m_list.erase(iter->second.second); + + // Delete the actual entry + map_erase(iter); + + return true; + } + + return false; + } + + /// erase_oldest + /// + /// Removes the oldest entry from the cache. + void erase_oldest() + { + auto key = m_list.back(); + m_list.pop_back(); + + // Delete the actual entry + auto iter = m_map.find(key); + map_erase(iter); + } + + /// touch + /// + /// Touches key k, marking it as most recently used. + /// If k does not exist, returns false. If the touch was successful, returns true. + bool touch(const key_type &k) + { + auto iter = m_map.find(k); + + if (iter != m_map.end()) + { + touch(iter); + return true; + } + + return false; + } + + /// touch + /// + /// Touches key at iterator iter, moving it to most recently used position + void touch(iterator &iter) + { + auto listRef = iter->second.second; + + m_list.erase(listRef); + m_list.push_front(iter->first); + iter->second.second = m_list.begin(); + } + + /// assign + /// + /// Updates key k with data v. + /// If key k does not exist, returns false and no changes are made. + /// If key k exists, existing data has its deletor called and key k's data is replaced with new v data + bool assign(const key_type &k, const value_type &v) + { + auto iter = m_map.find(k); + + if (iter != m_map.end()) + { + assign(iter, v); + return true; + } + + return false; + } + + /// assign + /// + /// Updates data at spot iter with data v. 
+ void assign(iterator &iter, const value_type &v) + { + if (m_delete_callback) + m_delete_callback(iter->second.first); + touch(iter); + iter->second.first = v; + } + + // standard container functions + iterator begin() EA_NOEXCEPT { return m_map.begin(); } + iterator end() EA_NOEXCEPT { return m_map.end(); } + iterator rbegin() EA_NOEXCEPT { return m_map.rbegin(); } + iterator rend() EA_NOEXCEPT { return m_map.rend(); } + const_iterator begin() const EA_NOEXCEPT { return m_map.begin(); } + const_iterator cbegin() const EA_NOEXCEPT { return m_map.cbegin(); } + const_iterator crbegin() const EA_NOEXCEPT { return m_map.crbegin(); } + const_iterator end() const EA_NOEXCEPT { return m_map.end(); } + const_iterator cend() const EA_NOEXCEPT { return m_map.cend(); } + const_iterator crend() const EA_NOEXCEPT { return m_map.crend(); } + + bool empty() const EA_NOEXCEPT { return m_map.empty(); } + size_type size() const EA_NOEXCEPT { return m_map.size(); } + size_type capacity() const EA_NOEXCEPT { return m_capacity; } + + void clear() EA_NOEXCEPT + { + // Since we have a delete callback, we want to reuse the trim function by cheating the max + // size to clear all the entries to avoid duplicating code. + auto old_max = m_capacity; + + m_capacity = 0; + trim(); + m_capacity = old_max; + } + + /// resize + /// + /// Resizes the cache. Can be used to either expand or contract the cache. + /// In the case of a contraction, the oldest entries will be evicted with their respective + /// deletors called before completing. 
+ void resize(size_type newSize) + { + m_capacity = newSize; + trim(); + } + + void setCreateCallback(create_callback_type callback) { m_create_callback = callback; } + void setDeleteCallback(delete_callback_type callback) { m_delete_callback = callback; } + + // EASTL extensions + const allocator_type& get_allocator() const EA_NOEXCEPT { return m_map.get_allocator(); } + allocator_type& get_allocator() EA_NOEXCEPT { return m_map.get_allocator(); } + void set_allocator(const allocator_type& allocator) { m_map.set_allocator(allocator); m_list.set_allocator(allocator); } + + /// Does not reset the callbacks + void reset_lose_memory() EA_NOEXCEPT { m_map.reset_lose_memory(); m_list.reset_lose_memory(); } + + private: + inline void map_erase(map_iterator pos) + { + if (m_delete_callback) + m_delete_callback(pos->second.first); + m_map.erase(pos); + } + + bool trim() + { + if (size() <= m_capacity) + { + return false; // No trim necessary + } + + // We need to trim + do + { + erase_oldest(); + } while (m_list.size() > m_capacity); + + return true; + } + + void make_space() + { + if (size() == m_capacity) + { + erase_oldest(); + } + } + + list_type m_list; + map_type m_map; + size_type m_capacity; + create_callback_type m_create_callback; + delete_callback_type m_delete_callback; + }; +} + + + +#endif diff --git a/include/EASTL/bonus/ring_buffer.h b/include/EASTL/bonus/ring_buffer.h new file mode 100644 index 0000000..fcd8fd2 --- /dev/null +++ b/include/EASTL/bonus/ring_buffer.h @@ -0,0 +1,1581 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// A ring buffer is a FIFO (first-in, first-out) container which acts +// much like a queue. 
The difference is that a ring buffer is implemented +// via chasing pointers around a given container instead of like queue +// adds to the writes to the end of the container are reads from the begin. +// The benefit of a ring buffer is that memory allocations don't occur +// and new elements are neither added nor removed from the container. +// Elements in the container are simply assigned values in circles around +// the container. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_RING_BUFFER_H +#define EASTL_RING_BUFFER_H + + +#include +#include +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_RING_BUFFER_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_RING_BUFFER_DEFAULT_NAME + #define EASTL_RING_BUFFER_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " ring_buffer" // Unless the user overrides something, this is "EASTL ring_buffer". + #endif + + /// EASTL_RING_BUFFER_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_RING_BUFFER_DEFAULT_ALLOCATOR + #define EASTL_RING_BUFFER_DEFAULT_ALLOCATOR allocator_type(EASTL_RING_BUFFER_DEFAULT_NAME) + #endif + + + /// ring_buffer_iterator + /// + /// We force this iterator to act like a random access iterator even if + /// the underlying container doesn't support random access iteration. + /// Any BidirectionalIterator can be a RandomAccessIterator; it just + /// might be inefficient in some cases. 
+ /// + template + struct ring_buffer_iterator + { + public: + typedef ring_buffer_iterator this_type; + typedef T value_type; + typedef Pointer pointer; + typedef Reference reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator container_iterator; + typedef typename Container::const_iterator container_const_iterator; + typedef ring_buffer_iterator iterator; + typedef ring_buffer_iterator const_iterator; + typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category; + + public: + Container* mpContainer; + container_iterator mContainerIterator; + + public: + ring_buffer_iterator(); + ring_buffer_iterator(Container* pContainer, const container_iterator& containerIterator); + ring_buffer_iterator(const iterator& x); + + ring_buffer_iterator& operator=(const iterator& x); + + reference operator*() const; + pointer operator->() const; + + this_type& operator++(); + this_type operator++(int); + + this_type& operator--(); + this_type operator--(int); + + this_type& operator+=(difference_type n); + this_type& operator-=(difference_type n); + + this_type operator+(difference_type n) const; + this_type operator-(difference_type n) const; + + protected: + void increment(difference_type n, EASTL_ITC_NS::input_iterator_tag); + void increment(difference_type n, EASTL_ITC_NS::random_access_iterator_tag); + + }; // struct ring_buffer_iterator + + + + /// ring_buffer + /// + /// Implements a ring buffer via a given container type, which would + /// typically be a vector or array, though any container which supports + /// bidirectional iteration would work. + /// + /// A ring buffer is a FIFO (first-in, first-out) container which acts + /// much like a queue. 
The difference is that a ring buffer is implemented + /// via chasing pointers around a container and moving the read and write + /// positions forward (and possibly wrapping around) as the container is + /// read and written via pop_front and push_back. + /// + /// The benefit of a ring buffer is that memory allocations don't occur + /// and new elements are neither added nor removed from the container. + /// Elements in the container are simply assigned values in circles around + /// the container. + /// + /// ring_buffer is different from other containers -- including adapter + /// containers -- in how iteration is done. Iteration of a ring buffer + /// starts at the current begin position, proceeds to the end of the underlying + /// container, and continues at the begin of the underlying container until + /// the ring buffer's current end position. Thus a ring_buffer does + /// indeed have a begin and an end, though the values of begin and end + /// chase each other around the container. An empty ring_buffer is one + /// in which end == begin, and a full ring_buffer is one in which + /// end + 1 == begin. + /// + /// Example of a ring buffer layout, where + indicates queued items: + /// ++++++++++--------------------------------+++++++++ + /// ^ ^ + /// end begin + /// + /// Empty ring buffer: + /// --------------------------------------------------- + /// ^ + /// begin / end + /// + /// Full ring buffer. Note that one item is necessarily unused; it is + /// analagous to a '\0' at the end of a C string: + /// +++++++++++++++++++++++++++++++++++++++++-+++++++++ + /// ^^ + /// end begin + /// + /// A push_back operation on a ring buffer assigns the new value to end. + /// If there is no more space in the buffer, this will result in begin + /// being overwritten and the begin position being moved foward one position. + /// The user can use the full() function to detect this condition. 
+ /// Note that elements in a ring buffer are not created or destroyed as + /// their are added and removed; they are merely assigned. Only on + /// container construction and destruction are any elements created and + /// destroyed. + /// + /// The ring buffer can be used in either direction. By this we mean that + /// you can use push_back to add items and pop_front to remove them; or you can + /// use push_front to add items and pop_back to remove them. You aren't + /// limited to these operations; you can push or pop from either side + /// arbitrarily and you can insert or erase anywhere in the container. + /// + /// The ring buffer requires the user to specify a Container type, which + /// by default is vector. However, any container with bidirectional iterators + /// will work, such as list, deque, string or any of the fixed_* versions + /// of these containers, such as fixed_string. Since ring buffer works via copying + /// elements instead of allocating and freeing nodes, inserting in the middle + /// of a ring buffer based on list (instead of vector) is no more efficient. + /// + /// To use the ring buffer, its container must be resized to the desired + /// ring buffer size. Changing the size of a ring buffer may cause ring + /// buffer iterators to invalidate. + /// + /// An alternative to using a ring buffer is to use a list with a user-created + /// node pool and custom allocator. There are various tradeoffs that result from this. + /// + /// Example usage: + /// ring_buffer< int, list > rb(100); + /// rb.push_back(1); + /// + /// Example usage: + /// // Example of creating an on-screen debug log that shows 16 + /// // strings at a time and scrolls older strings away. + /// + /// // Create ring buffer of 16 strings. + /// ring_buffer< string, vector > debugLogText(16); + /// + /// // Reserve 128 chars for each line. This can make it so that no + /// // runtime memory allocations occur. 
+ /// for(vector::iterator it = debugLogText.get_container().begin(), + /// itEnd = debugLogText.get_container().end(); it != itEnd; ++it) + /// { + /// (*it).reserve(128); + /// } + /// + /// // Add a new string, using push_front() and front() instead of + /// // push_front(str) in order to avoid creating a temporary str. + /// debugLogText.push_front(); + /// debugLogText.front() = "Player fired weapon"; + /// + template , typename Allocator = typename Container::allocator_type> + class ring_buffer + { + public: + typedef ring_buffer this_type; + typedef Container container_type; + typedef Allocator allocator_type; + + typedef typename Container::value_type value_type; + typedef typename Container::reference reference; + typedef typename Container::const_reference const_reference; + typedef typename Container::size_type size_type; + typedef typename Container::difference_type difference_type; + typedef typename Container::iterator container_iterator; + typedef typename Container::const_iterator container_const_iterator; + typedef ring_buffer_iterator iterator; + typedef ring_buffer_iterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + + public: // We declare public so that global comparison operators can be implemented without adding an inline level and without tripping up GCC 2.x friend declaration failures. GCC (through at least v4.0) is poor at inlining and performance wins over correctness. + Container c; // We follow the naming convention established for stack, queue, priority_queue and name this 'c'. This variable must always have a size of at least 1, as even an empty ring_buffer has an unused terminating element. + + protected: + container_iterator mBegin; // We keep track of where our begin and end are by using Container iterators. 
+ container_iterator mEnd; + size_type mSize; + + public: + // There currently isn't a ring_buffer constructor that specifies an initial size, unlike other containers. + explicit ring_buffer(size_type cap = 0); // Construct with an initial capacity (but size of 0). + explicit ring_buffer(size_type cap, const allocator_type& allocator); + explicit ring_buffer(const Container& x); + explicit ring_buffer(const allocator_type& allocator); + ring_buffer(const this_type& x); + ring_buffer(this_type&& x); + ring_buffer(this_type&& x, const allocator_type& allocator); + ring_buffer(std::initializer_list ilist, const allocator_type& allocator = EASTL_RING_BUFFER_DEFAULT_ALLOCATOR); // This function sets the capacity to be equal to the size of the initializer list. + + // No destructor necessary. Default will do. + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + template + void assign(InputIterator first, InputIterator last); + + void swap(this_type& x); + + iterator begin() EA_NOEXCEPT; + const_iterator begin() const EA_NOEXCEPT; + const_iterator cbegin() const EA_NOEXCEPT; + + iterator end() EA_NOEXCEPT; + const_iterator end() const EA_NOEXCEPT; + const_iterator cend() const EA_NOEXCEPT; + + reverse_iterator rbegin() EA_NOEXCEPT; + const_reverse_iterator rbegin() const EA_NOEXCEPT; + const_reverse_iterator crbegin() const EA_NOEXCEPT; + + reverse_iterator rend() EA_NOEXCEPT; + const_reverse_iterator rend() const EA_NOEXCEPT; + const_reverse_iterator crend() const EA_NOEXCEPT; + + bool empty() const EA_NOEXCEPT; + bool full() const EA_NOEXCEPT; + size_type size() const EA_NOEXCEPT; + size_type capacity() const EA_NOEXCEPT; + + void resize(size_type n); + void set_capacity(size_type n); // Sets the capacity to the given value, including values less than the current capacity. Adjusts the size downward if n < size, by throwing out the oldest elements in the buffer. 
+ void reserve(size_type n); // Reserve a given capacity. Doesn't decrease the capacity; it only increases it (for compatibility with other containers' behavior). + + reference front(); + const_reference front() const; + + reference back(); + const_reference back() const; + + void push_back(const value_type& value); + reference push_back(); + + void push_front(const value_type& value); + reference push_front(); + + void pop_back(); + void pop_front(); + + reference operator[](size_type n); + const_reference operator[](size_type n) const; + + // To consider: + // size_type read(value_type* pDestination, size_type nCount); + // size_type read(iterator** pPosition1, iterator** pPosition2, size_type& nCount1, size_type& nCount2); + + /* To do: + template + void emplace_front(Args&&... args); + + template + void emplace_back(Args&&... args); + + template + iterator emplace(const_iterator position, Args&&... args); + */ + + iterator insert(const_iterator position, const value_type& value); + void insert(const_iterator position, size_type n, const value_type& value); + void insert(const_iterator position, std::initializer_list ilist); + + template + void insert(const_iterator position, InputIterator first, InputIterator last); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + reverse_iterator erase(const_reverse_iterator position); + reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last); + + void clear(); + + container_type& get_container(); + const container_type& get_container() const; + + bool validate() const; + int validate_iterator(const_iterator i) const; + + protected: + //size_type DoGetSize(EASTL_ITC_NS::input_iterator_tag) const; + //size_type DoGetSize(EASTL_ITC_NS::random_access_iterator_tag) const; + + }; // class ring_buffer + + + + + /////////////////////////////////////////////////////////////////////// + // ring_buffer_iterator + 
/////////////////////////////////////////////////////////////////////// + + template + ring_buffer_iterator::ring_buffer_iterator() + : mpContainer(NULL), mContainerIterator() + { + } + + + template + ring_buffer_iterator::ring_buffer_iterator(Container* pContainer, const container_iterator& containerIterator) + : mpContainer(pContainer), mContainerIterator(containerIterator) + { + } + + + template + ring_buffer_iterator::ring_buffer_iterator(const iterator& x) + : mpContainer(x.mpContainer), mContainerIterator(x.mContainerIterator) + { + } + + + template + ring_buffer_iterator& + ring_buffer_iterator::operator=(const iterator& x) + { + mpContainer = x.mpContainer; + mContainerIterator = x.mContainerIterator; + return *this; + } + + template + typename ring_buffer_iterator::reference + ring_buffer_iterator::operator*() const + { + return *mContainerIterator; + } + + + template + typename ring_buffer_iterator::pointer + ring_buffer_iterator::operator->() const + { + return &*mContainerIterator; + } + + + template + typename ring_buffer_iterator::this_type& + ring_buffer_iterator::operator++() + { + if(EASTL_UNLIKELY(++mContainerIterator == mpContainer->end())) + mContainerIterator = mpContainer->begin(); + return *this; + } + + + template + typename ring_buffer_iterator::this_type + ring_buffer_iterator::operator++(int) + { + const this_type temp(*this); + if(EASTL_UNLIKELY(++mContainerIterator == mpContainer->end())) + mContainerIterator = mpContainer->begin(); + return temp; + } + + + template + typename ring_buffer_iterator::this_type& + ring_buffer_iterator::operator--() + { + if(EASTL_UNLIKELY(mContainerIterator == mpContainer->begin())) + mContainerIterator = mpContainer->end(); + --mContainerIterator; + return *this; + } + + + template + typename ring_buffer_iterator::this_type + ring_buffer_iterator::operator--(int) + { + const this_type temp(*this); + if(EASTL_UNLIKELY(mContainerIterator == mpContainer->begin())) + mContainerIterator = mpContainer->end(); + 
--mContainerIterator; + return temp; + } + + + template + typename ring_buffer_iterator::this_type& + ring_buffer_iterator::operator+=(difference_type n) + { + typedef typename eastl::iterator_traits::iterator_category IC; + increment(n, IC()); + return *this; + } + + + template + typename ring_buffer_iterator::this_type& + ring_buffer_iterator::operator-=(difference_type n) + { + typedef typename eastl::iterator_traits::iterator_category IC; + increment(-n, IC()); + return *this; + } + + + template + typename ring_buffer_iterator::this_type + ring_buffer_iterator::operator+(difference_type n) const + { + return this_type(*this).operator+=(n); + } + + + template + typename ring_buffer_iterator::this_type + ring_buffer_iterator::operator-(difference_type n) const + { + return this_type(*this).operator+=(-n); + } + + + template + void ring_buffer_iterator::increment(difference_type n, EASTL_ITC_NS::input_iterator_tag) + { + // n cannot be negative, as input iterators don't support reverse iteration. + while(n-- > 0) + operator++(); + } + + + template + void ring_buffer_iterator::increment(difference_type n, EASTL_ITC_NS::random_access_iterator_tag) + { + // We make the assumption here that the user is incrementing from a valid + // starting position to a valid ending position. Thus *this + n yields a + // valid iterator, including if n happens to be a negative value. + + if(n >= 0) + { + const difference_type d = mpContainer->end() - mContainerIterator; + + if(n < d) + mContainerIterator += n; + else + mContainerIterator = mpContainer->begin() + (n - d); + } + else + { + // Recall that n and d here will be negative and so the logic here works as intended. + const difference_type d = mpContainer->begin() - mContainerIterator; + + if(n >= d) + mContainerIterator += n; + else + mContainerIterator = mpContainer->end() + (n - d); + } + } + + + // Random access iterators must support operator + and operator -. 
+ // You can only add an integer to an iterator, and you cannot add two iterators. + template + inline ring_buffer_iterator + operator+(ptrdiff_t n, const ring_buffer_iterator& x) + { + return x + n; // Implement (n + x) in terms of (x + n). + } + + + // You can only add an integer to an iterator, but you can subtract two iterators. + template + inline typename ring_buffer_iterator::difference_type + operator-(const ring_buffer_iterator& a, + const ring_buffer_iterator& b) + { + typedef typename ring_buffer_iterator::difference_type difference_type; + + // To do: If container_iterator is a random access iterator, then do a simple calculation. + // Otherwise, we have little choice but to iterate from a to b and count as we go. + // See the ring_buffer::size function for an implementation of this. + + // Iteration implementation: + difference_type d = 0; + + for(ring_buffer_iterator temp(b); temp != a; ++temp) + ++d; + + return d; + } + + + // The C++ defect report #179 requires that we support comparisons between const and non-const iterators. + // Thus we provide additional template paremeters here to support this. The defect report does not + // require us to support comparisons between reverse_iterators and const_reverse_iterators. + template + inline bool operator==(const ring_buffer_iterator& a, + const ring_buffer_iterator& b) + { + // Perhaps we should compare the container pointer as well. + // However, for valid iterators this shouldn't be necessary. + return a.mContainerIterator == b.mContainerIterator; + } + + + template + inline bool operator!=(const ring_buffer_iterator& a, + const ring_buffer_iterator& b) + { + // Perhaps we should compare the container pointer as well. + // However, for valid iterators this shouldn't be necessary. + return !(a.mContainerIterator == b.mContainerIterator); + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. 
This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const ring_buffer_iterator& a, + const ring_buffer_iterator& b) + { + return !(a.mContainerIterator == b.mContainerIterator); + } + + + + + /////////////////////////////////////////////////////////////////////// + // ring_buffer + /////////////////////////////////////////////////////////////////////// + + template + ring_buffer::ring_buffer(size_type cap) + : c() // Default construction with default allocator for the container. + { + // To do: This code needs to be amended to deal with possible exceptions + // that could occur during the resize call below. + + // We add one because the element at mEnd is necessarily unused. + c.resize(cap + 1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function. + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + } + + + template + ring_buffer::ring_buffer(size_type cap, const allocator_type& allocator) + : c(allocator) + { + // To do: This code needs to be amended to deal with possible exceptions + // that could occur during the resize call below. + + // We add one because the element at mEnd is necessarily unused. + c.resize(cap + 1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function. + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + } + + + template + ring_buffer::ring_buffer(const Container& x) + : c(x) // This copies elements from x, but unless the user is doing some tricks, the only thing that matters is that c.size() == x.size(). + { + // To do: This code needs to be amended to deal with possible exceptions + // that could occur during the resize call below. 
+ if(c.empty()) + c.resize(1); + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + } + + + template + ring_buffer::ring_buffer(const allocator_type& allocator) + : c(allocator) + { + // To do: This code needs to be amended to deal with possible exceptions + // that could occur during the resize call below. + + // We add one because the element at mEnd is necessarily unused. + c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function. + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + } + + + template + ring_buffer::ring_buffer(const this_type& x) + : c(x.c) + { + mBegin = c.begin(); + mEnd = mBegin; + mSize = x.mSize; + + eastl::advance(mBegin, eastl::distance(const_cast(x).c.begin(), x.mBegin)); // We can do a simple distance algorithm here, as there will be no wraparound. + eastl::advance(mEnd, eastl::distance(const_cast(x).c.begin(), x.mEnd)); + } + + template + ring_buffer::ring_buffer(this_type&& x) + : c() // Default construction with default allocator for the container. + { + c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function. + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + + swap(x); // We are leaving x in an unusual state by swapping default-initialized members with it, as it won't be usable and can be only destructible. + } + + template + ring_buffer::ring_buffer(this_type&& x, const allocator_type& allocator) + : c(allocator) + { + c.resize(1); // Possibly we could construct 'c' with size, but c may not have such a ctor, though we rely on it having a resize function. + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + + if(c.get_allocator() == x.c.get_allocator()) + swap(x); // We are leaving x in an unusual state by swapping default-initialized members with it, as it won't be usable and can be only destructible. 
+ else + operator=(x); + } + + + template + ring_buffer::ring_buffer(std::initializer_list ilist, const allocator_type& allocator) + : c(allocator) + { + c.resize((eastl_size_t)ilist.size() + 1); + mBegin = c.begin(); + mEnd = mBegin; + mSize = 0; + + assign(ilist.begin(), ilist.end()); + } + + + template + typename ring_buffer::this_type& + ring_buffer::operator=(const this_type& x) + { + if(&x != this) + { + c = x.c; + + mBegin = c.begin(); + mEnd = mBegin; + mSize = x.mSize; + + eastl::advance(mBegin, eastl::distance(const_cast(x).c.begin(), x.mBegin)); // We can do a simple distance algorithm here, as there will be no wraparound. + eastl::advance(mEnd, eastl::distance(const_cast(x).c.begin(), x.mEnd)); + } + + return *this; + } + + + template + typename ring_buffer::this_type& + ring_buffer::operator=(this_type&& x) + { + swap(x); + return *this; + } + + + template + typename ring_buffer::this_type& + ring_buffer::operator=(std::initializer_list ilist) + { + assign(ilist.begin(), ilist.end()); + return *this; + } + + + template + template + void ring_buffer::assign(InputIterator first, InputIterator last) + { + // To consider: We can make specializations of this for pointer-based + // iterators to PODs and turn the action into a memcpy. + clear(); + + for(; first != last; ++first) + push_back(*first); + } + + + template + void ring_buffer::swap(this_type& x) + { + if(&x != this) + { + const difference_type dBegin = eastl::distance(c.begin(), mBegin); // We can do a simple distance algorithm here, as there will be no wraparound. + const difference_type dEnd = eastl::distance(c.begin(), mEnd); + + const difference_type dxBegin = eastl::distance(x.c.begin(), x.mBegin); + const difference_type dxEnd = eastl::distance(x.c.begin(), x.mEnd); + + eastl::swap(c, x.c); + eastl::swap(mSize, x.mSize); + + mBegin = c.begin(); + eastl::advance(mBegin, dxBegin); // We can do a simple advance algorithm here, as there will be no wraparound. 
+ + mEnd = c.begin(); + eastl::advance(mEnd, dxEnd); + + x.mBegin = x.c.begin(); + eastl::advance(x.mBegin, dBegin); + + x.mEnd = x.c.begin(); + eastl::advance(x.mEnd, dEnd); + } + } + + + template + typename ring_buffer::iterator + ring_buffer::begin() EA_NOEXCEPT + { + return iterator(&c, mBegin); + } + + + template + typename ring_buffer::const_iterator + ring_buffer::begin() const EA_NOEXCEPT + { + return const_iterator(const_cast(&c), mBegin); // We trust that the const_iterator will respect const-ness. + } + + + template + typename ring_buffer::const_iterator + ring_buffer::cbegin() const EA_NOEXCEPT + { + return const_iterator(const_cast(&c), mBegin); // We trust that the const_iterator will respect const-ness. + } + + + template + typename ring_buffer::iterator + ring_buffer::end() EA_NOEXCEPT + { + return iterator(&c, mEnd); + } + + + template + typename ring_buffer::const_iterator + ring_buffer::end() const EA_NOEXCEPT + { + return const_iterator(const_cast(&c), mEnd); // We trust that the const_iterator will respect const-ness. + } + + + template + typename ring_buffer::const_iterator + ring_buffer::cend() const EA_NOEXCEPT + { + return const_iterator(const_cast(&c), mEnd); // We trust that the const_iterator will respect const-ness. 
+ } + + + template + typename ring_buffer::reverse_iterator + ring_buffer::rbegin() EA_NOEXCEPT + { + return reverse_iterator(iterator(&c, mEnd)); + } + + + template + typename ring_buffer::const_reverse_iterator + ring_buffer::rbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(const_cast(&c), mEnd)); + } + + + template + typename ring_buffer::const_reverse_iterator + ring_buffer::crbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(const_cast(&c), mEnd)); + } + + + template + typename ring_buffer::reverse_iterator + ring_buffer::rend() EA_NOEXCEPT + { + return reverse_iterator(iterator(&c, mBegin)); + } + + + template + typename ring_buffer::const_reverse_iterator + ring_buffer::rend() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(const_cast(&c), mBegin)); + } + + + template + typename ring_buffer::const_reverse_iterator + ring_buffer::crend() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(const_cast(&c), mBegin)); + } + + + template + bool ring_buffer::empty() const EA_NOEXCEPT + { + return mBegin == mEnd; + } + + + template + bool ring_buffer::full() const EA_NOEXCEPT + { + // Implementation that relies on c.size() being a fast operation: + // return mSize == (c.size() - 1); // (c.size() - 1) == capacity(); we are attempting to reduce function calls. + + // Version that has constant speed guarantees, but is still pretty fast. + const_iterator afterEnd(end()); + ++afterEnd; + return afterEnd.mContainerIterator == mBegin; + } + + + template + typename ring_buffer::size_type + ring_buffer::size() const EA_NOEXCEPT + { + return mSize; + + // Alternatives: + // return eastl::distance(begin(), end()); + // return end() - begin(); // This is more direct than using distance(). + //typedef typename eastl::iterator_traits::iterator_category IC; + //return DoGetSize(IC()); // This is more direct than using iterator math. 
+ } + + + /* + template + typename ring_buffer::size_type + ring_buffer::DoGetSize(EASTL_ITC_NS::input_iterator_tag) const + { + // We could alternatively just use eastl::distance() here, but we happen to + // know that such code would boil down to what we have here, and we might + // as well remove function calls where possible. + difference_type d = 0; + + for(const_iterator temp(begin()), tempEnd(end()); temp != tempEnd; ++temp) + ++d; + + return (size_type)d; + } + */ + + /* + template + typename ring_buffer::size_type + ring_buffer::DoGetSize(EASTL_ITC_NS::random_access_iterator_tag) const + { + // A simpler but less efficient implementation fo this function would be: + // return eastl::distance(mBegin, mEnd); + // + // The calculation of distance here takes advantage of the fact that random + // access iterators' distances can be calculated by simple pointer calculation. + // Thus the code below boils down to a few subtractions when using a vector, + // string, or array as the Container type. + // + const difference_type dBegin = eastl::distance(const_cast(c).begin(), mBegin); // const_cast here solves a little compiler + const difference_type dEnd = eastl::distance(const_cast(c).begin(), mEnd); // argument matching problem. + + if(dEnd >= dBegin) + return dEnd - dBegin; + + return c.size() - (dBegin - dEnd); + } + */ + + + namespace Internal + { + /////////////////////////////////////////////////////////////// + // has_overflow_allocator + // + // returns true_type when the specified container type is an + // eastl::fixed_* container and therefore has an overflow + // allocator type. + // + template + struct has_overflow_allocator : false_type {}; + + template + struct has_overflow_allocator().get_overflow_allocator())>> : true_type {}; + + + /////////////////////////////////////////////////////////////// + // GetFixedContainerCtorAllocator + // + // eastl::fixed_* containers are only constructible via their + // overflow allocator type. 
This helper select the appropriate + // allocator from the specified container. + // + template ()()> + struct GetFixedContainerCtorAllocator + { + auto& operator()(Container& c) { return c.get_overflow_allocator(); } + }; + + template + struct GetFixedContainerCtorAllocator + { + auto& operator()(Container& c) { return c.get_allocator(); } + }; + } // namespace Internal + + + /////////////////////////////////////////////////////////////// + // ContainerTemporary + // + // Helper type which prevents utilizing excessive stack space + // when creating temporaries when swapping/copying the underlying + // ring_buffer container type. + // + template = EASTL_MAX_STACK_USAGE)> + struct ContainerTemporary + { + Container mContainer; + + ContainerTemporary(Container& parentContainer) + : mContainer(Internal::GetFixedContainerCtorAllocator{}(parentContainer)) + { + } + + Container& get() { return mContainer; } + }; + + template + struct ContainerTemporary + { + typename Container::allocator_type* mAllocator; + Container* mContainer; + + ContainerTemporary(Container& parentContainer) + : mAllocator(&parentContainer.get_allocator()) + , mContainer(new (mAllocator->allocate(sizeof(Container))) Container) + { + } + + ~ContainerTemporary() + { + mContainer->~Container(); + mAllocator->deallocate(mContainer, sizeof(Container)); + } + + Container& get() { return *mContainer; } + }; + + + template + void ring_buffer::resize(size_type n) + { + // Note that if n > size(), we just move the end position out to + // the begin + n, with the data being the old end and the new end + // being stale values from the past. This is by design, as the concept + // of arbitrarily resizing a ring buffer like this is currently deemed + // to be vague in what it intends to do. We can only assume that the + // user knows what he is doing and will deal with the stale values. 
+ EASTL_ASSERT(c.size() >= 1); + const size_type cap = (c.size() - 1); + + mSize = n; + + if(n > cap) // If we need to grow in capacity... + { + // Given that a growing operation will always result in memory allocation, + // we currently implement this function via the usage of a temp container. + // This makes for a simple implementation, but in some cases it is less + // efficient. In particular, if the container is a node-based container like + // a (linked) list, this function would be faster if we simply added nodes + // to ourself. We would do this by inserting the nodes to be after end() + // and adjusting the begin() position if it was after end(). + + // To do: This code needs to be amended to deal with possible exceptions + // that could occur during the resize call below. + + ContainerTemporary cTemp(c); + cTemp.get().resize(n + 1); + eastl::copy(begin(), end(), cTemp.get().begin()); + eastl::swap(c, cTemp.get()); + + mBegin = c.begin(); + mEnd = mBegin; + eastl::advance(mEnd, n); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around. + } + else // We could do a check here for n != size(), but that would be costly and people don't usually resize things to their same size. + { + mEnd = mBegin; + + // eastl::advance(mEnd, n); // We *cannot* use this because there may be wraparound involved. + + // To consider: Possibly we should implement some more detailed logic to optimize the code here. + // We'd need to do different behaviour dending on whether the container iterator type is a + // random access iterator or otherwise. + + while(n--) + { + if(EASTL_UNLIKELY(++mEnd == c.end())) + mEnd = c.begin(); + } + } + } + + + template + typename ring_buffer::size_type + ring_buffer::capacity() const EA_NOEXCEPT + { + EASTL_ASSERT(c.size() >= 1); // This is required because even an empty ring_buffer has one unused termination element, somewhat like a \0 at the end of a C string. 
+ + return (c.size() - 1); // Need to subtract one because the position at mEnd is unused. + } + + + template + void ring_buffer::set_capacity(size_type n) + { + const size_type capacity = (c.size() - 1); + + if(n != capacity) // If we need to change capacity... + { + ContainerTemporary cTemp(c); + cTemp.get().resize(n + 1); + + iterator itCopyBegin = begin(); + + if(n < mSize) // If we are shrinking the capacity, to less than our size... + { + eastl::advance(itCopyBegin, mSize - n); + mSize = n; + } + + eastl::copy(itCopyBegin, end(), cTemp.get().begin()); // The begin-end range may in fact be larger than n, in which case values will be overwritten. + eastl::swap(c, cTemp.get()); + + mBegin = c.begin(); + mEnd = mBegin; + eastl::advance(mEnd, mSize); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around. + } + } + + + template + void ring_buffer::reserve(size_type n) + { + // We follow the pattern of vector and only do something if n > capacity. + EASTL_ASSERT(c.size() >= 1); + + if(n > (c.size() - 1)) // If we need to grow in capacity... // (c.size() - 1) == capacity(); we are attempting to reduce function calls. + { + ContainerTemporary cTemp(c); + cTemp.get().resize(n + 1); + eastl::copy(begin(), end(), cTemp.get().begin()); + eastl::swap(c, cTemp.get()); + + mBegin = c.begin(); + mEnd = mBegin; + eastl::advance(mEnd, mSize); // We can do a simple advance algorithm on this because we know that mEnd will not wrap around. + } + } + + + template + typename ring_buffer::reference + ring_buffer::front() + { + return *mBegin; + } + + + template + typename ring_buffer::const_reference + ring_buffer::front() const + { + return *mBegin; + } + + + template + typename ring_buffer::reference + ring_buffer::back() + { + // return *(end() - 1); // Can't use this because not all iterators support operator-. + + iterator temp(end()); // To do: Find a way to construct this temporary in the return statement. 
+ return *(--temp); // We can do it by making all our containers' iterators support operator-. + } + + + template + typename ring_buffer::const_reference + ring_buffer::back() const + { + // return *(end() - 1); // Can't use this because not all iterators support operator-. + + const_iterator temp(end()); // To do: Find a way to construct this temporary in the return statement. + return *(--temp); // We can do it by making all our containers' iterators support operator-. + } + + + /// A push_back operation on a ring buffer assigns the new value to end. + /// If there is no more space in the buffer, this will result in begin + /// being overwritten and the begin position being moved foward one position. + template + void ring_buffer::push_back(const value_type& value) + { + *mEnd = value; + + if(++mEnd == c.end()) + mEnd = c.begin(); + + if(mEnd == mBegin) + { + if(++mBegin == c.end()) + mBegin = c.begin(); + } + else + ++mSize; + } + + + /// A push_back operation on a ring buffer assigns the new value to end. + /// If there is no more space in the buffer, this will result in begin + /// being overwritten and the begin position being moved foward one position. + template + typename ring_buffer::reference + ring_buffer::push_back() + { + // We don't do the following assignment, as the value at mEnd is already constructed; + // it is merely possibly not default-constructed. However, the spirit of push_back + // is that the user intends to do an assignment or data modification after the + // push_back call. The user can always execute *back() = value_type() if he wants. + //*mEnd = value_type(); + + if(++mEnd == c.end()) + mEnd = c.begin(); + + if(mEnd == mBegin) + { + if(++mBegin == c.end()) + mBegin = c.begin(); + } + else + ++mSize; + + return back(); + } + + + template + void ring_buffer::pop_back() + { + EASTL_ASSERT(mEnd != mBegin); // We assume that size() > 0 and thus that there is something to pop. 
+ + if(EASTL_UNLIKELY(mEnd == c.begin())) + mEnd = c.end(); + --mEnd; + --mSize; + } + + + template + void ring_buffer::push_front(const value_type& value) + { + if(EASTL_UNLIKELY(mBegin == c.begin())) + mBegin = c.end(); + + if(--mBegin == mEnd) + { + if(EASTL_UNLIKELY(mEnd == c.begin())) + mEnd = c.end(); + --mEnd; + } + else + ++mSize; + + *mBegin = value; + } + + + template + typename ring_buffer::reference + ring_buffer::push_front() + { + if(EASTL_UNLIKELY(mBegin == c.begin())) + mBegin = c.end(); + + if(--mBegin == mEnd) + { + if(EASTL_UNLIKELY(mEnd == c.begin())) + mEnd = c.end(); + --mEnd; + } + else + ++mSize; + + // See comments above in push_back for why we don't execute this: + // *mBegin = value_type(); + + return *mBegin; // Same as return front(); + } + + + template + void ring_buffer::pop_front() + { + EASTL_ASSERT(mBegin != mEnd); // We assume that mEnd > mBegin and thus that there is something to pop. + + if(++mBegin == c.end()) + mBegin = c.begin(); + --mSize; + } + + + template + typename ring_buffer::reference + ring_buffer::operator[](size_type n) + { + // return *(begin() + n); // Can't use this because not all iterators support operator+. + + // This should compile to code that is nearly as efficient as that above. + // The primary difference is the possible generation of a temporary in this case. + iterator temp(begin()); + eastl::advance(temp, n); + return *(temp.mContainerIterator); + } + + + template + typename ring_buffer::const_reference + ring_buffer::operator[](size_type n) const + { + // return *(begin() + n); // Can't use this because not all iterators support operator+. + + // This should compile to code that is nearly as efficient as that above. + // The primary difference is the possible generation of a temporary in this case. 
+ const_iterator temp(begin()); + eastl::advance(temp, n); + return *(temp.mContainerIterator); + } + + + template + typename ring_buffer::iterator + ring_buffer::insert(const_iterator position, const value_type& value) + { + // To consider: It would be faster if we could tell that position was in the first + // half of the container and instead of moving things after the position back, + // we could move things before the position forward. + + iterator afterEnd(end()); + iterator beforeEnd(afterEnd); + + ++afterEnd; + + if(afterEnd.mContainerIterator == mBegin) // If we are at full capacity... + --beforeEnd; + else + push_back(); + + iterator itPosition(position.mpContainer, position.mContainerIterator); // We merely copy from const_iterator to iterator. + eastl::copy_backward(itPosition, beforeEnd, end()); + *itPosition = value; + + return itPosition; + } + + + template + void ring_buffer::insert(const_iterator position, size_type n, const value_type& value) + { + // To do: This can be improved with a smarter version. However, + // this is a little tricky because we need to deal with the case + // whereby n is greater than the size of the container itself. + while(n--) + insert(position, value); + } + + + template + void ring_buffer::insert(const_iterator position, std::initializer_list ilist) + { + insert(position, ilist.begin(), ilist.end()); + } + + + template + template + void ring_buffer::insert(const_iterator position, InputIterator first, InputIterator last) + { + // To do: This can possibly be improved with a smarter version. + // However, this can be tricky if distance(first, last) is greater + // than the size of the container itself. + for(; first != last; ++first, ++position) + insert(position, *first); + } + + + template + typename ring_buffer::iterator + ring_buffer::erase(const_iterator position) + { + iterator itPosition(position.mpContainer, position.mContainerIterator); // We merely copy from const_iterator to iterator. 
+ iterator iNext(itPosition); + + eastl::copy(++iNext, end(), itPosition); + pop_back(); + + return itPosition; + } + + + template + typename ring_buffer::iterator + ring_buffer::erase(const_iterator first, const_iterator last) + { + iterator itFirst(first.mpContainer, first.mContainerIterator); // We merely copy from const_iterator to iterator. + iterator itLast(last.mpContainer, last.mContainerIterator); + + typename iterator::difference_type d = eastl::distance(itFirst, itLast); + + eastl::copy(itLast, end(), itFirst); + + while(d--) // To do: improve this implementation. + pop_back(); + + return itFirst; + } + + + template + typename ring_buffer::reverse_iterator + ring_buffer::erase(const_reverse_iterator position) + { + return reverse_iterator(erase((++position).base())); + } + + + template + typename ring_buffer::reverse_iterator + ring_buffer::erase(const_reverse_iterator first, const_reverse_iterator last) + { + // Version which erases in order from first to last. + // difference_type i(first.base() - last.base()); + // while(i--) + // first = erase(first); + // return first; + + // Version which erases in order from last to first, but is slightly more efficient: + return reverse_iterator(erase((++last).base(), (++first).base())); + } + + + template + void ring_buffer::clear() + { + // Don't clear the container; we use its valid data for our elements. + mBegin = c.begin(); + mEnd = c.begin(); + mSize = 0; + } + + + template + typename ring_buffer::container_type& + ring_buffer::get_container() + { + return c; + } + + + template + const typename ring_buffer::container_type& + ring_buffer::get_container() const + { + return c; + } + + + template + inline bool ring_buffer::validate() const + { + if(!c.validate()) // This requires that the container implement the validate function. That pretty much + return false; // means that the container is an EASTL container and not a std STL container. 
+ + if(c.empty()) // c must always have a size of at least 1, as even an empty ring_buffer has an unused terminating element. + return false; + + if(size() > capacity()) + return false; + + if((validate_iterator(begin()) & (isf_valid | isf_current)) != (isf_valid | isf_current)) + return false; + + if((validate_iterator(end()) & (isf_valid | isf_current)) != (isf_valid | isf_current)) + return false; + + // Verify that the size calculation is consistent. + size_type n = 0; + for(const_iterator i(begin()), iEnd(end()); i != iEnd; ++i) + ++n; + if(n != mSize) + return false; + + return true; + } + + + template + inline int ring_buffer::validate_iterator(const_iterator i) const + { + // To do: Replace this with a more efficient implementation if possible. + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if(temp == i) + return (isf_valid | isf_current | isf_can_dereference); + } + + if(i == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const ring_buffer& a, const ring_buffer& b) + { + return (a.size() == b.size()) && (a.c == b.c); + } + + + template + inline bool operator<(const ring_buffer& a, const ring_buffer& b) + { + const typename ring_buffer::size_type sizeA = a.size(); + const typename ring_buffer::size_type sizeB = b.size(); + + if(sizeA == sizeB) + return (a.c < b.c); + return sizeA < sizeB; + } + + + template + inline bool operator!=(const ring_buffer& a, const ring_buffer& b) + { + return !(a == b); + } + + + template + inline bool operator>(const ring_buffer& a, const ring_buffer& b) + { + return (b < a); + } + + + template + inline bool operator<=(const ring_buffer& a, const ring_buffer& b) + { + return !(b < a); + } + + + template + inline bool operator>=(const ring_buffer& a, const 
ring_buffer& b) + { + return !(a < b); + } + + + template + inline void swap(ring_buffer& a, ring_buffer& b) + { + a.swap(b); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + diff --git a/include/EASTL/bonus/sort_extra.h b/include/EASTL/bonus/sort_extra.h new file mode 100644 index 0000000..5f9a0c4 --- /dev/null +++ b/include/EASTL/bonus/sort_extra.h @@ -0,0 +1,204 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +////////////////////////////////////////////////////////////////////////////// +// This file implements additional sort algorithms beyond the basic set. +// Included here are: +// selection_sort -- Unstable. +// shaker_sort -- Stable. +// bucket_sort -- Stable. +// +////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_SORT_EXTRA_H +#define EASTL_SORT_EXTRA_H + + +#include +#include +#include +#include +#include +#include // For backwards compatibility due to sorts moved from here to sort.h. +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// selection_sort + /// + /// Implements the SelectionSort algorithm. + /// + template + void selection_sort(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare) + { + ForwardIterator iCurrent, iMin; + + for(; first != last; ++first) + { + iCurrent = first; + iMin = iCurrent; + + for(++iCurrent; iCurrent != last; ++iCurrent) + { + if(compare(*iCurrent, *iMin)) + { + EASTL_VALIDATE_COMPARE(!compare(*iMin, *iCurrent)); // Validate that the compare function is sane. 
+ iMin = iCurrent; + } + } + + if(first != iMin) + eastl::iter_swap(first, iMin); + } + } // selection_sort + + template + inline void selection_sort(ForwardIterator first, ForwardIterator last) + { + typedef eastl::less::value_type> Less; + + eastl::selection_sort(first, last, Less()); + } + + + + /// shaker_sort + /// + /// Implements the ShakerSort algorithm, which is a sorting algorithm which + /// improves on bubble_sort by sweeping both from left to right and right + /// to left, resulting in less iteration. + /// + template + void shaker_sort(BidirectionalIterator first, BidirectionalIterator last, StrictWeakOrdering compare) + { + if(first != last) + { + BidirectionalIterator iCurrent, iNext, iLastModified; + + --last; + + while(first != last) + { + iLastModified = first; + + for(iCurrent = first; iCurrent != last; iCurrent = iNext) + { + iNext = iCurrent; + ++iNext; + + if(compare(*iNext, *iCurrent)) + { + EASTL_VALIDATE_COMPARE(!compare(*iCurrent, *iNext)); // Validate that the compare function is sane. + iLastModified = iCurrent; + eastl::iter_swap(iCurrent, iNext); + } + } + + last = iLastModified; + + if(first != last) + { + for(iCurrent = last; iCurrent != first; iCurrent = iNext) + { + iNext = iCurrent; + --iNext; + + if(compare(*iCurrent, *iNext)) + { + EASTL_VALIDATE_COMPARE(!compare(*iNext, *iCurrent)); // Validate that the compare function is sane. + iLastModified = iCurrent; + eastl::iter_swap(iNext, iCurrent); + } + } + first = iLastModified; + } + } + } + } // shaker_sort + + template + inline void shaker_sort(BidirectionalIterator first, BidirectionalIterator last) + { + typedef eastl::less::value_type> Less; + + eastl::shaker_sort(first, last, Less()); + } + + + + /// bucket_sort + /// + /// Implements the BucketSort algorithm. 
+ /// + /// Example usage: + /// const size_t kElementRange = 32; + /// vector intArray(1000); + /// + /// for(int i = 0; i < 1000; i++) + /// intArray[i] = rand() % kElementRange; + /// + /// vector< vector > bucketArray(kElementRange); + /// bucket_sort(intArray.begin(), intArray.end(), bucketArray, eastl::hash_use_self()); + /// + template + struct hash_use_self + { + T operator()(const T& x) const + { return x; } + }; + + // Requires buckeyArray to be an array of arrays with a size equal to the range of values + // returned by the hash function. The hash function is required to return a unique value + // for each uniquely sorted element. Usually the way this is done is the elements are + // integers of a limited range (e.g. 0-64) and the hash function returns the element value + // itself. If you had a case where all elements were always even numbers (e.g. 0-128), + // you could use a custom hash function that returns (element value / 2). + // + // The user is required to provide an empty bucketArray to this function. This function returns + // with the bucketArray non-empty. This function doesn't clear the bucketArray because that takes + // time and the user might not need it to be cleared, at least at that time. 
+ // + template + void bucket_sort(ForwardIterator first, ForwardIterator last, ContainerArray& bucketArray, HashFunction hash /*= hash_use_self*/) + { + for(ForwardIterator iInput = first; iInput != last; ++iInput) + bucketArray[hash(*iInput)].push_back(*iInput); + + for(typename ContainerArray::const_iterator iBucket = bucketArray.begin(); iBucket != bucketArray.end(); ++iBucket) + first = eastl::copy((*iBucket).begin(), (*iBucket).end(), first); + } + + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + + diff --git a/include/EASTL/bonus/sparse_matrix.h b/include/EASTL/bonus/sparse_matrix.h new file mode 100644 index 0000000..dd8ea65 --- /dev/null +++ b/include/EASTL/bonus/sparse_matrix.h @@ -0,0 +1,1581 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// *** Note *** +// This implementation is incomplete. +// +// Additionally, this current implementation is not yet entirely in line with +// EASTL conventions and thus may appear a little out of place to the observant. +// The goal is to bring thus file up to current standards in a future version. +/////////////////////////////////////////////////////////////////////////////// + + +// To do: +// Remove forward declarations of classes. +// Remove mCol variable from matrix_cell. +// Make iterators have const and non-const versions. +// Remove mpCell from sparse_matrix_col_iterator. +// Remove mpRow from sparse_matrix_row_iterator. +// Remove mpMatrix from iterators. + + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a sparse matrix, which is a 2 dimensional array of +// cells of an arbitrary type T. 
It is useful for situations where you need +// to store data in a very sparse way. The cost of storing an individual cell +// is higher than with a 2D array (or vector of vectors), but if the array is +// sparse, then a sparse matrix can save memory. It can also iterate non-empty +// cells faster than a regular 2D array, as only used cells are stored. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_SPARSE_MATRIX_H +#define EASTL_SPARSE_MATRIX_H + +#if 0 + +#include +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + // kRowColIndexNone + // Refers to a row that is non-existant. If you call a function that returns a + // row or col index, and get kSparseMatrixIndexNone, the row or col doesn't exist. + static const int32_t kSparseMatrixIntMin = (-2147483647 - 1); + static const int32_t kSparseMatrixIntMax = 2147483647; + + + /////////////////////////////////////////////////////////////////////////////// + // Forward declarations + // + template struct matrix_cell; + template struct matrix_row; + template class sparse_matrix; + template class sparse_matrix_row_iterator; + template class sparse_matrix_col_iterator; + template class sparse_matrix_iterator; + + + + /////////////////////////////////////////////////////////////////////////////// + /// matrix_cell + /// + template + struct matrix_cell + { + public: + typedef matrix_cell this_type; + typedef T value_type; + + public: + int mCol; + value_type mValue; + + public: + matrix_cell(int nCol = 0); + matrix_cell(int nCol, const value_type& value); + + }; // matrix_cell + + + + /////////////////////////////////////////////////////////////////////////// + /// matrix_row + /// + template + struct matrix_row + { + public: + typedef Allocator allocator_type; + typedef 
matrix_row this_type; + typedef T value_type; + typedef matrix_cell cell_type; + typedef eastl::map, allocator_type> CellMap; + + public: + int mRow; + CellMap mCellRow; + + public: + matrix_row(int nRow = 0); + + // This function finds the given column in this row, if present. + // The result is a cell, and the pointer to the cell data itself + // is returned in the 'pCell' argument. + bool GetMatrixCol(int nCol, cell_type*& pCell); + + }; // matrix_row + + + + + /////////////////////////////////////////////////////////////////////////////// + /// sparse_matrix_row_iterator + /// + /// Iterates cells in a given row of a sparse matrix. + /// + template + class sparse_matrix_row_iterator + { + public: + typedef sparse_matrix_row_iterator this_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef T& reference; + typedef T* pointer; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + typedef sparse_matrix MatrixType; + typedef matrix_row row_type; + typedef matrix_cell cell_type; + typedef eastl::map RowMap; + typedef typename row_type::CellMap CellMap; + + public: + MatrixType* mpMatrix; + row_type* mpRow; + typename CellMap::iterator mCellMapIterator; + + public: + sparse_matrix_row_iterator(MatrixType* pMatrix, row_type* pRow, const typename CellMap::iterator& ic) + : mpMatrix(pMatrix), mpRow(pRow), mCellMapIterator(ic) + { + } + + sparse_matrix_row_iterator(MatrixType* pMatrix = NULL) + : mpMatrix(pMatrix), mpRow(NULL) + { + } + + int GetCol() // Returns kSparseMatrixIntMin if iterator is 'empty'. We don't + { // return -1 because sparse matrix is not limited to rows/cols >= 0. + if(mpRow) // You can have a matrix that starts at column -100 and row -500. 
+ { + const cell_type& cell = (*mCellMapIterator).second; + return cell.mCol; + } + return kSparseMatrixIntMin; + } + + int GetRow() + { + if(mpRow) + return mpRow->mRow; + return kSparseMatrixIntMin; + } + + bool operator==(const this_type& x) const + { + if(!mpRow && !x.mpRow) // If we are comparing 'empty' iterators... + return true; + + // The first check below wouldn't be necessary if we had a guarantee the iterators can compare between different rows. + return (mpRow == x.mpRow) && (mCellMapIterator == x.mCellMapIterator); + } + + bool operator!=(const this_type& x) const + { + return !operator==(x); + } + + reference operator*() const + { + const cell_type& cell = (*mCellMapIterator).second; + return cell.mValue; + } + + pointer operator->() const + { + const cell_type& cell = (*mCellMapIterator).second; + return &cell.mValue; + } + + this_type& operator++() + { + ++mCellMapIterator; + return *this; + } + + this_type operator++(int) + { + this_type tempCopy = *this; + ++*this; + return tempCopy; + } + + }; // sparse_matrix_row_iterator + + + + /////////////////////////////////////////////////////////////////////////////// + /// sparse_matrix_col_iterator + /// + /// Iterates cells in a given column of a sparse matrix. Do not modify the + /// sparse_matrix while iterating through it. You can do this with some + /// STL classes, but I'd rather not have to support this kind of code in + /// the future here. + /// + template + class sparse_matrix_col_iterator + { + public: + typedef sparse_matrix_col_iterator this_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. 
+ typedef ptrdiff_t difference_type; + typedef T value_type; + typedef T& reference; + typedef T* pointer; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + typedef sparse_matrix MatrixType; + typedef matrix_row row_type; + typedef matrix_cell cell_type; + typedef eastl::map RowMap; + typedef typename row_type::CellMap CellMap; + + public: + MatrixType* mpMatrix; + typename RowMap::iterator mRowMapIterator; + cell_type* mpCell; + + public: + sparse_matrix_col_iterator(MatrixType* pMatrix, const typename RowMap::iterator& i, cell_type* pCell) + : mpMatrix(pMatrix), mRowMapIterator(i), mpCell(pCell) + { + } + + sparse_matrix_col_iterator(MatrixType* pMatrix = NULL) + : mpMatrix(pMatrix), mpCell(NULL) + { + } + + int GetCol() // Returns kSparseMatrixIntMin if iterator is 'empty'. We don't return -1 + { // because sparse matrix is not limited to rows/cols >= 0. + if(mpCell) // You can have a matrix that starts at column -100 and row -500. + return mpCell->mCol; + return kSparseMatrixIntMin; + } + + int GetRow() + { + if(mpCell) // This might look strange, but we are using 'pCell' to + return (*mRowMapIterator).second.mRow; // simply tell us if the iterator is 'empty' or not. + return kSparseMatrixIntMin; + } + + bool operator==(const this_type& x) const + { + if(!mpCell && !x.mpCell) // If we are comparing 'empty' iterators... + return true; + + // The second check below wouldn't be necessary if we had a guarantee the iterators can compare between different maps. + return (mRowMapIterator == x.mRowMapIterator) && (mpCell == x.mpCell); + } + + bool operator!=(const this_type& x) const + { + return !operator==(x); + } + + reference operator*() const + { + return mpCell->mValue; + } + + reference operator->() const + { + return &mpCell->mValue; + } + + this_type& operator++() + { + ++mRowMapIterator; + + while(mRowMapIterator != mpMatrix->mRowMap.end()) + { + row_type& row = (*mRowMapIterator).second; + + // Can't we just use row.mCellRow.find(cell)? 
+ typename CellMap::const_iterator it = row.mCellRow.find(mpCell->mCol); + + if(it != row.mCellRow.end()) + { + mpCell = const_cast(&(*it).second); // Trust me, we won't be modifying the data. + return *this; + } + + // Linear search: + //for(typename CellMap::iterator it(row.mCellRow.begin()); it != row.mCellRow.end(); ++it) + //{ + // const cell_type& cell = (*it).second; + // + // if(cell.mCol == mpCell->mCol) + // { + // mpCell = const_cast(&cell); // Trust me, we won't be modifying the data. + // return *this; + // } + //} + + ++mRowMapIterator; + } + + mpCell = NULL; + return *this; + } + + this_type operator++(int) + { + this_type tempCopy = *this; + ++*this; + return tempCopy; + } + + }; // sparse_matrix_col_iterator + + + + /////////////////////////////////////////////////////////////////////////////// + /// sparse_matrix_iterator + /// + /// Iterates cells of a sparse matrix, by rows and columns. Each row is iterated + /// and each column within that row is iterated in order. + /// + template + class sparse_matrix_iterator + { + public: + typedef sparse_matrix_iterator this_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. 
+ typedef ptrdiff_t difference_type; + typedef T value_type; + typedef T& reference; + typedef T* pointer; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + typedef sparse_matrix MatrixType; + typedef matrix_row row_type; + typedef matrix_cell cell_type; + typedef eastl::map RowMap; + typedef typename row_type::CellMap CellMap; + + public: + MatrixType* mpMatrix; + typename RowMap::iterator mRowMapIterator; + typename CellMap::iterator mCellMapIterator; + + public: + sparse_matrix_iterator(MatrixType* pMatrix, const typename RowMap::iterator& ir, const typename CellMap::iterator& ic) + : mpMatrix(pMatrix), mRowMapIterator(ir), mCellMapIterator(ic) + { + } + + sparse_matrix_iterator(MatrixType* pMatrix, const typename RowMap::iterator& ir) + : mpMatrix(pMatrix), mRowMapIterator(ir), mCellMapIterator() + { + } + + int GetCol() + { + const cell_type& cell = (*mCellMapIterator).second; + return cell.mCol; + } + + int GetRow() + { + const row_type& row = (*mRowMapIterator).second; + return row.mRow; + } + + bool operator==(const this_type& x) const + { + return (mRowMapIterator == x.mRowMapIterator) && (mCellMapIterator == x.mCellMapIterator); + } + + bool operator!=(const this_type& x) const + { + return (mRowMapIterator != x.mRowMapIterator) || (mCellMapIterator != x.mCellMapIterator); + } + + reference operator*() const + { + cell_type& cell = (*mCellMapIterator).second; + return cell.mValue; + } + + this_type& operator++() + { + ++mCellMapIterator; // Increment the current cell (column) in the current row. + + row_type& row = (*mRowMapIterator).second; + + if(mCellMapIterator == row.mCellRow.end()) // If we hit the end of the current row... + { + ++mRowMapIterator; + + while(mRowMapIterator != mpMatrix->mRowMap.end()) // While we haven't hit the end of rows... + { + row_type& row = (*mRowMapIterator).second; + + if(!row.mCellRow.empty()) // If there are any cells (columns) in this row... 
+ { + mCellMapIterator = row.mCellRow.begin(); + break; + } + + ++mRowMapIterator; + } + } + + return *this; + } + + this_type operator++(int) + { + this_type tempCopy = *this; + operator++(); + return tempCopy; + } + + }; // sparse_matrix_iterator + + + + /////////////////////////////////////////////////////////////////////////////// + /// sparse_matrix + /// + template + class sparse_matrix + { + public: + typedef sparse_matrix this_type; + typedef T value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef ptrdiff_t difference_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef sparse_matrix_row_iterator row_iterator; + typedef sparse_matrix_col_iterator col_iterator; + typedef sparse_matrix_iterator iterator; + typedef sparse_matrix_iterator const_iterator; // To do: Fix this. + typedef Allocator allocator_type; + typedef matrix_row row_type; + typedef typename row_type::CellMap CellMap; + typedef eastl::map RowMap; + + // iterator friends + friend class sparse_matrix_row_iterator; + friend class sparse_matrix_col_iterator; + friend class sparse_matrix_iterator; + + // kRowColIndexNone + static const int32_t kRowColIndexNone = kSparseMatrixIntMin; + + // UserCell + // We don't internally use this struct to store data, because that would + // be inefficient. However, whenever the user of this class needs to query for + // individual cells, especially in batches, it is useful to have a struct that + // identifies both the cell coordinates and cell data for the user. 
+ struct UserCell + { + int mCol; + int mRow; + T mValue; + }; + + public: + sparse_matrix(); + sparse_matrix(const sparse_matrix& x); + ~sparse_matrix(); + + this_type& operator=(const this_type& x); + + void swap(); + + // Iterators + row_iterator row_begin(int nRow); + row_iterator row_end(int nRow); + col_iterator col_begin(int nCol); + col_iterator col_end(int nCol); + iterator begin(); + iterator end(); + + // Standard interface functions + bool empty() const; // Returns true if no cells are used. + size_type size() const; // Returns total number of non-empty cells. + + int GetMinUsedRow(int& nResultCol) const; // Returns first row that has data. Fills in column that has that data. Returns kRowUnused if no row has data. + int GetMaxUsedRow(int& nResultCol) const; // Returns last row that has data. Fills in column that has that data. Returns kRowUnused if no row has data. + bool GetMinMaxUsedColForRow(int nRow, int& nMinCol, int& nMaxCol) const; // Sets the min and max column and returns true if any found. + bool GetMinMaxUsedRowForCol(int nCol, int& nMinRow, int& nMaxRow) const; // Sets the min and max row and returns true if any found. + size_type GetColCountForRow(int nRow) const; // You specify the row, it gives you the used cell count. + + int GetMinUsedCol(int& nResultRow) const; // Returns first column that has data. Fills in row that has that data. Returns kColUnused if no column has data. + int GetMaxUsedCol(int& nResultRow) const; // Returns last column that has data. Fills in row that has that data. Returns kColUnused if no column has data. + size_type GetRowCountForCol(int nCol) const; // + int GetRowWithMaxColCount(size_type& nColCount) const; // + + bool remove(int nRow, int nCol, T* pPreviousT = NULL); // If you pass in a 'pPreviousT', it will copy in value to it before removing the cell. + bool remove_row(int nRow, size_type nCount = 1); // Removes 'nCount' rows, starting at 'nRow'. 
+ bool remove_col(int nCol, size_type nCount = 1); // Removes 'nCount' cols, starting at 'nCol'. + bool clear(); // Removes all cells. + void insert(int nRow, int nCol, const value_type& t, value_type* pPrevValue = NULL); // If you pass in a 'pPreviousT', it will copy in value to it before changing the cell. + bool IsCellUsed(int nRow, int nCol); // Returns true if cell is non-empty + + bool GetCell(int nRow, int nCol, value_type* pValue = NULL); // + bool GetCellPtr(int nRow, int nCol, value_type** pValue); // Gets a pointer to the cell itself, for direct manipulation. + size_type GetCellCountForRange(int nRowStart, int nRowEnd, + int nColStart, int nColEnd); // Counts cells in range. Range is inclusive. + int GetCellRange(int nRowStart, int nRowEnd, + int nColStart, int nColEnd, UserCell* pCellArray = NULL); // Copies cell data into the array of UserCells provided by the caller. + int FindCell(const value_type& t, UserCell* pCellArray = NULL); // Finds all cells that match the given argument cell. Call this function with NULL pCellArray to simply get the count. + + bool validate(); + int validate_iterator(const_iterator i) const; + + protected: + bool GetMatrixRow(int nRow, row_type*& pRow); + + protected: + RowMap mRowMap; /// Map of all row data. It is a map of maps. + size_type mnSize; /// The count of all cells. This is equal to the sums of the sizes of the maps in mRowMap. + allocator_type mAllocator; /// The allocator for all data. 
+ + }; // sparse_matrix + + + + + + + + /////////////////////////////////////////////////////////////////////////////// + // matrix_cell + /////////////////////////////////////////////////////////////////////////////// + + template + matrix_cell::matrix_cell(int nCol = 0) + : mCol(nCol), mValue() + { + } + + template + matrix_cell::matrix_cell(int nCol, const value_type& value) + : mCol(nCol), mValue(value) + { + } + + + + + /////////////////////////////////////////////////////////////////////////////// + // matrix_row + /////////////////////////////////////////////////////////////////////////////// + + template + matrix_row::matrix_row(int nRow = 0) + : mRow(nRow), mCellRow() + { + } + + template + bool matrix_row::GetMatrixCol(int nCol, cell_type*& pCell) + { + #if EASTL_ASSERT_ENABLED + int nPreviousCol(sparse_matrix::kRowColIndexNone); + EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); + #endif + + typename CellMap::iterator it(mCellRow.find(nCol)); + + if(it != mCellRow.end()) + { + cell_type& cell = (*it).second; + pCell = &cell; + return true; + } + + return false; + } + + template + inline bool operator==(const matrix_row& a, const matrix_row& b) + { + return (a.mRow == b.mRow) && (a.mCellRow == b.mCellRow); + } + + template + inline bool operator==(const matrix_cell& a, const matrix_cell& b) + { + return (a.mValue == b.mValue); + } + + + + + /////////////////////////////////////////////////////////////////////////////// + // sparse_matrix + /////////////////////////////////////////////////////////////////////////////// + + template + inline sparse_matrix::sparse_matrix() + : mRowMap(), mnSize(0) + { + } + + + template + inline sparse_matrix::sparse_matrix(const this_type& x) + { + mnSize = x.mnSize; + mRowMap = x.mRowMap; + } + + + template + inline sparse_matrix::~sparse_matrix() + { + // Nothing to do. 
+ } + + + template + inline typename sparse_matrix::this_type& + sparse_matrix::operator=(const this_type& x) + { + // Check for self-asignment is not needed, as the assignments below already do it. + mnSize = x.mnSize; + mRowMap = x.mRowMap; + return *this; + } + + + template + inline void sparse_matrix& sparse_matrix::swap() + { + eastl::swap(mnSize, x.mnSize); + eastl::swap(mRowMap, x.mRowMap); + } + + + template + inline bool sparse_matrix::empty() const + { + return (mnSize == 0); + } + + + template + inline typename sparse_matrix::size_type + sparse_matrix::size() const + { + return mnSize; + } + + + /////////////////////////////////////////////////////////////////////////////// + // row_begin + // + // This function returns a sparse matrix row iterator. It allows you to + // iterate all used cells in a given row. You pass in the row index and it + // returns an iterator for the first used cell. You can dereference the + // iterator to get the cell data. Just like STL containers, the end iterator + // is one-past the past the last valid iterator. A row iterator returned + // by this function is good only for that row; likewise, you can only use + // such a row iterator with the end iterator for that row and not with an + // end iterator for any other row. 
+ // + // Here is an example of using a row iterator to iterate all used cells + // in row index 3 of a sparse matrix of 'int': + // sparse_matrix::row_iterator it = intMatrix.row_begin(3); + // sparse_matrix::row_iterator itEnd = intMatrix.row_end(3); + // + // while(it != itEnd) + // { + // printf("Col=%d, row=%d, value=%d\n", it.GetCol(), it.GetRow(), *it); + // ++it; + // } + // + template + typename sparse_matrix::row_iterator + sparse_matrix::row_begin(int nRow) + { + EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); + + row_type* pRow; + + if(GetMatrixRow(nRow, pRow)) + return sparse_matrix_row_iterator(this, pRow, pRow->mCellRow.begin()); + return sparse_matrix_row_iterator(this); //Create an 'empty' iterator. + } + + + /////////////////////////////////////////////////////////////////////////////// + // row_end + // + // Returns the end iterator for a given row. See the row_begin function for more. + // + template + inline typename sparse_matrix::row_iterator + sparse_matrix::row_end(int nRow) + { + EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); + + row_type* pRow; + + if(GetMatrixRow(nRow, pRow)) + return sparse_matrix_row_iterator(this, pRow, pRow->mCellRow.end()); + return sparse_matrix_row_iterator(this); //Create an 'empty' iterator. + } + + + /////////////////////////////////////////////////////////////////////////////// + // col_begin + // + // This function returns a sparse matrix column iterator. A column iterator + // acts just like a row iterator except it iterates cells in a column instead + // of cells in a row. 
+ // + // Here is an example of using a column iterator to iterate all used cells + // in column index 0 (the first column) of a sparse matrix of 'int': + // sparse_matrix::col_iterator it = intMatrix.col_begin(0); + // sparse_matrix::col_iterator itEnd = intMatrix.col_end(0); + // + // while(it != itEnd) + // { + // printf("Col=%d, row=%d, value=%d\n", it.GetCol(), it.GetRow(), *it); + // ++it; + // } + // + template + typename sparse_matrix::col_iterator + sparse_matrix::col_begin(int nCol) + { + EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); + + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) + { + const row_type& matrixRowConst = (*it).second; + row_type& row = const_cast(matrixRowConst); + + for(typename CellMap::iterator it1(row.mCellRow.begin()); it1!=row.mCellRow.end(); ++it1) + { + const cell_type& cellConst = (*it1).second; + cell_type& cell = const_cast(cellConst); + + if(cell.mCol == nCol) + return sparse_matrix_col_iterator(this, it, &cell); + } + } + return sparse_matrix_col_iterator(this, mRowMap.end(), NULL); + } + + + /////////////////////////////////////////////////////////////////////////////// + // col_end + // + // Returns the end iterator for a given colum. See the col_begin function for more. + // + template + inline typename sparse_matrix::col_iterator + sparse_matrix::col_end(int nCol) + { + EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); + + return sparse_matrix_col_iterator(this, mRowMap.end(), NULL); + } + + + /////////////////////////////////////////////////////////////////////////////// + // begin + // + // This function returns a sparse matrix cell iterator. It iterates all used + // cells in the sparse matrix. The cells are returned in column,row order + // (as opposed to row,column order). Thus, all columns for a given row will + // be iterated before moving onto the next row. 
+ // + // Here is an example of using an iterator to iterate all used cells: + // sparse_matrix::iterator it = intMatrix.begin(); + // sparse_matrix::iterator itEnd = intMatrix.end(); + // + // while(it != itEnd) + // { + // printf("Col=%d, row=%d, value=%d\n", it.GetCol(), it.GetRow(), *it); + // ++it; + // } + // + template + typename sparse_matrix::iterator + sparse_matrix::begin() + { + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) + { + row_type& row = (*it).second; + + if(!row.mCellRow.empty()) + return sparse_matrix_iterator(this, it, row.mCellRow.begin()); + } + return sparse_matrix_iterator(this, mRowMap.end()); + } + + + template + inline typename sparse_matrix::iterator + sparse_matrix::end() + { + return sparse_matrix_iterator(this, mRowMap.end()); + } + + + template + int sparse_matrix::GetMinUsedRow(int& nResultCol) const + { + if(!mRowMap.empty()) + { + const row_type& row = (*mRowMap.begin()).second; // Get the last row. + const cell_type& cell = (*row.mCellRow.begin()).second; // Get the first cell in that row, though it doesn't really matter which one we get. + + nResultCol = cell.mCol; + + return row.mRow; // Return the row of the last item in the map. + } + + nResultCol = kRowColIndexNone; + return kRowColIndexNone; + } + + + template + int sparse_matrix::GetMaxUsedRow(int& nResultCol) const + { + if(!mRowMap.empty()) + { + const row_type& row = (*mRowMap.rbegin()).second; // Get the last row. + const cell_type& cell = (*row.mCellRow.begin()).second; // Get the first cell in that row, though it doesn't really matter which one we get. + + nResultCol = cell.mCol; + + return row.mRow; // Return the row of the last item in the map. 
+ } + + nResultCol = kRowColIndexNone; + return kRowColIndexNone; + } + + + template + bool sparse_matrix::GetMinMaxUsedColForRow(int nRow, int& nMinCol, int& nMaxCol) const + { + bool bReturnValue(false); + + EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); + + nMinCol = kSparseMatrixIntMax; + nMaxCol = kSparseMatrixIntMin; + + typename RowMap::iterator it(mRowMap.find(nRow)); + + if(it != mRowMap.end()) + { + const row_type& row = (*it).second; + EASTL_ASSERT(!row.mCellRow.empty()); // All rows should have at least one col, or we would have removed it. + + const cell_type& matrixCellFront = (*row.mCellRow.begin()).second; + const cell_type& matrixCellBack = (*row.mCellRow.rbegin()).second; + + nMinCol = matrixCellFront.mCol; + nMaxCol = matrixCellBack.mCol; + + bReturnValue = true; + } + + return bReturnValue; + } + + + /////////////////////////////////////////////////////////////////////////////// + // GetMinMaxUsedRowForCol + // + template + bool sparse_matrix::GetMinMaxUsedRowForCol(int nCol, int& nMinRow, int& nMaxRow) const + { + // The implementation of this function is a little tougher than with the "col for row" version of + // this function, since the data is stored in row maps instead of column maps. + bool bReturnValue(false); + + EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); + + nMinRow = kSparseMatrixIntMax; + nMaxRow = kSparseMatrixIntMin; + + //First search for the min row. + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) + { + row_type& row = (*it).second; + EASTL_ASSERT(!row.mCellRow.empty()); // All rows should have at least one col, or we would have removed the row. + + // Find the given column in this row. If present work on it. + typename CellMap::iterator it1(row.mCellRow.find(nCol)); + + if(it1 != row.mCellRow.end()) + { + nMinRow = row.mRow; + nMaxRow = row.mRow; + bReturnValue = true; + break; + } + } + + // Now search for a max row. 
+ if(bReturnValue) // There can only be a max row if there was also a min row. + { + for(typename RowMap::reverse_iterator it(mRowMap.rbegin()); it != mRowMap.rend(); ++it) + { + row_type& row = (*it).second; + EASTL_ASSERT(!row.mCellRow.empty()); // All rows should have at least one col, or we would have removed the row. + + // Find the given column in this row. If present work on it. + typename CellMap::iterator it1(row.mCellRow.find(nCol)); + + if(it1 != row.mCellRow.end()) + { + nMaxRow = row.mRow; + break; + } + } + } + + return bReturnValue; + } + + + /////////////////////////////////////////////////////////////////////////////// + // GetColCountForRow + // + template + typename sparse_matrix::size_type + sparse_matrix::GetColCountForRow(int nRow) const + { + EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); + + row_type* pRow; + + if(GetMatrixRow(nRow, pRow)) + return (size_type)pRow->mCellRow.size(); + return 0; + } + + + /////////////////////////////////////////////////////////////////////////////// + // GetMinUsedCol + // + template + int sparse_matrix::GetMinUsedCol(int& nResultRow) const + { + int nMinCol = kRowColIndexNone; + nResultRow = kRowColIndexNone; + + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) + { + row_type& row = (*it).second; + EASTL_ASSERT(!row.mCellRow.empty()); // All rows should have at least one col, or we would have removed it. 
+ + const cell_type& cell = (*row.mCellRow.begin()).second; + + if((cell.mCol < nMinCol) || (nMinCol == kRowColIndexNone)) + { + nMinCol = cell.mCol; + nResultRow = row.mRow; + } + } + + return nMinCol; + } + + + /////////////////////////////////////////////////////////////////////////////// + // GetMaxUsedCol + // + template + int sparse_matrix::GetMaxUsedCol(int& nResultRow) const + { + int nMaxCol = kRowColIndexNone; + nResultRow = kRowColIndexNone; + + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) + { + row_type& row = (*it).second; + EASTL_ASSERT(!row.mCellRow.empty()); // All rows should have at least one col, or we would have removed it. + + const cell_type& cell = (*row.mCellRow.rbegin()).second; + + if((cell.mCol > nMaxCol) || (nMaxCol == kRowColIndexNone)) + { + nMaxCol = cell.mCol; + nResultRow = row.mRow; + } + } + + return nMaxCol; + } + + + /////////////////////////////////////////////////////////////////////////////// + // GetRowCountForCol + // + template + typename sparse_matrix::size_type + sparse_matrix::GetRowCountForCol(int nCol) const + { + EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); + + size_type nRowCount = 0; + + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) + { + row_type& row = (*it).second; + EASTL_ASSERT(!row.mCellRow.empty()); + + //Faster set-based code: + typename CellMap::iterator it1(row.mCellRow.find(nCol)); + if(it1 != row.mCellRow.end()) + nRowCount++; + } + + return nRowCount; + } + + + /////////////////////////////////////////////////////////////////////////////// + // GetRowWithMaxColCount + // + template + int sparse_matrix::GetRowWithMaxColCount(size_type& nColCount) const + { + int nRow = 0; + nColCount = 0; + + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) + { + const row_type& row = (*it).second; + const size_type nSize(row.mCellRow.size()); + EASTL_ASSERT(nSize != 0); + + if(nSize > 
(size_type)nColCount) + { + nRow = row.mRow; + nColCount = nSize; + } + } + return nRow; + } + + + /////////////////////////////////////////////////////////////////////////// + // GetCellCountForRange + // + template + typename sparse_matrix::size_type + sparse_matrix::GetCellCountForRange(int nRowStart, int nRowEnd, int nColStart, int nColEnd) const + { + size_type nCellCount(0); + + // Note by Paul P.: This could be made a little faster by doing a search + // for the first row and iterating the container from then on. + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) + { + row_type& row = (*it).second; + + if(row.mRow < nRowStart) + continue; + + if(row.mRow > nRowEnd) + break; + + for(typename CellMap::iterator it1(row.mCellRow.begin()); it1 != row.mCellRow.end(); ++it1) + { + const cell_type& cell = (*it1).second; + + if(cell.mCol < nColStart) + continue; + + if(cell.mCol > nColEnd) + break; + + nCellCount++; + } + } + + return nCellCount; + } + + + /////////////////////////////////////////////////////////////////////////////// + // GetCellRange + // + template + int sparse_matrix::GetCellRange(int nRowStart, int nRowEnd, + int nColStart, int nColEnd, UserCell* pCellArray) const + { + int nCellCount(0); + + // Note by Paul P.: This could be made a little faster by doing a search + // for the first row and iterating the container from then on. 
+ + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) + { + row_type& row = (*it).second; + + if(row.mRow < nRowStart) + continue; + if(row.mRow > nRowEnd) + break; + + for(typename CellMap::iterator it1(row.mCellRow.begin()); it1 != row.mCellRow.end(); ++it1) + { + const cell_type& cell = (*it1).second; + + if(cell.mCol < nColStart) + continue; + + if(cell.mCol > nColEnd) + break; + + if(pCellArray) + { + pCellArray[nCellCount].mCol = cell.mCol; + pCellArray[nCellCount].mRow = row.mRow; + pCellArray[nCellCount].mValue = cell.mValue; + } + + nCellCount++; + } + } + + return nCellCount; + } + + + /////////////////////////////////////////////////////////////////////////////// + // remove + // + template + bool sparse_matrix::remove(int nRow, int nCol, T* pPreviousT) + { + EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); + EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); + + //Faster map-based technique: + typename RowMap::iterator it(mRowMap.find(nRow)); + + if(it != mRowMap.end()) + { + row_type& row = (*it).second; + + typename CellMap::iterator it1(row.mCellRow.find(nCol)); + + if(it1 != row.mCellRow.end()) + { + cell_type& cell = (*it1).second; + + if(pPreviousT) + *pPreviousT = cell.mValue; + row.mCellRow.erase(it1); + mnSize--; + + if(row.mCellRow.empty()) // If the row is now empty and thus has no more columns... + mRowMap.erase(it); // Remove the row from the row map. 
+ return true; + } + } + + return false; + } + + + /////////////////////////////////////////////////////////////////////////////// + // remove_row + // + template + bool sparse_matrix::remove_row(int nRow, size_type nCount) + { + EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); + + // Faster map-based technique: + for(int i(nRow), iEnd(nRow + (int)nCount); i < iEnd; i++) + { + typename RowMap::iterator it(mRowMap.find(i)); + + if(it != mRowMap.end()) // If the row is present... + { + row_type& row = (*it).second; + + mnSize -= row.mCellRow.size(); + mRowMap.erase(it); + } + } + + return true; + } + + + /////////////////////////////////////////////////////////////////////////////// + // remove_col + // + template + bool sparse_matrix::remove_col(int nCol, size_type nCount) + { + EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); + + // Faster map-based version: + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ) // For each row... + { + row_type& row = (*it).second; + + for(int i(nCol), iEnd(nCol + (int)nCount); i < iEnd; i++) + { + typename CellMap::iterator it1(row.mCellRow.find(i)); + + if(it1 != row.mCellRow.end()) // If the col is present... + { + row.mCellRow.erase(it1); + mnSize--; + } + } + + if(row.mCellRow.empty()) + mRowMap.erase(it++); + else + ++it; + } + + return true; + } + + + template + inline bool sparse_matrix::clear() + { + mRowMap.clear(); // Clear out the map of maps. + mnSize = 0; + return true; + } + + + template + void sparse_matrix::insert(int nRow, int nCol, const T& t, T* pPreviousT) + { + EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); + EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); + + typename RowMap::iterator it(mRowMap.find(nRow)); + + if(it != mRowMap.end()) // If the row is already present... 
+ { + row_type& row = (*it).second; + + typename CellMap::iterator it1(row.mCellRow.find(nCol)); + + if(it1 != row.mCellRow.end()) // If the col is already present... + { + cell_type& cell = (*it1).second; + + if(pPreviousT) + *pPreviousT = cell.mValue; + cell.mValue = t; + // Note that we leave 'mnSize' as is. + } + else + { + const typename CellMap::value_type insertionPair(nCol, cell_type(nCol, t)); + row.mCellRow.insert(insertionPair); + mnSize++; + } + } + else // Else the row doesn't exist (and the column in that row doesn't exist either). + { + const typename RowMap::value_type insertionPair(nRow, row_type(nRow)); + + eastl::pair insertionResult = mRowMap.insert(insertionPair); + row_type& row = (*insertionResult.first).second; + + EASTL_ASSERT(row.mRow == nRow); // Make sure we are now on the row we just inserted. + const typename CellMap::value_type insertionPair1(nCol, cell_type(nCol, t)); + row.mCellRow.insert(insertionPair1); // Now add the new cell to the new row. + mnSize++; + } + } + + + template + bool sparse_matrix::IsCellUsed(int nRow, int nCol) + { + EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); + EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); + + typename RowMap::iterator it(mRowMap.find(nRow)); + + if(it != mRowMap.end()) + { + row_type& row = (*it).second; + + typename CellMap::iterator it1(row.mCellRow.find(nCol)); + if(it1 != row.mCellRow.end()) + return true; + } + + return false; + } + + + template + bool sparse_matrix::GetCell(int nRow, int nCol, T* pT) + { + EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && (nCol > kSparseMatrixIntMin / 2)); + + row_type* pRow; + cell_type* pCell; + + if(GetMatrixRow(nRow, pRow)) + { + if(pRow->GetMatrixCol(nCol, pCell)) + { + if(pT) + *pT = pCell->mValue; + return true; + } + } + + return false; + } + + + template + bool sparse_matrix::GetCellPtr(int nRow, int nCol, T** pT) + { + EASTL_ASSERT((nCol < kSparseMatrixIntMax / 2) && 
(nCol > kSparseMatrixIntMin / 2)); + + row_type* pRow; + cell_type* pCell; + + if(GetMatrixRow(nRow, pRow)) + { + if(pRow->GetMatrixCol(nCol, pCell)) + { + if(pT) + *pT = &pCell->mValue; + return true; + } + } + + return false; + } + + + template + bool sparse_matrix::GetMatrixRow(int nRow, row_type*& pRow) + { + EASTL_ASSERT((nRow < kSparseMatrixIntMax / 2) && (nRow > kSparseMatrixIntMin / 2)); + + typename RowMap::iterator it(mRowMap.find(nRow)); + + if(it != mRowMap.end()) + { + row_type& row = (*it).second; + pRow = &row; + return true; + } + + return false; + } + + + /////////////////////////////////////////////////////////////////////////////// + // FindCell + // + // Searches all cells for a match for input data 't'. Writes the cell data into + // the user celldata array. Call with a NULL pCellArray to get the count. + // + // This is a simple search function. Many real-world applications would need a + // slightly more flexible search function or mechanism. + // + template + int sparse_matrix::FindCell(const T& t, UserCell* pCellArray) + { + int nCount(0); + + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) + { + row_type& row = (*it).second; + + for(typename CellMap::iterator it1(row.mCellRow.begin()); it1 != row.mCellRow.end(); ++it1) + { + cell_type& cell = (*it1).second; + + if(cell.mValue == t) + { + if(pCellArray) + { + UserCell& cell = pCellArray[nCount]; + + cell.mCol = cell.mCol; + cell.mRow = row.mRow; + cell.mValue = t; + } + nCount++; + } + } + } + + return nCount; + } + + + /////////////////////////////////////////////////////////////////////////////// + // validate + // + template + bool sparse_matrix::validate() + { + int nPreviousCol; + int nPreviousRow = kRowColIndexNone; + size_type nActualTotalCells = 0; + + for(typename RowMap::iterator it(mRowMap.begin()); it != mRowMap.end(); ++it) + { + row_type& row = (*it).second; + + if(row.mCellRow.empty()) + { + // EASTL_TRACE("sparse_matrix::validate(): Error: Empty 
Cell Row %d.\n", row.mRow); + return false; + } + + nPreviousCol = kRowColIndexNone; + + for(typename CellMap::iterator it1(row.mCellRow.begin()); it1 != row.mCellRow.end(); ++it1) + { + cell_type& cell = (*it1).second; + + if(cell.mCol <= nPreviousCol) + { + // EASTL_TRACE("sparse_matrix::validate(): Error: Columns out of order in row, col: %d, %d.\n", row.mRow, cell.mCol); + return false; + } + + nPreviousCol = cell.mCol; + nActualTotalCells++; + } + + if(row.mRow <= nPreviousRow) + { + // EASTL_TRACE("sparse_matrix::validate(): Error: Rows out of order at row: %d.\n", row.mRow); + return false; + } + + nPreviousRow = row.mRow; + } + + if(mnSize != nActualTotalCells) + { + // EASTL_TRACE("sparse_matrix::validate(): Error: 'mnSize' != counted cells %d != %d\n", mnSize, nActualTotalCells); + return false; + } + + return true; + } + + + template + int sparse_matrix::validate_iterator(const_iterator i) const + { + // To do: Complete this. The value below is a potential false positive. + return (isf_valid | isf_current | isf_can_dereference); + } + + + + + /////////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////////// + + template + bool operator==(sparse_matrix& a, sparse_matrix& b) + { + return (a.mRowMap == b.mRowMap); + } + + template + bool operator<(sparse_matrix& a, sparse_matrix& b) + { + return (a.mRowMap < b.mRowMap); + } + + template + bool operator!=(sparse_matrix& a, sparse_matrix& b) + { + return !(a.mRowMap == b.mRowMap); + } + + template + bool operator>(sparse_matrix& a, sparse_matrix& b) + { + return (b.mRowMap < a.mRowMap); + } + + template + bool operator<=(sparse_matrix& a, sparse_matrix& b) + { + return !(b.mRowMap < a.mRowMap); + } + + template + bool operator>=(sparse_matrix& a, sparse_matrix& b) + { + return !(a.mRowMap < b.mRowMap); + } + + template + void swap(sparse_matrix& a, sparse_matrix& b) + { + a.swap(b); + } + + + +} // 
namespace eastl + +#endif + +#endif // Header include guard + + + + + + + + + + + + + + + diff --git a/include/EASTL/bonus/tuple_vector.h b/include/EASTL/bonus/tuple_vector.h new file mode 100644 index 0000000..e55fb6a --- /dev/null +++ b/include/EASTL/bonus/tuple_vector.h @@ -0,0 +1,1592 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// tuple_vector is a data container that is designed to abstract and simplify +// the handling of a "structure of arrays" layout of data in memory. In +// particular, it mimics the interface of vector, including functionality to do +// inserts, erases, push_backs, and random-access. It also provides a +// RandomAccessIterator and corresponding functionality, making it compatible +// with most STL (and STL-esque) algorithms such as ranged-for loops, find_if, +// remove_if, or sort. + +// When used or applied properly, this container can improve performance of +// some algorithms through cache-coherent data accesses or allowing for +// sensible SIMD programming, while keeping the structure of a single +// container, to permit a developer to continue to use existing algorithms in +// STL and the like. +// +// Consult doc/Bonus/tuple_vector_readme.md for more information. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_TUPLEVECTOR_H +#define EASTL_TUPLEVECTOR_H + +#include +#include +#include +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + +EA_DISABLE_VC_WARNING(4244) // warning C4244: 'conversion from '___' to '___', possible loss of data +EA_DISABLE_VC_WARNING(4623) // warning C4623: default constructor was implicitly defined as deleted +EA_DISABLE_VC_WARNING(4625) // warning C4625: copy constructor was implicitly defined as deleted +EA_DISABLE_VC_WARNING(4510) // warning C4510: default constructor could not be generated + +namespace eastl +{ + /// EASTL_TUPLE_VECTOR_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_TUPLE_VECTOR_DEFAULT_NAME + #define EASTL_TUPLE_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " tuple-vector" // Unless the user overrides something, this is "EASTL tuple-vector". + #endif + + + /// EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR + #define EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR allocator_type(EASTL_TUPLE_VECTOR_DEFAULT_NAME) + #endif + +namespace TupleVecInternal +{ + +// forward declarations +template +struct tuplevec_element; + +template +using tuplevec_element_t = typename tuplevec_element::type; + +template +struct TupleTypes {}; + +template +class TupleVecImpl; + +template +struct TupleRecurser; + +template +struct TupleIndexRecurser; + +template +struct TupleVecLeaf; + +template +struct TupleVecIter; + +// tuplevec_element helper to be able to isolate a type given an index +template +struct tuplevec_element +{ + static_assert(I != I, "tuplevec_element index out of range"); +}; + +template +struct tuplevec_element<0, T, Ts...> +{ + tuplevec_element() = delete; // tuplevec_element should only be used for compile-time assistance, and never be instantiated + typedef T type; +}; + +template +struct tuplevec_element +{ + typedef tuplevec_element_t type; +}; + +// attempt to isolate index given a type +template +struct tuplevec_index +{ +}; + +template +struct tuplevec_index> +{ + typedef void DuplicateTypeCheck; + tuplevec_index() = delete; // 
tuplevec_index should only be used for compile-time assistance, and never be instantiated + static const eastl_size_t index = 0; +}; + +template +struct tuplevec_index> +{ + typedef int DuplicateTypeCheck; + static_assert(is_void>::DuplicateTypeCheck>::value, "duplicate type T in tuple_vector::get(); unique types must be provided in declaration, or only use get()"); + + static const eastl_size_t index = 0; +}; + +template +struct tuplevec_index> +{ + typedef typename tuplevec_index>::DuplicateTypeCheck DuplicateTypeCheck; + static const eastl_size_t index = tuplevec_index>::index + 1; +}; + +template +struct tuplevec_index> : public tuplevec_index> +{ +}; + + +// helper to calculate the layout of the allocations for the tuple of types (esp. to take alignment into account) +template <> +struct TupleRecurser<> +{ + typedef eastl_size_t size_type; + + // This class should never be instantiated. This is just a helper for working with static functions when anonymous functions don't work + // and provide some other utilities + TupleRecurser() = delete; + + static EA_CONSTEXPR size_type GetTotalAlignment() + { + return 0; + } + + static EA_CONSTEXPR size_type GetTotalAllocationSize(size_type capacity, size_type offset) + { + EA_UNUSED(capacity); + return offset; + } + + template + static pair DoAllocate(TupleVecImpl &vec, void** ppNewLeaf, size_type capacity, size_type offset) + { + EA_UNUSED(ppNewLeaf); + + // If n is zero, then we allocate no memory and just return NULL. + // This is fine, as our default ctor initializes with NULL pointers. + size_type alignment = TupleRecurser::GetTotalAlignment(); + void* ptr = capacity ? 
allocate_memory(vec.get_allocator(), offset, alignment, 0) : nullptr; + + #if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY((size_t)ptr & (alignment - 1)) != 0) + { + EASTL_FAIL_MSG("tuple_vector::DoAllocate -- memory not alignment at requested alignment"); + } + #endif + + return make_pair(ptr, offset); + } + + template + static void SetNewData(TupleVecImplType &vec, void* pData, size_type capacity, size_type offset) + { + EA_UNUSED(vec); + EA_UNUSED(pData); + EA_UNUSED(capacity); + EA_UNUSED(offset); + } +}; + +template +struct TupleRecurser : TupleRecurser +{ + typedef eastl_size_t size_type; + + static EA_CONSTEXPR size_type GetTotalAlignment() + { + return max(static_cast(alignof(T)), TupleRecurser::GetTotalAlignment()); + } + + static EA_CONSTEXPR size_type GetTotalAllocationSize(size_type capacity, size_type offset) + { + return TupleRecurser::GetTotalAllocationSize(capacity, CalculateAllocationSize(offset, capacity)); + } + + template + static pair DoAllocate(TupleVecImpl &vec, void** ppNewLeaf, size_type capacity, size_type offset) + { + size_type allocationOffset = CalculatAllocationOffset(offset); + size_type allocationSize = CalculateAllocationSize(offset, capacity); + pair allocation = TupleRecurser::template DoAllocate( + vec, ppNewLeaf, capacity, allocationSize); + ppNewLeaf[I] = (void*)((uintptr_t)(allocation.first) + allocationOffset); + return allocation; + } + + template + static void SetNewData(TupleVecImplType &vec, void* pData, size_type capacity, size_type offset) + { + size_type allocationOffset = CalculatAllocationOffset(offset); + size_type allocationSize = CalculateAllocationSize(offset, capacity); + vec.TupleVecLeaf::mpData = (T*)((uintptr_t)pData + allocationOffset); + TupleRecurser::template SetNewData(vec, pData, capacity, allocationSize); + } + +private: + static EA_CONSTEXPR size_type CalculateAllocationSize(size_type offset, size_type capacity) + { + return CalculatAllocationOffset(offset) + sizeof(T) * capacity; + } + + static 
EA_CONSTEXPR size_type CalculatAllocationOffset(size_type offset) { return (offset + alignof(T) - 1) & (~alignof(T) + 1); } +}; + +template +struct TupleVecLeaf +{ + typedef eastl_size_t size_type; + + void DoUninitializedMoveAndDestruct(const size_type begin, const size_type end, T* pDest) + { + T* pBegin = mpData + begin; + T* pEnd = mpData + end; + eastl::uninitialized_move_ptr_if_noexcept(pBegin, pEnd, pDest); + eastl::destruct(pBegin, pEnd); + } + + void DoInsertAndFill(size_type pos, size_type n, size_type numElements, const T& arg) + { + T* pDest = mpData + pos; + T* pDataEnd = mpData + numElements; + const T temp = arg; + const size_type nExtra = (numElements - pos); + if (n < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before mpEnd)... + { + eastl::uninitialized_move_ptr(pDataEnd - n, pDataEnd, pDataEnd); + eastl::move_backward(pDest, pDataEnd - n, pDataEnd); // We need move_backward because of potential overlap issues. + eastl::fill(pDest, pDest + n, temp); + } + else + { + eastl::uninitialized_fill_n_ptr(pDataEnd, n - nExtra, temp); + eastl::uninitialized_move_ptr(pDest, pDataEnd, pDataEnd + n - nExtra); + eastl::fill(pDest, pDataEnd, temp); + } + } + + void DoInsertRange(T* pSrcBegin, T* pSrcEnd, T* pDestBegin, size_type numDataElements) + { + size_type pos = pDestBegin - mpData; + size_type n = pSrcEnd - pSrcBegin; + T* pDataEnd = mpData + numDataElements; + const size_type nExtra = numDataElements - pos; + if (n < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before mpEnd)... + { + eastl::uninitialized_move_ptr(pDataEnd - n, pDataEnd, pDataEnd); + eastl::move_backward(pDestBegin, pDataEnd - n, pDataEnd); // We need move_backward because of potential overlap issues. 
+ eastl::copy(pSrcBegin, pSrcEnd, pDestBegin); + } + else + { + eastl::uninitialized_copy(pSrcEnd - (n - nExtra), pSrcEnd, pDataEnd); + eastl::uninitialized_move_ptr(pDestBegin, pDataEnd, pDataEnd + n - nExtra); + eastl::copy(pSrcBegin, pSrcEnd - (n - nExtra), pDestBegin); + } + } + + void DoInsertValue(size_type pos, size_type numElements, T&& arg) + { + T* pDest = mpData + pos; + T* pDataEnd = mpData + numElements; + + eastl::uninitialized_move_ptr(pDataEnd - 1, pDataEnd, pDataEnd); + eastl::move_backward(pDest, pDataEnd - 1, pDataEnd); // We need move_backward because of potential overlap issues. + eastl::destruct(pDest); + ::new (pDest) T(eastl::forward(arg)); + } + + T* mpData = nullptr; +}; + +// swallow allows for parameter pack expansion of arguments as means of expanding operations performed +// if a void function is used for operation expansion, it should be wrapped in (..., 0) so that the compiler +// thinks it has a parameter to pass into the function +template +void swallow(Ts&&...) { } + +inline bool variadicAnd(bool cond) { return cond; } + +inline bool variadicAnd(bool cond, bool conds...) { return cond && variadicAnd(conds); } + +// Helper struct to check for strict compatibility between two iterators, whilst still allowing for +// conversion between TupleVecImpl::iterator and TupleVecImpl::const_iterator. 
+template +struct TupleVecIterCompatibleImpl : public false_type { }; + +template<> +struct TupleVecIterCompatibleImpl, TupleTypes<>> : public true_type { }; + +template +struct TupleVecIterCompatibleImpl, TupleTypes> : public integral_constant, TupleTypes>::value && + is_same::type, typename remove_const::type>::value > +{ }; + +template +struct TupleVecIterCompatible; + +template +struct TupleVecIterCompatible, TupleTypes> : + public TupleVecIterCompatibleImpl, TupleTypes> +{ }; + +// The Iterator operates by storing a persistent index internally, +// and resolving the tuple of pointers to the various parts of the original tupleVec when dereferenced. +// While resolving the tuple is a non-zero operation, it consistently generated better code than the alternative of +// storing - and harmoniously updating on each modification - a full tuple of pointers to the tupleVec's data +template +struct TupleVecIter, Ts...> + : public iterator, eastl_size_t, tuple, tuple> +{ +private: + typedef TupleVecIter, Ts...> this_type; + typedef eastl_size_t size_type; + + typedef iterator, eastl_size_t, tuple, tuple> iter_type; + + template + friend struct TupleVecIter; + + template + friend class TupleVecImpl; + + template + friend class move_iterator; +public: + typedef typename iter_type::iterator_category iterator_category; + typedef typename iter_type::value_type value_type; + typedef typename iter_type::difference_type difference_type; + typedef typename iter_type::pointer pointer; + typedef typename iter_type::reference reference; + + TupleVecIter() = default; + + template + TupleVecIter(VecImplType* tupleVec, size_type index) + : mIndex(index) + , mpData{(void*)tupleVec->TupleVecLeaf::mpData...} + { } + + template , TupleTypes>::value, bool>::type> + TupleVecIter(const TupleVecIter& other) + : mIndex(other.mIndex) + , mpData{other.mpData[Indices]...} + { + } + + bool operator==(const TupleVecIter& other) const { return mIndex == other.mIndex && mpData[0] == other.mpData[0]; } 
+ bool operator!=(const TupleVecIter& other) const { return mIndex != other.mIndex || mpData[0] != other.mpData[0]; } + reference operator*() const { return MakeReference(); } + + this_type& operator++() { ++mIndex; return *this; } + this_type operator++(int) + { + this_type temp = *this; + ++mIndex; + return temp; + } + + this_type& operator--() { --mIndex; return *this; } + this_type operator--(int) + { + this_type temp = *this; + --mIndex; + return temp; + } + + this_type& operator+=(difference_type n) { mIndex += n; return *this; } + this_type operator+(difference_type n) const + { + this_type temp = *this; + return temp += n; + } + friend this_type operator+(difference_type n, const this_type& rhs) + { + this_type temp = rhs; + return temp += n; + } + + this_type& operator-=(difference_type n) { mIndex -= n; return *this; } + this_type operator-(difference_type n) const + { + this_type temp = *this; + return temp -= n; + } + friend this_type operator-(difference_type n, const this_type& rhs) + { + this_type temp = rhs; + return temp -= n; + } + + difference_type operator-(const this_type& rhs) const { return mIndex - rhs.mIndex; } + bool operator<(const this_type& rhs) const { return mIndex < rhs.mIndex; } + bool operator>(const this_type& rhs) const { return mIndex > rhs.mIndex; } + bool operator>=(const this_type& rhs) const { return mIndex >= rhs.mIndex; } + bool operator<=(const this_type& rhs) const { return mIndex <= rhs.mIndex; } + + reference operator[](const size_type n) const + { + return *(*this + n); + } + +private: + + value_type MakeValue() const + { + return value_type(((Ts*)mpData[Indices])[mIndex]...); + } + + reference MakeReference() const + { + return reference(((Ts*)mpData[Indices])[mIndex]...); + } + + pointer MakePointer() const + { + return pointer(&((Ts*)mpData[Indices])[mIndex]...); + } + + size_type mIndex = 0; + const void* mpData[sizeof...(Ts)]; +}; + +// TupleVecImpl +template +class TupleVecImpl, Ts...> : public TupleVecLeaf... 
+{ + typedef Allocator allocator_type; + typedef index_sequence index_sequence_type; + typedef TupleVecImpl this_type; + typedef TupleVecImpl const_this_type; + +public: + typedef TupleVecInternal::TupleVecIter iterator; + typedef TupleVecInternal::TupleVecIter const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + typedef eastl_size_t size_type; + typedef eastl::tuple value_tuple; + typedef eastl::tuple reference_tuple; + typedef eastl::tuple const_reference_tuple; + typedef eastl::tuple ptr_tuple; + typedef eastl::tuple const_ptr_tuple; + typedef eastl::tuple rvalue_tuple; + + TupleVecImpl() + : mDataSizeAndAllocator(0, EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + {} + + TupleVecImpl(const allocator_type& allocator) + : mDataSizeAndAllocator(0, allocator) + {} + + TupleVecImpl(this_type&& x) + : mDataSizeAndAllocator(0, eastl::move(x.get_allocator())) + { + swap(x); + } + + TupleVecImpl(this_type&& x, const Allocator& allocator) + : mDataSizeAndAllocator(0, allocator) + { + if (get_allocator() == x.get_allocator()) // If allocators are equivalent, then we can safely swap member-by-member + { + swap(x); + } + else + { + this_type temp(eastl::move(*this)); + temp.swap(x); + } + } + + TupleVecImpl(const this_type& x) + : mDataSizeAndAllocator(0, x.get_allocator()) + { + DoInitFromIterator(x.begin(), x.end()); + } + + template + TupleVecImpl(const TupleVecImpl& x, const Allocator& allocator) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFromIterator(x.begin(), x.end()); + } + + template + TupleVecImpl(move_iterator begin, move_iterator end, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFromIterator(begin, end); + } + + TupleVecImpl(const_iterator begin, const_iterator end, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator ) + { + DoInitFromIterator(begin, 
end); + } + + TupleVecImpl(size_type n, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator) + { + DoInitDefaultFill(n); + } + + TupleVecImpl(size_type n, const Ts&... args) + : mDataSizeAndAllocator(0, EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + { + DoInitFillArgs(n, args...); + } + + TupleVecImpl(size_type n, const Ts&... args, const allocator_type& allocator) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFillArgs(n, args...); + } + + TupleVecImpl(size_type n, const_reference_tuple tup, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFillTuple(n, tup); + } + + TupleVecImpl(const value_tuple* first, const value_tuple* last, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFromTupleArray(first, last); + } + + TupleVecImpl(std::initializer_list iList, const allocator_type& allocator = EASTL_TUPLE_VECTOR_DEFAULT_ALLOCATOR) + : mDataSizeAndAllocator(0, allocator) + { + DoInitFromTupleArray(iList.begin(), iList.end()); + } + +protected: + // ctor to provide a pre-allocated field of data that the container will own, specifically for fixed_tuple_vector + TupleVecImpl(const allocator_type& allocator, void* pData, size_type capacity, size_type dataSize) + : mpData(pData), mNumCapacity(capacity), mDataSizeAndAllocator(dataSize, allocator) + { + TupleRecurser::template SetNewData(*this, mpData, mNumCapacity, 0); + } + +public: + ~TupleVecImpl() + { + swallow((eastl::destruct(TupleVecLeaf::mpData, TupleVecLeaf::mpData + mNumElements), 0)...); + if (mpData) + EASTLFree(get_allocator(), mpData, internalDataSize()); + } + + void assign(size_type n, const Ts&... args) + { + if (n > mNumCapacity) + { + this_type temp(n, args..., get_allocator()); // We have little choice but to reallocate with new memory. 
+ swap(temp); + } + else if (n > mNumElements) // If n > mNumElements ... + { + size_type oldNumElements = mNumElements; + swallow((eastl::fill(TupleVecLeaf::mpData, TupleVecLeaf::mpData + oldNumElements, args), 0)...); + swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf::mpData + oldNumElements, + TupleVecLeaf::mpData + n, args), 0)...); + mNumElements = n; + } + else // else 0 <= n <= mNumElements + { + swallow((eastl::fill(TupleVecLeaf::mpData, TupleVecLeaf::mpData + n, args), 0)...); + erase(begin() + n, end()); + } + } + + void assign(const_iterator first, const_iterator last) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(!validate_iterator_pair(first, last))) + EASTL_FAIL_MSG("tuple_vector::assign -- invalid iterator pair"); +#endif + size_type newNumElements = last - first; + if (newNumElements > mNumCapacity) + { + this_type temp(first, last, get_allocator()); + swap(temp); + } + else + { + const void* ppOtherData[sizeof...(Ts)] = {first.mpData[Indices]...}; + size_type firstIdx = first.mIndex; + size_type lastIdx = last.mIndex; + if (newNumElements > mNumElements) // If n > mNumElements ... 
+ { + size_type oldNumElements = mNumElements; + swallow((eastl::copy((Ts*)(ppOtherData[Indices]) + firstIdx, + (Ts*)(ppOtherData[Indices]) + firstIdx + oldNumElements, + TupleVecLeaf::mpData), 0)...); + swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + firstIdx + oldNumElements, + (Ts*)(ppOtherData[Indices]) + lastIdx, + TupleVecLeaf::mpData + oldNumElements), 0)...); + mNumElements = newNumElements; + } + else // else 0 <= n <= mNumElements + { + swallow((eastl::copy((Ts*)(ppOtherData[Indices]) + firstIdx, (Ts*)(ppOtherData[Indices]) + lastIdx, + TupleVecLeaf::mpData), 0)...); + erase(begin() + newNumElements, end()); + } + } + } + + void assign(const value_tuple* first, const value_tuple* last) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr)) + EASTL_FAIL_MSG("tuple_vector::assign from tuple array -- invalid ptrs"); +#endif + size_type newNumElements = last - first; + if (newNumElements > mNumCapacity) + { + this_type temp(first, last, get_allocator()); + swap(temp); + } + else + { + if (newNumElements > mNumElements) // If n > mNumElements ... + { + size_type oldNumElements = mNumElements; + + DoCopyFromTupleArray(begin(), begin() + oldNumElements, first); + DoUninitializedCopyFromTupleArray(begin() + oldNumElements, begin() + newNumElements, first); + mNumElements = newNumElements; + } + else // else 0 <= n <= mNumElements + { + DoCopyFromTupleArray(begin(), begin() + newNumElements, first); + erase(begin() + newNumElements, end()); + } + } + } + + reference_tuple push_back() + { + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + 1; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + DoGrow(oldNumElements, oldNumCapacity, newNumElements); + swallow(::new(TupleVecLeaf::mpData + oldNumElements) Ts()...); + return back(); + } + + void push_back(const Ts&... 
args) + { + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + 1; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + DoGrow(oldNumElements, oldNumCapacity, newNumElements); + swallow(::new(TupleVecLeaf::mpData + oldNumElements) Ts(args)...); + } + + void push_back_uninitialized() + { + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + 1; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + DoGrow(oldNumElements, oldNumCapacity, newNumElements); + } + + reference_tuple emplace_back(Ts&&... args) + { + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + 1; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + DoGrow(oldNumElements, oldNumCapacity, newNumElements); + swallow(::new(TupleVecLeaf::mpData + oldNumElements) Ts(eastl::forward(args))...); + return back(); + } + + iterator emplace(const_iterator pos, Ts&&... 
args) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::emplace -- invalid iterator"); +#endif + size_type firstIdx = pos - cbegin(); + size_type oldNumElements = mNumElements; + size_type newNumElements = mNumElements + 1; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + if (newNumElements > oldNumCapacity || firstIdx != oldNumElements) + { + if (newNumElements > oldNumCapacity) + { + const size_type newCapacity = max(GetNewCapacity(oldNumCapacity), newNumElements); + + void* ppNewLeaf[sizeof...(Ts)]; + pair allocation = TupleRecurser::template DoAllocate( + *this, ppNewLeaf, newCapacity, 0); + + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + 0, firstIdx, (Ts*)ppNewLeaf[Indices]), 0)...); + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + firstIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + firstIdx + 1), 0)...); + swallow(::new ((Ts*)ppNewLeaf[Indices] + firstIdx) Ts(eastl::forward(args))...); + swallow(TupleVecLeaf::mpData = (Ts*)ppNewLeaf[Indices]...); + + EASTLFree(get_allocator(), mpData, internalDataSize()); + mpData = allocation.first; + mNumCapacity = newCapacity; + internalDataSize() = allocation.second; + } + else + { + swallow((TupleVecLeaf::DoInsertValue(firstIdx, oldNumElements, eastl::forward(args)), 0)...); + } + } + else + { + swallow(::new (TupleVecLeaf::mpData + oldNumElements) Ts(eastl::forward(args))...); + } + return begin() + firstIdx; + } + + iterator insert(const_iterator pos, size_type n, const Ts&... 
args) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator"); +#endif + size_type firstIdx = pos - cbegin(); + size_type lastIdx = firstIdx + n; + size_type oldNumElements = mNumElements; + size_type newNumElements = mNumElements + n; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + if (newNumElements > oldNumCapacity || firstIdx != oldNumElements) + { + if (newNumElements > oldNumCapacity) + { + const size_type newCapacity = max(GetNewCapacity(oldNumCapacity), newNumElements); + + void* ppNewLeaf[sizeof...(Ts)]; + pair allocation = TupleRecurser::template DoAllocate( + *this, ppNewLeaf, newCapacity, 0); + + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + 0, firstIdx, (Ts*)ppNewLeaf[Indices]), 0)...); + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + firstIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + lastIdx), 0)...); + swallow((eastl::uninitialized_fill_ptr((Ts*)ppNewLeaf[Indices] + firstIdx, (Ts*)ppNewLeaf[Indices] + lastIdx, args), 0)...); + swallow(TupleVecLeaf::mpData = (Ts*)ppNewLeaf[Indices]...); + + EASTLFree(get_allocator(), mpData, internalDataSize()); + mpData = allocation.first; + mNumCapacity = newCapacity; + internalDataSize() = allocation.second; + } + else + { + swallow((TupleVecLeaf::DoInsertAndFill(firstIdx, n, oldNumElements, args), 0)...); + } + } + else + { + swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf::mpData + oldNumElements, + TupleVecLeaf::mpData + newNumElements, args), 0)...); + } + return begin() + firstIdx; + } + + iterator insert(const_iterator pos, const_iterator first, const_iterator last) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator"); + if (EASTL_UNLIKELY(!validate_iterator_pair(first, last))) + EASTL_FAIL_MSG("tuple_vector::insert -- invalid iterator pair"); +#endif + size_type 
posIdx = pos - cbegin(); + size_type firstIdx = first.mIndex; + size_type lastIdx = last.mIndex; + size_type numToInsert = last - first; + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + numToInsert; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + const void* ppOtherData[sizeof...(Ts)] = {first.mpData[Indices]...}; + if (newNumElements > oldNumCapacity || posIdx != oldNumElements) + { + if (newNumElements > oldNumCapacity) + { + const size_type newCapacity = max(GetNewCapacity(oldNumCapacity), newNumElements); + + void* ppNewLeaf[sizeof...(Ts)]; + pair allocation = TupleRecurser::template DoAllocate( + *this, ppNewLeaf, newCapacity, 0); + + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + 0, posIdx, (Ts*)ppNewLeaf[Indices]), 0)...); + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + posIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + posIdx + numToInsert), 0)...); + swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + firstIdx, + (Ts*)(ppOtherData[Indices]) + lastIdx, + (Ts*)ppNewLeaf[Indices] + posIdx), 0)...); + swallow(TupleVecLeaf::mpData = (Ts*)ppNewLeaf[Indices]...); + + EASTLFree(get_allocator(), mpData, internalDataSize()); + mpData = allocation.first; + mNumCapacity = newCapacity; + internalDataSize() = allocation.second; + } + else + { + swallow((TupleVecLeaf::DoInsertRange( + (Ts*)(ppOtherData[Indices]) + firstIdx, (Ts*)(ppOtherData[Indices]) + lastIdx, + TupleVecLeaf::mpData + posIdx, oldNumElements), 0)...); + } + } + else + { + swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + firstIdx, + (Ts*)(ppOtherData[Indices]) + lastIdx, + TupleVecLeaf::mpData + posIdx), 0)...); + } + return begin() + posIdx; + } + + iterator insert(const_iterator pos, const value_tuple* first, const value_tuple* last) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::insert -- invalid 
iterator"); + if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr)) + EASTL_FAIL_MSG("tuple_vector::insert -- invalid source pointers"); +#endif + size_type posIdx = pos - cbegin(); + size_type numToInsert = last - first; + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements + numToInsert; + size_type oldNumCapacity = mNumCapacity; + mNumElements = newNumElements; + if (newNumElements > oldNumCapacity || posIdx != oldNumElements) + { + if (newNumElements > oldNumCapacity) + { + const size_type newCapacity = max(GetNewCapacity(oldNumCapacity), newNumElements); + + void* ppNewLeaf[sizeof...(Ts)]; + pair allocation = TupleRecurser::template DoAllocate( + *this, ppNewLeaf, newCapacity, 0); + + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + 0, posIdx, (Ts*)ppNewLeaf[Indices]), 0)...); + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct( + posIdx, oldNumElements, (Ts*)ppNewLeaf[Indices] + posIdx + numToInsert), 0)...); + + swallow(TupleVecLeaf::mpData = (Ts*)ppNewLeaf[Indices]...); + + // Do this after mpData is updated so that we can use new iterators + DoUninitializedCopyFromTupleArray(begin() + posIdx, begin() + posIdx + numToInsert, first); + + EASTLFree(get_allocator(), mpData, internalDataSize()); + mpData = allocation.first; + mNumCapacity = newCapacity; + internalDataSize() = allocation.second; + } + else + { + const size_type nExtra = oldNumElements - posIdx; + void* ppDataEnd[sizeof...(Ts)] = { (void*)(TupleVecLeaf::mpData + oldNumElements)... }; + void* ppDataBegin[sizeof...(Ts)] = { (void*)(TupleVecLeaf::mpData + posIdx)... }; + if (numToInsert < nExtra) // If the inserted values are entirely within initialized memory (i.e. are before mpEnd)... + { + swallow((eastl::uninitialized_move_ptr((Ts*)ppDataEnd[Indices] - numToInsert, + (Ts*)ppDataEnd[Indices], (Ts*)ppDataEnd[Indices]), 0)...); + // We need move_backward because of potential overlap issues. 
+ swallow((eastl::move_backward((Ts*)ppDataBegin[Indices], + (Ts*)ppDataEnd[Indices] - numToInsert, (Ts*)ppDataEnd[Indices]), 0)...); + + DoCopyFromTupleArray(pos, pos + numToInsert, first); + } + else + { + size_type numToInitialize = numToInsert - nExtra; + swallow((eastl::uninitialized_move_ptr((Ts*)ppDataBegin[Indices], + (Ts*)ppDataEnd[Indices], (Ts*)ppDataEnd[Indices] + numToInitialize), 0)...); + + DoCopyFromTupleArray(pos, begin() + oldNumElements, first); + DoUninitializedCopyFromTupleArray(begin() + oldNumElements, pos + numToInsert, first + nExtra); + } + } + } + else + { + DoUninitializedCopyFromTupleArray(pos, pos + numToInsert, first); + } + return begin() + posIdx; + } + + iterator erase(const_iterator first, const_iterator last) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(first) == isf_none || validate_iterator(last) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator"); + if (EASTL_UNLIKELY(!validate_iterator_pair(first, last))) + EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator pair"); +#endif + if (first != last) + { + size_type firstIdx = first - cbegin(); + size_type lastIdx = last - cbegin(); + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements - (lastIdx - firstIdx); + mNumElements = newNumElements; + swallow((eastl::move(TupleVecLeaf::mpData + lastIdx, + TupleVecLeaf::mpData + oldNumElements, + TupleVecLeaf::mpData + firstIdx), 0)...); + swallow((eastl::destruct(TupleVecLeaf::mpData + newNumElements, + TupleVecLeaf::mpData + oldNumElements), 0)...); + } + return begin() + first.mIndex; + } + + iterator erase_unsorted(const_iterator pos) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(validate_iterator(pos) == isf_none)) + EASTL_FAIL_MSG("tuple_vector::erase_unsorted -- invalid iterator"); +#endif + size_type oldNumElements = mNumElements; + size_type newNumElements = oldNumElements - 1; + mNumElements = newNumElements; + 
swallow((eastl::move(TupleVecLeaf::mpData + newNumElements, + TupleVecLeaf::mpData + oldNumElements, + TupleVecLeaf::mpData + (pos - begin())), 0)...); + swallow((eastl::destruct(TupleVecLeaf::mpData + newNumElements, + TupleVecLeaf::mpData + oldNumElements), 0)...); + return begin() + pos.mIndex; + } + + void resize(size_type n) + { + size_type oldNumElements = mNumElements; + size_type oldNumCapacity = mNumCapacity; + mNumElements = n; + if (n > oldNumElements) + { + if (n > oldNumCapacity) + { + DoReallocate(oldNumElements, eastl::max(GetNewCapacity(oldNumCapacity), n)); + } + swallow((eastl::uninitialized_default_fill_n(TupleVecLeaf::mpData + oldNumElements, n - oldNumElements), 0)...); + } + else + { + swallow((eastl::destruct(TupleVecLeaf::mpData + n, + TupleVecLeaf::mpData + oldNumElements), 0)...); + } + } + + void resize(size_type n, const Ts&... args) + { + size_type oldNumElements = mNumElements; + size_type oldNumCapacity = mNumCapacity; + mNumElements = n; + if (n > oldNumElements) + { + if (n > oldNumCapacity) + { + DoReallocate(oldNumElements, eastl::max(GetNewCapacity(oldNumCapacity), n)); + } + swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf::mpData + oldNumElements, + TupleVecLeaf::mpData + n, args), 0)...); + } + else + { + swallow((eastl::destruct(TupleVecLeaf::mpData + n, + TupleVecLeaf::mpData + oldNumElements), 0)...); + } + } + + void reserve(size_type n) + { + DoConditionalReallocate(mNumElements, mNumCapacity, n); + } + + void shrink_to_fit() + { + this_type temp(move_iterator(begin()), move_iterator(end()), get_allocator()); + swap(temp); + } + + void clear() EA_NOEXCEPT + { + size_type oldNumElements = mNumElements; + mNumElements = 0; + swallow((eastl::destruct(TupleVecLeaf::mpData, TupleVecLeaf::mpData + oldNumElements), 0)...); + } + + void pop_back() + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(mNumElements <= 0)) + EASTL_FAIL_MSG("tuple_vector::pop_back -- container is empty"); +#endif + size_type oldNumElements = 
mNumElements--; + swallow((eastl::destruct(TupleVecLeaf::mpData + oldNumElements - 1, + TupleVecLeaf::mpData + oldNumElements), 0)...); + } + + void swap(this_type& x) + { + swallow((eastl::swap(TupleVecLeaf::mpData, x.TupleVecLeaf::mpData), 0)...); + eastl::swap(mpData, x.mpData); + eastl::swap(mNumElements, x.mNumElements); + eastl::swap(mNumCapacity, x.mNumCapacity); + eastl::swap(get_allocator(), x.get_allocator()); + eastl::swap(internalDataSize(), x.internalDataSize()); + } + + void assign(size_type n, const_reference_tuple tup) { assign(n, eastl::get(tup)...); } + void assign(std::initializer_list iList) { assign(iList.begin(), iList.end()); } + + void push_back(Ts&&... args) { emplace_back(eastl::forward(args)...); } + void push_back(const_reference_tuple tup) { push_back(eastl::get(tup)...); } + void push_back(rvalue_tuple tup) { emplace_back(eastl::forward(eastl::get(tup))...); } + + void emplace_back(rvalue_tuple tup) { emplace_back(eastl::forward(eastl::get(tup))...); } + void emplace(const_iterator pos, rvalue_tuple tup) { emplace(pos, eastl::forward(eastl::get(tup))...); } + + iterator insert(const_iterator pos, const Ts&... args) { return insert(pos, 1, args...); } + iterator insert(const_iterator pos, Ts&&... 
args) { return emplace(pos, eastl::forward(args)...); } + iterator insert(const_iterator pos, rvalue_tuple tup) { return emplace(pos, eastl::forward(eastl::get(tup))...); } + iterator insert(const_iterator pos, const_reference_tuple tup) { return insert(pos, eastl::get(tup)...); } + iterator insert(const_iterator pos, size_type n, const_reference_tuple tup) { return insert(pos, n, eastl::get(tup)...); } + iterator insert(const_iterator pos, std::initializer_list iList) { return insert(pos, iList.begin(), iList.end()); } + + iterator erase(const_iterator pos) { return erase(pos, pos + 1); } + reverse_iterator erase(const_reverse_iterator pos) { return reverse_iterator(erase((pos + 1).base(), (pos).base())); } + reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last) { return reverse_iterator(erase((last).base(), (first).base())); } + reverse_iterator erase_unsorted(const_reverse_iterator pos) { return reverse_iterator(erase_unsorted((pos + 1).base())); } + + void resize(size_type n, const_reference_tuple tup) { resize(n, eastl::get(tup)...); } + + bool empty() const EA_NOEXCEPT { return mNumElements == 0; } + size_type size() const EA_NOEXCEPT { return mNumElements; } + size_type capacity() const EA_NOEXCEPT { return mNumCapacity; } + + iterator begin() EA_NOEXCEPT { return iterator(this, 0); } + const_iterator begin() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), 0); } + const_iterator cbegin() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), 0); } + + iterator end() EA_NOEXCEPT { return iterator(this, size()); } + const_iterator end() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), size()); } + const_iterator cend() const EA_NOEXCEPT { return const_iterator((const_this_type*)(this), size()); } + + reverse_iterator rbegin() EA_NOEXCEPT { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const EA_NOEXCEPT { return const_reverse_iterator(end()); } + 
const_reverse_iterator crbegin() const EA_NOEXCEPT { return const_reverse_iterator(end()); } + + reverse_iterator rend() EA_NOEXCEPT { return reverse_iterator(begin()); } + const_reverse_iterator rend() const EA_NOEXCEPT { return const_reverse_iterator(begin()); } + const_reverse_iterator crend() const EA_NOEXCEPT { return const_reverse_iterator(begin()); } + + ptr_tuple data() EA_NOEXCEPT { return ptr_tuple(TupleVecLeaf::mpData...); } + const_ptr_tuple data() const EA_NOEXCEPT { return const_ptr_tuple(TupleVecLeaf::mpData...); } + + reference_tuple at(size_type n) + { +#if EASTL_EXCEPTIONS_ENABLED + if (EASTL_UNLIKELY(n >= mNumElements)) + throw std::out_of_range("tuple_vector::at -- out of range"); +#elif EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(n >= mNumElements)) + EASTL_FAIL_MSG("tuple_vector::at -- out of range"); +#endif + return reference_tuple(*(TupleVecLeaf::mpData + n)...); + } + + const_reference_tuple at(size_type n) const + { +#if EASTL_EXCEPTIONS_ENABLED + if (EASTL_UNLIKELY(n >= mNumElements)) + throw std::out_of_range("tuple_vector::at -- out of range"); +#elif EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(n >= mNumElements)) + EASTL_FAIL_MSG("tuple_vector::at -- out of range"); +#endif + return const_reference_tuple(*(TupleVecLeaf::mpData + n)...); + } + + reference_tuple operator[](size_type n) { return at(n); } + const_reference_tuple operator[](size_type n) const { return at(n); } + + reference_tuple front() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container. + EASTL_FAIL_MSG("tuple_vector::front -- empty vector"); + #else + // We allow the user to reference an empty container. + #endif + + return at(0); + } + + const_reference_tuple front() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container. 
+ EASTL_FAIL_MSG("tuple_vector::front -- empty vector"); + #else + // We allow the user to reference an empty container. + #endif + + return at(0); + } + + reference_tuple back() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container. + EASTL_FAIL_MSG("tuple_vector::back -- empty vector"); + #else + // We allow the user to reference an empty container. + #endif + + return at(size() - 1); + } + + const_reference_tuple back() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(mNumElements == 0)) // We don't allow the user to reference an empty container. + EASTL_FAIL_MSG("tuple_vector::back -- empty vector"); + #else + // We allow the user to reference an empty container. + #endif + + return at(size() - 1); + } + + template + tuplevec_element_t* get() + { + typedef tuplevec_element_t Element; + return TupleVecLeaf::mpData; + } + template + const tuplevec_element_t* get() const + { + typedef tuplevec_element_t Element; + return TupleVecLeaf::mpData; + } + + template + T* get() + { + typedef tuplevec_index> Index; + return TupleVecLeaf::mpData; + } + template + const T* get() const + { + typedef tuplevec_index> Index; + return TupleVecLeaf::mpData; + } + + this_type& operator=(const this_type& other) + { + if (this != &other) + { + clear(); + assign(other.begin(), other.end()); + } + return *this; + } + + this_type& operator=(this_type&& other) + { + if (this != &other) + { + swap(other); + } + return *this; + } + + this_type& operator=(std::initializer_list iList) + { + assign(iList.begin(), iList.end()); + return *this; + } + + bool validate() const EA_NOEXCEPT + { + if (mNumElements > mNumCapacity) + return false; + if (!(variadicAnd(mpData <= TupleVecLeaf::mpData...))) + return false; + void* pDataEnd = (void*)((uintptr_t)mpData + internalDataSize()); + if (!(variadicAnd(pDataEnd >= 
TupleVecLeaf::mpData...))) + return false; + return true; + } + + int validate_iterator(const_iterator iter) const EA_NOEXCEPT + { + if (!(variadicAnd(iter.mpData[Indices] == TupleVecLeaf::mpData...))) + return isf_none; + if (iter.mIndex < mNumElements) + return (isf_valid | isf_current | isf_can_dereference); + if (iter.mIndex <= mNumElements) + return (isf_valid | isf_current); + return isf_none; + } + + static bool validate_iterator_pair(const_iterator first, const_iterator last) EA_NOEXCEPT + { + return (first.mIndex <= last.mIndex) && variadicAnd(first.mpData[Indices] == last.mpData[Indices]...); + } + + template ::value, bool>::type> + int validate_iterator(Iterator iter) const EA_NOEXCEPT { return validate_iterator(unwrap_iterator(iter)); } + + template ::value, bool>::type> + static bool validate_iterator_pair(Iterator first, Iterator last) EA_NOEXCEPT { return validate_iterator_pair(unwrap_iterator(first), unwrap_iterator(last)); } + + allocator_type& get_allocator() EA_NOEXCEPT { return mDataSizeAndAllocator.second(); } + const allocator_type& get_allocator() const EA_NOEXCEPT { return mDataSizeAndAllocator.second(); } + + void set_allocator(const allocator_type& alloc) { mDataSizeAndAllocator.second() = alloc; } + +protected: + + void* mpData = nullptr; + size_type mNumElements = 0; + size_type mNumCapacity = 0; + + compressed_pair mDataSizeAndAllocator; + + size_type& internalDataSize() EA_NOEXCEPT { return mDataSizeAndAllocator.first(); } + size_type const& internalDataSize() const EA_NOEXCEPT { return mDataSizeAndAllocator.first(); } + + friend struct TupleRecurser<>; + template + friend struct TupleRecurser; + + template + void DoInitFromIterator(move_iterator begin, move_iterator end) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(!validate_iterator_pair(begin, end))) + EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator pair"); +#endif + size_type newNumElements = (size_type)(end - begin); + const void* ppOtherData[sizeof...(Ts)] = { 
begin.base().mpData[Indices]... }; + size_type beginIdx = begin.base().mIndex; + size_type endIdx = end.base().mIndex; + DoConditionalReallocate(0, mNumCapacity, newNumElements); + mNumElements = newNumElements; + swallow((eastl::uninitialized_move_ptr(eastl::move_iterator((Ts*)(ppOtherData[Indices]) + beginIdx), + eastl::move_iterator((Ts*)(ppOtherData[Indices]) + endIdx), + TupleVecLeaf::mpData), 0)...); + } + + void DoInitFromIterator(const_iterator begin, const_iterator end) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(!validate_iterator_pair(begin, end))) + EASTL_FAIL_MSG("tuple_vector::erase -- invalid iterator pair"); +#endif + size_type newNumElements = (size_type)(end - begin); + const void* ppOtherData[sizeof...(Ts)] = { begin.mpData[Indices]... }; + size_type beginIdx = begin.mIndex; + size_type endIdx = end.mIndex; + DoConditionalReallocate(0, mNumCapacity, newNumElements); + mNumElements = newNumElements; + swallow((eastl::uninitialized_copy_ptr((Ts*)(ppOtherData[Indices]) + beginIdx, + (Ts*)(ppOtherData[Indices]) + endIdx, + TupleVecLeaf::mpData), 0)...); + } + + void DoInitFillTuple(size_type n, const_reference_tuple tup) { DoInitFillArgs(n, eastl::get(tup)...); } + + void DoInitFillArgs(size_type n, const Ts&... 
args) + { + DoConditionalReallocate(0, mNumCapacity, n); + mNumElements = n; + swallow((eastl::uninitialized_fill_ptr(TupleVecLeaf::mpData, TupleVecLeaf::mpData + n, args), 0)...); + } + + void DoInitDefaultFill(size_type n) + { + DoConditionalReallocate(0, mNumCapacity, n); + mNumElements = n; + swallow((eastl::uninitialized_default_fill_n(TupleVecLeaf::mpData, n), 0)...); + } + + void DoInitFromTupleArray(const value_tuple* first, const value_tuple* last) + { +#if EASTL_ASSERT_ENABLED + if (EASTL_UNLIKELY(first > last || first == nullptr || last == nullptr)) + EASTL_FAIL_MSG("tuple_vector::ctor from tuple array -- invalid ptrs"); +#endif + size_type newNumElements = last - first; + DoConditionalReallocate(0, mNumCapacity, newNumElements); + mNumElements = newNumElements; + DoUninitializedCopyFromTupleArray(begin(), end(), first); + } + + void DoCopyFromTupleArray(iterator destPos, iterator destEnd, const value_tuple* srcTuple) + { + // assign to constructed region + while (destPos < destEnd) + { + *destPos = *srcTuple; + ++destPos; + ++srcTuple; + } + } + + void DoUninitializedCopyFromTupleArray(iterator destPos, iterator destEnd, const value_tuple* srcTuple) + { + // placement-new/copy-ctor to unconstructed regions + while (destPos < destEnd) + { + swallow(::new(eastl::get(destPos.MakePointer())) Ts(eastl::get(*srcTuple))...); + ++destPos; + ++srcTuple; + } + } + + // Try to grow the size of the container "naturally" given the number of elements being used + void DoGrow(size_type oldNumElements, size_type oldNumCapacity, size_type requiredCapacity) + { + if (requiredCapacity > oldNumCapacity) + DoReallocate(oldNumElements, GetNewCapacity(requiredCapacity)); + } + + // Reallocate to the newCapacity (IFF it's actually larger, though) + void DoConditionalReallocate(size_type oldNumElements, size_type oldNumCapacity, size_type requiredCapacity) + { + if (requiredCapacity > oldNumCapacity) + DoReallocate(oldNumElements, requiredCapacity); + } + + void 
DoReallocate(size_type oldNumElements, size_type requiredCapacity) + { + void* ppNewLeaf[sizeof...(Ts)]; + pair allocation = TupleRecurser::template DoAllocate( + *this, ppNewLeaf, requiredCapacity, 0); + swallow((TupleVecLeaf::DoUninitializedMoveAndDestruct(0, oldNumElements, (Ts*)ppNewLeaf[Indices]), 0)...); + swallow(TupleVecLeaf::mpData = (Ts*)ppNewLeaf[Indices]...); + + EASTLFree(get_allocator(), mpData, internalDataSize()); + mpData = allocation.first; + mNumCapacity = requiredCapacity; + internalDataSize() = allocation.second; + } + + size_type GetNewCapacity(size_type oldNumCapacity) + { + return (oldNumCapacity > 0) ? (2 * oldNumCapacity) : 1; + } +}; + +} // namespace TupleVecInternal + +// Move_iterator specialization for TupleVecIter. +// An rvalue reference of a move_iterator would normaly be "tuple &&" whereas +// what we actually want is "tuple". This specialization gives us that. +template +class move_iterator, Ts...>> +{ +public: + typedef TupleVecInternal::TupleVecIter, Ts...> iterator_type; + typedef iterator_type wrapped_iterator_type; // This is not in the C++ Standard; it's used by use to identify it as + // a wrapping iterator type. 
+ typedef iterator_traits traits_type; + typedef typename traits_type::iterator_category iterator_category; + typedef typename traits_type::value_type value_type; + typedef typename traits_type::difference_type difference_type; + typedef typename traits_type::pointer pointer; + typedef tuple reference; + typedef move_iterator this_type; + +protected: + iterator_type mIterator; + +public: + move_iterator() : mIterator() {} + explicit move_iterator(iterator_type mi) : mIterator(mi) {} + + template + move_iterator(const move_iterator& mi) : mIterator(mi.base()) {} + + iterator_type base() const { return mIterator; } + reference operator*() const { return eastl::move(MakeReference()); } + pointer operator->() const { return mIterator; } + + this_type& operator++() { ++mIterator; return *this; } + this_type operator++(int) { + this_type tempMoveIterator = *this; + ++mIterator; + return tempMoveIterator; + } + + this_type& operator--() { --mIterator; return *this; } + this_type operator--(int) + { + this_type tempMoveIterator = *this; + --mIterator; + return tempMoveIterator; + } + + this_type operator+(difference_type n) const { return move_iterator(mIterator + n); } + this_type& operator+=(difference_type n) + { + mIterator += n; + return *this; + } + + this_type operator-(difference_type n) const { return move_iterator(mIterator - n); } + this_type& operator-=(difference_type n) + { + mIterator -= n; + return *this; + } + + difference_type operator-(const this_type& rhs) const { return mIterator - rhs.mIterator; } + bool operator<(const this_type& rhs) const { return mIterator < rhs.mIterator; } + bool operator>(const this_type& rhs) const { return mIterator > rhs.mIterator; } + bool operator>=(const this_type& rhs) const { return mIterator >= rhs.mIterator; } + bool operator<=(const this_type& rhs) const { return mIterator <= rhs.mIterator; } + + reference operator[](difference_type n) const { return *(*this + n); } + +private: + reference MakeReference() const + { + 
return reference(eastl::move(((Ts*)mIterator.mpData[Indices])[mIterator.mIndex])...); + } +}; + +template +inline bool operator==(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return ((a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin())); +} + +template +inline bool operator!=(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return ((a.size() != b.size()) || !eastl::equal(a.begin(), a.end(), b.begin())); +} + +template +inline bool operator<(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); +} + +template +inline bool operator>(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return b < a; +} + +template +inline bool operator<=(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return !(b < a); +} + +template +inline bool operator>=(const TupleVecInternal::TupleVecImpl& a, + const TupleVecInternal::TupleVecImpl& b) +{ + return !(a < b); +} + +template +inline void swap(TupleVecInternal::TupleVecImpl& a, + TupleVecInternal::TupleVecImpl& b) +{ + a.swap(b); +} + +// A customization of swap is made for r-values of tuples-of-references - +// normally, swapping rvalues doesn't make sense, but in this case, we do want to +// swap the contents of what the tuple-of-references are referring to +// +// This is required due to TupleVecIter returning a value-type for its dereferencing, +// as opposed to an actual real reference of some sort +template +inline +typename enable_if...>::value>::type +swap(tuple&& a, tuple&& b) +{ + a.swap(b); +} + +template +inline +typename enable_if...>::value>::type +swap(tuple&& a, tuple&& b) = delete; + + +// External interface of tuple_vector +template +class tuple_vector : public TupleVecInternal::TupleVecImpl, Ts...> +{ + typedef tuple_vector 
this_type; + typedef TupleVecInternal::TupleVecImpl, Ts...> base_type; + using base_type::base_type; + +public: + this_type& operator=(std::initializer_list iList) + { + base_type::operator=(iList); + return *this; + } +}; + +// Variant of tuple_vector that allows a user-defined allocator type (can't mix default template params with variadics) +template +class tuple_vector_alloc + : public TupleVecInternal::TupleVecImpl, Ts...> +{ + typedef tuple_vector_alloc this_type; + typedef TupleVecInternal::TupleVecImpl, Ts...> base_type; + using base_type::base_type; + +public: + + this_type& operator=(std::initializer_list iList) + { + base_type::operator=(iList); + return *this; + } +}; + +} // namespace eastl + +EA_RESTORE_VC_WARNING() +EA_RESTORE_VC_WARNING() +EA_RESTORE_VC_WARNING() +EA_RESTORE_VC_WARNING() + +#endif // EASTL_TUPLEVECTOR_H diff --git a/include/EASTL/chrono.h b/include/EASTL/chrono.h new file mode 100644 index 0000000..4f8f710 --- /dev/null +++ b/include/EASTL/chrono.h @@ -0,0 +1,741 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// This file implements the eastl::chrono specification which is part of the +// standard STL date and time library. eastl::chrono implements all the +// mechanisms required to capture and manipulate times retrieved from the +// provided clocks. It implements the all of the features to allow type safe +// durations to be used in code. 
+/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_CHRONO_H +#define EASTL_CHRONO_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include +#include + + +// TODO: move to platform specific cpp or header file +#if defined EA_PLATFORM_MICROSOFT + #pragma warning(push, 0) + #ifndef WIN32_LEAN_AND_MEAN + #define WIN32_LEAN_AND_MEAN + #endif + EA_DISABLE_ALL_VC_WARNINGS() + #undef NOMINMAX + #define NOMINMAX + #include + #ifdef min + #undef min + #endif + #ifdef max + #undef max + #endif + EA_RESTORE_ALL_VC_WARNINGS() + #pragma warning(pop) +#endif + +#if defined(EA_PLATFORM_MICROSOFT) && !defined(EA_PLATFORM_MINGW) + // Nothing to do +#elif defined(EA_PLATFORM_SONY) + #include + #include +#elif defined(EA_PLATFORM_APPLE) + #include +#elif defined(EA_PLATFORM_POSIX) || defined(EA_PLATFORM_MINGW) || defined(EA_PLATFORM_ANDROID) + // Posix means Linux, Unix, and Macintosh OSX, among others (including Linux-based mobile platforms). 
+ #if defined(EA_PLATFORM_MINGW) + #include + #endif + #include + #if (defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC)) + #include + #else + #include + #include + #endif +#endif + + +namespace eastl +{ +namespace chrono +{ + /////////////////////////////////////////////////////////////////////////////// + // treat_as_floating_point + /////////////////////////////////////////////////////////////////////////////// + template + struct treat_as_floating_point : is_floating_point {}; + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.4, duration_values + /////////////////////////////////////////////////////////////////////////////// + template + struct duration_values + { + public: + EASTL_FORCE_INLINE static EA_CONSTEXPR Rep zero() { return Rep(0); } + EASTL_FORCE_INLINE static EA_CONSTEXPR Rep max() { return eastl::numeric_limits::max(); } + EASTL_FORCE_INLINE static EA_CONSTEXPR Rep min() { return eastl::numeric_limits::lowest(); } + }; + + + /////////////////////////////////////////////////////////////////////////////// + // duration fwd_decl + /////////////////////////////////////////////////////////////////////////////// + template > + class duration; + + + namespace Internal + { + /////////////////////////////////////////////////////////////////////////////// + // IsRatio + /////////////////////////////////////////////////////////////////////////////// + template struct IsRatio : eastl::false_type {}; + template struct IsRatio> : eastl::true_type {}; + template struct IsRatio> : eastl::true_type {}; + template struct IsRatio> : eastl::true_type {}; + template struct IsRatio> : eastl::true_type {}; + + + /////////////////////////////////////////////////////////////////////////////// + // IsDuration + /////////////////////////////////////////////////////////////////////////////// + template struct IsDuration : eastl::false_type{}; + template struct IsDuration> : eastl::true_type{}; + template struct IsDuration> : 
eastl::true_type{}; + template struct IsDuration> : eastl::true_type{}; + template struct IsDuration> : eastl::true_type{}; + + + /////////////////////////////////////////////////////////////////////////////// + // RatioGCD + /////////////////////////////////////////////////////////////////////////////// + template + struct RatioGCD + { + static_assert(IsRatio::value, "Period1 is not a eastl::ratio type"); + static_assert(IsRatio::value, "Period2 is not a eastl::ratio type"); + + typedef ratio::value, + eastl::Internal::lcm::value> type; + }; + }; + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.5.7, duration_cast + /////////////////////////////////////////////////////////////////////////////// + namespace Internal + { + template ::type, + typename CommonRep = typename eastl::decay::type>::type, + bool = CommonPeriod::num == 1, + bool = CommonPeriod::den == 1> + struct DurationCastImpl; + + template + struct DurationCastImpl + { + inline static ToDuration DoCast(const FromDuration& fd) + { + return ToDuration(static_cast(fd.count())); + } + }; + + template + struct DurationCastImpl + { + inline static ToDuration DoCast(const FromDuration& d) + { + return ToDuration(static_cast(static_cast(d.count()) * + static_cast(CommonPeriod::num))); + } + }; + + template + struct DurationCastImpl + { + inline static ToDuration DoCast(const FromDuration& d) + { + return ToDuration(static_cast(static_cast(d.count()) / + static_cast(CommonPeriod::den))); + } + }; + + template + struct DurationCastImpl + { + inline static ToDuration DoCast(const FromDuration& d) + { + return ToDuration(static_cast(static_cast(d.count()) * + static_cast(CommonPeriod::num) / + static_cast(CommonPeriod::den))); + } + }; + }; // namespace Internal + + + /////////////////////////////////////////////////////////////////////////////// + // duration_cast + /////////////////////////////////////////////////////////////////////////////// + template + inline 
typename eastl::enable_if::value, ToDuration>::type + duration_cast(const duration& d) + { + typedef typename duration::this_type FromDuration; + return Internal::DurationCastImpl::DoCast(d); + } + + + /////////////////////////////////////////////////////////////////////////////// + // duration + /////////////////////////////////////////////////////////////////////////////// + template + class duration + { + Rep mRep; + + public: + typedef Rep rep; + typedef Period period; + typedef duration this_type; + + #if defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS) + EA_CONSTEXPR duration() + : mRep() {} + + duration(const duration& other) + : mRep(Rep(other.mRep)) {} + + duration& operator=(const duration& other) + { mRep = other.mRep; return *this; } + #else + EA_CONSTEXPR duration() = default; + duration(const duration&) = default; + duration& operator=(const duration&) = default; + #endif + + + /////////////////////////////////////////////////////////////////////////////// + // conversion constructors + /////////////////////////////////////////////////////////////////////////////// + template + inline EA_CONSTEXPR explicit duration( + const Rep2& rep2, + typename eastl::enable_if::value && + (treat_as_floating_point::value || + !treat_as_floating_point::value)>::type** = 0) + : mRep(static_cast(rep2)) {} + + + template + EA_CONSTEXPR duration(const duration& d2, + typename eastl::enable_if::value || + (eastl::ratio_divide::type::den == 1 && + !treat_as_floating_point::value), + void>::type** = 0) + : mRep(duration_cast(d2).count()) {} + + /////////////////////////////////////////////////////////////////////////////// + // returns the count of ticks + /////////////////////////////////////////////////////////////////////////////// + EA_CONSTEXPR Rep count() const { return mRep; } + + /////////////////////////////////////////////////////////////////////////////// + // static accessors of special duration values + 
/////////////////////////////////////////////////////////////////////////////// + EA_CONSTEXPR inline static duration zero() { return duration(duration_values::zero()); } + EA_CONSTEXPR inline static duration min() { return duration(duration_values::min()); } + EA_CONSTEXPR inline static duration max() { return duration(duration_values::max()); } + + /////////////////////////////////////////////////////////////////////////////// + // const arithmetic operations + /////////////////////////////////////////////////////////////////////////////// + EA_CONSTEXPR inline duration operator+() const { return *this; } + EA_CONSTEXPR inline duration operator-() const { return duration(0-mRep); } + + /////////////////////////////////////////////////////////////////////////////// + // arithmetic operations + /////////////////////////////////////////////////////////////////////////////// + inline duration operator++(int) { return duration(mRep++); } + inline duration operator--(int) { return duration(mRep--); } + inline duration& operator++() { ++mRep; return *this; } + inline duration& operator--() { --mRep; return *this; } + inline duration& operator+=(const duration& d) { mRep += d.count(); return *this; } + inline duration& operator-=(const duration& d) { mRep -= d.count(); return *this; } + inline duration& operator*=(const Rep& rhs) { mRep *= rhs; return *this; } + inline duration& operator/=(const Rep& rhs) { mRep /= rhs; return *this; } + inline duration& operator%=(const Rep& rhs) { mRep %= rhs; return *this; } + inline duration& operator%=(const duration& d) { mRep %= d.count(); return *this; } + }; + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.5.5, arithmetic operations with durations as arguments + /////////////////////////////////////////////////////////////////////////////// + template + typename eastl::common_type, duration>::type EASTL_FORCE_INLINE + operator+(const duration& lhs, const duration& rhs) + { + 
typedef typename eastl::common_type, duration>::type common_duration_t; + return common_duration_t(common_duration_t(lhs).count() + common_duration_t(rhs).count()); + } + + template + typename eastl::common_type, duration>::type EASTL_FORCE_INLINE + operator-(const duration& lhs, const duration& rhs) + { + typedef typename eastl::common_type, duration>::type common_duration_t; + return common_duration_t(common_duration_t(lhs).count() - common_duration_t(rhs).count()); + } + + template + duration::type, Period1> EASTL_FORCE_INLINE + operator*(const duration& lhs, const Rep2& rhs) + { + typedef typename duration, Period1>::type common_duration_t; + return common_duration_t(common_duration_t(lhs).count() * rhs); + } + + template + duration::type, Period2> EASTL_FORCE_INLINE + operator*(const Rep1& lhs, const duration& rhs) + { + typedef duration::type, Period2> common_duration_t; + return common_duration_t(lhs * common_duration_t(rhs).count()); + } + + template + duration::type, Period1> EASTL_FORCE_INLINE + operator/(const duration& lhs, const Rep2& rhs) + { + typedef duration::type, Period1> common_duration_t; + return common_duration_t(common_duration_t(lhs).count() / rhs); + } + + template + typename eastl::common_type, duration>::type EASTL_FORCE_INLINE + operator/(const duration& lhs, const duration& rhs) + { + typedef typename eastl::common_type, duration>::type common_duration_t; + return common_duration_t(common_duration_t(lhs).count() / common_duration_t(rhs).count()); + } + + template + duration::type, Period1> EASTL_FORCE_INLINE + operator%(const duration& lhs, const Rep2& rhs) + { + typedef duration::type, Period1> common_duration_t; + return common_duration_t(common_duration_t(lhs).count() % rhs); + } + + template + typename eastl::common_type, duration>::type EASTL_FORCE_INLINE + operator%(const duration& lhs, const duration& rhs) + { + typedef typename eastl::common_type, duration>::type common_duration_t; + return 
common_duration_t(common_duration_t(lhs).count() % common_duration_t(rhs).count()); + } + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.5.6, compares two durations + /////////////////////////////////////////////////////////////////////////////// + template + EASTL_FORCE_INLINE bool operator==(const duration& lhs, + const duration& rhs) + { + typedef typename eastl::common_type, duration>::type common_duration_t; + return common_duration_t(lhs).count() == common_duration_t(rhs).count(); + } + + template + EASTL_FORCE_INLINE bool operator<(const duration& lhs, + const duration& rhs) + { + typedef typename eastl::common_type, duration>::type common_duration_t; + return common_duration_t(lhs).count() < common_duration_t(rhs).count(); + } + + template + EASTL_FORCE_INLINE bool operator!=(const duration& lhs, + const duration& rhs) + { + return !(lhs == rhs); + } + + template + EASTL_FORCE_INLINE bool operator<=(const duration& lhs, + const duration& rhs) + { + return !(rhs < lhs); + } + + template + EASTL_FORCE_INLINE bool operator>(const duration& lhs, + const duration& rhs) + { + return rhs < lhs; + } + + template + EASTL_FORCE_INLINE bool operator>=(const duration& lhs, + const duration& rhs) + { + return !(lhs < rhs); + } + + + /////////////////////////////////////////////////////////////////////////////// + // standard duration units + /////////////////////////////////////////////////////////////////////////////// + typedef duration nanoseconds; + typedef duration microseconds; + typedef duration milliseconds; + typedef duration seconds; + typedef duration> minutes; + typedef duration> hours; + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.6, time_point + /////////////////////////////////////////////////////////////////////////////// + template + class time_point + { + Duration mDuration; + + public: + typedef Clock clock; + typedef Duration duration; + typedef 
typename Duration::rep rep; + typedef typename Duration::period period; + + inline EA_CONSTEXPR time_point() : mDuration(Duration::zero()) {} + EA_CONSTEXPR explicit time_point(const Duration& other) : mDuration(other) {} + + template + inline EA_CONSTEXPR time_point( + const time_point& t, + typename eastl::enable_if::value>::type** = 0) + : mDuration(t.time_since_epoch()) {} + + EA_CONSTEXPR Duration time_since_epoch() const { return mDuration; } + + time_point& operator+=(const Duration& d) { mDuration += d; return *this; } + time_point& operator-=(const Duration& d) { mDuration -= d; return *this; } + + static EA_CONSTEXPR time_point min() { return time_point(Duration::min()); } + static EA_CONSTEXPR time_point max() { return time_point(Duration::max()); } + }; + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.6.5, time_point arithmetic + /////////////////////////////////////////////////////////////////////////////// + template + inline EA_CONSTEXPR time_point>::type> + operator+(const time_point& lhs, const duration& rhs) + { + typedef time_point>::type> common_timepoint_t; + return common_timepoint_t(lhs.time_since_epoch() + rhs); + } + + template + inline EA_CONSTEXPR time_point>::type> + operator+(const duration& lhs, const time_point& rhs) + { + typedef time_point>::type> common_timepoint_t; + return common_timepoint_t(lhs + rhs.time_since_epoch()); + } + + template + inline EA_CONSTEXPR time_point>::type> + operator-(const time_point& lhs, const duration& rhs) + { + typedef time_point>::type> common_timepoint_t; + return common_timepoint_t(lhs.time_since_epoch() - rhs); + } + + template + inline EA_CONSTEXPR typename eastl::common_type::type operator-( + const time_point& lhs, + const time_point& rhs) + { + return lhs.time_since_epoch() - rhs.time_since_epoch(); + } + + template + inline EA_CONSTEXPR bool operator==(const time_point& lhs, + const time_point& rhs) + { + return lhs.time_since_epoch() == 
rhs.time_since_epoch(); + } + + template + inline EA_CONSTEXPR bool operator!=(const time_point& lhs, + const time_point& rhs) + { + return !(lhs == rhs); + } + + template + inline EA_CONSTEXPR bool operator<(const time_point& lhs, const time_point& rhs) + { + return lhs.time_since_epoch() < rhs.time_since_epoch(); + } + + template + inline EA_CONSTEXPR bool operator<=(const time_point& lhs, + const time_point& rhs) + { + return !(rhs < lhs); + } + + template + inline EA_CONSTEXPR bool operator>(const time_point& lhs, const time_point& rhs) + { + return rhs < lhs; + } + + template + inline EA_CONSTEXPR bool operator>=(const time_point& lhs, + const time_point& rhs) + { + return !(lhs < rhs); + } + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.6.7, time_point_cast + /////////////////////////////////////////////////////////////////////////////// + template + EA_CONSTEXPR time_point time_point_cast( + const time_point& t, + typename eastl::enable_if::value>::type** = 0) + { + return time_point(duration_cast(t.time_since_epoch())); + } + + + /////////////////////////////////////////////////////////////////////////////// + // 20.12.7, clocks + /////////////////////////////////////////////////////////////////////////////// + + namespace Internal + { + #if defined(EA_PLATFORM_MICROSOFT) && !defined(EA_PLATFORM_MINGW) + #define EASTL_NS_PER_TICK 1 + #elif defined EA_PLATFORM_SONY + #define EASTL_NS_PER_TICK _XTIME_NSECS_PER_TICK + #elif defined EA_PLATFORM_POSIX + #define EASTL_NS_PER_TICK _XTIME_NSECS_PER_TICK + #else + #define EASTL_NS_PER_TICK 100 + #endif + + #if defined(EA_PLATFORM_POSIX) + typedef chrono::nanoseconds::period SystemClock_Period; + typedef chrono::nanoseconds::period SteadyClock_Period; + #else + typedef eastl::ratio_multiply, nano>::type SystemClock_Period; + typedef eastl::ratio_multiply, nano>::type SteadyClock_Period; + #endif + + + 
/////////////////////////////////////////////////////////////////////////////// + // Internal::GetTicks + /////////////////////////////////////////////////////////////////////////////// + inline uint64_t GetTicks() + { + #if defined EA_PLATFORM_MICROSOFT + auto queryFrequency = [] + { + LARGE_INTEGER frequency; + QueryPerformanceFrequency(&frequency); + return double(1000000000.0L / frequency.QuadPart); // nanoseconds per tick + }; + + auto queryCounter = [] + { + LARGE_INTEGER counter; + QueryPerformanceCounter(&counter); + return counter.QuadPart; + }; + + EA_DISABLE_VC_WARNING(4640) // warning C4640: construction of local static object is not thread-safe (VS2013) + static auto frequency = queryFrequency(); // cache cpu frequency on first call + EA_RESTORE_VC_WARNING() + return uint64_t(frequency * queryCounter()); + #elif defined EA_PLATFORM_SONY + return sceKernelGetProcessTimeCounter(); + #elif defined(EA_PLATFORM_APPLE) + return mach_absolute_time(); + #elif defined(EA_PLATFORM_POSIX) // Posix means Linux, Unix, and Macintosh OSX, among others (including Linux-based mobile platforms). 
+ #if (defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC)) + timespec ts; + int result = clock_gettime(CLOCK_MONOTONIC, &ts); + + if(result == EINVAL + ) + result = clock_gettime(CLOCK_REALTIME, &ts); + + const uint64_t nNanoseconds = (uint64_t)ts.tv_nsec + ((uint64_t)ts.tv_sec * UINT64_C(1000000000)); + return nNanoseconds; + #else + struct timeval tv; + gettimeofday(&tv, NULL); + const uint64_t nMicroseconds = (uint64_t)tv.tv_usec + ((uint64_t)tv.tv_sec * 1000000); + return nMicroseconds; + #endif + #else + #error "chrono not implemented for platform" + #endif + } + } // namespace Internal + + + /////////////////////////////////////////////////////////////////////////////// + // system_clock + /////////////////////////////////////////////////////////////////////////////// + class system_clock + { + public: + typedef long long rep; // signed arithmetic type representing the number of ticks in the clock's duration + typedef Internal::SystemClock_Period period; + typedef chrono::duration duration; // duration, capable of representing negative durations + typedef chrono::time_point time_point; + + // true if the time between ticks is always increases monotonically + EA_CONSTEXPR_OR_CONST static bool is_steady = false; + + // returns a time point representing the current point in time. 
+ static time_point now() EA_NOEXCEPT + { + return time_point(duration(Internal::GetTicks())); + } + }; + + + /////////////////////////////////////////////////////////////////////////////// + // steady_clock + /////////////////////////////////////////////////////////////////////////////// + class steady_clock + { + public: + typedef long long rep; // signed arithmetic type representing the number of ticks in the clock's duration + typedef Internal::SteadyClock_Period period; + typedef chrono::duration duration; // duration, capable of representing negative durations + typedef chrono::time_point time_point; + + // true if the time between ticks is always increases monotonically + EA_CONSTEXPR_OR_CONST static bool is_steady = true; + + // returns a time point representing the current point in time. + static time_point now() EA_NOEXCEPT + { + return time_point(duration(Internal::GetTicks())); + } + }; + + + /////////////////////////////////////////////////////////////////////////////// + // high_resolution_clock + /////////////////////////////////////////////////////////////////////////////// + typedef system_clock high_resolution_clock; + + +} // namespace chrono + + + /////////////////////////////////////////////////////////////////////////////// + // duration common_type specialization + /////////////////////////////////////////////////////////////////////////////// + template + struct common_type, chrono::duration> + { + typedef chrono::duration::type>::type, + typename chrono::Internal::RatioGCD::type> type; + }; + + + /////////////////////////////////////////////////////////////////////////////// + // time_point common_type specialization + /////////////////////////////////////////////////////////////////////////////// + template + struct common_type, chrono::time_point> + { + typedef chrono::time_point::type> type; + }; + + + /////////////////////////////////////////////////////////////////////////////// + // chrono_literals + 
/////////////////////////////////////////////////////////////////////////////// + #if EASTL_USER_LITERALS_ENABLED && EASTL_INLINE_NAMESPACES_ENABLED + EA_DISABLE_VC_WARNING(4455) // disable warning C4455: literal suffix identifiers that do not start with an underscore are reserved + inline namespace literals + { + inline namespace chrono_literals + { + /////////////////////////////////////////////////////////////////////////////// + // integer chrono literals + /////////////////////////////////////////////////////////////////////////////// + EA_CONSTEXPR chrono::hours operator"" h(unsigned long long h) { return chrono::hours(h); } + EA_CONSTEXPR chrono::minutes operator"" min(unsigned long long m) { return chrono::minutes(m); } + EA_CONSTEXPR chrono::seconds operator"" s(unsigned long long s) { return chrono::seconds(s); } + EA_CONSTEXPR chrono::milliseconds operator"" ms(unsigned long long ms) { return chrono::milliseconds(ms); } + EA_CONSTEXPR chrono::microseconds operator"" us(unsigned long long us) { return chrono::microseconds(us); } + EA_CONSTEXPR chrono::nanoseconds operator"" ns(unsigned long long ns) { return chrono::nanoseconds(ns); } + + /////////////////////////////////////////////////////////////////////////////// + // float chrono literals + /////////////////////////////////////////////////////////////////////////////// + EA_CONSTEXPR chrono::duration> operator"" h(long double h) + { return chrono::duration>(h); } + EA_CONSTEXPR chrono::duration> operator"" min(long double m) + { return chrono::duration>(m); } + EA_CONSTEXPR chrono::duration operator"" s(long double s) + { return chrono::duration(s); } + EA_CONSTEXPR chrono::duration operator"" ms(long double ms) + { return chrono::duration(ms); } + EA_CONSTEXPR chrono::duration operator"" us(long double us) + { return chrono::duration(us); } + EA_CONSTEXPR chrono::duration operator"" ns(long double ns) + { return chrono::duration(ns); } + + } // namespace chrono_literals + }// namespace literals + 
EA_RESTORE_VC_WARNING() // warning: 4455 + #endif + +} // namespace eastl + + +#if EASTL_USER_LITERALS_ENABLED && EASTL_INLINE_NAMESPACES_ENABLED +namespace chrono +{ + using namespace eastl::literals::chrono_literals; +} // namespace chrono +#endif + + +#endif diff --git a/include/EASTL/core_allocator.h b/include/EASTL/core_allocator.h new file mode 100644 index 0000000..e437491 --- /dev/null +++ b/include/EASTL/core_allocator.h @@ -0,0 +1,70 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_CORE_ALLOCATOR_H +#define EASTL_CORE_ALLOCATOR_H + +#if EASTL_CORE_ALLOCATOR_ENABLED + +#include + +namespace EA +{ + namespace Allocator + { + /// EASTLCoreAllocatorImpl + /// + /// EASTL provides an out of the box implementation of the + /// ICoreAllocator interface. This is provided as a convenience for + /// users who wish to provide ICoreAllocator implementations for EASTL to use. + /// + /// EASTL has a dependency on coreallocator so to provide an out of + /// the box implementation for EASTLCoreAlloctor and EASTLCoreDeleter + /// that can be used and tested. Historically we could not test + /// ICoreAllocator interface because we relied on the code being linked + /// in user code. 
+ /// + + class EASTLCoreAllocatorImpl : public ICoreAllocator + { + public: + virtual void* Alloc(size_t size, const char* name, unsigned int flags) + { + return ::operator new[](size, name, flags, 0, __FILE__, __LINE__); + } + + virtual void* Alloc(size_t size, const char* name, unsigned int flags, unsigned int alignment, unsigned int alignOffset = 0) + { + return ::operator new[](size, alignment, alignOffset, name, flags, 0, __FILE__, __LINE__); + } + + virtual void Free(void* ptr, size_t size = 0) + { + ::operator delete(static_cast(ptr)); + } + + virtual void* AllocDebug(size_t size, const DebugParams debugParams, unsigned int flags) + { + return Alloc(size, debugParams.mName, flags); + } + + virtual void* AllocDebug(size_t size, const DebugParams debugParams, unsigned int flags, unsigned int align, unsigned int alignOffset = 0) + { + return Alloc(size, debugParams.mName, flags, align, alignOffset); + } + + static EASTLCoreAllocatorImpl* GetDefaultAllocator(); + }; + + inline EASTLCoreAllocatorImpl* EASTLCoreAllocatorImpl::GetDefaultAllocator() + { + static EASTLCoreAllocatorImpl allocator; + return &allocator; + } + } +} + +#endif // EASTL_CORE_ALLOCATOR_ENABLED +#endif // EASTL_CORE_ALLOCATOR_H + diff --git a/include/EASTL/core_allocator_adapter.h b/include/EASTL/core_allocator_adapter.h new file mode 100644 index 0000000..d6f1827 --- /dev/null +++ b/include/EASTL/core_allocator_adapter.h @@ -0,0 +1,368 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// Implements an EASTL allocator that uses an ICoreAllocator. +// However, this header file is not dependent on ICoreAllocator or its package. 
+/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_CORE_ALLOCATOR_ADAPTER_H +#define EASTL_CORE_ALLOCATOR_ADAPTER_H + +#if EASTL_CORE_ALLOCATOR_ENABLED + + +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + +/// EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR +/// +/// This allows the application to override the default name for the default global core allocator. +/// However, you must be careful in your usage of this, as if this file is shared between uses then +/// you will need to be careful that your override of this doesn't conflict with others. +/// +#ifndef EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR + #define EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR AllocatorType::GetDefaultAllocator +#endif + + + +namespace EA +{ + namespace Allocator + { + /// CoreAllocatorAdapter + /// + /// Implements the EASTL allocator interface. + /// Allocates memory from an instance of ICoreAllocator or another class with an equivalent interface. + /// ICoreAllocator is a pure-virtual memory allocation interface used by a number of EA games and + /// shared libraries. It's completely unrelated to EASTL, but it's prevalent enough that it's useful + /// for EASTL to have a built-in adapter for this interface. ICoreAllocator is declared in the + /// CoreAllocator package icoreallocator_interface.h header, but CoreAllocatorAdapter can work with + /// any equivalent interface, as defined below. 
+ /// + /// Expected interface: + /// enum AllocFlags { + /// kFlagTempMemory = 0, + /// kFlagPermMemory = 1 + /// }; + /// + /// struct CoreAllocator { + /// void* Alloc(size_t size, const char* name, unsigned int allocFlags); + /// void* Alloc(size_t size, const char* name, unsigned int allocFlags, // Not required unless you are working with types that require custom alignment. + /// unsigned int align, unsigned int alignOffset = 0); + /// void Free(void* block, size_t size = 0); + /// static CoreAllocator* GetDefaultAllocator(); + /// }; + /// + /// Example usage: + /// #include + /// typedef EA::Allocator::CoreAllocatorAdapter Adapter; + /// eastl::list widgetList(Adapter("UI/WidgetList", pSomeCoreAllocator)); + /// widgetList.push_back(Widget()); + /// + /// Example usage: + /// #include + /// eastl::list > widgetList; + /// widgetList.push_back(Widget()); + /// + /// Example usage: + /// #include + /// typedef EA::Allocator::CoreAllocatorAdapter Adapter; + /// typedef eastl::list WidgetList; + /// CoreAllocatorFixed widgetCoreAllocator(pFixedAllocatorForWidgetListValueType); // CoreAllocatorFixed is a hypothetical implementation of the ICoreAllocator interface. + /// WidgetList widgetList(Adapter("UI/WidgetList", &widgetCoreAllocator)); // Note that the widgetCoreAllocator is declared before and thus destroyed after the widget list. + /// + template + class CoreAllocatorAdapter + { + public: + typedef CoreAllocatorAdapter this_type; + + public: + // To do: Make this constructor explicit, when there is no known code dependent on it being otherwise. 
+ CoreAllocatorAdapter(const char* pName = EASTL_NAME_VAL(EASTL_ALLOCATOR_DEFAULT_NAME), AllocatorType* pAllocator = EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR()); + CoreAllocatorAdapter(const char* pName, AllocatorType* pAllocator, int flags); + CoreAllocatorAdapter(const CoreAllocatorAdapter& x); + CoreAllocatorAdapter(const CoreAllocatorAdapter& x, const char* pName); + + CoreAllocatorAdapter& operator=(const CoreAllocatorAdapter& x); + + void* allocate(size_t n, int flags = 0); + void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0); + void deallocate(void* p, size_t n); + + AllocatorType* get_allocator() const; + void set_allocator(AllocatorType* pAllocator); + + int get_flags() const; + void set_flags(int flags); + + const char* get_name() const; + void set_name(const char* pName); + + public: // Public because otherwise VC++ generates (possibly invalid) warnings about inline friend template specializations. + AllocatorType* mpCoreAllocator; + int mnFlags; // Allocation flags. See ICoreAllocator/AllocFlags. + + #if EASTL_NAME_ENABLED + const char* mpName; // Debug name, used to track memory. + #endif + }; + + template + bool operator==(const CoreAllocatorAdapter& a, const CoreAllocatorAdapter& b); + + template + bool operator!=(const CoreAllocatorAdapter& a, const CoreAllocatorAdapter& b); + + + + /// EASTLICoreAllocator + /// + /// Provides a standardized typedef for ICoreAllocator; + /// + /// Example usage: + /// eastl::list widgetList("UI/WidgetList", pSomeCoreAllocator); + /// widgetList.push_back(Widget()); + /// + class ICoreAllocator; + class EASTLCoreAllocatorImpl; + + typedef CoreAllocatorAdapter EASTLICoreAllocatorAdapter; + typedef CoreAllocatorAdapter EASTLCoreAllocatorAdapter; + typedef EASTLICoreAllocatorAdapter EASTLICoreAllocator; // for backwards compatibility + + + + /// EASTLICoreDeleter + /// + /// Implements a functor which can free memory from the specified + /// ICoreAllocator interface. 
This is a convenience object provided for + /// users who wish to have EASTL containers deallocate memory obtained from + /// ICoreAllocator interfaces. + /// + template + class CoreDeleterAdapter + { + public: + typedef CoreDeleterAdapter this_type; + AllocatorType* mpCoreAllocator; + + public: + CoreDeleterAdapter(AllocatorType* pAllocator = EASTL_CORE_ALLOCATOR_ADAPTER_GET_DEFAULT_CORE_ALLOCATOR()) EA_NOEXCEPT + : mpCoreAllocator(pAllocator) {} + + ~CoreDeleterAdapter() EA_NOEXCEPT {} + + template + void operator()(T* p) + { + p->~T(); + mpCoreAllocator->Free(p); + } + + CoreDeleterAdapter(const CoreDeleterAdapter& in) { mpCoreAllocator = in.mpCoreAllocator; } + + CoreDeleterAdapter(CoreDeleterAdapter&& in) + { + mpCoreAllocator = in.mpCoreAllocator; + in.mpCoreAllocator = nullptr; + } + + CoreDeleterAdapter& operator=(const CoreDeleterAdapter& in) + { + mpCoreAllocator = in.mpCoreAllocator; + return *this; + } + + CoreDeleterAdapter& operator=(CoreDeleterAdapter&& in) + { + mpCoreAllocator = in.mpCoreAllocator; + in.mpCoreAllocator = nullptr; + return *this; + } + }; + + + + /// EASTLICoreDeleter + /// + /// Provides a standardized typedef for ICoreAllocator implementations. + /// + /// Example usage: + /// eastl::shared_ptr foo(pA, EASTLCoreDeleter()); + /// + typedef CoreDeleterAdapter EASTLICoreDeleterAdapter; + typedef CoreDeleterAdapter EASTLCoreDeleterAdapter; + + } // namespace Allocator + +} // namespace EA + + + + + +/////////////////////////////////////////////////////////////////////////////// +// Inlines +/////////////////////////////////////////////////////////////////////////////// + +namespace EA +{ + namespace Allocator + { + template + inline CoreAllocatorAdapter::CoreAllocatorAdapter(const char* EASTL_NAME(pName), AllocatorType* pCoreAllocator) + : mpCoreAllocator(pCoreAllocator), mnFlags(0) + { + #if EASTL_NAME_ENABLED + mpName = pName ? 
pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + template + inline CoreAllocatorAdapter::CoreAllocatorAdapter(const char* EASTL_NAME(pName), AllocatorType* pCoreAllocator, int flags) + : mpCoreAllocator(pCoreAllocator), mnFlags(flags) + { + #if EASTL_NAME_ENABLED + mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + template + inline CoreAllocatorAdapter::CoreAllocatorAdapter(const CoreAllocatorAdapter& x) + : mpCoreAllocator(x.mpCoreAllocator), mnFlags(x.mnFlags) + { + #if EASTL_NAME_ENABLED + mpName = x.mpName; + #endif + } + + template + inline CoreAllocatorAdapter::CoreAllocatorAdapter(const CoreAllocatorAdapter& x, const char* EASTL_NAME(pName)) + : mpCoreAllocator(x.mpCoreAllocator), mnFlags(x.mnFlags) + { + #if EASTL_NAME_ENABLED + mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + template + inline CoreAllocatorAdapter& CoreAllocatorAdapter::operator=(const CoreAllocatorAdapter& x) + { + mpCoreAllocator = x.mpCoreAllocator; + mnFlags = x.mnFlags; + + #if EASTL_NAME_ENABLED + mpName = x.mpName; + #endif + + return *this; + } + + template + inline void* CoreAllocatorAdapter::allocate(size_t n, int /*flags*/) + { + // It turns out that EASTL itself doesn't use the flags parameter, + // whereas the user here might well want to specify a flags + // parameter. So we use ours instead of the one passed in. + return mpCoreAllocator->Alloc(n, EASTL_NAME_VAL(mpName), (unsigned)mnFlags); + } + + template + inline void* CoreAllocatorAdapter::allocate(size_t n, size_t alignment, size_t offset, int /*flags*/) + { + // It turns out that EASTL itself doesn't use the flags parameter, + // whereas the user here might well want to specify a flags + // parameter. So we use ours instead of the one passed in. 
+ return mpCoreAllocator->Alloc(n, EASTL_NAME_VAL(mpName), (unsigned)mnFlags, (unsigned)alignment, (unsigned)offset); + } + + template + inline void CoreAllocatorAdapter::deallocate(void* p, size_t n) + { + return mpCoreAllocator->Free(p, n); + } + + template + inline AllocatorType* CoreAllocatorAdapter::get_allocator() const + { + return mpCoreAllocator; + } + + template + inline void CoreAllocatorAdapter::set_allocator(AllocatorType* pAllocator) + { + mpCoreAllocator = pAllocator; + } + + template + inline int CoreAllocatorAdapter::get_flags() const + { + return mnFlags; + } + + template + inline void CoreAllocatorAdapter::set_flags(int flags) + { + mnFlags = flags; + } + + template + inline const char* CoreAllocatorAdapter::get_name() const + { + #if EASTL_NAME_ENABLED + return mpName; + #else + return EASTL_ALLOCATOR_DEFAULT_NAME; + #endif + } + + template + inline void CoreAllocatorAdapter::set_name(const char* pName) + { + #if EASTL_NAME_ENABLED + mpName = pName; + #else + (void)pName; + #endif + } + + + + template + inline bool operator==(const CoreAllocatorAdapter& a, const CoreAllocatorAdapter& b) + { + return (a.mpCoreAllocator == b.mpCoreAllocator) && + (a.mnFlags == b.mnFlags); + } + + template + inline bool operator!=(const CoreAllocatorAdapter& a, const CoreAllocatorAdapter& b) + { + return (a.mpCoreAllocator != b.mpCoreAllocator) || + (a.mnFlags != b.mnFlags); + } + + + } // namespace Allocator + +} // namespace EA + + +#endif // EASTL_CORE_ALLOCATOR_ENABLED +#endif // Header include guard + + + + + + + + diff --git a/include/EASTL/deque.h b/include/EASTL/deque.h new file mode 100644 index 0000000..4568a0b --- /dev/null +++ b/include/EASTL/deque.h @@ -0,0 +1,2691 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + +////////////////////////////////////////////////////////////////////////////// +// deque design +// +// A deque (pronounced "deck") is a double-ended queue, though this is partially +// of a misnomer. A deque does indeed let you add and remove values from both ends +// of the container, but it's not usually used for such a thing and instead is used +// as a more flexible version of a vector. It provides operator[] (random access) +// and can insert items anywhere and not just at the front and back. +// +// While you can implement a double-ended queue via a doubly-linked list, deque is +// instead implemented as a list of arrays. The benefit of this is that memory usage +// is lower and that random access can be had with decent efficiency. +// +// Our implementation of deque is just like every other implementation of deque, +// as the C++ standard all but dictates that you make it work this way. Below +// we have a depiction of an array (or vector) of 48 items, with each node being +// a '+' character and extra capacity being a '-' character. What we have is one +// contiguous block of memory: +// +// ++++++++++++++++++++++++++++++++++++++++++++++++----------------- +// 0 47 +// +// With a deque, the same array of 48 items would be implemented as multiple smaller +// arrays of contiguous memory, each of fixed size. We will call these "sub-arrays." +// In the case here, we have six arrays of 8 nodes: +// +// ++++++++ ++++++++ ++++++++ ++++++++ ++++++++ ++++++++ +// +// With an vector, item [0] is the first item and item [47] is the last item. With a +// deque, item [0] is usually not the first item and neither is item [47]. There is +// extra capacity on both the front side and the back side of the deque. 
So a deque +// (of 24 items) actually looks like this: +// +// -------- -----+++ ++++++++ ++++++++ +++++--- -------- +// 0 23 +// +// To insert items at the front, you move into the capacity on the left, and to insert +// items at the back, you append items on the right. As you can see, inserting an item +// at the front doesn't require allocating new memory nor does it require moving any +// items in the container. It merely involves moving the pointer to the [0] item to +// the left by one node. +// +// We keep track of these sub-arrays by having an array of pointers, with each array +// entry pointing to each of the sub-arrays. We could alternatively use a linked +// list of pointers, but it turns out we can implement our deque::operator[] more +// efficiently if we use an array of pointers instead of a list of pointers. +// +// To implement deque::iterator, we could keep a struct which is essentially this: +// struct iterator { +// int subArrayIndex; +// int subArrayOffset; +// } +// +// In practice, we implement iterators a little differently, but in reality our +// implementation isn't much different from the above. It turns out that it's most +// simple if we also manage the location of item [0] and item [end] by using these +// same iterators. +// +// To consider: Implement the deque as a circular deque instead of a linear one. +// This would use a similar subarray layout but iterators would +// wrap around when they reached the end of the subarray pointer list. +// +////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_DEQUE_H +#define EASTL_DEQUE_H + + +#include +#include +#include +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS() +#include +#include +EA_RESTORE_ALL_VC_WARNINGS() + +#if EASTL_EXCEPTIONS_ENABLED + EA_DISABLE_ALL_VC_WARNINGS() + #include // std::out_of_range, std::length_error. 
+ EA_RESTORE_ALL_VC_WARNINGS() +#endif + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable: 4267) // 'argument' : conversion from 'size_t' to 'const uint32_t', possible loss of data. This is a bogus warning resulting from a bug in VC++. + #pragma warning(disable: 4345) // Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized + #pragma warning(disable: 4480) // nonstandard extension used: specifying underlying type for enum + #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc + #pragma warning(disable: 4571) // catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. + #if EASTL_EXCEPTIONS_ENABLED + #pragma warning(disable: 4703) // potentially uninitialized local pointer variable used. VC++ is mistakenly analyzing the possibility of uninitialized variables, though it's not easy for it to do so. + #pragma warning(disable: 4701) // potentially uninitialized local variable used. + #endif +#endif + + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + +namespace eastl +{ + + /// EASTL_DEQUE_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_DEQUE_DEFAULT_NAME + #define EASTL_DEQUE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " deque" // Unless the user overrides something, this is "EASTL deque". + #endif + + + /// EASTL_DEQUE_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_DEQUE_DEFAULT_ALLOCATOR + #define EASTL_DEQUE_DEFAULT_ALLOCATOR allocator_type(EASTL_DEQUE_DEFAULT_NAME) + #endif + + + /// DEQUE_DEFAULT_SUBARRAY_SIZE + /// + /// Defines the default number of items in a subarray. 
+ /// Note that the user has the option of specifying the subarray size + /// in the deque template declaration. + /// + #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x can't handle the declaration below. + #define DEQUE_DEFAULT_SUBARRAY_SIZE(T) ((sizeof(T) <= 4) ? 64 : ((sizeof(T) <= 8) ? 32 : ((sizeof(T) <= 16) ? 16 : ((sizeof(T) <= 32) ? 8 : 4)))) + #else + #define DEQUE_DEFAULT_SUBARRAY_SIZE(T) 16 + #endif + + + + /// DequeIterator + /// + /// The DequeIterator provides both const and non-const iterators for deque. + /// It also is used for the tracking of the begin and end for the deque. + /// + template + struct DequeIterator + { + typedef DequeIterator this_type; + typedef DequeIterator iterator; + typedef DequeIterator const_iterator; + typedef ptrdiff_t difference_type; + typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category; + typedef T value_type; + typedef T* pointer; + typedef T& reference; + + public: + DequeIterator(); + DequeIterator(const iterator& x); + + pointer operator->() const; + reference operator*() const; + + this_type& operator++(); + this_type operator++(int); + + this_type& operator--(); + this_type operator--(int); + + this_type& operator+=(difference_type n); + this_type& operator-=(difference_type n); + + this_type operator+(difference_type n) const; + this_type operator-(difference_type n) const; + + protected: + template + friend struct DequeIterator; + + template + friend struct DequeBase; + + template + friend class deque; + + template + friend bool operator==(const DequeIterator&, + const DequeIterator&); + + template + friend bool operator!=(const DequeIterator&, + const DequeIterator&); + + template + friend bool operator!=(const DequeIterator& a, + const DequeIterator& b); + + template + friend bool operator< (const DequeIterator&, + const DequeIterator&); + + template + friend bool operator> (const DequeIterator&, + const DequeIterator&); + + template + friend bool operator<=(const DequeIterator&, + const 
DequeIterator&); + + template + friend bool operator>=(const DequeIterator&, + const DequeIterator&); + + template + friend typename DequeIterator::difference_type + operator-(const DequeIterator& a, + const DequeIterator& b); + + protected: + T* mpCurrent; // Where we currently point. Declared first because it's used most often. + T* mpBegin; // The beginning of the current subarray. + T* mpEnd; // The end of the current subarray. To consider: remove this member, as it is always equal to 'mpBegin + kDequeSubarraySize'. Given that deque subarrays usually consist of hundreds of bytes, this isn't a massive win. Also, now that we are implementing a zero-allocation new deque policy, mpEnd may in fact not be equal to 'mpBegin + kDequeSubarraySize'. + T** mpCurrentArrayPtr; // Pointer to current subarray. We could alternatively implement this as a list node iterator if the deque used a linked list. + + struct Increment {}; + struct Decrement {}; + struct FromConst {}; + + DequeIterator(T** pCurrentArrayPtr, T* pCurrent); + DequeIterator(const const_iterator& x, FromConst) : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr){} + DequeIterator(const iterator& x, Increment); + DequeIterator(const iterator& x, Decrement); + + this_type copy(const iterator& first, const iterator& last, true_type); // true means that value_type has the type_trait has_trivial_relocate, + this_type copy(const iterator& first, const iterator& last, false_type); // false means it does not. + + void copy_backward(const iterator& first, const iterator& last, true_type); // true means that value_type has the type_trait has_trivial_relocate, + void copy_backward(const iterator& first, const iterator& last, false_type); // false means it does not. + + void SetSubarray(T** pCurrentArrayPtr); + }; + + + + + /// DequeBase + /// + /// The DequeBase implements memory allocation for deque. 
+ /// See VectorBase (class vector) for an explanation of why we + /// create this separate base class. + /// + template + struct DequeBase + { + typedef T value_type; + typedef Allocator allocator_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + typedef DequeIterator iterator; + typedef DequeIterator const_iterator; + + static const size_type npos = (size_type)-1; /// 'npos' means non-valid position or simply non-position. + static const size_type kMaxSize = (size_type)-2; /// -1 is reserved for 'npos'. It also happens to be slightly beneficial that kMaxSize is a value less than -1, as it helps us deal with potential integer wraparound issues. + + enum + { + kMinPtrArraySize = 8, /// A new empty deque has a ptrArraySize of 0, but any allocated ptrArrays use this min size. + kSubarraySize = kDequeSubarraySize /// + //kNodeSize = kDequeSubarraySize * sizeof(T) /// Disabled because it prevents the ability to do this: struct X{ eastl::deque mDequeOfSelf; }; + }; + + enum Side /// Defines the side of the deque: front or back. + { + kSideFront, /// Identifies the front side of the deque. + kSideBack /// Identifies the back side of the deque. + }; + + protected: + T** mpPtrArray; // Array of pointers to subarrays. + size_type mnPtrArraySize; // Possibly we should store this as T** mpArrayEnd. + iterator mItBegin; // Where within the subarrays is our beginning. + iterator mItEnd; // Where within the subarrays is our end. + allocator_type mAllocator; // To do: Use base class optimization to make this go away. 
+ + public: + DequeBase(const allocator_type& allocator); + DequeBase(size_type n); + DequeBase(size_type n, const allocator_type& allocator); + ~DequeBase(); + + const allocator_type& get_allocator() const EA_NOEXCEPT; + allocator_type& get_allocator() EA_NOEXCEPT; + void set_allocator(const allocator_type& allocator); + + protected: + T* DoAllocateSubarray(); + void DoFreeSubarray(T* p); + void DoFreeSubarrays(T** pBegin, T** pEnd); + + T** DoAllocatePtrArray(size_type n); + void DoFreePtrArray(T** p, size_t n); + + iterator DoReallocSubarray(size_type nAdditionalCapacity, Side allocationSide); + void DoReallocPtrArray(size_type nAdditionalCapacity, Side allocationSide); + + void DoInit(size_type n); + + }; // DequeBase + + + + + /// deque + /// + /// Implements a conventional C++ double-ended queue. The implementation used here + /// is very much like any other deque implementations you may have seen, as it + /// follows the standard algorithm for deque design. + /// + /// Note: + /// As of this writing, deque does not support zero-allocation initial emptiness. + /// A newly created deque with zero elements will still allocate a subarray + /// pointer set. We are looking for efficient and clean ways to get around this, + /// but current efforts have resulted in less efficient and more fragile code. + /// The logic of this class doesn't lend itself to a clean implementation. + /// It turns out that deques are one of the least likely classes you'd want this + /// behaviour in, so until this functionality becomes very imporantant to somebody, + /// we will leave it as-is. It can probably be solved by adding some extra code to + /// the Do* functions and adding good comments explaining the situation. 
+ /// + template + class deque : public DequeBase + { + public: + typedef DequeBase base_type; + typedef deque this_type; + typedef T value_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef DequeIterator iterator; + typedef DequeIterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + typedef typename base_type::size_type size_type; + typedef typename base_type::difference_type difference_type; + typedef typename base_type::allocator_type allocator_type; + + using base_type::kSideFront; + using base_type::kSideBack; + using base_type::mpPtrArray; + using base_type::mnPtrArraySize; + using base_type::mItBegin; + using base_type::mItEnd; + using base_type::mAllocator; + using base_type::npos; + using base_type::DoAllocateSubarray; + using base_type::DoFreeSubarray; + using base_type::DoFreeSubarrays; + using base_type::DoAllocatePtrArray; + using base_type::DoFreePtrArray; + using base_type::DoReallocSubarray; + using base_type::DoReallocPtrArray; + + public: + deque(); + explicit deque(const allocator_type& allocator); + explicit deque(size_type n, const allocator_type& allocator = EASTL_DEQUE_DEFAULT_ALLOCATOR); + deque(size_type n, const value_type& value, const allocator_type& allocator = EASTL_DEQUE_DEFAULT_ALLOCATOR); + deque(const this_type& x); + deque(this_type&& x); + deque(this_type&& x, const allocator_type& allocator); + deque(std::initializer_list ilist, const allocator_type& allocator = EASTL_DEQUE_DEFAULT_ALLOCATOR); + + template + deque(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg. 
+ + ~deque(); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void assign(size_type n, const value_type& value); + void assign(std::initializer_list ilist); + + template // It turns out that the C++ std::deque specifies a two argument + void assign(InputIterator first, InputIterator last); // version of assign that takes (int size, int value). These are not + // iterators, so we need to do a template compiler trick to do the right thing. + + iterator begin() EA_NOEXCEPT; + const_iterator begin() const EA_NOEXCEPT; + const_iterator cbegin() const EA_NOEXCEPT; + + iterator end() EA_NOEXCEPT; + const_iterator end() const EA_NOEXCEPT; + const_iterator cend() const EA_NOEXCEPT; + + reverse_iterator rbegin() EA_NOEXCEPT; + const_reverse_iterator rbegin() const EA_NOEXCEPT; + const_reverse_iterator crbegin() const EA_NOEXCEPT; + + reverse_iterator rend() EA_NOEXCEPT; + const_reverse_iterator rend() const EA_NOEXCEPT; + const_reverse_iterator crend() const EA_NOEXCEPT; + + bool empty() const EA_NOEXCEPT; + size_type size() const EA_NOEXCEPT; + + void resize(size_type n, const value_type& value); + void resize(size_type n); + + void shrink_to_fit(); + void set_capacity(size_type n = base_type::npos); + + reference operator[](size_type n); + const_reference operator[](size_type n) const; + + reference at(size_type n); + const_reference at(size_type n) const; + + reference front(); + const_reference front() const; + + reference back(); + const_reference back() const; + + void push_front(const value_type& value); + reference push_front(); + void push_front(value_type&& value); + + void push_back(const value_type& value); + reference push_back(); + void push_back(value_type&& value); + + void pop_front(); + void pop_back(); + + template + iterator emplace(const_iterator position, Args&&... args); + + template + void emplace_front(Args&&... 
args); + + template + void emplace_back(Args&&... args); + + iterator insert(const_iterator position, const value_type& value); + iterator insert(const_iterator position, value_type&& value); + void insert(const_iterator position, size_type n, const value_type& value); + iterator insert(const_iterator position, std::initializer_list ilist); + + template + void insert(const_iterator position, InputIterator first, InputIterator last); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + reverse_iterator erase(reverse_iterator position); + reverse_iterator erase(reverse_iterator first, reverse_iterator last); + + void clear(); + //void reset_lose_memory(); // Disabled until it can be implemented efficiently and cleanly. // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + bool validate() const; + int validate_iterator(const_iterator i) const; + + protected: + template + void DoInit(Integer n, Integer value, true_type); + + template + void DoInit(InputIterator first, InputIterator last, false_type); + + template + void DoInitFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag); + + template + void DoInitFromIterator(ForwardIterator first, ForwardIterator last, EASTL_ITC_NS::forward_iterator_tag); + + void DoFillInit(const value_type& value); + + template + void DoAssign(Integer n, Integer value, true_type); + + template + void DoAssign(InputIterator first, InputIterator last, false_type); + + void DoAssignValues(size_type n, const value_type& value); + + template + void DoInsert(const const_iterator& position, Integer n, Integer value, true_type); + + template + void DoInsert(const const_iterator& position, const InputIterator& first, const InputIterator& last, false_type); + + template + void DoInsertFromIterator(const_iterator position, const InputIterator& first, const InputIterator& last, 
EASTL_ITC_NS::forward_iterator_tag); + + void DoInsertValues(const_iterator position, size_type n, const value_type& value); + + void DoSwap(this_type& x); + }; // class deque + + + + + /////////////////////////////////////////////////////////////////////// + // DequeBase + /////////////////////////////////////////////////////////////////////// + + template + DequeBase::DequeBase(const allocator_type& allocator) + : mpPtrArray(NULL), + mnPtrArraySize(0), + mItBegin(), + mItEnd(), + mAllocator(allocator) + { + // It is assumed here that the deque subclass will init us when/as needed. + } + + + template + DequeBase::DequeBase(size_type n) + : mpPtrArray(NULL), + mnPtrArraySize(0), + mItBegin(), + mItEnd(), + mAllocator(EASTL_DEQUE_DEFAULT_NAME) + { + // It's important to note that DoInit creates space for elements and assigns + // mItBegin/mItEnd to point to them, but these elements are not constructed. + // You need to immediately follow this constructor with code that constructs the values. + DoInit(n); + } + + + template + DequeBase::DequeBase(size_type n, const allocator_type& allocator) + : mpPtrArray(NULL), + mnPtrArraySize(0), + mItBegin(), + mItEnd(), + mAllocator(allocator) + { + // It's important to note that DoInit creates space for elements and assigns + // mItBegin/mItEnd to point to them, but these elements are not constructed. + // You need to immediately follow this constructor with code that constructs the values. 
+ DoInit(n); + } + + + template + DequeBase::~DequeBase() + { + if(mpPtrArray) + { + DoFreeSubarrays(mItBegin.mpCurrentArrayPtr, mItEnd.mpCurrentArrayPtr + 1); + DoFreePtrArray(mpPtrArray, mnPtrArraySize); + } + } + + + template + const typename DequeBase::allocator_type& + DequeBase::get_allocator() const EA_NOEXCEPT + { + return mAllocator; + } + + + template + typename DequeBase::allocator_type& + DequeBase::get_allocator() EA_NOEXCEPT + { + return mAllocator; + } + + + template + void DequeBase::set_allocator(const allocator_type& allocator) + { + // The only time you can set an allocator is with an empty unused container, such as right after construction. + if(EASTL_LIKELY(mAllocator != allocator)) + { + if(EASTL_LIKELY(mpPtrArray && (mItBegin.mpCurrentArrayPtr == mItEnd.mpCurrentArrayPtr))) // If we are empty and so can safely deallocate the existing memory... We could also test for empty(), but that's a more expensive calculation and more involved clearing, though it would be more flexible. 
+ { + DoFreeSubarrays(mItBegin.mpCurrentArrayPtr, mItEnd.mpCurrentArrayPtr + 1); + DoFreePtrArray(mpPtrArray, mnPtrArraySize); + + mAllocator = allocator; + DoInit(0); + } + else + { + EASTL_FAIL_MSG("DequeBase::set_allocator -- atempt to change allocator after allocating elements."); + } + } + } + + + template + T* DequeBase::DoAllocateSubarray() + { + T* p = (T*)allocate_memory(mAllocator, kDequeSubarraySize * sizeof(T), EASTL_ALIGN_OF(T), 0); + EASTL_ASSERT_MSG(p != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_DEBUG + memset((void*)p, 0, kDequeSubarraySize * sizeof(T)); + #endif + + return (T*)p; + } + + + template + void DequeBase::DoFreeSubarray(T* p) + { + if(p) + EASTLFree(mAllocator, p, kDequeSubarraySize * sizeof(T)); + } + + template + void DequeBase::DoFreeSubarrays(T** pBegin, T** pEnd) + { + while(pBegin < pEnd) + DoFreeSubarray(*pBegin++); + } + + template + T** DequeBase::DoAllocatePtrArray(size_type n) + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(n >= 0x80000000)) + EASTL_FAIL_MSG("deque::DoAllocatePtrArray -- improbably large request."); + #endif + + T** pp = (T**)allocate_memory(mAllocator, n * sizeof(T*), EASTL_ALIGN_OF(T), 0); + EASTL_ASSERT_MSG(pp != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_DEBUG + memset((void*)pp, 0, n * sizeof(T*)); + #endif + + return pp; + } + + + template + void DequeBase::DoFreePtrArray(T** pp, size_t n) + { + if(pp) + EASTLFree(mAllocator, pp, n * sizeof(T*)); + } + + + template + typename DequeBase::iterator + DequeBase::DoReallocSubarray(size_type nAdditionalCapacity, Side allocationSide) + { + // nAdditionalCapacity refers to the amount of additional space we need to be + // able to store in this deque. Typically this function is called as part of + // an insert or append operation. This is the function that makes sure there + // is enough capacity for the new elements to be copied into the deque. 
+ // The new capacity here is always at the front or back of the deque. + // This function returns an iterator to that points to the new begin or + // the new end of the deque space, depending on allocationSide. + + if(allocationSide == kSideFront) + { + // There might be some free space (nCurrentAdditionalCapacity) at the front of the existing subarray. + const size_type nCurrentAdditionalCapacity = (size_type)(mItBegin.mpCurrent - mItBegin.mpBegin); + + if(EASTL_UNLIKELY(nCurrentAdditionalCapacity < nAdditionalCapacity)) // If we need to grow downward into a new subarray... + { + const difference_type nSubarrayIncrease = (difference_type)(((nAdditionalCapacity - nCurrentAdditionalCapacity) + kDequeSubarraySize - 1) / kDequeSubarraySize); + difference_type i; + + if(nSubarrayIncrease > (mItBegin.mpCurrentArrayPtr - mpPtrArray)) // If there are not enough pointers in front of the current (first) one... + DoReallocPtrArray((size_type)(nSubarrayIncrease - (mItBegin.mpCurrentArrayPtr - mpPtrArray)), kSideFront); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(i = 1; i <= nSubarrayIncrease; ++i) + mItBegin.mpCurrentArrayPtr[-i] = DoAllocateSubarray(); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(difference_type j = 1; j < i; ++j) + DoFreeSubarray(mItBegin.mpCurrentArrayPtr[-j]); + throw; + } + #endif + } + + return mItBegin - (difference_type)nAdditionalCapacity; + } + else // else kSideBack + { + const size_type nCurrentAdditionalCapacity = (size_type)((mItEnd.mpEnd - 1) - mItEnd.mpCurrent); + + if(EASTL_UNLIKELY(nCurrentAdditionalCapacity < nAdditionalCapacity)) // If we need to grow forward into a new subarray... 
+ { + const difference_type nSubarrayIncrease = (difference_type)(((nAdditionalCapacity - nCurrentAdditionalCapacity) + kDequeSubarraySize - 1) / kDequeSubarraySize); + difference_type i; + + if(nSubarrayIncrease > ((mpPtrArray + mnPtrArraySize) - mItEnd.mpCurrentArrayPtr) - 1) // If there are not enough pointers after the current (last) one... + DoReallocPtrArray((size_type)(nSubarrayIncrease - (((mpPtrArray + mnPtrArraySize) - mItEnd.mpCurrentArrayPtr) - 1)), kSideBack); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(i = 1; i <= nSubarrayIncrease; ++i) + mItEnd.mpCurrentArrayPtr[i] = DoAllocateSubarray(); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(difference_type j = 1; j < i; ++j) + DoFreeSubarray(mItEnd.mpCurrentArrayPtr[j]); + throw; + } + #endif + } + + return mItEnd + (difference_type)nAdditionalCapacity; + } + } + + + template + void DequeBase::DoReallocPtrArray(size_type nAdditionalCapacity, Side allocationSide) + { + // This function is not called unless the capacity is known to require a resize. + // + // We have an array of pointers (mpPtrArray), of which a segment of them are in use and + // at either end of the array are zero or more unused pointers. This function is being + // called because we need to extend the capacity on either side of this array by + // nAdditionalCapacity pointers. However, it's possible that if the user is continually + // using push_back and pop_front then the pointer array will continue to be extended + // on the back side and unused on the front side. So while we are doing this resizing + // here we also take the opportunity to recenter the pointers and thus be balanced. + // It man turn out that we don't even need to reallocate the pointer array in order + // to increase capacity on one side, as simply moving the pointers to the center may + // be enough to open up the requires space. 
+ // + // Balanced pointer array Unbalanced pointer array (unused space at front, no free space at back) + // ----++++++++++++---- ---------+++++++++++ + + const size_type nUnusedPtrCountAtFront = (size_type)(mItBegin.mpCurrentArrayPtr - mpPtrArray); + const size_type nUsedPtrCount = (size_type)(mItEnd.mpCurrentArrayPtr - mItBegin.mpCurrentArrayPtr) + 1; + const size_type nUsedPtrSpace = nUsedPtrCount * sizeof(void*); + const size_type nUnusedPtrCountAtBack = (mnPtrArraySize - nUnusedPtrCountAtFront) - nUsedPtrCount; + value_type** pPtrArrayBegin; + + if((allocationSide == kSideBack) && (nAdditionalCapacity <= nUnusedPtrCountAtFront)) // If we can take advantage of unused pointers at the front without doing any reallocation... + { + if(nAdditionalCapacity < (nUnusedPtrCountAtFront / 2)) // Possibly use more space than required, if there's a lot of extra space. + nAdditionalCapacity = (nUnusedPtrCountAtFront / 2); + + pPtrArrayBegin = mpPtrArray + (nUnusedPtrCountAtFront - nAdditionalCapacity); + memmove(pPtrArrayBegin, mItBegin.mpCurrentArrayPtr, nUsedPtrSpace); + + #if EASTL_DEBUG + memset(pPtrArrayBegin + nUsedPtrCount, 0, (size_t)(mpPtrArray + mnPtrArraySize) - (size_t)(pPtrArrayBegin + nUsedPtrCount)); + #endif + } + else if((allocationSide == kSideFront) && (nAdditionalCapacity <= nUnusedPtrCountAtBack)) // If we can take advantage of unused pointers at the back without doing any reallocation... + { + if(nAdditionalCapacity < (nUnusedPtrCountAtBack / 2)) // Possibly use more space than required, if there's a lot of extra space. + nAdditionalCapacity = (nUnusedPtrCountAtBack / 2); + + pPtrArrayBegin = mItBegin.mpCurrentArrayPtr + nAdditionalCapacity; + memmove(pPtrArrayBegin, mItBegin.mpCurrentArrayPtr, nUsedPtrSpace); + + #if EASTL_DEBUG + memset(mpPtrArray, 0, (size_t)((uintptr_t)pPtrArrayBegin - (uintptr_t)mpPtrArray)); + #endif + } + else + { + // In this case we will have to do a reallocation. 
+ const size_type nNewPtrArraySize = mnPtrArraySize + eastl::max_alt(mnPtrArraySize, nAdditionalCapacity) + 2; // Allocate extra capacity. + value_type** const pNewPtrArray = DoAllocatePtrArray(nNewPtrArraySize); + + pPtrArrayBegin = pNewPtrArray + (mItBegin.mpCurrentArrayPtr - mpPtrArray) + ((allocationSide == kSideFront) ? nAdditionalCapacity : 0); + + // The following is equivalent to: eastl::copy(mItBegin.mpCurrentArrayPtr, mItEnd.mpCurrentArrayPtr + 1, pPtrArrayBegin); + // It's OK to use memcpy instead of memmove because the destination is guaranteed to non-overlap the source. + if(mpPtrArray) // Could also say: 'if(mItBegin.mpCurrentArrayPtr)' + memcpy(pPtrArrayBegin, mItBegin.mpCurrentArrayPtr, nUsedPtrSpace); + + DoFreePtrArray(mpPtrArray, mnPtrArraySize); + + mpPtrArray = pNewPtrArray; + mnPtrArraySize = nNewPtrArraySize; + } + + // We need to reset the begin and end iterators, as code that calls this expects them to *not* be invalidated. + mItBegin.SetSubarray(pPtrArrayBegin); + mItEnd.SetSubarray((pPtrArrayBegin + nUsedPtrCount) - 1); + } + + + template + void DequeBase::DoInit(size_type n) + { + // This code is disabled because it doesn't currently work properly. + // We are trying to make it so that a deque can have a zero allocation + // initial empty state, but we (OK, I) am having a hard time making + // this elegant and efficient. + //if(n) + //{ + const size_type nNewPtrArraySize = (size_type)((n / kDequeSubarraySize) + 1); // Always have at least one, even if n is zero. + const size_type kMinPtrArraySize_ = kMinPtrArraySize; + + mnPtrArraySize = eastl::max_alt(kMinPtrArraySize_, (nNewPtrArraySize + 2)); + mpPtrArray = DoAllocatePtrArray(mnPtrArraySize); + + value_type** const pPtrArrayBegin = (mpPtrArray + ((mnPtrArraySize - nNewPtrArraySize) / 2)); // Try to place it in the middle. 
+ value_type** const pPtrArrayEnd = pPtrArrayBegin + nNewPtrArraySize; + value_type** pPtrArrayCurrent = pPtrArrayBegin; + + #if EASTL_EXCEPTIONS_ENABLED + try + { + try + { + #endif + while(pPtrArrayCurrent < pPtrArrayEnd) + *pPtrArrayCurrent++ = DoAllocateSubarray(); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(pPtrArrayBegin, pPtrArrayCurrent); + throw; + } + } + catch(...) + { + DoFreePtrArray(mpPtrArray, mnPtrArraySize); + mpPtrArray = NULL; + mnPtrArraySize = 0; + throw; + } + #endif + + mItBegin.SetSubarray(pPtrArrayBegin); + mItBegin.mpCurrent = mItBegin.mpBegin; + + mItEnd.SetSubarray(pPtrArrayEnd - 1); + mItEnd.mpCurrent = mItEnd.mpBegin + (difference_type)(n % kDequeSubarraySize); + //} + //else // Else we do a zero-allocation initialization. + //{ + // mpPtrArray = NULL; + // mnPtrArraySize = 0; + // + // mItBegin.mpCurrentArrayPtr = NULL; + // mItBegin.mpBegin = NULL; + // mItBegin.mpEnd = NULL; // We intentionally create a situation whereby the subarray that has no capacity. 
+ // mItBegin.mpCurrent = NULL; + // + // mItEnd = mItBegin; + //} + } + + + + /////////////////////////////////////////////////////////////////////// + // DequeIterator + /////////////////////////////////////////////////////////////////////// + + template + DequeIterator::DequeIterator() + : mpCurrent(NULL), mpBegin(NULL), mpEnd(NULL), mpCurrentArrayPtr(NULL) + { + // Empty + } + + + template + DequeIterator::DequeIterator(T** pCurrentArrayPtr, T* pCurrent) + : mpCurrent(pCurrent), mpBegin(*pCurrentArrayPtr), mpEnd(pCurrent + kDequeSubarraySize), mpCurrentArrayPtr(pCurrentArrayPtr) + { + // Empty + } + + + template + DequeIterator::DequeIterator(const iterator& x) + : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr) + { + // Empty + } + + + template + DequeIterator::DequeIterator(const iterator& x, Increment) + : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr) + { + operator++(); + } + + + template + DequeIterator::DequeIterator(const iterator& x, Decrement) + : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr) + { + operator--(); + } + + + template + typename DequeIterator::pointer + DequeIterator::operator->() const + { + return mpCurrent; + } + + + template + typename DequeIterator::reference + DequeIterator::operator*() const + { + return *mpCurrent; + } + + + template + typename DequeIterator::this_type& + DequeIterator::operator++() + { + if(EASTL_UNLIKELY(++mpCurrent == mpEnd)) + { + mpBegin = *++mpCurrentArrayPtr; + mpEnd = mpBegin + kDequeSubarraySize; + mpCurrent = mpBegin; + } + return *this; + } + + + template + typename DequeIterator::this_type + DequeIterator::operator++(int) + { + const this_type temp(*this); + operator++(); + return temp; + } + + + template + typename DequeIterator::this_type& + DequeIterator::operator--() + { + if(EASTL_UNLIKELY(mpCurrent == mpBegin)) + { + mpBegin = 
*--mpCurrentArrayPtr; + mpEnd = mpBegin + kDequeSubarraySize; + mpCurrent = mpEnd; // fall through... + } + --mpCurrent; + return *this; + } + + + template + typename DequeIterator::this_type + DequeIterator::operator--(int) + { + const this_type temp(*this); + operator--(); + return temp; + } + + + template + typename DequeIterator::this_type& + DequeIterator::operator+=(difference_type n) + { + const difference_type subarrayPosition = (mpCurrent - mpBegin) + n; + + // Cast from signed to unsigned (size_t) in order to obviate the need to compare to < 0. + if((size_t)subarrayPosition < (size_t)kDequeSubarraySize) // If the new position is within the current subarray (i.e. >= 0 && < kSubArraySize)... + mpCurrent += n; + else + { + // This implementation is a branchless version which works by offsetting + // the math to always be in the positive range. Much of the values here + // reduce to constants and both the multiplication and division are of + // power of two sizes and so this calculation ends up compiling down to + // just one addition, one shift and one subtraction. This algorithm has + // a theoretical weakness in that on 32 bit systems it will fail if the + // value of n is >= (2^32 - 2^24) or 4,278,190,080 of if kDequeSubarraySize + // is >= 2^24 or 16,777,216. + EASTL_CT_ASSERT((kDequeSubarraySize & (kDequeSubarraySize - 1)) == 0); // Verify that it is a power of 2. 
+ const difference_type subarrayIndex = (((16777216 + subarrayPosition) / (difference_type)kDequeSubarraySize)) - (16777216 / (difference_type)kDequeSubarraySize); + + SetSubarray(mpCurrentArrayPtr + subarrayIndex); + mpCurrent = mpBegin + (subarrayPosition - (subarrayIndex * (difference_type)kDequeSubarraySize)); + } + return *this; + } + + + template + typename DequeIterator::this_type& + DequeIterator::operator-=(difference_type n) + { + return (*this).operator+=(-n); + } + + + template + typename DequeIterator::this_type + DequeIterator::operator+(difference_type n) const + { + return this_type(*this).operator+=(n); + } + + + template + typename DequeIterator::this_type + DequeIterator::operator-(difference_type n) const + { + return this_type(*this).operator+=(-n); + } + + + template + typename DequeIterator::this_type + DequeIterator::copy(const iterator& first, const iterator& last, true_type) + { + // To do: Implement this as a loop which does memcpys between subarrays appropriately. + // Currently we only do memcpy if the entire operation occurs within a single subarray. + if((first.mpBegin == last.mpBegin) && (first.mpBegin == mpBegin)) // If all operations are within the same subarray, implement the operation as a memmove. 
+ { + memmove(mpCurrent, first.mpCurrent, (size_t)((uintptr_t)last.mpCurrent - (uintptr_t)first.mpCurrent)); + return *this + (last.mpCurrent - first.mpCurrent); + } + return eastl::copy(eastl::make_move_iterator(first), eastl::make_move_iterator(last), eastl::make_move_iterator(*this)).base(); + } + + + template + typename DequeIterator::this_type + DequeIterator::copy(const iterator& first, const iterator& last, false_type) + { + return eastl::copy(eastl::make_move_iterator(first), eastl::make_move_iterator(last), eastl::make_move_iterator(*this)).base(); + } + + + template + void DequeIterator::copy_backward(const iterator& first, const iterator& last, true_type) + { + // To do: Implement this as a loop which does memmoves between subarrays appropriately. + // Currently we only do memcpy if the entire operation occurs within a single subarray. + if((first.mpBegin == last.mpBegin) && (first.mpBegin == mpBegin)) // If all operations are within the same subarray, implement the operation as a memcpy. + memmove(mpCurrent - (last.mpCurrent - first.mpCurrent), first.mpCurrent, (size_t)((uintptr_t)last.mpCurrent - (uintptr_t)first.mpCurrent)); + else + eastl::copy_backward(eastl::make_move_iterator(first), eastl::make_move_iterator(last), eastl::make_move_iterator(*this)); + } + + + template + void DequeIterator::copy_backward(const iterator& first, const iterator& last, false_type) + { + eastl::copy_backward(eastl::make_move_iterator(first), eastl::make_move_iterator(last), eastl::make_move_iterator(*this)).base(); + } + + + template + void DequeIterator::SetSubarray(T** pCurrentArrayPtr) + { + mpCurrentArrayPtr = pCurrentArrayPtr; + mpBegin = *pCurrentArrayPtr; + mpEnd = mpBegin + kDequeSubarraySize; + } + + + // The C++ defect report #179 requires that we support comparisons between const and non-const iterators. + // Thus we provide additional template paremeters here to support this. 
The defect report does not + // require us to support comparisons between reverse_iterators and const_reverse_iterators. + template + inline bool operator==(const DequeIterator& a, + const DequeIterator& b) + { + return a.mpCurrent == b.mpCurrent; + } + + + template + inline bool operator!=(const DequeIterator& a, + const DequeIterator& b) + { + return a.mpCurrent != b.mpCurrent; + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const DequeIterator& a, + const DequeIterator& b) + { + return a.mpCurrent != b.mpCurrent; + } + + + template + inline bool operator<(const DequeIterator& a, + const DequeIterator& b) + { + return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent < b.mpCurrent) : (a.mpCurrentArrayPtr < b.mpCurrentArrayPtr); + } + + + template + inline bool operator>(const DequeIterator& a, + const DequeIterator& b) + { + return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent > b.mpCurrent) : (a.mpCurrentArrayPtr > b.mpCurrentArrayPtr); + } + + + template + inline bool operator<=(const DequeIterator& a, + const DequeIterator& b) + { + return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent <= b.mpCurrent) : (a.mpCurrentArrayPtr <= b.mpCurrentArrayPtr); + } + + + template + inline bool operator>=(const DequeIterator& a, + const DequeIterator& b) + { + return (a.mpCurrentArrayPtr == b.mpCurrentArrayPtr) ? (a.mpCurrent >= b.mpCurrent) : (a.mpCurrentArrayPtr >= b.mpCurrentArrayPtr); + } + + + // Random access iterators must support operator + and operator -. + // You can only add an integer to an iterator, and you cannot add two iterators. + template + inline DequeIterator + operator+(ptrdiff_t n, const DequeIterator& x) + { + return x + n; // Implement (n + x) in terms of (x + n). + } + + + // You can only add an integer to an iterator, but you can subtract two iterators. 
+ // The C++ defect report #179 mentioned above specifically refers to + // operator - and states that we support the subtraction of const and non-const iterators. + template + inline typename DequeIterator::difference_type + operator-(const DequeIterator& a, + const DequeIterator& b) + { + // This is a fairly clever algorithm that has been used in STL deque implementations since the original HP STL: + typedef typename DequeIterator::difference_type difference_type; + + return ((difference_type)kDequeSubarraySize * ((a.mpCurrentArrayPtr - b.mpCurrentArrayPtr) - 1)) + (a.mpCurrent - a.mpBegin) + (b.mpEnd - b.mpCurrent); + } + + + + + /////////////////////////////////////////////////////////////////////// + // deque + /////////////////////////////////////////////////////////////////////// + + template + inline deque::deque() + : base_type((size_type)0) + { + // Empty + } + + + template + inline deque::deque(const allocator_type& allocator) + : base_type((size_type)0, allocator) + { + // Empty + } + + + template + inline deque::deque(size_type n, const allocator_type& allocator) + : base_type(n, allocator) + { + DoFillInit(value_type()); + } + + + template + inline deque::deque(size_type n, const value_type& value, const allocator_type& allocator) + : base_type(n, allocator) + { + DoFillInit(value); + } + + + template + inline deque::deque(const this_type& x) + : base_type(x.size(), x.mAllocator) + { + eastl::uninitialized_copy(x.mItBegin, x.mItEnd, mItBegin); + } + + + template + inline deque::deque(this_type&& x) + : base_type((size_type)0, x.mAllocator) + { + swap(x); + } + + + template + inline deque::deque(this_type&& x, const allocator_type& allocator) + : base_type((size_type)0, allocator) + { + swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy. 
+ } + + + template + inline deque::deque(std::initializer_list ilist, const allocator_type& allocator) + : base_type(allocator) + { + DoInit(ilist.begin(), ilist.end(), false_type()); + } + + + template + template + inline deque::deque(InputIterator first, InputIterator last) + : base_type(EASTL_DEQUE_DEFAULT_ALLOCATOR) // Call the empty base constructor, which does nothing. We need to do all the work in our own DoInit. + { + DoInit(first, last, is_integral()); + } + + + template + inline deque::~deque() + { + // Call destructors. Parent class will free the memory. + for(iterator itCurrent(mItBegin); itCurrent != mItEnd; ++itCurrent) + itCurrent.mpCurrent->~value_type(); + } + + + template + typename deque::this_type& + deque::operator=(const this_type& x) + { + if(&x != this) // If not assigning to ourselves... + { + // If (EASTL_ALLOCATOR_COPY_ENABLED == 1) and the current contents are allocated by an + // allocator that's unequal to x's allocator, we need to reallocate our elements with + // our current allocator and reallocate it with x's allocator. If the allocators are + // equal then we can use a more optimal algorithm that doesn't reallocate our elements + // but instead can copy them in place. + + #if EASTL_ALLOCATOR_COPY_ENABLED + bool bSlowerPathwayRequired = (mAllocator != x.mAllocator); + #else + bool bSlowerPathwayRequired = false; + #endif + + if(bSlowerPathwayRequired) + { + // We can't currently use set_capacity(0) or shrink_to_fit, because they + // leave a remaining allocation with our old allocator. So we do a similar + // thing but set our allocator to x.mAllocator while doing so. + this_type temp(x.mAllocator); + DoSwap(temp); + // Now we have an empty container with an allocator equal to x.mAllocator, ready to assign from x. 
+ } + + DoAssign(x.begin(), x.end(), eastl::false_type()); + } + + return *this; + } + + + template + inline typename deque::this_type& + deque::operator=(this_type&& x) + { + if(this != &x) + { + set_capacity(0); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor. + swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy. + } + return *this; + } + + + template + inline typename deque::this_type& + deque::operator=(std::initializer_list ilist) + { + DoAssign(ilist.begin(), ilist.end(), false_type()); + return *this; + } + + + template + inline void deque::assign(size_type n, const value_type& value) + { + DoAssignValues(n, value); + } + + + template + inline void deque::assign(std::initializer_list ilist) + { + DoAssign(ilist.begin(), ilist.end(), false_type()); + } + + + // It turns out that the C++ std::deque specifies a two argument + // version of assign that takes (int size, int value). These are not + // iterators, so we need to do a template compiler trick to do the right thing. 
+ template + template + inline void deque::assign(InputIterator first, InputIterator last) + { + DoAssign(first, last, is_integral()); + } + + + template + inline typename deque::iterator + deque::begin() EA_NOEXCEPT + { + return mItBegin; + } + + + template + inline typename deque::const_iterator + deque::begin() const EA_NOEXCEPT + { + return mItBegin; + } + + + template + inline typename deque::const_iterator + deque::cbegin() const EA_NOEXCEPT + { + return mItBegin; + } + + + template + inline typename deque::iterator + deque::end() EA_NOEXCEPT + { + return mItEnd; + } + + + template + typename deque::const_iterator + deque::end() const EA_NOEXCEPT + { + return mItEnd; + } + + + template + inline typename deque::const_iterator + deque::cend() const EA_NOEXCEPT + { + return mItEnd; + } + + + template + inline typename deque::reverse_iterator + deque::rbegin() EA_NOEXCEPT + { + return reverse_iterator(mItEnd); + } + + + template + inline typename deque::const_reverse_iterator + deque::rbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(mItEnd); + } + + + template + inline typename deque::const_reverse_iterator + deque::crbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(mItEnd); + } + + + template + inline typename deque::reverse_iterator + deque::rend() EA_NOEXCEPT + { + return reverse_iterator(mItBegin); + } + + + template + inline typename deque::const_reverse_iterator + deque::rend() const EA_NOEXCEPT + { + return const_reverse_iterator(mItBegin); + } + + + template + inline typename deque::const_reverse_iterator + deque::crend() const EA_NOEXCEPT + { + return const_reverse_iterator(mItBegin); + } + + + template + inline bool deque::empty() const EA_NOEXCEPT + { + return mItBegin.mpCurrent == mItEnd.mpCurrent; + } + + + template + typename deque::size_type + inline deque::size() const EA_NOEXCEPT + { + return (size_type)(mItEnd - mItBegin); + } + + + template + inline void deque::resize(size_type n, const value_type& value) + { + const 
size_type nSizeCurrent = size(); + + if(n > nSizeCurrent) // We expect that more often than not, resizes will be upsizes. + insert(mItEnd, n - nSizeCurrent, value); + else + erase(mItBegin + (difference_type)n, mItEnd); + } + + + template + inline void deque::resize(size_type n) + { + resize(n, value_type()); + } + + + template + inline void deque::shrink_to_fit() + { + this_type x(eastl::make_move_iterator(begin()), eastl::make_move_iterator(end())); + swap(x); + } + + + template + inline void deque::set_capacity(size_type n) + { + // Currently there isn't a way to remove all allocations from a deque, as it + // requires a single starting allocation for the subarrays. So we can't just + // free all memory without leaving it in a bad state. So the best means of + // implementing set_capacity() is to do what we do below. + + if(n == 0) + { + this_type temp(mAllocator); + DoSwap(temp); + } + else if(n < size()) + { + // We currently ignore the request to reduce capacity. To do: Implement this + // and do it in a way that doesn't result in temporarily ~doubling our memory usage. + // That might involve trimming unused subarrays from the front or back of + // the container. + resize(n); + } + } + + + template + typename deque::reference + deque::operator[](size_type n) + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(n >= (size_type)(mItEnd - mItBegin))) + EASTL_FAIL_MSG("deque::operator[] -- out of range"); + #elif EASTL_ASSERT_ENABLED + // We allow taking a reference to deque[0] + if (EASTL_UNLIKELY((n != 0) && n >= (size_type)(mItEnd - mItBegin))) + EASTL_FAIL_MSG("deque::operator[] -- out of range"); + #endif + + // See DequeIterator::operator+=() for an explanation of the code below. 
+ iterator it(mItBegin); + + const difference_type subarrayPosition = (difference_type)((it.mpCurrent - it.mpBegin) + (difference_type)n); + const difference_type subarrayIndex = (((16777216 + subarrayPosition) / (difference_type)kDequeSubarraySize)) - (16777216 / (difference_type)kDequeSubarraySize); + + return *(*(it.mpCurrentArrayPtr + subarrayIndex) + (subarrayPosition - (subarrayIndex * (difference_type)kDequeSubarraySize))); + } + + + template + typename deque::const_reference + deque::operator[](size_type n) const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(n >= (size_type)(mItEnd - mItBegin))) + EASTL_FAIL_MSG("deque::operator[] -- out of range"); + #elif EASTL_ASSERT_ENABLED + // We allow the user to use a reference to deque[0] of an empty container. + if (EASTL_UNLIKELY((n != 0) && n >= (size_type)(mItEnd - mItBegin))) + EASTL_FAIL_MSG("deque::operator[] -- out of range"); + #endif + + // See DequeIterator::operator+=() for an explanation of the code below. 
+ iterator it(mItBegin); + + const difference_type subarrayPosition = (it.mpCurrent - it.mpBegin) + (difference_type)n; + const difference_type subarrayIndex = (((16777216 + subarrayPosition) / (difference_type)kDequeSubarraySize)) - (16777216 / (difference_type)kDequeSubarraySize); + + return *(*(it.mpCurrentArrayPtr + subarrayIndex) + (subarrayPosition - (subarrayIndex * (difference_type)kDequeSubarraySize))); + } + + + template + typename deque::reference + deque::at(size_type n) + { + #if EASTL_EXCEPTIONS_ENABLED + if(n >= (size_type)(mItEnd - mItBegin)) + throw std::out_of_range("deque::at -- out of range"); + #elif EASTL_ASSERT_ENABLED + if(n >= (size_type)(mItEnd - mItBegin)) + EASTL_FAIL_MSG("deque::at -- out of range"); + #endif + return *(mItBegin.operator+((difference_type)n)); + } + + + template + typename deque::const_reference + deque::at(size_type n) const + { + #if EASTL_EXCEPTIONS_ENABLED + if(n >= (size_type)(mItEnd - mItBegin)) + throw std::out_of_range("deque::at -- out of range"); + #elif EASTL_ASSERT_ENABLED + if(n >= (size_type)(mItEnd - mItBegin)) + EASTL_FAIL_MSG("deque::at -- out of range"); + #endif + return *(mItBegin.operator+((difference_type)n)); + } + + + template + typename deque::reference + deque::front() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::front -- empty deque"); + #else + // We allow the user to reference an empty container. + #endif + + return *mItBegin; + } + + + template + typename deque::const_reference + deque::front() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::front -- empty deque"); + #else + // We allow the user to reference an empty container. 
+ #endif + + return *mItBegin; + } + + + template + typename deque::reference + deque::back() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::back -- empty deque"); + #else + // We allow the user to reference an empty container. + #endif + + return *iterator(mItEnd, typename iterator::Decrement()); + } + + + template + typename deque::const_reference + deque::back() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::back -- empty deque"); + #else + // We allow the user to reference an empty container. + #endif + + return *iterator(mItEnd, typename iterator::Decrement()); + } + + + template + void deque::push_front(const value_type& value) + { + emplace_front(value); + } + + + template + void deque::push_front(value_type&& value) + { + emplace_front(eastl::move(value)); + } + + + template + typename deque::reference + deque::push_front() + { + emplace_front(value_type()); + return *mItBegin; // Same as return front(); + } + + + template + void deque::push_back(const value_type& value) + { + emplace_back(value); + } + + + template + void deque::push_back(value_type&& value) + { + emplace_back(eastl::move(value)); + } + + + template + typename deque::reference + deque::push_back() + { + emplace_back(value_type()); + return *iterator(mItEnd, typename iterator::Decrement()); // Same thing as return back(); + } + + + template + void deque::pop_front() + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::pop_front -- empty deque"); + #endif + + if((mItBegin.mpCurrent + 1) != mItBegin.mpEnd) // If the operation is very simple... + (mItBegin.mpCurrent++)->~value_type(); + else + { + // This is executed only when we are popping the end (last) item off the front-most subarray. 
+ // In this case we need to free the subarray and point mItBegin to the next subarray. + #ifdef EA_DEBUG + value_type** pp = mItBegin.mpCurrentArrayPtr; + #endif + + mItBegin.mpCurrent->~value_type(); // mpCurrent == mpEnd - 1 + DoFreeSubarray(mItBegin.mpBegin); + mItBegin.SetSubarray(mItBegin.mpCurrentArrayPtr + 1); + mItBegin.mpCurrent = mItBegin.mpBegin; + + #ifdef EA_DEBUG + *pp = NULL; + #endif + } + } + + + template + void deque::pop_back() + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY((size_type)(mItEnd == mItBegin))) + EASTL_FAIL_MSG("deque::pop_back -- empty deque"); + #endif + + if(mItEnd.mpCurrent != mItEnd.mpBegin) // If the operation is very simple... + (--mItEnd.mpCurrent)->~value_type(); + else + { + // This is executed only when we are popping the first item off the last subarray. + // In this case we need to free the subarray and point mItEnd to the previous subarray. + #ifdef EA_DEBUG + value_type** pp = mItEnd.mpCurrentArrayPtr; + #endif + + DoFreeSubarray(mItEnd.mpBegin); + mItEnd.SetSubarray(mItEnd.mpCurrentArrayPtr - 1); + mItEnd.mpCurrent = mItEnd.mpEnd - 1; // Recall that mItEnd points to one-past the last item in the container. + mItEnd.mpCurrent->~value_type(); // Thus we need to call the destructor on the item *before* that last item. + + #ifdef EA_DEBUG + *pp = NULL; + #endif + } + } + + + template + template + typename deque::iterator + deque::emplace(const_iterator position, Args&&... args) + { + if(EASTL_UNLIKELY(position.mpCurrent == mItEnd.mpCurrent)) // If we are doing the same thing as push_back... + { + emplace_back(eastl::forward(args)...); + return iterator(mItEnd, typename iterator::Decrement()); // Unfortunately, we need to make an iterator here, as the above push_back is an operation that can invalidate existing iterators. + } + else if(EASTL_UNLIKELY(position.mpCurrent == mItBegin.mpCurrent)) // If we are doing the same thing as push_front... 
+ { + emplace_front(eastl::forward(args)...); + return mItBegin; + } + + iterator itPosition(position, typename iterator::FromConst()); + value_type valueSaved(eastl::forward(args)...); // We need to save this because value may come from within our container. It would be somewhat tedious to make a workaround that could avoid this. + const difference_type i(itPosition - mItBegin); + + #if EASTL_ASSERT_ENABLED + EASTL_ASSERT(!empty()); // The push_front and push_back calls below assume that we are non-empty. It turns out this is never called unless so. + + if(EASTL_UNLIKELY(!(validate_iterator(itPosition) & isf_valid))) + EASTL_FAIL_MSG("deque::emplace -- invalid iterator"); + #endif + + if(i < (difference_type)(size() / 2)) // Should we insert at the front or at the back? We divide the range in half. + { + emplace_front(eastl::move(*mItBegin)); // This operation potentially invalidates all existing iterators and so we need to assign them anew relative to mItBegin below. + + itPosition = mItBegin + i; + + const iterator newPosition (itPosition, typename iterator::Increment()); + iterator oldBegin (mItBegin, typename iterator::Increment()); + const iterator oldBeginPlus1(oldBegin, typename iterator::Increment()); + + oldBegin.copy(oldBeginPlus1, newPosition, eastl::has_trivial_relocate()); + } + else + { + emplace_back(eastl::move(*iterator(mItEnd, typename iterator::Decrement()))); + + itPosition = mItBegin + i; + + iterator oldBack (mItEnd, typename iterator::Decrement()); + const iterator oldBackMinus1(oldBack, typename iterator::Decrement()); + + oldBack.copy_backward(itPosition, oldBackMinus1, eastl::has_trivial_relocate()); + } + + *itPosition = eastl::move(valueSaved); + + return itPosition; + } + + template + template + void deque::emplace_front(Args&&... args) + { + if(mItBegin.mpCurrent != mItBegin.mpBegin) // If we have room in the first subarray... we hope that usually this 'new' pathway gets executed, as it is slightly faster. 
+ ::new((void*)--mItBegin.mpCurrent) value_type(eastl::forward(args)...); // Construct in place. If args is a single arg of type value_type&& then it this will be a move construction. + else + { + // To consider: Detect if value isn't coming from within this container and handle that efficiently. + value_type valueSaved(eastl::forward(args)...); // We need to make a temporary, because args may be a value_type that comes from within our container and the operations below may change the container. But we can use move instead of copy. + + if(mItBegin.mpCurrentArrayPtr == mpPtrArray) // If there are no more pointers in front of the current (first) one... + DoReallocPtrArray(1, kSideFront); + + mItBegin.mpCurrentArrayPtr[-1] = DoAllocateSubarray(); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + mItBegin.SetSubarray(mItBegin.mpCurrentArrayPtr - 1); + mItBegin.mpCurrent = mItBegin.mpEnd - 1; + ::new((void*)mItBegin.mpCurrent) value_type(eastl::move(valueSaved)); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + ++mItBegin; // The exception could only occur in the new operation above, after we have incremented mItBegin. So we need to undo it. + DoFreeSubarray(mItBegin.mpCurrentArrayPtr[-1]); + throw; + } + #endif + } + } + + template + template + void deque::emplace_back(Args&&... args) + { + if((mItEnd.mpCurrent + 1) != mItEnd.mpEnd) // If we have room in the last subarray... we hope that usually this 'new' pathway gets executed, as it is slightly faster. + ::new((void*)mItEnd.mpCurrent++) value_type(eastl::forward(args)...); // Construct in place. If args is a single arg of type value_type&& then it this will be a move construction. + else + { + // To consider: Detect if value isn't coming from within this container and handle that efficiently. + value_type valueSaved(eastl::forward(args)...); // We need to make a temporary, because args may be a value_type that comes from within our container and the operations below may change the container. 
But we can use move instead of copy. + if(((mItEnd.mpCurrentArrayPtr - mpPtrArray) + 1) >= (difference_type)mnPtrArraySize) // If there are no more pointers after the current (last) one. + DoReallocPtrArray(1, kSideBack); + + mItEnd.mpCurrentArrayPtr[1] = DoAllocateSubarray(); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new((void*)mItEnd.mpCurrent) value_type(eastl::move(valueSaved)); // We can move valueSaved into position. + mItEnd.SetSubarray(mItEnd.mpCurrentArrayPtr + 1); + mItEnd.mpCurrent = mItEnd.mpBegin; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + // No need to execute '--mItEnd', as the exception could only occur in the new operation above before we set mItEnd. + DoFreeSubarray(mItEnd.mpCurrentArrayPtr[1]); + throw; + } + #endif + } + } + + + template + typename deque::iterator + deque::insert(const_iterator position, const value_type& value) + { + return emplace(position, value); + } + + + template + typename deque::iterator + deque::insert(const_iterator position, value_type&& value) + { + return emplace(position, eastl::move(value)); + } + + + template + void deque::insert(const_iterator position, size_type n, const value_type& value) + { + DoInsertValues(position, n, value); + } + + + template + template + void deque::insert(const_iterator position, InputIterator first, InputIterator last) + { + DoInsert(position, first, last, is_integral()); // The C++ standard requires this sort of behaviour, as InputIterator might actually be Integer and 'first' is really 'count' and 'last' is really 'value'. 
+ } + + + template + typename deque::iterator + deque::insert(const_iterator position, std::initializer_list ilist) + { + const difference_type i(position - mItBegin); + DoInsert(position, ilist.begin(), ilist.end(), false_type()); + return (mItBegin + i); + } + + + template + typename deque::iterator + deque::erase(const_iterator position) + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(validate_iterator(position) & isf_valid))) + EASTL_FAIL_MSG("deque::erase -- invalid iterator"); + + if(EASTL_UNLIKELY(position == end())) + EASTL_FAIL_MSG("deque::erase -- end() iterator is an invalid iterator for erase"); + #endif + + iterator itPosition(position, typename iterator::FromConst()); + iterator itNext(itPosition, typename iterator::Increment()); + const difference_type i(itPosition - mItBegin); + + if(i < (difference_type)(size() / 2)) // Should we move the front entries forward or the back entries backward? We divide the range in half. + { + itNext.copy_backward(mItBegin, itPosition, eastl::has_trivial_relocate()); + pop_front(); + } + else + { + itPosition.copy(itNext, mItEnd, eastl::has_trivial_relocate()); + pop_back(); + } + + return mItBegin + i; + } + + + template + typename deque::iterator + deque::erase(const_iterator first, const_iterator last) + { + iterator itFirst(first, typename iterator::FromConst()); + iterator itLast(last, typename iterator::FromConst()); + + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(validate_iterator(itFirst) & isf_valid))) + EASTL_FAIL_MSG("deque::erase -- invalid iterator"); + if(EASTL_UNLIKELY(!(validate_iterator(itLast) & isf_valid))) + EASTL_FAIL_MSG("deque::erase -- invalid iterator"); + #endif + + if((itFirst != mItBegin) || (itLast != mItEnd)) // If not erasing everything... (We expect that the user won't call erase(begin, end) because instead the user would just call clear.) 
+ { + const difference_type n(itLast - itFirst); + const difference_type i(itFirst - mItBegin); + + if(i < (difference_type)((size() - n) / 2)) // Should we move the front entries forward or the back entries backward? We divide the range in half. + { + const iterator itNewBegin(mItBegin + n); + value_type** const pPtrArrayBegin = mItBegin.mpCurrentArrayPtr; + + itLast.copy_backward(mItBegin, itFirst, eastl::has_trivial_relocate()); + + for(; mItBegin != itNewBegin; ++mItBegin) // Question: If value_type is a POD type, will the compiler generate this loop at all? + mItBegin.mpCurrent->~value_type(); // If so, then we need to make a specialization for destructing PODs. + + DoFreeSubarrays(pPtrArrayBegin, itNewBegin.mpCurrentArrayPtr); + + // mItBegin = itNewBegin; <-- Not necessary, as the above loop makes it so already. + } + else // Else we will be moving back entries backward. + { + iterator itNewEnd(mItEnd - n); + value_type** const pPtrArrayEnd = itNewEnd.mpCurrentArrayPtr + 1; + + itFirst.copy(itLast, mItEnd, eastl::has_trivial_relocate()); + + for(iterator itTemp(itNewEnd); itTemp != mItEnd; ++itTemp) + itTemp.mpCurrent->~value_type(); + + DoFreeSubarrays(pPtrArrayEnd, mItEnd.mpCurrentArrayPtr + 1); + + mItEnd = itNewEnd; + } + + return mItBegin + i; + } + + clear(); + return mItEnd; + } + + + template + typename deque::reverse_iterator + deque::erase(reverse_iterator position) + { + return reverse_iterator(erase((++position).base())); + } + + + template + typename deque::reverse_iterator + deque::erase(reverse_iterator first, reverse_iterator last) + { + // Version which erases in order from first to last. 
+ // difference_type i(first.base() - last.base()); + // while(i--) + // first = erase(first); + // return first; + + // Version which erases in order from last to first, but is slightly more efficient: + return reverse_iterator(erase(last.base(), first.base())); + } + + + template + void deque::clear() + { + // Destroy all values and all subarrays they belong to, except for the first one, + // as we need to reserve some space for a valid mItBegin/mItEnd. + if(mItBegin.mpCurrentArrayPtr != mItEnd.mpCurrentArrayPtr) // If there are multiple subarrays (more often than not, this will be so)... + { + for(value_type* p1 = mItBegin.mpCurrent; p1 < mItBegin.mpEnd; ++p1) + p1->~value_type(); + for(value_type* p2 = mItEnd.mpBegin; p2 < mItEnd.mpCurrent; ++p2) + p2->~value_type(); + DoFreeSubarray(mItEnd.mpBegin); // Leave mItBegin with a valid subarray. + } + else + { + for(value_type* p = mItBegin.mpCurrent; p < mItEnd.mpCurrent; ++p) + p->~value_type(); + // Don't free the one existing subarray, as we need it for mItBegin/mItEnd. + } + + for(value_type** pPtrArray = mItBegin.mpCurrentArrayPtr + 1; pPtrArray < mItEnd.mpCurrentArrayPtr; ++pPtrArray) + { + for(value_type* p = *pPtrArray, *pEnd = *pPtrArray + kDequeSubarraySize; p < pEnd; ++p) + p->~value_type(); + DoFreeSubarray(*pPtrArray); + } + + mItEnd = mItBegin; // mItBegin/mItEnd will not be dereferencable. + } + + + //template + //void deque::reset_lose_memory() + //{ + // // The reset_lose_memory function is a special extension function which unilaterally + // // resets the container to an empty state without freeing the memory of + // // the contained objects. This is useful for very quickly tearing down a + // // container built into scratch memory. + // + // // Currently we are unable to get this reset_lose_memory operation to work correctly + // // as we haven't been able to find a good way to have a deque initialize + // // without allocating memory. 
We can lose the old memory, but DoInit + // // would necessarily do a ptrArray allocation. And this is not within + // // our definition of how reset_lose_memory works. + // base_type::DoInit(0); + // + //} + + + template + void deque::swap(deque& x) + { + #if defined(EASTL_DEQUE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR) && EASTL_DEQUE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR + if(mAllocator == x.mAllocator) // If allocators are equivalent... + DoSwap(x); + else // else swap the contents. + { + const this_type temp(*this); // Can't call eastl::swap because that would + *this = x; // itself call this member swap function. + x = temp; + } + #else + // NOTE(rparolin): The previous implementation required T to be copy-constructible in the fall-back case where + // allocators with unique instances copied elements. This was an unnecessary restriction and prevented the common + // usage of deque with non-copyable types (eg. eastl::deque or eastl::deque). + // + // The previous implementation violated the following requirements of deque::swap so the fall-back code has + // been removed. EASTL implicitly defines 'propagate_on_container_swap = false' therefore the fall-back case is + // undefined behaviour. We simply swap the contents and the allocator as that is the common expectation of + // users and does not put the container into an invalid state since it can not free its memory via its current + // allocator instance. + // + DoSwap(x); + #endif + } + + + template + template + void deque::DoInit(Integer n, Integer value, true_type) + { + base_type::DoInit(n); // Call the base uninitialized init function. 
+ DoFillInit(value); + } + + + template + template + void deque::DoInit(InputIterator first, InputIterator last, false_type) + { + typedef typename eastl::iterator_traits::iterator_category IC; + DoInitFromIterator(first, last, IC()); + } + + + template + template + void deque::DoInitFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag) + { + base_type::DoInit(0); // Call the base uninitialized init function, but don't actually allocate any values. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // We have little choice but to turn through the source iterator and call + // push_back for each item. It can be slow because it will keep reallocating the + // container memory as we go. We are not allowed to use distance() on an InputIterator. + for(; first != last; ++first) // InputIterators by definition actually only allow you to iterate through them once. + { // Thus the standard *requires* that we do this (inefficient) implementation. + push_back(*first); // Luckily, InputIterators are in practice almost never used, so this code will likely never get executed. + } + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + clear(); + throw; + } + #endif + } + + + template + template + void deque::DoInitFromIterator(ForwardIterator first, ForwardIterator last, EASTL_ITC_NS::forward_iterator_tag) + { + typedef typename eastl::remove_const::type non_const_iterator_type; // If T is a const type (e.g. const int) then we need to initialize it as if it were non-const. + typedef typename eastl::remove_const::type non_const_value_type; + + const size_type n = (size_type)eastl::distance(first, last); + value_type** pPtrArrayCurrent; + + base_type::DoInit(n); // Call the base uninitialized init function. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(pPtrArrayCurrent = mItBegin.mpCurrentArrayPtr; pPtrArrayCurrent < mItEnd.mpCurrentArrayPtr; ++pPtrArrayCurrent) // Copy to the known-to-be-completely-used subarrays. 
+ { + // We implment an algorithm here whereby we use uninitialized_copy() and advance() instead of just iterating from first to last and constructing as we go. The reason for this is that we can take advantage of POD data types and implement construction as memcpy operations. + ForwardIterator current(first); // To do: Implement a specialization of this algorithm for non-PODs which eliminates the need for 'current'. + + eastl::advance(current, kDequeSubarraySize); + eastl::uninitialized_copy((non_const_iterator_type)first, (non_const_iterator_type)current, (non_const_value_type*)*pPtrArrayCurrent); + first = current; + } + + eastl::uninitialized_copy((non_const_iterator_type)first, (non_const_iterator_type)last, (non_const_value_type*)mItEnd.mpBegin); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(iterator itCurrent(mItBegin), itEnd(pPtrArrayCurrent, *pPtrArrayCurrent); itCurrent != itEnd; ++itCurrent) + itCurrent.mpCurrent->~value_type(); + throw; + } + #endif + } + + + template + void deque::DoFillInit(const value_type& value) + { + value_type** pPtrArrayCurrent = mItBegin.mpCurrentArrayPtr; + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + while(pPtrArrayCurrent < mItEnd.mpCurrentArrayPtr) + { + eastl::uninitialized_fill(*pPtrArrayCurrent, *pPtrArrayCurrent + kDequeSubarraySize, value); + ++pPtrArrayCurrent; + } + eastl::uninitialized_fill(mItEnd.mpBegin, mItEnd.mpCurrent, value); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(iterator itCurrent(mItBegin), itEnd(pPtrArrayCurrent, *pPtrArrayCurrent); itCurrent != itEnd; ++itCurrent) + itCurrent.mpCurrent->~value_type(); + throw; + } + #endif + } + + + template + template + void deque::DoAssign(Integer n, Integer value, true_type) // false_type means this is the integer version instead of iterator version. 
+ { + DoAssignValues(static_cast(n), static_cast(value)); + } + + + template + template + void deque::DoAssign(InputIterator first, InputIterator last, false_type) // false_type means this is the iterator version instead of integer version. + { + // Actually, the implementation below requires first/last to be a ForwardIterator and not just an InputIterator. + // But Paul Pedriana if you somehow need to work with an InputIterator and we can deal with it. + const size_type n = (size_type)eastl::distance(first, last); + const size_type nSize = size(); + + if(n > nSize) // If we are increasing the size... + { + InputIterator atEnd(first); + + eastl::advance(atEnd, (difference_type)nSize); + eastl::copy(first, atEnd, mItBegin); + insert(mItEnd, atEnd, last); + } + else // n is <= size. + { + iterator itEnd(eastl::copy(first, last, mItBegin)); + + if(n < nSize) // If we need to erase any trailing elements... + erase(itEnd, mItEnd); + } + } + + + template + void deque::DoAssignValues(size_type n, const value_type& value) + { + const size_type nSize = size(); + + if(n > nSize) // If we are increasing the size... 
+ { + eastl::fill(mItBegin, mItEnd, value); + insert(mItEnd, n - nSize, value); + } + else + { + erase(mItBegin + (difference_type)n, mItEnd); + eastl::fill(mItBegin, mItEnd, value); + } + } + + + template + template + void deque::DoInsert(const const_iterator& position, Integer n, Integer value, true_type) + { + DoInsertValues(position, (size_type)n, (value_type)value); + } + + + template + template + void deque::DoInsert(const const_iterator& position, const InputIterator& first, const InputIterator& last, false_type) + { + typedef typename eastl::iterator_traits::iterator_category IC; + DoInsertFromIterator(position, first, last, IC()); + } + + + template + template + void deque::DoInsertFromIterator(const_iterator position, const InputIterator& first, const InputIterator& last, EASTL_ITC_NS::forward_iterator_tag) + { + const size_type n = (size_type)eastl::distance(first, last); + + // This implementation is nearly identical to DoInsertValues below. + // If you make a bug fix to one, you will likely want to fix the other. + if(position.mpCurrent == mItBegin.mpCurrent) // If inserting at the beginning or into an empty container... + { + iterator itNewBegin(DoReallocSubarray(n, kSideFront)); // itNewBegin to mItBegin refers to memory that isn't initialized yet; so it's not truly a valid iterator. Or at least not a dereferencable one. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // We would like to use move here instead of copy when possible, which would be useful for + // when inserting from a std::initializer_list, for example. + // To do: solve this by having a template or runtime parameter which specifies move vs copy. + eastl::uninitialized_copy(first, last, itNewBegin); + mItBegin = itNewBegin; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr); + throw; + } + #endif + } + else if(EASTL_UNLIKELY(position.mpCurrent == mItEnd.mpCurrent)) // If inserting at the end (i.e. 
appending)... + { + const iterator itNewEnd(DoReallocSubarray(n, kSideBack)); // mItEnd to itNewEnd refers to memory that isn't initialized yet; so it's not truly a valid iterator. Or at least not a dereferencable one. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // We would like to use move here instead of copy when possible, which would be useful for + // when inserting from a std::initializer_list, for example. + // To do: solve this by having a template or runtime parameter which specifies move vs copy. + eastl::uninitialized_copy(first, last, mItEnd); + mItEnd = itNewEnd; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1); + throw; + } + #endif + } + else + { + const difference_type nInsertionIndex = position - mItBegin; + const size_type nSize = size(); + + if(nInsertionIndex < (difference_type)(nSize / 2)) // If the insertion index is in the front half of the deque... grow the deque at the front. + { + const iterator itNewBegin(DoReallocSubarray(n, kSideFront)); // itNewBegin to mItBegin refers to memory that isn't initialized yet; so it's not truly a valid iterator. Or at least not a dereferencable one. + const iterator itOldBegin(mItBegin); + const iterator itPosition(mItBegin + nInsertionIndex); // We need to reset this value because the reallocation above can invalidate iterators. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // We have a problem here: we would like to use move instead of copy, but it may be that the range to be inserted comes from + // this container and comes from the segment we need to move. So we can't use move operations unless we are careful to handle + // that situation. The newly inserted contents must be contents that were moved to and not moved from. To do: solve this. + if(nInsertionIndex >= (difference_type)n) // If the newly inserted items will be entirely within the old area... 
+ { + iterator itUCopyEnd(mItBegin + (difference_type)n); + + eastl::uninitialized_copy(mItBegin, itUCopyEnd, itNewBegin); // This can throw. + itUCopyEnd = eastl::copy(itUCopyEnd, itPosition, itOldBegin); // Recycle 'itUCopyEnd' to mean something else. + eastl::copy(first, last, itUCopyEnd); + } + else // Else the newly inserted items are going within the newly allocated area at the front. + { + InputIterator mid(first); + + eastl::advance(mid, (difference_type)n - nInsertionIndex); + eastl::uninitialized_copy_copy(mItBegin, itPosition, first, mid, itNewBegin); // This can throw. + eastl::copy(mid, last, itOldBegin); + } + mItBegin = itNewBegin; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr); + throw; + } + #endif + } + else + { + const iterator itNewEnd(DoReallocSubarray(n, kSideBack)); + const iterator itOldEnd(mItEnd); + const difference_type nPushedCount = (difference_type)nSize - nInsertionIndex; + const iterator itPosition(mItEnd - nPushedCount); // We need to reset this value because the reallocation above can invalidate iterators. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // We have a problem here: we would like to use move instead of copy, but it may be that the range to be inserted comes from + // this container and comes from the segment we need to move. So we can't use move operations unless we are careful to handle + // that situation. The newly inserted contents must be contents that were moved to and not moved from. To do: solve this. 
+ if(nPushedCount > (difference_type)n) + { + const iterator itUCopyEnd(mItEnd - (difference_type)n); + + eastl::uninitialized_copy(itUCopyEnd, mItEnd, mItEnd); + eastl::copy_backward(itPosition, itUCopyEnd, itOldEnd); + eastl::copy(first, last, itPosition); + } + else + { + InputIterator mid(first); + + eastl::advance(mid, nPushedCount); + eastl::uninitialized_copy_copy(mid, last, itPosition, mItEnd, mItEnd); + eastl::copy(first, mid, itPosition); + } + mItEnd = itNewEnd; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1); + throw; + } + #endif + } + } + } + + + template + void deque::DoInsertValues(const_iterator position, size_type n, const value_type& value) + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(!(validate_iterator(position) & isf_valid))) + EASTL_FAIL_MSG("deque::insert -- invalid iterator"); + #endif + + // This implementation is nearly identical to DoInsertFromIterator above. + // If you make a bug fix to one, you will likely want to fix the other. + if(position.mpCurrent == mItBegin.mpCurrent) // If inserting at the beginning... + { + const iterator itNewBegin(DoReallocSubarray(n, kSideFront)); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // Note that we don't make a temp copy of 'value' here. This is because in a + // deque, insertion at either the front or back doesn't cause a reallocation + // or move of data in the middle. That's a key feature of deques, in fact. + eastl::uninitialized_fill(itNewBegin, mItBegin, value); + mItBegin = itNewBegin; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr); + throw; + } + #endif + } + else if(EASTL_UNLIKELY(position.mpCurrent == mItEnd.mpCurrent)) // If inserting at the end (i.e. appending)... 
+ { + const iterator itNewEnd(DoReallocSubarray(n, kSideBack)); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // Note that we don't make a temp copy of 'value' here. This is because in a + // deque, insertion at either the front or back doesn't cause a reallocation + // or move of data in the middle. That's a key feature of deques, in fact. + eastl::uninitialized_fill(mItEnd, itNewEnd, value); + mItEnd = itNewEnd; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1); + throw; + } + #endif + } + else + { + // A key purpose of a deque is to implement insertions and removals more efficiently + // than with a vector. We are inserting into the middle of the deque here. A quick and + // dirty implementation of this would be to reallocate the subarrays and simply push + // all values in the middle upward like you would do with a vector. Instead we implement + // the minimum amount of reallocations needed but may need to do some value moving, + // as the subarray sizes need to remain constant and can have no holes in them. + const difference_type nInsertionIndex = position - mItBegin; + const size_type nSize = size(); + const value_type valueSaved(value); + + if(nInsertionIndex < (difference_type)(nSize / 2)) // If the insertion index is in the front half of the deque... grow the deque at the front. + { + const iterator itNewBegin(DoReallocSubarray(n, kSideFront)); + const iterator itOldBegin(mItBegin); + const iterator itPosition(mItBegin + nInsertionIndex); // We need to reset this value because the reallocation above can invalidate iterators. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + if(nInsertionIndex >= (difference_type)n) // If the newly inserted items will be entirely within the old area... + { + iterator itUCopyEnd(mItBegin + (difference_type)n); + + eastl::uninitialized_move_if_noexcept(mItBegin, itUCopyEnd, itNewBegin); // This can throw. 
+ itUCopyEnd = eastl::move(itUCopyEnd, itPosition, itOldBegin); // Recycle 'itUCopyEnd' to mean something else. + eastl::fill(itUCopyEnd, itPosition, valueSaved); + } + else // Else the newly inserted items are going within the newly allocated area at the front. + { + eastl::uninitialized_move_fill(mItBegin, itPosition, itNewBegin, mItBegin, valueSaved); // This can throw. + eastl::fill(itOldBegin, itPosition, valueSaved); + } + mItBegin = itNewBegin; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeSubarrays(itNewBegin.mpCurrentArrayPtr, mItBegin.mpCurrentArrayPtr); + throw; + } + #endif + } + else // Else the insertion index is in the back half of the deque, so grow the deque at the back. + { + const iterator itNewEnd(DoReallocSubarray(n, kSideBack)); + const iterator itOldEnd(mItEnd); + const difference_type nPushedCount = (difference_type)nSize - nInsertionIndex; + const iterator itPosition(mItEnd - nPushedCount); // We need to reset this value because the reallocation above can invalidate iterators. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + if(nPushedCount > (difference_type)n) // If the newly inserted items will be entirely within the old area... + { + iterator itUCopyEnd(mItEnd - (difference_type)n); + + eastl::uninitialized_move_if_noexcept(itUCopyEnd, mItEnd, mItEnd); // This can throw. + itUCopyEnd = eastl::move_backward(itPosition, itUCopyEnd, itOldEnd); // Recycle 'itUCopyEnd' to mean something else. + eastl::fill(itPosition, itUCopyEnd, valueSaved); + } + else // Else the newly inserted items are going within the newly allocated area at the back. + { + eastl::uninitialized_fill_move(mItEnd, itPosition + (difference_type)n, valueSaved, itPosition, mItEnd); // This can throw. + eastl::fill(itPosition, itOldEnd, valueSaved); + } + mItEnd = itNewEnd; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) 
+ { + DoFreeSubarrays(mItEnd.mpCurrentArrayPtr + 1, itNewEnd.mpCurrentArrayPtr + 1); + throw; + } + #endif + } + } + } + + + template + inline void deque::DoSwap(this_type& x) + { + eastl::swap(mpPtrArray, x.mpPtrArray); + eastl::swap(mnPtrArraySize, x.mnPtrArraySize); + eastl::swap(mItBegin, x.mItBegin); + eastl::swap(mItEnd, x.mItEnd); + eastl::swap(mAllocator, x.mAllocator); // We do this even if EASTL_ALLOCATOR_COPY_ENABLED is 0. + + } + + + template + inline bool deque::validate() const + { + // To do: More detailed validation. + // To do: Try to make the validation resistant to crashes if the data is invalid. + if((end() - begin()) < 0) + return false; + return true; + } + + + template + inline int deque::validate_iterator(const_iterator i) const + { + // To do: We don't currently track isf_current, will need to make it do so. + // To do: Fix the validation below, as it will not catch all invalid iterators. + if((i - begin()) < 0) + return isf_none; + + if((end() - i) < 0) + return isf_none; + + if(i == end()) + return (isf_valid | isf_current); + + return (isf_valid | isf_current | isf_can_dereference); + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const deque& a, const deque& b) + { + return ((a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin())); + } + + template + inline bool operator!=(const deque& a, const deque& b) + { + return ((a.size() != b.size()) || !eastl::equal(a.begin(), a.end(), b.begin())); + } + + template + inline bool operator<(const deque& a, const deque& b) + { + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + template + inline bool operator>(const deque& a, const deque& b) + { + return b < a; + } + + template + inline bool operator<=(const deque& a, const deque& b) + { + return !(b < a); + } + + template + inline 
bool operator>=(const deque& a, const deque& b) + { + return !(a < b); + } + + template + inline void swap(deque& a, deque& b) + { + a.swap(b); + } + + /////////////////////////////////////////////////////////////////////// + // erase / erase_if + // + // https://en.cppreference.com/w/cpp/container/deque/erase2 + /////////////////////////////////////////////////////////////////////// + template + void erase(deque& c, const U& value) + { + // Erases all elements that compare equal to value from the container. + c.erase(eastl::remove(c.begin(), c.end(), value), c.end()); + } + + template + void erase_if(deque& c, Predicate predicate) + { + // Erases all elements that satisfy the predicate pred from the container. + c.erase(eastl::remove_if(c.begin(), c.end(), predicate), c.end()); + } + + +} // namespace eastl + + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +#endif // Header include guard + + + + + + + diff --git a/include/EASTL/finally.h b/include/EASTL/finally.h new file mode 100644 index 0000000..b4ed580 --- /dev/null +++ b/include/EASTL/finally.h @@ -0,0 +1,93 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// eastl::finally is an implementation of the popular cpp idiom RAII - Resource +// Acquisition Is Initialization. eastl::finally guarantees that the user +// provided callable will be executed upon whatever mechanism is used to leave +// the current scope. This can guard against user errors but this is a popular +// technique to write robust code in execution environments that have exceptions +// enabled. +// +// Example: +// void foo() +// { +// void* p = malloc(128); +// auto _ = eastl::make_finally([&] { free(p); }); +// +// // Code that may throw an exception... 
+// +// } // eastl::finally guaranteed to call 'free' at scope exit. +// +// References: +// * https://www.bfilipek.com/2017/04/finalact.html +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FINALLY_H +#define EASTL_FINALLY_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include + +namespace eastl +{ + /////////////////////////////////////////////////////////////////////////// + // finally + // + // finally is the type that calls the users callback on scope exit. + // + template + class finally + { + static_assert(!eastl::is_lvalue_reference_v, "eastl::finally requires the callable is passed as an rvalue reference."); + + Functor m_functor; + bool m_engaged = false; + + public: + finally(Functor f) : m_functor(eastl::move(f)), m_engaged(true) {} + + finally(finally&& other) : m_functor(eastl::move(other.m_functor)), m_engaged(other.m_engaged) + { + other.dismiss(); + } + + ~finally() { execute(); } + + finally(const finally&) = delete; + finally& operator=(const finally&) = delete; + finally& operator=(finally&&) = delete; + + inline void dismiss() { m_engaged = false; } + + inline void execute() + { + if (m_engaged) + m_functor(); + + dismiss(); + } + }; + + + /////////////////////////////////////////////////////////////////////////// + // make_finally + // + // this utility function is the standard mechansim to perform the required + // type deduction on the users provided callback inorder to create a + // 'finally' object. + // + template + auto make_finally(F&& f) + { + return finally(eastl::forward(f)); + } +} + +#endif // EASTL_FINALLY_H diff --git a/include/EASTL/fixed_allocator.h b/include/EASTL/fixed_allocator.h new file mode 100644 index 0000000..e6a12ea --- /dev/null +++ b/include/EASTL/fixed_allocator.h @@ -0,0 +1,466 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements the following +// fixed_allocator +// fixed_allocator_with_overflow +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_ALLOCATOR_H +#define EASTL_FIXED_ALLOCATOR_H + + +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(push, 0) + #include + #pragma warning(pop) +#else + #include +#endif + +#if defined(_MSC_VER) + #pragma warning(push) + #pragma warning(disable: 4275) // non dll-interface class used as base for DLL-interface classkey 'identifier' +#endif + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /////////////////////////////////////////////////////////////////////////// + // fixed_allocator + /////////////////////////////////////////////////////////////////////////// + + /// fixed_allocator + /// + /// Implements an allocator which allocates a single fixed size where + /// the size, alignment, and memory used for the pool is defined at + /// runtime by the user. This is different from fixed containers + /// such as fixed_list whereby the size and alignment are determined + /// at compile time and the memory is directly built into the container's + /// member data. + /// + /// If the pool's memory is exhausted or was never initialized, the + /// allocate function returns NULL. Consider the fixed_allocator_with_overflow + /// class as an alternative in order to deal with this situation. + /// + /// This class requires the user to call container.get_allocator().init() + /// after constructing the container. 
There currently isn't a way to + /// construct the container with the initialization parameters, though + /// with some effort such a thing could probably be made possible. + /// It's not as simple as it might first seem, due to the non-copyable + /// nature of fixed allocators. A side effect of this limitation is that + /// you cannot copy-construct a container using fixed_allocators. + /// + /// Another side-effect is that you cannot swap two containers using + /// a fixed_allocator, as a swap requires temporary memory allocated by + /// an equivalent allocator, and such a thing cannot be done implicitly. + /// A workaround for the swap limitation is that you can implement your + /// own swap whereby you provide an explicitly created temporary object. + /// + /// Note: Be careful to set the allocator's node size to the size of the + /// container node and not the size of the contained object. Note that the + /// example code below uses IntListNode. + /// + /// Example usage: + /// typedef eastl::list IntList; + /// typedef IntList::node_type IntListNode; + /// + /// IntListNode buffer[200]; + /// IntList intList; + /// intList.get_allocator().init(buffer, sizeof(buffer), sizeof(IntListNode), __alignof(IntListNode)); + /// + class EASTL_API fixed_allocator : public fixed_pool_base + { + public: + /// fixed_allocator + /// + /// Default constructor. The user usually will need to call init() after + /// constructing via this constructor. + /// + fixed_allocator(const char* /*pName*/ = EASTL_FIXED_POOL_DEFAULT_NAME) + : fixed_pool_base(NULL) + { + } + + + /// fixed_allocator + /// + /// Copy constructor. The user usually will need to call init() after + /// constructing via this constructor. By their nature, fixed-allocators + /// cannot be copied in any useful way, as by their nature the user + /// must manually initialize them. 
+ /// + fixed_allocator(const fixed_allocator&) + : fixed_pool_base(NULL) + { + } + + + /// operator= + /// + /// By their nature, fixed-allocators cannot be copied in any + /// useful way, as by their nature the user must manually + /// initialize them. + /// + fixed_allocator& operator=(const fixed_allocator&) + { + return *this; + } + + + // init + // + // No init here, as the base class version is sufficient. + // + //void init(void* pMemory, size_t memorySize, size_t nodeSize, + // size_t alignment, size_t alignmentOffset = 0); + + + /// allocate + /// + /// Allocates a new object of the size specified upon class initialization. + /// Returns NULL if there is no more memory. + /// + void* allocate(size_t n, int /*flags*/ = 0) + { + // To consider: Verify that 'n' is what the user initialized us with. + + Link* pLink = mpHead; + + if(pLink) // If we have space... + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(++mnCurrentSize > mnPeakSize) + mnPeakSize = mnCurrentSize; + #endif + + mpHead = pLink->mpNext; + return pLink; + } + else + { + // If there's no free node in the free list, just + // allocate another from the reserved memory area + + if(mpNext != mpCapacity) + { + pLink = mpNext; + + mpNext = reinterpret_cast(reinterpret_cast(mpNext) + n); + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(++mnCurrentSize > mnPeakSize) + mnPeakSize = mnCurrentSize; + #endif + + return pLink; + } + + // EASTL_ASSERT(false); To consider: enable this assert. However, we intentionally disable it because this isn't necessarily an assertable error. + return NULL; + } + } + + + /// allocate + /// + void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0) + { + return allocate(n, flags); + } + + + /// deallocate + /// + /// Frees the given object which was allocated by allocate(). + /// If the given node was not allocated by allocate() then the behaviour + /// is undefined. 
+ /// + void deallocate(void* p, size_t) + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + --mnCurrentSize; + #endif + + ((Link*)p)->mpNext = mpHead; + mpHead = ((Link*)p); + } + + + using fixed_pool_base::can_allocate; + + + const char* get_name() const + { + return EASTL_FIXED_POOL_DEFAULT_NAME; + } + + + void set_name(const char*) + { + // Nothing to do. We don't allocate memory. + } + + }; // fixed_allocator + + bool operator==(const fixed_allocator& a, const fixed_allocator& b); + bool operator!=(const fixed_allocator& a, const fixed_allocator& b); + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_allocator_with_overflow + /////////////////////////////////////////////////////////////////////////// + + /// fixed_allocator_with_overflow + /// + /// Implements an allocator which allocates a single fixed size where + /// the size, alignment, and memory used for the pool is defined at + /// runtime by the user. This is different from fixed containers + /// such as fixed_list whereby the size and alignment are determined + /// at compile time and the memory is directly built into the container's + /// member data. + /// + /// Note: Be careful to set the allocator's node size to the size of the + /// container node and not the size of the contained object. Note that the + /// example code below uses IntListNode. + /// + /// This class requires the user to call container.get_allocator().init() + /// after constructing the container. There currently isn't a way to + /// construct the container with the initialization parameters, though + /// with some effort such a thing could probably be made possible. + /// It's not as simple as it might first seem, due to the non-copyable + /// nature of fixed allocators. A side effect of this limitation is that + /// you cannot copy-construct a container using fixed_allocators. 
+ /// + /// Another side-effect is that you cannot swap two containers using + /// a fixed_allocator, as a swap requires temporary memory allocated by + /// an equivalent allocator, and such a thing cannot be done implicitly. + /// A workaround for the swap limitation is that you can implement your + /// own swap whereby you provide an explicitly created temporary object. + /// + /// Example usage: + /// typedef eastl::list IntList; + /// typedef IntList::node_type IntListNode; + /// + /// IntListNode buffer[200]; + /// IntList intList; + /// intList.get_allocator().init(buffer, sizeof(buffer), sizeof(IntListNode), __alignof(IntListNode)); + /// + class EASTL_API fixed_allocator_with_overflow : public fixed_pool_base + { + public: + /// fixed_allocator_with_overflow + /// + /// Default constructor. The user usually will need to call init() after + /// constructing via this constructor. + /// + fixed_allocator_with_overflow(const char* pName = EASTL_FIXED_POOL_DEFAULT_NAME) + : fixed_pool_base(NULL) + , mOverflowAllocator(pName) + , mpPoolBegin(nullptr) + , mpPoolEnd(nullptr) + , mnNodeSize(0) + { + } + + + /// fixed_allocator_with_overflow + /// + /// Copy constructor. The user usually will need to call init() after + /// constructing via this constructor. By their nature, fixed-allocators + /// cannot be copied in any useful way, as by their nature the user + /// must manually initialize them. + /// + fixed_allocator_with_overflow(const fixed_allocator_with_overflow&) + : fixed_pool_base(NULL) + , mpPoolBegin(nullptr) + , mpPoolEnd(nullptr) + , mnNodeSize(0) + { + } + + + /// operator= + /// + /// By their nature, fixed-allocators cannot be copied in any + /// useful way, as by their nature the user must manually + /// initialize them. 
+ /// + fixed_allocator_with_overflow& operator=(const fixed_allocator_with_overflow& x) + { + #if EASTL_ALLOCATOR_COPY_ENABLED + mOverflowAllocator = x.mOverflowAllocator; + #else + (void)x; + #endif + + return *this; + } + + + /// init + /// + void init(void* pMemory, size_t memorySize, size_t nodeSize, + size_t alignment, size_t alignmentOffset = 0) + { + fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset); + + mpPoolBegin = pMemory; + mpPoolEnd = (void*)((uintptr_t)pMemory + memorySize); + mnNodeSize = (eastl_size_t)nodeSize; + } + + + /// allocate + /// + /// Allocates a new object of the size specified upon class initialization. + /// Returns NULL if there is no more memory. + /// + void* allocate(size_t /*n*/, int /*flags*/ = 0) + { + // To consider: Verify that 'n' is what the user initialized us with. + + void* p; + + if(mpHead) // If we have space... + { + p = mpHead; + mpHead = mpHead->mpNext; + } + else + { + // If there's no free node in the free list, just + // allocate another from the reserved memory area + + if (mpNext != mpCapacity) + { + p = mpNext; + mpNext = reinterpret_cast(reinterpret_cast(mpNext) + mnNodeSize); + } + else + p = mOverflowAllocator.allocate(mnNodeSize); + } + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(p && (++mnCurrentSize > mnPeakSize)) + mnPeakSize = mnCurrentSize; + #endif + + return p; + } + + + /// allocate + /// + void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0) + { + return allocate(n, flags); + } + + + /// deallocate + /// + /// Frees the given object which was allocated by allocate(). + /// If the given node was not allocated by allocate() then the behaviour + /// is undefined. 
+ /// + void deallocate(void* p, size_t) + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + --mnCurrentSize; + #endif + + if((p >= mpPoolBegin) && (p < mpPoolEnd)) + { + ((Link*)p)->mpNext = mpHead; + mpHead = ((Link*)p); + } + else + mOverflowAllocator.deallocate(p, (size_t)mnNodeSize); + } + + + using fixed_pool_base::can_allocate; + + + const char* get_name() const + { + return mOverflowAllocator.get_name(); + } + + + void set_name(const char* pName) + { + mOverflowAllocator.set_name(pName); + } + + protected: + EASTLAllocatorType mOverflowAllocator; // To consider: Allow the user to define the type of this, presumably via a template parameter. + void* mpPoolBegin; // To consider: We have these member variables and ideally we shouldn't need them. The problem is that + void* mpPoolEnd; // the information about the pool buffer and object size is stored in the owning container + eastl_size_t mnNodeSize; // and we can't have access to it without increasing the amount of code we need and by templating + // more code. It may turn out that simply storing data here is smaller in the end. + }; // fixed_allocator_with_overflow // Granted, this class is usually used for debugging purposes, but perhaps there is an elegant solution. 
+ + bool operator==(const fixed_allocator_with_overflow& a, const fixed_allocator_with_overflow& b); + bool operator!=(const fixed_allocator_with_overflow& a, const fixed_allocator_with_overflow& b); + + + + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + inline bool operator==(const fixed_allocator&, const fixed_allocator&) + { + return false; + } + + inline bool operator!=(const fixed_allocator&, const fixed_allocator&) + { + return false; + } + + inline bool operator==(const fixed_allocator_with_overflow&, const fixed_allocator_with_overflow&) + { + return false; + } + + inline bool operator!=(const fixed_allocator_with_overflow&, const fixed_allocator_with_overflow&) + { + return false; + } + + +} // namespace eastl + + +#if defined(_MSC_VER) + #pragma warning(pop) +#endif + + +#endif // Header include guard + + + diff --git a/include/EASTL/fixed_function.h b/include/EASTL/fixed_function.h new file mode 100644 index 0000000..6aed768 --- /dev/null +++ b/include/EASTL/fixed_function.h @@ -0,0 +1,218 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FIXED_FUNCTION_H +#define EASTL_FIXED_FUNCTION_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include + +namespace eastl +{ + template + class fixed_function; + + namespace internal + { + template + struct is_fixed_function + : public eastl::false_type {}; + + template + struct is_fixed_function> + : public eastl::true_type {}; + + template + EA_CONSTEXPR bool is_fixed_function_v = is_fixed_function::value; + } + + #define EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(TYPE) \ + static_assert(sizeof(TYPE) <= sizeof(typename Base::FunctorStorageType), \ + "fixed_function local buffer is not large enough to hold the callable object.") + + #define EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES) \ + static_assert(SIZE_IN_BYTES >= NEW_SIZE_IN_BYTES, \ + "fixed_function local buffer is not large enough to hold the new fixed_function type.") + + template + using EASTL_DISABLE_OVERLOAD_IF_FIXED_FUNCTION = + eastl::disable_if_t>>; + + + // fixed_function + // + template + class fixed_function : public internal::function_detail + { + using Base = internal::function_detail; + + public: + using typename Base::result_type; + + fixed_function() EA_NOEXCEPT = default; + fixed_function(std::nullptr_t p) EA_NOEXCEPT + : Base(p) + { + } + + fixed_function(const fixed_function& other) + : Base(other) + { + } + + fixed_function(fixed_function&& other) + : Base(eastl::move(other)) + { + } + + template > + fixed_function(Functor functor) + : Base(eastl::move(functor)) + { + EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(Functor); + } + + template + fixed_function(const fixed_function& other) + : Base(other) + { + EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES); + } + + template + fixed_function(fixed_function&& other) + : Base(eastl::move(other)) + { + EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES); + } 
+ + ~fixed_function() EA_NOEXCEPT = default; + + fixed_function& operator=(const fixed_function& other) + { + Base::operator=(other); + return *this; + } + + fixed_function& operator=(fixed_function&& other) + { + Base::operator=(eastl::move(other)); + return *this; + } + + fixed_function& operator=(std::nullptr_t p) EA_NOEXCEPT + { + Base::operator=(p); + return *this; + } + + template + fixed_function& operator=(const fixed_function& other) + { + EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES); + + Base::operator=(other); + return *this; + } + + template + fixed_function& operator=(fixed_function&& other) + { + EASTL_INTERNAL_FIXED_FUNCTION_NEW_SIZE_STATIC_ASSERT(NEW_SIZE_IN_BYTES); + + Base::operator=(eastl::move(other)); + return *this; + } + + template > + fixed_function& operator=(Functor&& functor) + { + EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(eastl::decay_t); + Base::operator=(eastl::forward(functor)); + return *this; + } + + template + fixed_function& operator=(eastl::reference_wrapper f) EA_NOEXCEPT + { + EASTL_INTERNAL_FIXED_FUNCTION_STATIC_ASSERT(eastl::reference_wrapper); + Base::operator=(f); + return *this; + } + + void swap(fixed_function& other) EA_NOEXCEPT + { + Base::swap(other); + } + + explicit operator bool() const EA_NOEXCEPT + { + return Base::operator bool(); + } + + R operator ()(Args... 
args) const + { + return Base::operator ()(eastl::forward(args)...); + } + + #if EASTL_RTTI_ENABLED + const std::type_info& target_type() const EA_NOEXCEPT + { + return Base::target_type(); + } + + template + Functor* target() EA_NOEXCEPT + { + return Base::target(); + } + + template + const Functor* target() const EA_NOEXCEPT + { + return Base::target(); + } + #endif + }; + + template + bool operator==(const fixed_function& f, std::nullptr_t) EA_NOEXCEPT + { + return !f; + } + + template + bool operator==(std::nullptr_t, const fixed_function& f) EA_NOEXCEPT + { + return !f; + } + + template + bool operator!=(const fixed_function& f, std::nullptr_t) EA_NOEXCEPT + { + return !!f; + } + + template + bool operator!=(std::nullptr_t, const fixed_function& f) EA_NOEXCEPT + { + return !!f; + } + + template + void swap(fixed_function& lhs, fixed_function& rhs) + { + lhs.swap(rhs); + } + +} // namespace eastl + +#endif // EASTL_FIXED_FUNCTION_H diff --git a/include/EASTL/fixed_hash_map.h b/include/EASTL/fixed_hash_map.h new file mode 100644 index 0000000..af6663d --- /dev/null +++ b/include/EASTL/fixed_hash_map.h @@ -0,0 +1,822 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a hash_map and hash_multimap which use a fixed size +// memory pool for its buckets and nodes. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_HASH_MAP_H +#define EASTL_FIXED_HASH_MAP_H + + +#include +#include + +EA_DISABLE_VC_WARNING(4127) // Conditional expression is constant + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + + +namespace eastl +{ + /// EASTL_FIXED_HASH_MAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_HASH_MAP_DEFAULT_NAME + #define EASTL_FIXED_HASH_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_map" // Unless the user overrides something, this is "EASTL fixed_hash_map". + #endif + + #ifndef EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME + #define EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_multimap" // Unless the user overrides something, this is "EASTL fixed_hash_multimap". + #endif + + + /// EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR + /// EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR + #define EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_MAP_DEFAULT_NAME) + #endif + + #ifndef EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR + #define EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME) + #endif + + + + /// fixed_hash_map + /// + /// Implements a hash_map with a fixed block of memory identified by the nodeCount and bucketCount + /// template parameters. + /// + /// Template parameters: + /// Key The key type for the map. This is a map of Key to T (value). + /// T The value type for the map. + /// nodeCount The max number of objects to contain. This value must be >= 1. + /// bucketCount The number of buckets to use. This value must be >= 2. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Hash hash_set hash function. See hash_set. + /// Predicate hash_set equality testing function. See hash_set. 
+ /// + template , typename Predicate = eastl::equal_to, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType> + class fixed_hash_map : public hash_map::node_type), + nodeCount, + EASTL_ALIGN_OF(eastl::pair), + 0, + bEnableOverflow, + OverflowAllocator>, + bCacheHashCode> + { + public: + typedef fixed_hashtable_allocator::node_type), nodeCount, EASTL_ALIGN_OF(eastl::pair), 0, + bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef hash_map base_type; + typedef fixed_hash_map this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::mAllocator; + using base_type::clear; + + protected: + node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket. + char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. 
+ + public: + explicit fixed_hash_map(const overflow_allocator_type& overflowAllocator); + + explicit fixed_hash_map(const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_map(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator); + + template + fixed_hash_map(InputIterator first, InputIterator last, + const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_map(const this_type& x); + fixed_hash_map(this_type&& x); + fixed_hash_map(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_hash_map(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_MAP_DEFAULT_ALLOCATOR); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + + void clear(bool clearBuckets); + }; // fixed_hash_map + + + + + + /// fixed_hash_multimap + /// + /// Implements a hash_multimap with a fixed block of memory identified by the nodeCount and bucketCount + /// template parameters. + /// + /// Template parameters: + /// Key The key type for the map. This is a map of Key to T (value). + /// T The value type for the map. + /// nodeCount The max number of objects to contain. This value must be >= 1. + /// bucketCount The number of buckets to use. This value must be >= 2. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Hash hash_set hash function. 
See hash_set. + /// Predicate hash_set equality testing function. See hash_set. + /// + template , typename Predicate = eastl::equal_to, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType> + class fixed_hash_multimap : public hash_multimap::node_type), + nodeCount, + EASTL_ALIGN_OF(eastl::pair), + 0, + bEnableOverflow, + OverflowAllocator>, + bCacheHashCode> + { + public: + typedef fixed_hashtable_allocator::node_type), nodeCount, EASTL_ALIGN_OF(eastl::pair), 0, + bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef hash_multimap base_type; + typedef fixed_hash_multimap this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::mAllocator; + using base_type::clear; + + protected: + node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket. + char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. 
+ + public: + explicit fixed_hash_multimap(const overflow_allocator_type& overflowAllocator); + + explicit fixed_hash_multimap(const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_multimap(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator); + + template + fixed_hash_multimap(InputIterator first, InputIterator last, + const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_multimap(const this_type& x); + fixed_hash_multimap(this_type&& x); + fixed_hash_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_hash_multimap(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_MULTIMAP_DEFAULT_ALLOCATOR); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. 
+ + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + + void clear(bool clearBuckets); + }; // fixed_hash_multimap + + + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_hash_map + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_hash_map:: + fixed_hash_map(const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + template + fixed_hash_map:: + fixed_hash_map(InputIterator first, InputIterator last, + const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(first, last); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(const this_type& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(this_type&& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_map:: + fixed_hash_map(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(ilist.begin(), ilist.end()); + } + + + template + inline typename fixed_hash_map::this_type& + fixed_hash_map::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_map::this_type& + fixed_hash_map::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_map::this_type& + fixed_hash_map::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_hash_map:: + swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_hash_map:: + reset_lose_memory() + { + base_type::mnBucketCount = (size_type)base_type::mRehashPolicy.GetPrevBucketCount((uint32_t)bucketCount); + base_type::mnElementCount = 0; + base_type::mRehashPolicy.mnNextResize = 0; + base_type::get_allocator().reset(mNodeBuffer); + } + + + template + inline typename fixed_hash_map::size_type + fixed_hash_map::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_hash_map::overflow_allocator_type& + fixed_hash_map::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_hash_map::overflow_allocator_type& + fixed_hash_map::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_hash_map:: + set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + template + inline void fixed_hash_map:: + clear(bool clearBuckets) + { + base_type::DoFreeNodes(base_type::mpBucketArray, base_type::mnBucketCount); + if(clearBuckets) + { + base_type::DoFreeBuckets(base_type::mpBucketArray, base_type::mnBucketCount); + reset_lose_memory(); + } + base_type::mpBucketArray = (node_type**)mBucketBuffer; + base_type::mnElementCount = 0; + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_hash_map& a, + fixed_hash_map& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(a, b); + } + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_hash_multimap + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + template + fixed_hash_multimap:: + fixed_hash_multimap(InputIterator first, InputIterator last, + const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(first, last); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(const this_type& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.equal_function(),fixed_allocator_type(NULL, mBucketBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(this_type&& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.equal_function(),fixed_allocator_type(NULL, mBucketBuffer)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. 
+ mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multimap:: + fixed_hash_multimap(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(ilist.begin(), ilist.end()); + } + + + template + inline typename fixed_hash_multimap::this_type& + fixed_hash_multimap::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_multimap::this_type& + fixed_hash_multimap::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_multimap::this_type& + fixed_hash_multimap::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_hash_multimap:: + swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_hash_multimap:: + reset_lose_memory() + { + base_type::mnBucketCount = (size_type)base_type::mRehashPolicy.GetPrevBucketCount((uint32_t)bucketCount); + base_type::mnElementCount = 0; + base_type::mRehashPolicy.mnNextResize = 0; + base_type::get_allocator().reset(mNodeBuffer); + } + + + template + inline typename fixed_hash_multimap::size_type + fixed_hash_multimap::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_hash_multimap::overflow_allocator_type& + fixed_hash_multimap::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_hash_multimap::overflow_allocator_type& + fixed_hash_multimap::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_hash_multimap::set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + template + inline void 
fixed_hash_multimap:: + clear(bool clearBuckets) + { + base_type::DoFreeNodes(base_type::mpBucketArray, base_type::mnBucketCount); + if(clearBuckets) + { + base_type::DoFreeBuckets(base_type::mpBucketArray, base_type::mnBucketCount); + reset_lose_memory(); + } + base_type::mpBucketArray = (node_type**)mBucketBuffer; + base_type::mnElementCount = 0; + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_hash_multimap& a, + fixed_hash_multimap& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + + +} // namespace eastl + +EA_RESTORE_VC_WARNING() + +#endif // Header include guard + + + + + + + + + + + + diff --git a/include/EASTL/fixed_hash_set.h b/include/EASTL/fixed_hash_set.h new file mode 100644 index 0000000..0db9f49 --- /dev/null +++ b/include/EASTL/fixed_hash_set.h @@ -0,0 +1,782 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a hash_set which uses a fixed size memory pool for +// its buckets and nodes. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_HASH_SET_H +#define EASTL_FIXED_HASH_SET_H + + +#include +#include + +EA_DISABLE_VC_WARNING(4127) // Conditional expression is constant + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + + + +namespace eastl +{ + /// EASTL_FIXED_HASH_SET_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_HASH_SET_DEFAULT_NAME + #define EASTL_FIXED_HASH_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_set" // Unless the user overrides something, this is "EASTL fixed_hash_set". + #endif + + #ifndef EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME + #define EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_multiset" // Unless the user overrides something, this is "EASTL fixed_hash_multiset". + #endif + + + /// EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR + /// EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR + #define EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_SET_DEFAULT_NAME) + #endif + + #ifndef EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR + #define EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME) + #endif + + + + /// fixed_hash_set + /// + /// Implements a hash_set with a fixed block of memory identified by the nodeCount and bucketCount + /// template parameters. + /// + /// Template parameters: + /// Value The type of object the hash_set holds. + /// nodeCount The max number of objects to contain. This value must be >= 1. + /// bucketCount The number of buckets to use. This value must be >= 2. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Hash hash_set hash function. See hash_set. + /// Predicate hash_set equality testing function. See hash_set. 
+ /// + template , typename Predicate = eastl::equal_to, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType> + class fixed_hash_set : public hash_set::node_type), + nodeCount, + EASTL_ALIGN_OF(Value), + 0, + bEnableOverflow, + OverflowAllocator>, + bCacheHashCode> + { + public: + typedef fixed_hashtable_allocator::node_type), nodeCount, EASTL_ALIGN_OF(Value), 0, + bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef fixed_hash_set this_type; + typedef hash_set base_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::mAllocator; + + protected: + node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket. + char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. 
+ + public: + explicit fixed_hash_set(const overflow_allocator_type& overflowAllocator); + + explicit fixed_hash_set(const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_set(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator); + + template + fixed_hash_set(InputIterator first, InputIterator last, + const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_set(const this_type& x); + fixed_hash_set(this_type&& x); + fixed_hash_set(this_type&& x, const overflow_allocator_type& overflowAllocator); + + fixed_hash_set(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_hash_set + + + + + + + /// fixed_hash_multiset + /// + /// Implements a hash_multiset with a fixed block of memory identified by the nodeCount and bucketCount + /// template parameters. + /// + /// Value The type of object the hash_set holds. + /// nodeCount The max number of objects to contain. This value must be >= 1. + /// bucketCount The number of buckets to use. This value must be >= 2. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Hash hash_set hash function. See hash_set. + /// Predicate hash_set equality testing function. See hash_set. 
+ /// + template , typename Predicate = eastl::equal_to, bool bCacheHashCode = false, typename OverflowAllocator = EASTLAllocatorType> + class fixed_hash_multiset : public hash_multiset::node_type), + nodeCount, + EASTL_ALIGN_OF(Value), + 0, + bEnableOverflow, + OverflowAllocator>, + bCacheHashCode> + { + public: + typedef fixed_hashtable_allocator::node_type), nodeCount, EASTL_ALIGN_OF(Value), 0, + bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef hash_multiset base_type; + typedef fixed_hash_multiset this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::mAllocator; + + protected: + node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket. + char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. 
+ + public: + explicit fixed_hash_multiset(const overflow_allocator_type& overflowAllocator); + + explicit fixed_hash_multiset(const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_multiset(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator); + + template + fixed_hash_multiset(InputIterator first, InputIterator last, + const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate()); + + fixed_hash_multiset(const this_type& x); + fixed_hash_multiset(this_type&& x); + fixed_hash_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_hash_multiset(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. 
+ + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_hash_multiset + + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_hash_set + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_hash_set:: + fixed_hash_set(const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), + Hash(), Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_set:: + fixed_hash_set(const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), + hashFunction, predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_set:: + fixed_hash_set(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), + hashFunction, predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + template + fixed_hash_set:: + fixed_hash_set(InputIterator first, InputIterator last, + const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(first, last); + } + + + template + inline fixed_hash_set:: + fixed_hash_set(const this_type& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_set::fixed_hash_set(this_type&& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_set::fixed_hash_set(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), + x.hash_function(), x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_set:: + fixed_hash_set(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(ilist.begin(), ilist.end()); + } + + + template + typename fixed_hash_set::this_type& + fixed_hash_set::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_set::this_type& + fixed_hash_set::operator=(this_type&& x) + { + operator=(x); + return *this; + } + + + template + inline typename fixed_hash_set::this_type& + fixed_hash_set::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_hash_set:: + swap(this_type& x) + { + // We must do a brute-force swap, because fixed containers cannot share memory allocations. + // Note that we create a temp value on the stack. This approach may fail if the size of the + // container is too large. We have a rule against allocating memory from the heap, and so + // if the user wants to swap two large objects of this class, the user will currently need + // to implement it manually. To consider: add code to allocate a temporary buffer if the + // size of the container is too large for the stack. + EASTL_ASSERT(sizeof(x) < EASTL_MAX_STACK_USAGE); // It is dangerous to try to create objects that are too big for the stack. 
+ + const this_type temp(*this); // Can't call eastl::swap because that would + *this = x; // itself call this member swap function. + x = temp; + } + + + template + void fixed_hash_set:: + reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mNodeBuffer); + } + + + template + inline typename fixed_hash_set::size_type + fixed_hash_set::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_hash_set::overflow_allocator_type& + fixed_hash_set::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_hash_set::overflow_allocator_type& + fixed_hash_set::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_hash_set:: + set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_hash_set& a, + fixed_hash_set& b) + { + a.swap(b); + } + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_hash_multiset + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_hash_multiset:: + fixed_hash_multiset(const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_multiset:: + fixed_hash_multiset(const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + inline fixed_hash_multiset:: + fixed_hash_multiset(const Hash& hashFunction, + const Predicate& predicate, + const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + } + + + template + template + inline fixed_hash_multiset:: + fixed_hash_multiset(InputIterator first, InputIterator last, + const Hash& hashFunction, + const Predicate& predicate) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction, + predicate, fixed_allocator_type(NULL, mBucketBuffer)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. 
+ + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(first, last); + } + + + template + inline fixed_hash_multiset:: + fixed_hash_multiset(const this_type& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multiset::fixed_hash_multiset(this_type&& x) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(), + x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer)) + { + // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multiset::fixed_hash_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), + x.hash_function(), x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + // This implementation is the same as above. 
If we could rely on using C++11 delegating constructor support then we could just call that here. + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + mAllocator.reset(mNodeBuffer); + base_type::insert(x.begin(), x.end()); + } + + + template + inline fixed_hash_multiset:: + fixed_hash_multiset(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), Hash(), + Predicate(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator)) + { + EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2)); + + if(!bEnableOverflow) + base_type::set_max_load_factor(10000.f); // Set it so that we will never resize. + + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME); + #endif + + mAllocator.reset(mNodeBuffer); + base_type::insert(ilist.begin(), ilist.end()); + } + + + template + inline typename fixed_hash_multiset::this_type& + fixed_hash_multiset::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_multiset::this_type& + fixed_hash_multiset::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_hash_multiset::this_type& + fixed_hash_multiset::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_hash_multiset:: + swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_hash_multiset:: + reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mNodeBuffer); + } + + + template + inline typename fixed_hash_multiset::size_type + fixed_hash_multiset::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_hash_multiset::overflow_allocator_type& + fixed_hash_multiset::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_hash_multiset::overflow_allocator_type& + fixed_hash_multiset::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_hash_multiset:: + set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_hash_multiset& a, + fixed_hash_multiset& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + +} // namespace eastl + +EA_RESTORE_VC_WARNING() + +#endif // Header include guard + + + + + + + + + + + + diff --git a/include/EASTL/fixed_list.h b/include/EASTL/fixed_list.h new file mode 100644 index 0000000..9e48089 --- /dev/null +++ b/include/EASTL/fixed_list.h @@ -0,0 +1,388 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a list which uses a fixed size memory pool for its nodes. 
+/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_LIST_H +#define EASTL_FIXED_LIST_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_FIXED_LIST_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_LIST_DEFAULT_NAME + #define EASTL_FIXED_LIST_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_list" // Unless the user overrides something, this is "EASTL fixed_list". + #endif + + + /// EASTL_FIXED_LIST_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_LIST_DEFAULT_ALLOCATOR + #define EASTL_FIXED_LIST_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_LIST_DEFAULT_NAME) + #endif + + + + /// fixed_list + /// + /// fixed_list is a list which uses a single block of contiguous memory + /// for its nodes. The purpose of this is to reduce memory usage relative + /// to a conventional memory allocation system (with block headers), to + /// increase allocation speed (often due to avoidance of mutex locks), + /// to increase performance (due to better memory locality), and to decrease + /// memory fragmentation due to the way that fixed block allocators work. + /// + /// The primary downside to a fixed_list is that the number of nodes it + /// can contain is fixed upon its declaration. If you want a fixed_list + /// that doesn't have this limitation, then you probably don't want a + /// fixed_list. You can always create your own memory allocator that works + /// the way you want. + /// + /// Template parameters: + /// T The type of object the list holds. + /// nodeCount The max number of objects to contain. 
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template + class fixed_list : public list::node_type), + nodeCount, EASTL_ALIGN_OF(T), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(T), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef OverflowAllocator overflow_allocator_type; + typedef list base_type; + typedef fixed_list this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::iterator iterator; + + enum { kMaxSize = nodeCount }; + + using base_type::assign; + using base_type::resize; + using base_type::insert; + using base_type::size; + using base_type::get_allocator; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + using base_type::internalAllocator; + + public: + fixed_list(); + explicit fixed_list(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true. + explicit fixed_list(size_type n); // Currently we don't support overflowAllocator specification for other constructors, for simplicity. 
+ fixed_list(size_type n, const value_type& value); + fixed_list(const this_type& x); + fixed_list(this_type&& x); + fixed_list(this_type&&, const overflow_allocator_type& overflowAllocator); + fixed_list(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_LIST_DEFAULT_ALLOCATOR); + + template + fixed_list(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + size_type max_size() const; // Returns the max fixed size, which is the user-supplied nodeCount parameter. + bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot. + bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled. + bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter. 
+ + // OverflowAllocator + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_list + + + + /////////////////////////////////////////////////////////////////////// + // fixed_list + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_list::fixed_list() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME); + #endif + } + + + template + inline fixed_list::fixed_list(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME); + #endif + } + + + template + inline fixed_list::fixed_list(size_type n) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME); + #endif + + resize(n); + } + + + template + inline fixed_list::fixed_list(size_type n, const value_type& value) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME); + #endif + + resize(n, value); + } + + + template + inline fixed_list::fixed_list(const this_type& x) + : base_type(fixed_allocator_type(mBuffer)) + { + internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + + template + inline fixed_list::fixed_list(this_type&& x) + : base_type(fixed_allocator_type(mBuffer)) + { + // Since we are a fixed_list, we can't normally swap pointers unless both this and + // x are using using overflow and the overflow allocators are equal. 
To do: + //if(has_overflowed() && x.has_overflowed() && (get_overflow_allocator() == x.get_overflow_allocator())) + //{ + // We can swap contents and may need to swap the allocators as well. + //} + + // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that + // way then we may want to make a shared implementation. + internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + + template + inline fixed_list::fixed_list(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + // See comments above. + internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + + template + inline fixed_list::fixed_list(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + assign(ilist.begin(), ilist.end()); + } + + + template + template + fixed_list::fixed_list(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_LIST_DEFAULT_NAME); + #endif + + assign(first, last); + } + + + template + inline typename fixed_list::this_type& + fixed_list::operator=(const this_type& x) + { + if(this != &x) + { + base_type::clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + internalAllocator() = x.internalAllocator(); // The primary effect of this is to copy the overflow allocator. + #endif + + base_type::assign(x.begin(), x.end()); // It would probably be better to implement this like list::operator=. 
+ } + return *this; + } + + + template + inline typename fixed_list::this_type& + fixed_list::operator=(this_type&& x) + { + return operator=(x); + } + + + template + inline typename fixed_list::this_type& + fixed_list::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::assign(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_list::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_list::reset_lose_memory() + { + base_type::reset_lose_memory(); + get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_list::size_type + fixed_list::max_size() const + { + return kMaxSize; + } + + + template + inline bool fixed_list::full() const + { + // Note: This implementation isn't right in the case of bEnableOverflow = true because it will return + // false for the case that there are free nodes from the buffer but also nodes from the dynamic heap. + // This can happen if the container exceeds the fixed size and then frees some of the nodes from the fixed buffer. + // The only simple fix for this is to take on another member variable which tracks whether this overflow + // has occurred at some point in the past. + return !internalAllocator().can_allocate(); // This is the quickest way of detecting this. has_overflowed uses a different method because it can't use this quick method. + } + + + template + inline bool fixed_list::has_overflowed() const + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED // If we can use this faster pathway (as size() may be slow)... 
+ return (internalAllocator().mPool.mnPeakSize > kMaxSize); + #else + return (size() > kMaxSize); + #endif + } + + + template + inline bool fixed_list::can_overflow() const + { + return bEnableOverflow; + } + + + template + inline const typename fixed_list::overflow_allocator_type& + fixed_list::get_overflow_allocator() const EA_NOEXCEPT + { + return internalAllocator().get_overflow_allocator(); + } + + + template + inline typename fixed_list::overflow_allocator_type& + fixed_list::get_overflow_allocator() EA_NOEXCEPT + { + return internalAllocator().get_overflow_allocator(); + } + + + template + inline void + fixed_list::set_overflow_allocator(const overflow_allocator_type& allocator) + { + internalAllocator().set_overflow_allocator(allocator); + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_list& a, + fixed_list& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + diff --git a/include/EASTL/fixed_map.h b/include/EASTL/fixed_map.h new file mode 100644 index 0000000..c01db08 --- /dev/null +++ b/include/EASTL/fixed_map.h @@ -0,0 +1,580 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a map and multimap which use a fixed size memory +// pool for their nodes. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_MAP_H +#define EASTL_FIXED_MAP_H + + +#include +#include // Included because fixed_rbtree_base resides here. 
+ +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_FIXED_MAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_MAP_DEFAULT_NAME + #define EASTL_FIXED_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_map" // Unless the user overrides something, this is "EASTL fixed_map". + #endif + + #ifndef EASTL_FIXED_MULTIMAP_DEFAULT_NAME + #define EASTL_FIXED_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_multimap" // Unless the user overrides something, this is "EASTL fixed_multimap". + #endif + + + /// EASTL_FIXED_MAP_DEFAULT_ALLOCATOR + /// EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_MAP_DEFAULT_ALLOCATOR + #define EASTL_FIXED_MAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_MAP_DEFAULT_NAME) + #endif + + #ifndef EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR + #define EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_MULTIMAP_DEFAULT_NAME) + #endif + + + + /// fixed_map + /// + /// Implements a map with a fixed block of memory identified by the + /// nodeCount template parameter. + /// + /// Key The key object (key in the key/value pair). + /// T The mapped object (value in the key/value pair). + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Compare Compare function/object for set ordering. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. 
+ /// + template , typename OverflowAllocator = EASTLAllocatorType> + class fixed_map : public map::node_type), + nodeCount, EASTL_ALIGN_OF(eastl::pair), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(eastl::pair), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef fixed_map this_type; + typedef map base_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::insert; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + using base_type::mAllocator; + + public: + fixed_map(); + explicit fixed_map(const overflow_allocator_type& overflowAllocator); + explicit fixed_map(const Compare& compare); + fixed_map(const this_type& x); + fixed_map(this_type&& x); + fixed_map(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_map(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_MAP_DEFAULT_ALLOCATOR); + + template + fixed_map(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. 
+ + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_map + + + + + /// fixed_multimap + /// + /// Implements a multimap with a fixed block of memory identified by the + /// nodeCount template parameter. + /// + /// Key The key object (key in the key/value pair). + /// T The mapped object (value in the key/value pair). + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Compare Compare function/object for set ordering. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template , typename OverflowAllocator = EASTLAllocatorType> + class fixed_multimap : public multimap::node_type), + nodeCount, EASTL_ALIGN_OF(eastl::pair), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(eastl::pair), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef multimap base_type; + typedef fixed_multimap this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::insert; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. 
+ + using base_type::mAllocator; + using base_type::get_compare; + + public: + fixed_multimap(); + fixed_multimap(const overflow_allocator_type& overflowAllocator); + explicit fixed_multimap(const Compare& compare); + fixed_multimap(const this_type& x); + fixed_multimap(this_type&& x); + fixed_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_multimap(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_MULTIMAP_DEFAULT_ALLOCATOR); + + template + fixed_multimap(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_multimap + + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_map + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_map::fixed_map() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME); + #endif + } + + + template + inline fixed_map::fixed_map(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME); + #endif + } + + + template + inline fixed_map::fixed_map(const Compare& compare) + : base_type(compare, fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME); + #endif + } + + + 
template + inline fixed_map::fixed_map(const this_type& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_map::fixed_map(this_type&& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_map::fixed_map(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer, overflowAllocator)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + fixed_map::fixed_map(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME); + #endif + + insert(ilist.begin(), ilist.end()); + } + + + template + template + fixed_map::fixed_map(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME); + #endif + + insert(first, last); + } + + + template + inline typename fixed_map::this_type& + fixed_map::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_map::this_type& + fixed_map::operator=(std::initializer_list ilist) + { + base_type::clear(); + insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline typename fixed_map::this_type& + fixed_map::operator=(this_type&& x) + { + base_type::operator=(x); + return 
*this; + } + + + template + inline void fixed_map::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_map::reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_map::size_type + fixed_map::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_map::overflow_allocator_type& + fixed_map::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_map::overflow_allocator_type& + fixed_map::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void + fixed_map::set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_map& a, + fixed_map& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(a, b); + } + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_multimap + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_multimap::fixed_multimap() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multimap::fixed_multimap(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multimap::fixed_multimap(const Compare& compare) + : base_type(compare, fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multimap::fixed_multimap(const this_type& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_multimap::fixed_multimap(this_type&& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_multimap::fixed_multimap(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer, overflowAllocator)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + fixed_multimap::fixed_multimap(std::initializer_list ilist, const 
overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME); + #endif + + insert(ilist.begin(), ilist.end()); + } + + + template + template + fixed_multimap:: + fixed_multimap(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME); + #endif + + insert(first, last); + } + + + template + inline typename fixed_multimap::this_type& + fixed_multimap::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_multimap::this_type& + fixed_multimap::operator=(std::initializer_list ilist) + { + base_type::clear(); + insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline typename fixed_multimap::this_type& + fixed_multimap::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline void fixed_multimap::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_multimap::reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_multimap::size_type + fixed_multimap::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_multimap::overflow_allocator_type& + fixed_multimap::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_multimap::overflow_allocator_type& + fixed_multimap::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void + fixed_multimap::set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_multimap& a, + fixed_multimap& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + diff --git a/include/EASTL/fixed_set.h b/include/EASTL/fixed_set.h new file mode 100644 index 0000000..e5f0023 --- /dev/null +++ b/include/EASTL/fixed_set.h @@ -0,0 +1,578 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a set and multiset which use a fixed size memory +// pool for their nodes. 
+/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_SET_H +#define EASTL_FIXED_SET_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_FIXED_SET_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_SET_DEFAULT_NAME + #define EASTL_FIXED_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_set" // Unless the user overrides something, this is "EASTL fixed_set". + #endif + + #ifndef EASTL_FIXED_MULTISET_DEFAULT_NAME + #define EASTL_FIXED_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_multiset" // Unless the user overrides something, this is "EASTL fixed_multiset". + #endif + + + /// EASTL_FIXED_SET_DEFAULT_ALLOCATOR + /// EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_SET_DEFAULT_ALLOCATOR + #define EASTL_FIXED_SET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_SET_DEFAULT_NAME) + #endif + + #ifndef EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR + #define EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_MULTISET_DEFAULT_NAME) + #endif + + + + /// fixed_set + /// + /// Implements a set with a fixed block of memory identified by the + /// nodeCount template parameter. + /// + /// Template parameters: + /// Key The type of object the set holds (a.k.a. value). + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Compare Compare function/object for set ordering. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. 
+ /// + template , typename OverflowAllocator = EASTLAllocatorType> + class fixed_set : public set::node_type), + nodeCount, EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef set base_type; + typedef fixed_set this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::insert; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + using base_type::mAllocator; + using base_type::get_compare; + + public: + fixed_set(); + fixed_set(const overflow_allocator_type& overflowAllocator); + explicit fixed_set(const Compare& compare); + fixed_set(const this_type& x); + fixed_set(this_type&& x); + fixed_set(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_set(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_SET_DEFAULT_ALLOCATOR); + + template + fixed_set(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. 
+ + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_set + + + + + + + /// fixed_multiset + /// + /// Implements a multiset with a fixed block of memory identified by the + /// nodeCount template parameter. + /// + /// Key The type of object the set holds (a.k.a. value). + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted. + /// Compare Compare function/object for set ordering. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template , typename OverflowAllocator = EASTLAllocatorType> + class fixed_multiset : public multiset::node_type), + nodeCount, EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(Key), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef multiset base_type; + typedef fixed_multiset this_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::size_type size_type; + + enum { kMaxSize = nodeCount }; + + using base_type::insert; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. 
+ + using base_type::mAllocator; + + public: + fixed_multiset(); + fixed_multiset(const overflow_allocator_type& overflowAllocator); + explicit fixed_multiset(const Compare& compare); + fixed_multiset(const this_type& x); + fixed_multiset(this_type&& x); + fixed_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_multiset(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_MULTISET_DEFAULT_ALLOCATOR); + + template + fixed_multiset(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + size_type max_size() const; + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_multiset + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_set + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_set::fixed_set() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME); + #endif + } + + + template + inline fixed_set::fixed_set(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME); + #endif + } + + + template + inline fixed_set::fixed_set(const Compare& compare) + : base_type(compare, fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME); + #endif + } + + + template + inline 
fixed_set::fixed_set(const this_type& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_set::fixed_set(this_type&& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_set::fixed_set(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer, overflowAllocator)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + fixed_set::fixed_set(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME); + #endif + + insert(ilist.begin(), ilist.end()); + } + + + template + template + fixed_set::fixed_set(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME); + #endif + + insert(first, last); + } + + + template + inline typename fixed_set::this_type& + fixed_set::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_set::this_type& + fixed_set::operator=(std::initializer_list ilist) + { + base_type::clear(); + insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline typename fixed_set::this_type& + fixed_set::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + 
template + inline void fixed_set::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_set::reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_set::size_type + fixed_set::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_set::overflow_allocator_type& + fixed_set::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_set::overflow_allocator_type& + fixed_set::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_set::set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_set& a, + fixed_set& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(a, b); + } + + + + /////////////////////////////////////////////////////////////////////// + // fixed_multiset + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_multiset::fixed_multiset() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multiset::fixed_multiset(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multiset::fixed_multiset(const Compare& compare) + : base_type(compare, fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME); + #endif + } + + + template + inline fixed_multiset::fixed_multiset(const this_type& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_multiset::fixed_multiset(this_type&& x) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + inline fixed_multiset::fixed_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(x.get_compare(), fixed_allocator_type(mBuffer, overflowAllocator)) + { + mAllocator.copy_overflow_allocator(x.mAllocator); + + #if EASTL_NAME_ENABLED + mAllocator.set_name(x.mAllocator.get_name()); + #endif + + base_type::operator=(x); + } + + + template + fixed_multiset::fixed_multiset(std::initializer_list ilist, const 
overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME); + #endif + + insert(ilist.begin(), ilist.end()); + } + + + template + template + fixed_multiset::fixed_multiset(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME); + #endif + + insert(first, last); + } + + + template + inline typename fixed_multiset::this_type& + fixed_multiset::operator=(const this_type& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline typename fixed_multiset::this_type& + fixed_multiset::operator=(std::initializer_list ilist) + { + base_type::clear(); + insert(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline typename fixed_multiset::this_type& + fixed_multiset::operator=(this_type&& x) + { + base_type::operator=(x); + return *this; + } + + + template + inline void fixed_multiset::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_multiset::reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_multiset::size_type + fixed_multiset::max_size() const + { + return kMaxSize; + } + + + template + inline const typename fixed_multiset::overflow_allocator_type& + fixed_multiset::get_overflow_allocator() const EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline typename fixed_multiset::overflow_allocator_type& + fixed_multiset::get_overflow_allocator() EA_NOEXCEPT + { + return mAllocator.get_overflow_allocator(); + } + + + template + inline void fixed_multiset::set_overflow_allocator(const overflow_allocator_type& allocator) + { + mAllocator.set_overflow_allocator(allocator); + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_multiset& a, + fixed_multiset& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + diff --git a/include/EASTL/fixed_slist.h b/include/EASTL/fixed_slist.h new file mode 100644 index 0000000..85a7a7b --- /dev/null +++ b/include/EASTL/fixed_slist.h @@ -0,0 +1,389 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements an slist which uses a fixed size memory pool for its nodes. 
+/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_SLIST_H +#define EASTL_FIXED_SLIST_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_FIXED_SLIST_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_SLIST_DEFAULT_NAME + #define EASTL_FIXED_SLIST_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_slist" // Unless the user overrides something, this is "EASTL fixed_slist". + #endif + + + /// EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR + #define EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_SLIST_DEFAULT_NAME) + #endif + + + + /// fixed_slist + /// + /// fixed_slist is an slist which uses a single block of contiguous memory + /// for its nodes. The purpose of this is to reduce memory usage relative + /// to a conventional memory allocation system (with block headers), to + /// increase allocation speed (often due to avoidance of mutex locks), + /// to increase performance (due to better memory locality), and to decrease + /// memory fragmentation due to the way that fixed block allocators work. + /// + /// The primary downside to a fixed_slist is that the number of nodes it + /// can contain is fixed upon its declaration. If you want a fixed_slist + /// that doesn't have this limitation, then you probably don't want a + /// fixed_slist. You can always create your own memory allocator that works + /// the way you want. + /// + /// Template parameters: + /// T The type of object the slist holds. + /// nodeCount The max number of objects to contain. 
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template + class fixed_slist : public slist::node_type), + nodeCount, EASTL_ALIGN_OF(T), 0, bEnableOverflow, OverflowAllocator> > + { + public: + typedef fixed_node_allocator::node_type), nodeCount, + EASTL_ALIGN_OF(T), 0, bEnableOverflow, OverflowAllocator> fixed_allocator_type; + typedef OverflowAllocator overflow_allocator_type; + typedef slist base_type; + typedef fixed_slist this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + + enum { kMaxSize = nodeCount }; + + using base_type::assign; + using base_type::resize; + using base_type::size; + + protected: + char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements. + + using base_type::internalAllocator; + + public: + fixed_slist(); + explicit fixed_slist(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true. + explicit fixed_slist(size_type n); // Currently we don't support overflowAllocator specification for other constructors, for simplicity. + fixed_slist(size_type n, const value_type& value); + fixed_slist(const this_type& x); + fixed_slist(this_type&& x); + fixed_slist(this_type&&, const overflow_allocator_type&); + fixed_slist(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_SLIST_DEFAULT_ALLOCATOR); + + template + fixed_slist(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. 
No destructors are called, no deallocation occurs. + size_type max_size() const; // Returns the max fixed size, which is the user-supplied nodeCount parameter. + bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot. + bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled. + bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter. + + // OverflowAllocator + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_slist + + + + + /////////////////////////////////////////////////////////////////////// + // slist + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_slist::fixed_slist() + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); + #endif + } + + + template + inline fixed_slist::fixed_slist(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); + #endif + } + + + template + inline fixed_slist::fixed_slist(size_type n) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); + #endif + + resize(n); + } + + + template + inline fixed_slist::fixed_slist(size_type n, const value_type& value) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); 
+ #endif + + resize(n, value); + } + + + template + inline fixed_slist::fixed_slist(const this_type& x) + : base_type(fixed_allocator_type(mBuffer)) + { + internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + + template + inline fixed_slist::fixed_slist(this_type&& x) + : base_type(fixed_allocator_type(mBuffer)) + { + // Since we are a fixed_list, we can't normally swap pointers unless both this and + // x are using using overflow and the overflow allocators are equal. To do: + //if(has_overflowed() && x.has_overflowed() && (get_overflow_allocator() == x.get_overflow_allocator())) + //{ + // We can swap contents and may need to swap the allocators as well. + //} + + // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that + // way then we may want to make a shared implementation. + internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + template + inline fixed_slist::fixed_slist(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + // See comments above. 
+ internalAllocator().copy_overflow_allocator(x.internalAllocator()); + + #if EASTL_NAME_ENABLED + internalAllocator().set_name(x.internalAllocator().get_name()); + #endif + + assign(x.begin(), x.end()); + } + + + template + inline fixed_slist::fixed_slist(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); + #endif + + assign(ilist.begin(), ilist.end()); + } + + + template + template + fixed_slist::fixed_slist(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer)) + { + #if EASTL_NAME_ENABLED + internalAllocator().set_name(EASTL_FIXED_SLIST_DEFAULT_NAME); + #endif + + assign(first, last); + } + + + template + inline typename fixed_slist::this_type& + fixed_slist::operator=(const this_type& x) + { + if(this != &x) + { + base_type::clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + internalAllocator() = x.internalAllocator(); // The primary effect of this is to copy the overflow allocator. + #endif + + base_type::assign(x.begin(), x.end()); // It would probably be better to implement this like slist::operator=. + } + return *this; + } + + + template + inline typename fixed_slist::this_type& + fixed_slist::operator=(this_type&& x) + { + return operator=(x); + } + + + template + inline typename fixed_slist::this_type& + fixed_slist::operator=(std::initializer_list ilist) + { + base_type::clear(); + base_type::assign(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline void fixed_slist::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_slist::reset_lose_memory() + { + base_type::reset_lose_memory(); + base_type::get_allocator().reset(mBuffer); + } + + + template + inline typename fixed_slist::size_type + fixed_slist::max_size() const + { + return kMaxSize; + } + + + template + inline bool fixed_slist::full() const + { + // Note: This implementation isn't right in the case of bEnableOverflow = true because it will return + // false for the case that there are free nodes from the buffer but also nodes from the dynamic heap. + // This can happen if the container exceeds the fixed size and then frees some of the nodes from the fixed buffer. + return !internalAllocator().can_allocate(); // This is the quickest way of detecting this. has_overflowed uses a different method because it can't use this quick method. + } + + + template + inline bool fixed_slist::has_overflowed() const + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED // If we can use this faster pathway (as size() may be slow)... 
+ return (internalAllocator().mPool.mnPeakSize > kMaxSize); + #else + return (size() > kMaxSize); + #endif + } + + + template + inline bool fixed_slist::can_overflow() const + { + return bEnableOverflow; + } + + + template + inline const typename fixed_slist::overflow_allocator_type& + fixed_slist::get_overflow_allocator() const EA_NOEXCEPT + { + return internalAllocator().get_overflow_allocator(); + } + + + template + inline typename fixed_slist::overflow_allocator_type& + fixed_slist::get_overflow_allocator() EA_NOEXCEPT + { + return internalAllocator().get_overflow_allocator(); + } + + + template + inline void + fixed_slist::set_overflow_allocator(const overflow_allocator_type& allocator) + { + internalAllocator().set_overflow_allocator(allocator); + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline void swap(fixed_slist& a, + fixed_slist& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + diff --git a/include/EASTL/fixed_string.h b/include/EASTL/fixed_string.h new file mode 100644 index 0000000..f646302 --- /dev/null +++ b/include/EASTL/fixed_string.h @@ -0,0 +1,805 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a string which uses a fixed size memory pool. +// The bEnableOverflow template parameter allows the container to resort to +// heap allocations if the memory pool is exhausted. 
+/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_STRING_H +#define EASTL_FIXED_STRING_H + +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + +namespace eastl +{ + /// EASTL_FIXED_STRING_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_STRING_DEFAULT_NAME + #define EASTL_FIXED_STRING_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_string" // Unless the user overrides something, this is "EASTL fixed_string". + #endif + + + + /// fixed_string + /// + /// A fixed_string with bEnableOverflow == true is identical to a regular + /// string in terms of its behavior. All the expectations of regular string + /// apply to it and no additional expectations come from it. When bEnableOverflow + /// is false, fixed_string behaves like regular string with the exception that + /// its capacity can never increase. All operations you do on such a fixed_string + /// which require a capacity increase will result in undefined behavior or an + /// C++ allocation exception, depending on the configuration of EASTL. + /// + /// Note: The nodeCount value is the amount of characters to allocate, which needs to + /// take into account a terminating zero. Thus if you want to store strings with a strlen + /// of 30, the nodeCount value must be at least 31. + /// + /// Template parameters: + /// T The type of object the string holds (char, wchar_t, char8_t, char16_t, char32_t). + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. 
+ /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + /// Notes: + /// The nodeCount value must be at least 2, one for a character and one for a terminating 0. + /// + /// As of this writing, the string class necessarily reallocates when an insert of + /// self is done into self. As a result, the fixed_string class doesn't support + /// inserting self into self unless the bEnableOverflow template parameter is true. + /// + /// Example usage: + /// fixed_string fixedString("hello world"); // Can hold up to a strlen of 128. + /// + /// fixedString = "hola mundo"; + /// fixedString.clear(); + /// fixedString.resize(200); + /// fixedString.sprintf("%f", 1.5f); + /// + template + class fixed_string : public basic_string > + { + public: + typedef fixed_vector_allocator fixed_allocator_type; + typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type; + typedef basic_string base_type; + typedef fixed_string this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::CtorDoNotInitialize CtorDoNotInitialize; + typedef typename base_type::CtorSprintf CtorSprintf; + typedef aligned_buffer aligned_buffer_type; + + enum { kMaxSize = nodeCount - 1 }; // -1 because we need to save one element for the silent terminating null. + + using base_type::npos; + using base_type::mPair; + using base_type::append; + using base_type::resize; + using base_type::clear; + using base_type::capacity; + using base_type::size; + using base_type::sprintf_va_list; + using base_type::DoAllocate; + using base_type::DoFree; + using base_type::internalLayout; + using base_type::get_allocator; + + protected: + union // We define a union in order to avoid strict pointer aliasing issues with compilers like GCC. 
+ { + value_type mArray[1]; + aligned_buffer_type mBuffer; // Question: Why are we doing this aligned_buffer thing? Why not just do an array of value_type, given that we are using just strings of char types. + }; + + public: + fixed_string(); + explicit fixed_string(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true. + fixed_string(const base_type& x, size_type position, size_type n = base_type::npos); // Currently we don't support overflowAllocator specification for other constructors, for simplicity. + fixed_string(const value_type* p, size_type n); + fixed_string(const value_type* p); + fixed_string(size_type n, const value_type& value); + fixed_string(const this_type& x); + fixed_string(const this_type& x, const overflow_allocator_type& overflowAllocator); + fixed_string(const base_type& x); + fixed_string(const value_type* pBegin, const value_type* pEnd); + fixed_string(CtorDoNotInitialize, size_type n); + fixed_string(CtorSprintf, const value_type* pFormat, ...); + fixed_string(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator); + fixed_string(this_type&& x); + fixed_string(this_type&& x, const overflow_allocator_type& overflowAllocator); + + this_type& operator=(const this_type& x); + this_type& operator=(const base_type& x); + this_type& operator=(const value_type* p); + this_type& operator=(const value_type c); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void set_capacity(size_type n); + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + size_type max_size() const; + bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot. 
+ bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled. + bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter. + + // The inherited versions of substr/left/right call the basic_string constructor, + // which will call the overflow allocator and fail if bEnableOverflow == false + this_type substr(size_type position, size_type n) const; + this_type left(size_type n) const; + this_type right(size_type n) const; + + // OverflowAllocator + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + }; // fixed_string + + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_string + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_string::fixed_string() + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + } + + + template + inline fixed_string::fixed_string(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + } + + + template + inline fixed_string::fixed_string(const this_type& x) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + get_allocator().copy_overflow_allocator(x.get_allocator()); + 
+ #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x); + } + + + template + inline fixed_string::fixed_string(const this_type& x, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + get_allocator().copy_overflow_allocator(x.get_allocator()); + + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x); + } + + + template + inline fixed_string::fixed_string(const base_type& x) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x); + } + + + template + inline fixed_string::fixed_string(const base_type& x, size_type position, size_type n) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x, position, n); + } + + + template + inline fixed_string::fixed_string(const value_type* p, size_type n) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + 
internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(p, n); + } + + + template + inline fixed_string::fixed_string(const value_type* p) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(p); // There better be enough space to hold the assigned string. + } + + + template + inline fixed_string::fixed_string(size_type n, const value_type& value) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(n, value); // There better be enough space to hold the assigned string. 
+ } + + + template + inline fixed_string::fixed_string(const value_type* pBegin, const value_type* pEnd) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(pBegin, pEnd); + } + + + template + inline fixed_string::fixed_string(CtorDoNotInitialize, size_type n) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + + if(n < nodeCount) + { + internalLayout().SetHeapSize(n); + *internalLayout().HeapEndPtr() = 0; + } + else + { + internalLayout().SetHeapSize(0); + *internalLayout().HeapEndPtr() = 0; + + resize(n); + } + } + + + template + inline fixed_string::fixed_string(CtorSprintf, const value_type* pFormat, ...) 
+ : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + *internalLayout().HeapBeginPtr() = 0; + + va_list arguments; + va_start(arguments, pFormat); + sprintf_va_list(pFormat, arguments); + va_end(arguments); + } + + + template + inline fixed_string::fixed_string(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_STRING_DEFAULT_NAME); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(ilist.begin(), ilist.end()); + } + + + template + inline fixed_string::fixed_string(this_type&& x) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + // We copy from x instead of trade with it. We need to do so because fixed_ containers use local memory buffers. + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x); // Let x destruct its own items. + } + + template + inline fixed_string::fixed_string(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + // We copy from x instead of trade with it. We need to do so because fixed_ containers use local memory buffers. 
+ #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapCapacity(nodeCount - 1); + internalLayout().SetHeapSize(0); + + *internalLayout().HeapBeginPtr() = 0; + + append(x); // Let x destruct its own items. + } + + + template + inline typename fixed_string::this_type& + fixed_string::operator=(const this_type& x) + { + if(this != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + get_allocator() = x.get_allocator(); + #endif + + append(x); + } + return *this; + } + + + template + inline typename fixed_string:: + this_type& fixed_string::operator=(const base_type& x) + { + if(static_cast(this) != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + get_allocator() = x.get_allocator(); + #endif + + append(x); + } + return *this; + } + + + template + inline typename fixed_string:: + this_type& fixed_string::operator=(const value_type* p) + { + if(internalLayout().HeapBeginPtr() != p) + { + clear(); + append(p); + } + return *this; + } + + + template + inline typename fixed_string:: + this_type& fixed_string::operator=(const value_type c) + { + clear(); + append((size_type)1, c); + return *this; + } + + + template + inline typename fixed_string:: + this_type& fixed_string::operator=(std::initializer_list ilist) + { + clear(); + append(ilist.begin(), ilist.end()); + return *this; + } + + + template + inline typename fixed_string:: + this_type& fixed_string::operator=(this_type&& x) + { + // We copy from x instead of trade with it. We need to do so because fixed_ containers use local memory buffers. + + // if(static_cast(this) != &x) This should be impossible, so we disable it until proven otherwise. + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + get_allocator() = x.get_allocator(); + #endif + + append(x); // Let x destruct its own items. 
+ } + return *this; + } + + + template + inline void fixed_string::swap(this_type& x) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + + + template + inline void fixed_string::set_capacity(size_type n) + { + const size_type nPrevSize = internalLayout().GetSize(); + const size_type nPrevCapacity = capacity(); + + if(n == npos) // If the user means to set the capacity so that it equals the size (i.e. free excess capacity)... + n = nPrevSize; + + if(n != nPrevCapacity) // If the request results in a capacity change... + { + const size_type allocSize = (n + 1); // +1 because the terminating 0 isn't included in the supplied capacity value. So now n refers the amount of memory we need. + + if(can_overflow() && (((uintptr_t)internalLayout().HeapBeginPtr() != (uintptr_t)mBuffer.buffer) || (allocSize > kMaxSize))) // If we are or would be using dynamically allocated memory instead of our fixed-size member buffer... + { + T* const pNewData = (allocSize <= kMaxSize) ? (T*)&mBuffer.buffer[0] : DoAllocate(allocSize); + T* const pCopyEnd = (n < nPrevSize) ? (internalLayout().HeapBeginPtr() + n) : internalLayout().HeapEndPtr(); + CharStringUninitializedCopy(internalLayout().HeapBeginPtr(), pCopyEnd, pNewData); // Copy [internalLayout().heap.mpBegin, pCopyEnd) to pNewData. + if((uintptr_t)internalLayout().HeapBeginPtr() != (uintptr_t)mBuffer.buffer) + DoFree(internalLayout().HeapBeginPtr(), internalLayout().GetHeapCapacity() + 1); + + internalLayout().SetHeapSize((size_type)(pCopyEnd - internalLayout().HeapBeginPtr())); + internalLayout().SetHeapBeginPtr(pNewData); + internalLayout().SetHeapCapacity(allocSize - 1); + } // Else the new capacity would be within our fixed buffer. + else if(n < nPrevSize) // If the newly requested capacity is less than our size, we do what vector::set_capacity does and resize, even though we actually aren't reducing the capacity. 
+ resize(n); + } + } + + + template + inline void fixed_string::reset_lose_memory() + { + internalLayout().SetHeapBeginPtr(mArray); + internalLayout().SetHeapSize(0); + internalLayout().SetHeapCapacity(nodeCount - 1); + } + + + template + inline typename fixed_string:: + size_type fixed_string::max_size() const + { + return kMaxSize; + } + + + template + inline bool fixed_string::full() const + { + // If size >= capacity, then we are definitely full. + // Also, if our size is smaller but we've switched away from mBuffer due to a previous overflow, then we are considered full. + return ((size_t)(internalLayout().HeapEndPtr() - internalLayout().HeapBeginPtr()) >= kMaxSize) || ((void*)internalLayout().HeapBeginPtr() != (void*)mBuffer.buffer); + } + + + template + inline bool fixed_string::has_overflowed() const + { + // This will be incorrect for the case that bOverflowEnabled is true and the container was resized + // down to a small size where the fixed buffer could take over ownership of the data again. + // The only simple fix for this is to take on another member variable which tracks whether this overflow + // has occurred at some point in the past. 
+ return ((void*)internalLayout().HeapBeginPtr() != (void*)mBuffer.buffer); + } + + + template + inline bool fixed_string::can_overflow() const + { + return bEnableOverflow; + } + + + template + inline typename fixed_string:: + this_type fixed_string::substr(size_type position, size_type n) const + { + #if EASTL_STRING_OPT_RANGE_ERRORS + if(position > internalLayout().GetSize()) + base_type::ThrowRangeException(); + #endif + + return fixed_string(internalLayout().HeapBeginPtr() + position, + internalLayout().HeapBeginPtr() + position + eastl::min_alt(n, internalLayout().GetSize() - position)); + } + + + template + inline typename fixed_string:: + this_type fixed_string::left(size_type n) const + { + const size_type nLength = size(); + if(n < nLength) + return fixed_string(internalLayout().HeapBeginPtr(), internalLayout().HeapBeginPtr() + n); + return *this; + } + + + template + inline typename fixed_string:: + this_type fixed_string::right(size_type n) const + { + const size_type nLength = size(); + if(n < nLength) + return fixed_string(internalLayout().HeapEndPtr() - n, internalLayout().HeapEndPtr()); + return *this; + } + + + template + inline const typename fixed_string:: + overflow_allocator_type& fixed_string::get_overflow_allocator() const EA_NOEXCEPT + { + return get_allocator().get_overflow_allocator(); + } + + + template + inline typename fixed_string:: + overflow_allocator_type& fixed_string::get_overflow_allocator() EA_NOEXCEPT + { + return get_allocator().get_overflow_allocator(); + } + + + template + inline void + fixed_string::set_overflow_allocator(const overflow_allocator_type& allocator) + { + get_allocator().set_overflow_allocator(allocator); + } + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + + // Operator + + template + fixed_string operator+(const fixed_string& a, + const fixed_string& b) + { + // We have a problem 
here because need to return an fixed_string by value. This will typically result in it + // using stack space equal to its size. That size may be too large to be workable. + typedef fixed_string this_type; + + this_type result(const_cast(a).get_overflow_allocator()); + result.append(a); + result.append(b); + return result; + } + + + template + fixed_string operator+(const typename fixed_string::value_type* p, + const fixed_string& b) + { + typedef fixed_string this_type; + + const typename this_type::size_type n = (typename this_type::size_type)CharStrlen(p); + this_type result(const_cast(b).get_overflow_allocator()); + result.append(p, p + n); + result.append(b); + return result; + } + + + template + fixed_string operator+(typename fixed_string::value_type c, + const fixed_string& b) + { + typedef fixed_string this_type; + + this_type result(const_cast(b).get_overflow_allocator()); + result.push_back(c); + result.append(b); + return result; + } + + + template + fixed_string operator+(const fixed_string& a, + const typename fixed_string::value_type* p) + { + typedef fixed_string this_type; + + const typename this_type::size_type n = (typename this_type::size_type)CharStrlen(p); + this_type result(const_cast(a).get_overflow_allocator()); + result.append(a); + result.append(p, p + n); + return result; + } + + + template + fixed_string operator+(const fixed_string& a, + typename fixed_string::value_type c) + { + typedef fixed_string this_type; + + this_type result(const_cast(a).get_overflow_allocator()); + result.append(a); + result.push_back(c); + return result; + } + + + template + fixed_string operator+(fixed_string&& a, + fixed_string&& b) + { + a.append(b); // Using an rvalue by name results in it becoming an lvalue. 
+ return eastl::move(a); + } + + template + fixed_string operator+(fixed_string&& a, + const fixed_string& b) + { + a.append(b); + return eastl::move(a); + } + + template + fixed_string operator+(const typename fixed_string::value_type* p, + fixed_string&& b) + { + b.insert(0, p); + return eastl::move(b); + } + + template + fixed_string operator+(fixed_string&& a, + const typename fixed_string::value_type* p) + { + a.append(p); + return eastl::move(a); + } + + template + fixed_string operator+(fixed_string&& a, + typename fixed_string::value_type c) + { + a.push_back(c); + return eastl::move(a); + } + + + // operator ==, !=, <, >, <=, >= come from the string implementations. + + template + inline void swap(fixed_string& a, + fixed_string& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(a, b); + } + + +} // namespace eastl + +#endif // Header include guard diff --git a/include/EASTL/fixed_substring.h b/include/EASTL/fixed_substring.h new file mode 100644 index 0000000..033052f --- /dev/null +++ b/include/EASTL/fixed_substring.h @@ -0,0 +1,265 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_SUBSTRING_H +#define EASTL_FIXED_SUBSTRING_H + + +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// fixed_substring + /// + /// Implements a string which is a reference to a segment of characters. + /// This class is efficient because it allocates no memory and copies no + /// memory during construction and assignment, but rather refers directly + /// to the segment of chracters. 
A common use of this is to have a + /// fixed_substring efficiently refer to a substring within another string. + /// + /// You cannot directly resize a fixed_substring (e.g. via resize, insert, + /// append, erase), but you can assign a different substring to it. + /// You can modify the characters within a substring in place. + /// As of this writing, in the name of being lean and simple it is the + /// user's responsibility to not call unsupported resizing functions + /// such as those listed above. A detailed listing of the functions which + /// are not supported is given below in the class declaration. + /// + /// The c_str function doesn't act as one might hope, as it simply + /// returns the pointer to the beginning of the string segment and the + /// 0-terminator may be beyond the end of the segment. If you want to + /// always be able to use c_str as expected, use the fixed string solution + /// we describe below. + /// + /// Another use of fixed_substring is to provide C++ string-like functionality + /// with a C character array. This allows you to work on a C character array + /// as if it were a C++ string as opposed using the C string API. Thus you + /// can do this: + /// + /// void DoSomethingForUser(char* timeStr, size_t timeStrCapacity) + /// { + /// fixed_substring tmp(timeStr, timeStrCapacity); + /// tmp = "hello "; + /// tmp += "world"; + /// } + /// + /// Note that this class constructs and assigns from const string pointers + /// and const string objects, yet this class does not declare its member + /// data as const. This is a concession in order to allow this implementation + /// to be simple and lean. It is the user's responsibility to make sure + /// that strings that should not or can not be modified are either not + /// used by fixed_substring or are not modified by fixed_substring. + /// + /// A more flexible alternative to fixed_substring is fixed_string. 
+ /// fixed_string has none of the functional limitations that fixed_substring + /// has and like fixed_substring it doesn't allocate memory. However, + /// fixed_string makes a *copy* of the source string and uses local + /// memory to store that copy. Also, fixed_string objects on the stack + /// are going to have a limit as to their maximum size. + /// + /// Notes: + /// As of this writing, the string class necessarily reallocates when + /// an insert of self is done into self. As a result, the fixed_substring + /// class doesn't support inserting self into self. + /// + /// Example usage: + /// basic_string str("hello world"); + /// fixed_substring sub(str, 2, 5); // sub == "llo w" + /// + template + class fixed_substring : public basic_string + { + public: + typedef basic_string base_type; + typedef fixed_substring this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + + using base_type::npos; + using base_type::mPair; + using base_type::AllocateSelf; + using base_type::internalLayout; + using base_type::get_allocator; + + private: + + void SetInternalHeapLayout(value_type* pBeginPtr, size_type nSize, size_type nCap) + { + internalLayout().SetHeapBeginPtr(pBeginPtr); + internalLayout().SetHeapSize(nSize); + internalLayout().SetHeapCapacity(nCap); + } + + + public: + fixed_substring() + : base_type() + { + } + + fixed_substring(const base_type& x) + : base_type() + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + assign(x); + } + + // We gain no benefit from having an rvalue move constructor or assignment operator, + // as this class is a const class. 
+ + fixed_substring(const base_type& x, size_type position, size_type n = base_type::npos) + : base_type() + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + assign(x, position, n); + } + + fixed_substring(const value_type* p, size_type n) + : base_type() + { + assign(p, n); + } + + fixed_substring(const value_type* p) + : base_type() + { + assign(p); + } + + fixed_substring(const value_type* pBegin, const value_type* pEnd) + : base_type() + { + assign(pBegin, pEnd); + } + + ~fixed_substring() + { + // We need to reset, as otherwise the parent destructor will + // attempt to free our memory. + AllocateSelf(); + } + + this_type& operator=(const base_type& x) + { + assign(x); + return *this; + } + + this_type& operator=(const value_type* p) + { + assign(p); + return *this; + } + + this_type& assign(const base_type& x) + { + // By design, we need to cast away const-ness here. + SetInternalHeapLayout(const_cast(x.data()), x.size(), x.size()); + return *this; + } + + this_type& assign(const base_type& x, size_type position, size_type n) + { + // By design, we need to cast away const-ness here. + SetInternalHeapLayout(const_cast(x.data()) + position, n, n); + return *this; + } + + this_type& assign(const value_type* p, size_type n) + { + // By design, we need to cast away const-ness here. + SetInternalHeapLayout(const_cast(p), n, n); + return *this; + } + + this_type& assign(const value_type* p) + { + // By design, we need to cast away const-ness here. + SetInternalHeapLayout(const_cast(p), (size_type)CharStrlen(p), (size_type)CharStrlen(p)); + return *this; + } + + this_type& assign(const value_type* pBegin, const value_type* pEnd) + { + // By design, we need to cast away const-ness here. 
+ SetInternalHeapLayout(const_cast(pBegin), (size_type)(pEnd - pBegin), (size_type)(pEnd - pBegin)); + return *this; + } + + + // Partially supported functionality + // + // When using fixed_substring on a character sequence that is within another + // string, the following functions may do one of two things: + // 1 Attempt to reallocate + // 2 Write a 0 char at the end of the fixed_substring + // + // Item #1 will result in a crash, due to the attempt by the underlying + // string class to free the substring memory. Item #2 will result in a 0 + // char being written to the character array. Item #2 may or may not be + // a problem, depending on how you use fixed_substring. Thus the following + // functions cannot be used safely. + + #if 0 // !defined(EA_COMPILER_NO_DELETED_FUNCTIONS) We may want to enable these deletions after some investigation of possible user impact. + this_type& operator=(value_type c) = delete; + void resize(size_type n, value_type c) = delete; + void resize(size_type n) = delete; + void reserve(size_type = 0) = delete; + void set_capacity(size_type n) = delete; + void clear() = delete; + this_type& operator+=(const base_type& x) = delete; + this_type& operator+=(const value_type* p) = delete; + this_type& operator+=(value_type c) = delete; + this_type& append(const base_type& x) = delete; + this_type& append(const base_type& x, size_type position, size_type n) = delete; + this_type& append(const value_type* p, size_type n) = delete; + this_type& append(const value_type* p) = delete; + this_type& append(size_type n) = delete; + this_type& append(size_type n, value_type c) = delete; + this_type& append(const value_type* pBegin, const value_type* pEnd) = delete; + this_type& append_sprintf_va_list(const value_type* pFormat, va_list arguments) = delete; + this_type& append_sprintf(const value_type* pFormat, ...) 
= delete; + void push_back(value_type c) = delete; + void pop_back() = delete; + this_type& assign(size_type n, value_type c) = delete; + this_type& insert(size_type position, const base_type& x) = delete; + this_type& insert(size_type position, const base_type& x, size_type beg, size_type n) = delete; + this_type& insert(size_type position, const value_type* p, size_type n) = delete; + this_type& insert(size_type position, const value_type* p) = delete; + this_type& insert(size_type position, size_type n, value_type c) = delete; + iterator insert(const_iterator p, value_type c) = delete; + void insert(const_iterator p, size_type n, value_type c) = delete; + void insert(const_iterator p, const value_type* pBegin, const value_type* pEnd) = delete; + this_type& erase(size_type position = 0, size_type n = npos) = delete; + iterator erase(const_iterator p) = delete; + iterator erase(const_iterator pBegin, const_iterator pEnd) = delete; + void swap(base_type& x) = delete; + this_type& sprintf_va_list(const value_type* pFormat, va_list arguments) = delete; + this_type& sprintf(const value_type* pFormat, ...) = delete; + #endif + + }; // fixed_substring + + +} // namespace eastl + + + +#endif // Header include guard diff --git a/include/EASTL/fixed_vector.h b/include/EASTL/fixed_vector.h new file mode 100644 index 0000000..633eaa8 --- /dev/null +++ b/include/EASTL/fixed_vector.h @@ -0,0 +1,625 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a vector which uses a fixed size memory pool. +// The bEnableOverflow template parameter allows the container to resort to +// heap allocations if the memory pool is exhausted. 
+/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_FIXED_VECTOR_H +#define EASTL_FIXED_VECTOR_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// EASTL_FIXED_VECTOR_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// In the case of fixed-size containers, the allocator name always refers + /// to overflow allocations. + /// + #ifndef EASTL_FIXED_VECTOR_DEFAULT_NAME + #define EASTL_FIXED_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_vector" // Unless the user overrides something, this is "EASTL fixed_vector". + #endif + + + /// EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR + #define EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR overflow_allocator_type(EASTL_FIXED_VECTOR_DEFAULT_NAME) + #endif + + + /// fixed_vector + /// + /// A fixed_vector with bEnableOverflow == true is identical to a regular + /// vector in terms of its behavior. All the expectations of regular vector + /// apply to it and no additional expectations come from it. When bEnableOverflow + /// is false, fixed_vector behaves like regular vector with the exception that + /// its capacity can never increase. All operations you do on such a fixed_vector + /// which require a capacity increase will result in undefined behavior or an + /// C++ allocation exception, depending on the configuration of EASTL. + /// + /// Template parameters: + /// T The type of object the vector holds. + /// nodeCount The max number of objects to contain. + /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. 
+ /// + /// Note: The nodeCount value must be at least 1. + /// + /// Example usage: + /// fixed_vector fixedVector); + /// + /// fixedVector.push_back(Widget()); + /// fixedVector.resize(200); + /// fixedVector.clear(); + /// + template ::type> + class fixed_vector : public vector > + { + public: + typedef fixed_vector_allocator fixed_allocator_type; + typedef OverflowAllocator overflow_allocator_type; + typedef vector base_type; + typedef fixed_vector this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::reference reference; + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + typedef aligned_buffer aligned_buffer_type; + + enum { kMaxSize = nodeCount }; + + using base_type::get_allocator; + using base_type::mpBegin; + using base_type::mpEnd; + using base_type::internalCapacityPtr; + using base_type::resize; + using base_type::clear; + using base_type::size; + using base_type::assign; + using base_type::npos; + using base_type::DoAllocate; + using base_type::DoFree; + using base_type::DoAssign; + using base_type::DoAssignFromIterator; + + protected: + aligned_buffer_type mBuffer; + + public: + fixed_vector(); + explicit fixed_vector(const overflow_allocator_type& overflowAllocator); // Only applicable if bEnableOverflow is true. + explicit fixed_vector(size_type n); // Currently we don't support overflowAllocator specification for other constructors, for simplicity. 
+ fixed_vector(size_type n, const value_type& value); + fixed_vector(const this_type& x); + fixed_vector(this_type&& x); + fixed_vector(this_type&& x, const overflow_allocator_type& overflowAllocator); + fixed_vector(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_VECTOR_DEFAULT_ALLOCATOR); + + template + fixed_vector(InputIterator first, InputIterator last); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + void set_capacity(size_type n); + void clear(bool freeOverflow); + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + size_type max_size() const; // Returns the max fixed size, which is the user-supplied nodeCount parameter. + bool full() const; // Returns true if the fixed space has been fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot. + bool has_overflowed() const; // Returns true if the allocations spilled over into the overflow allocator. Meaningful only if overflow is enabled. + bool can_overflow() const; // Returns the value of the bEnableOverflow template parameter. + + void* push_back_uninitialized(); + void push_back(const value_type& value); // We implement push_back here because we have a specialization that's + reference push_back(); // smaller for the case of overflow being disabled. 
+ void push_back(value_type&& value); + + // OverflowAllocator + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT; + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT; + void set_overflow_allocator(const overflow_allocator_type& allocator); + + protected: + void* DoPushBackUninitialized(true_type); + void* DoPushBackUninitialized(false_type); + + void DoPushBack(true_type, const value_type& value); + void DoPushBack(false_type, const value_type& value); + + void DoPushBackMove(true_type, value_type&& value); + void DoPushBackMove(false_type, value_type&& value); + + reference DoPushBack(false_type); + reference DoPushBack(true_type); + + }; // fixed_vector + + + + + /////////////////////////////////////////////////////////////////////// + // fixed_vector + /////////////////////////////////////////////////////////////////////// + + template + inline fixed_vector::fixed_vector() + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + } + + template + inline fixed_vector::fixed_vector(const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + } + + template + inline fixed_vector::fixed_vector(size_type n) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + resize(n); + } + + + template + inline fixed_vector::fixed_vector(size_type n, const value_type& value) + : 
base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + resize(n, value); + } + + + template + inline fixed_vector::fixed_vector(const this_type& x) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + get_allocator().copy_overflow_allocator(x.get_allocator()); + + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + base_type::template DoAssign(x.begin(), x.end(), false_type()); + } + + + template + inline fixed_vector::fixed_vector(this_type&& x) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + // Since we are a fixed_vector, we can't swap pointers. We can possibly so something like fixed_swap or + // we can just do an assignment from x. If we want to do the former then we need to have some complicated + // code to deal with overflow or no overflow, and whether the memory is in the fixed-size buffer or in + // the overflow allocator. 90% of the time the memory should be in the fixed buffer, in which case + // a simple assignment is no worse than the fancy pathway. + + // Since we are a fixed_list, we can't normally swap pointers unless both this and + // x are using using overflow and the overflow allocators are equal. To do: + //if(has_overflowed() && x.has_overflowed() && (get_overflow_allocator() == x.get_overflow_allocator())) + //{ + // We can swap contents and may need to swap the allocators as well. + //} + + // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that + // way then we may want to make a shared implementation. 
+ get_allocator().copy_overflow_allocator(x.get_allocator()); + + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + base_type::template DoAssign, true>(eastl::make_move_iterator(x.begin()), eastl::make_move_iterator(x.end()), false_type()); + } + + + template + inline fixed_vector::fixed_vector(this_type&& x, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + // See the discussion above. + + // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that + // way then we may want to make a shared implementation. + get_allocator().copy_overflow_allocator(x.get_allocator()); + + #if EASTL_NAME_ENABLED + get_allocator().set_name(x.get_allocator().get_name()); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + base_type::template DoAssign(x.begin(), x.end(), false_type()); + } + + + template + inline fixed_vector::fixed_vector(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator) + : base_type(fixed_allocator_type(mBuffer.buffer, overflowAllocator)) + { + typedef typename std::initializer_list::iterator InputIterator; + typedef typename eastl::iterator_traits::iterator_category IC; + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + base_type::template DoAssignFromIterator(ilist.begin(), ilist.end(), IC()); + } + + + template + template + fixed_vector::fixed_vector(InputIterator first, InputIterator last) + : base_type(fixed_allocator_type(mBuffer.buffer)) + { + #if EASTL_NAME_ENABLED + get_allocator().set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME); + #endif + + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + 
base_type::template DoAssign(first, last, is_integral()); + } + + + template + inline typename fixed_vector::this_type& + fixed_vector::operator=(const this_type& x) + { + if(this != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + get_allocator() = x.get_allocator(); // The primary effect of this is to copy the overflow allocator. + #endif + + base_type::template DoAssign(x.begin(), x.end(), false_type()); // Shorter route. + } + return *this; + } + + + template + inline typename fixed_vector::this_type& + fixed_vector::operator=(std::initializer_list ilist) + { + typedef typename std::initializer_list::iterator InputIterator; + typedef typename eastl::iterator_traits::iterator_category IC; + + clear(); + base_type::template DoAssignFromIterator(ilist.begin(), ilist.end(), IC()); + return *this; + } + + + template + inline typename fixed_vector::this_type& + fixed_vector::operator=(this_type&& x) + { + // Since we are a fixed_vector, we can't swap pointers. We can possibly do something like fixed_swap or + // we can just do an assignment from x. If we want to do the former then we need to have some complicated + // code to deal with overflow or no overflow, and whether the memory is in the fixed-size buffer or in + // the overflow allocator. 90% of the time the memory should be in the fixed buffer, in which case + // a simple assignment is no worse than the fancy pathway. + if (this != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + get_allocator() = x.get_allocator(); // The primary effect of this is to copy the overflow allocator. + #endif + + base_type::template DoAssign, true>(make_move_iterator(x.begin()), make_move_iterator(x.end()), false_type()); // Shorter route. 
+ } + return *this; + } + + + template + inline void fixed_vector::swap(this_type& x) + { + if((has_overflowed() && x.has_overflowed()) && (get_overflow_allocator() == x.get_overflow_allocator())) // If both containers are using the heap instead of local memory + { // then we can do a fast pointer swap instead of content swap. + eastl::swap(mpBegin, x.mpBegin); + eastl::swap(mpEnd, x.mpEnd); + eastl::swap(internalCapacityPtr(), x.internalCapacityPtr()); + } + else + { + // Fixed containers use a special swap that can deal with excessively large buffers. + eastl::fixed_swap(*this, x); + } + } + + + template + inline void fixed_vector::set_capacity(size_type n) + { + const size_type nPrevSize = (size_type)(mpEnd - mpBegin); + const size_type nPrevCapacity = (size_type)(internalCapacityPtr() - mpBegin); + + if(n == npos) // If the user means to set the capacity so that it equals the size (i.e. free excess capacity)... + n = nPrevSize; + + if(n != nPrevCapacity) // If the request results in a capacity change... + { + if(can_overflow() && (((uintptr_t)mpBegin != (uintptr_t)mBuffer.buffer) || (n > kMaxSize))) // If we are or would be using dynamically allocated memory instead of our fixed-size member buffer... + { + T* const pNewData = (n <= kMaxSize) ? (T*)&mBuffer.buffer[0] : DoAllocate(n); + T* const pCopyEnd = (n < nPrevSize) ? (mpBegin + n) : mpEnd; + eastl::uninitialized_move_ptr(mpBegin, pCopyEnd, pNewData); // Move [mpBegin, pCopyEnd) to p. + eastl::destruct(mpBegin, mpEnd); + if((uintptr_t)mpBegin != (uintptr_t)mBuffer.buffer) + DoFree(mpBegin, (size_type)(internalCapacityPtr() - mpBegin)); + + mpEnd = pNewData + (pCopyEnd - mpBegin); + mpBegin = pNewData; + internalCapacityPtr() = mpBegin + n; + } // Else the new capacity would be within our fixed buffer. + else if(n < nPrevSize) // If the newly requested capacity is less than our size, we do what vector::set_capacity does and resize, even though we actually aren't reducing the capacity. 
+ resize(n); + } + } + + + template + inline void fixed_vector::clear(bool freeOverflow) + { + base_type::clear(); + if (freeOverflow && mpBegin != (value_type*)&mBuffer.buffer[0]) + { + EASTLFree(get_allocator(), mpBegin, (internalCapacityPtr() - mpBegin) * sizeof(T)); + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + } + } + + + template + inline void fixed_vector::reset_lose_memory() + { + mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0]; + internalCapacityPtr() = mpBegin + nodeCount; + } + + + template + inline typename fixed_vector::size_type + fixed_vector::max_size() const + { + return kMaxSize; + } + + + template + inline bool fixed_vector::full() const + { + // If size >= capacity, then we are definitely full. + // Also, if our size is smaller but we've switched away from mBuffer due to a previous overflow, then we are considered full. + return ((size_t)(mpEnd - mpBegin) >= kMaxSize) || ((void*)mpBegin != (void*)mBuffer.buffer); + } + + + template + inline bool fixed_vector::has_overflowed() const + { + // This will be incorrect for the case that bOverflowEnabled is true and the container was resized + // down to a small size where the fixed buffer could take over ownership of the data again. + // The only simple fix for this is to take on another member variable which tracks whether this overflow + // has occurred at some point in the past. 
+ return ((void*)mpBegin != (void*)mBuffer.buffer); + } + + + template + inline bool fixed_vector::can_overflow() const + { + return bEnableOverflow; + } + + + template + inline void* fixed_vector::push_back_uninitialized() + { + return DoPushBackUninitialized(typename type_select::type()); + } + + + template + inline void* fixed_vector::DoPushBackUninitialized(true_type) + { + return base_type::push_back_uninitialized(); + } + + + template + inline void* fixed_vector::DoPushBackUninitialized(false_type) + { + EASTL_ASSERT(mpEnd < internalCapacityPtr()); + + return mpEnd++; + } + + + template + inline void fixed_vector::push_back(const value_type& value) + { + DoPushBack(typename type_select::type(), value); + } + + + template + inline void fixed_vector::DoPushBack(true_type, const value_type& value) + { + base_type::push_back(value); + } + + + // This template specializes for overflow NOT enabled. + // In this configuration, there is no need for the heavy weight push_back() which tests to see if the container should grow (it never will) + template + inline void fixed_vector::DoPushBack(false_type, const value_type& value) + { + EASTL_ASSERT(mpEnd < internalCapacityPtr()); + + ::new((void*)mpEnd++) value_type(value); + } + + + template + inline typename fixed_vector::reference fixed_vector::push_back() + { + return DoPushBack(typename type_select::type()); + } + + + template + inline typename fixed_vector::reference fixed_vector::DoPushBack(true_type) + { + return base_type::push_back(); + } + + + // This template specializes for overflow NOT enabled. + // In this configuration, there is no need for the heavy weight push_back() which tests to see if the container should grow (it never will) + template + inline typename fixed_vector::reference fixed_vector::DoPushBack(false_type) + { + EASTL_ASSERT(mpEnd < internalCapacityPtr()); + + ::new((void*)mpEnd++) value_type; // Note that this isn't value_type() as that syntax doesn't work on all compilers for POD types. 
+ + return *(mpEnd - 1); // Same as return back(); + } + + + template + inline void fixed_vector::push_back(value_type&& value) + { + DoPushBackMove(typename type_select::type(), eastl::move(value)); + } + + + template + inline void fixed_vector::DoPushBackMove(true_type, value_type&& value) + { + base_type::push_back(eastl::move(value)); // This will call vector::push_back(value_type &&), and possibly swap value with *mpEnd. + } + + + // This template specializes for overflow NOT enabled. + // In this configuration, there is no need for the heavy weight push_back() which tests to see if the container should grow (it never will) + template + inline void fixed_vector::DoPushBackMove(false_type, value_type&& value) + { + EASTL_ASSERT(mpEnd < internalCapacityPtr()); + + ::new((void*)mpEnd++) value_type(eastl::move(value)); // This will call the value_type(value_type&&) constructor, and possibly swap value with *mpEnd. + } + + + template + inline const typename fixed_vector::overflow_allocator_type& + fixed_vector::get_overflow_allocator() const EA_NOEXCEPT + { + return get_allocator().get_overflow_allocator(); + } + + + template + inline typename fixed_vector::overflow_allocator_type& + fixed_vector::get_overflow_allocator() EA_NOEXCEPT + { + return get_allocator().get_overflow_allocator(); + } + + + template + inline void + fixed_vector::set_overflow_allocator(const overflow_allocator_type& allocator) + { + get_allocator().set_overflow_allocator(allocator); + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + // operator ==, !=, <, >, <=, >= come from the vector implementations. + + template + inline void swap(fixed_vector& a, + fixed_vector& b) + { + // Fixed containers use a special swap that can deal with excessively large buffers. 
+ eastl::fixed_swap(a, b); + } + + + +} // namespace eastl + + + +#endif // Header include guard + + + + + + + + + + + + diff --git a/include/EASTL/functional.h b/include/EASTL/functional.h new file mode 100644 index 0000000..03c2697 --- /dev/null +++ b/include/EASTL/functional.h @@ -0,0 +1,1262 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FUNCTIONAL_H +#define EASTL_FUNCTIONAL_H + + +#include +#include +#include +#include +#include +#include +#include + + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /////////////////////////////////////////////////////////////////////// + // Primary C++ functions + /////////////////////////////////////////////////////////////////////// + + template + struct plus : public binary_function + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { return a + b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/plus_void + template <> + struct plus + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) + eastl::forward(b)) + { return eastl::forward(a) + eastl::forward(b); } + }; + + template + struct minus : public binary_function + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { return a - b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/minus_void + template <> + struct minus + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) - eastl::forward(b)) + { return eastl::forward(a) - eastl::forward(b); } + }; + + template + struct multiplies 
: public binary_function + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { return a * b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/multiplies_void + template <> + struct multiplies + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) * eastl::forward(b)) + { return eastl::forward(a) * eastl::forward(b); } + }; + + template + struct divides : public binary_function + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { return a / b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/divides_void + template <> + struct divides + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) / eastl::forward(b)) + { return eastl::forward(a) / eastl::forward(b); } + }; + + template + struct modulus : public binary_function + { + EA_CPP14_CONSTEXPR T operator()(const T& a, const T& b) const + { return a % b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/modulus_void + template <> + struct modulus + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) % eastl::forward(b)) + { return eastl::forward(a) % eastl::forward(b); } + }; + + template + struct negate : public unary_function + { + EA_CPP14_CONSTEXPR T operator()(const T& a) const + { return -a; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/negate_void + template <> + struct negate + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(T&& t) const + -> decltype(-eastl::forward(t)) + { return -eastl::forward(t); } + }; + + template + struct equal_to : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a == b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/equal_to_void + 
template <> + struct equal_to + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) == eastl::forward(b)) + { return eastl::forward(a) == eastl::forward(b); } + }; + + template + bool validate_equal_to(const T& a, const T& b, Compare compare) + { + return compare(a, b) == compare(b, a); + } + + template + struct not_equal_to : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a != b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/not_equal_to_void + template <> + struct not_equal_to + { + typedef int is_transparent; + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) != eastl::forward(b)) + { return eastl::forward(a) != eastl::forward(b); } + }; + + template + bool validate_not_equal_to(const T& a, const T& b, Compare compare) + { + return compare(a, b) == compare(b, a); // We want the not equal comparison results to be equal. + } + + /// str_equal_to + /// + /// Compares two 0-terminated string types. + /// The T types are expected to be iterators or act like iterators. + /// The expected behavior of str_less is the same as (strcmp(p1, p2) == 0). + /// + /// Example usage: + /// hash_set, str_equal_to > stringHashSet; + /// + /// Note: + /// You couldn't use str_equal_to like this: + /// bool result = equal("hi", "hi" + 2, "ho", str_equal_to()); + /// This is because equal tests an array of something, with each element by + /// the comparison function. But str_equal_to tests an array of something itself. + /// + /// To consider: Update this code to use existing word-based comparison optimizations, + /// such as that used in the EAStdC Strcmp function. 
+ /// + template + struct str_equal_to : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(T a, T b) const + { + while(*a && (*a == *b)) + { + ++a; + ++b; + } + return (*a == *b); + } + }; + + template + struct greater : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a > b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/greater_void + template <> + struct greater + { + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) > eastl::forward(b)) + { return eastl::forward(a) > eastl::forward(b); } + }; + + template + bool validate_greater(const T& a, const T& b, Compare compare) + { + return !compare(a, b) || !compare(b, a); // If (a > b), then !(b > a) + } + + + template + bool validate_less(const T& a, const T& b, Compare compare) + { + return !compare(a, b) || !compare(b, a); // If (a < b), then !(b < a) + } + + /// str_less + /// + /// Compares two 0-terminated string types. + /// The T types are expected to be iterators or act like iterators, + /// and that includes being a pointer to a C character array. + /// The expected behavior of str_less is the same as (strcmp(p1, p2) < 0). + /// This function is not Unicode-correct and it's not guaranteed to work + /// with all Unicode strings. + /// + /// Example usage: + /// set > stringSet; + /// + /// To consider: Update this code to use existing word-based comparison optimizations, + /// such as that used in the EAStdC Strcmp function. 
+ /// + template + struct str_less : public binary_function + { + bool operator()(T a, T b) const + { + while(static_cast::type>::type>(*a) == + static_cast::type>::type>(*b)) + { + if(*a == 0) + return (*b != 0); + ++a; + ++b; + } + + char aValue = static_cast::type>(*a); + char bValue = static_cast::type>(*b); + + typename make_unsigned::type aValueU = static_cast::type>(aValue); + typename make_unsigned::type bValueU = static_cast::type>(bValue); + + return aValueU < bValueU; + + //return (static_cast::type>::type>(*a) < + // static_cast::type>::type>(*b)); + } + }; + + template + struct greater_equal : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a >= b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/greater_equal_void + template <> + struct greater_equal + { + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) >= eastl::forward(b)) + { return eastl::forward(a) >= eastl::forward(b); } + }; + + template + bool validate_greater_equal(const T& a, const T& b, Compare compare) + { + return !compare(a, b) || !compare(b, a); // If (a >= b), then !(b >= a) + } + + template + struct less_equal : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a <= b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/less_equal_void + template <> + struct less_equal + { + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) <= eastl::forward(b)) + { return eastl::forward(a) <= eastl::forward(b); } + }; + + template + bool validate_less_equal(const T& a, const T& b, Compare compare) + { + return !compare(a, b) || !compare(b, a); // If (a <= b), then !(b <= a) + } + + template + struct logical_and : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a && b; } + }; + + // 
http://en.cppreference.com/w/cpp/utility/functional/logical_and_void + template <> + struct logical_and + { + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) && eastl::forward(b)) + { return eastl::forward(a) && eastl::forward(b); } + }; + + template + struct logical_or : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a || b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/logical_or_void + template <> + struct logical_or + { + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) || eastl::forward(b)) + { return eastl::forward(a) || eastl::forward(b); } + }; + + template + struct logical_not : public unary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a) const + { return !a; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/logical_not_void + template <> + struct logical_not + { + template + EA_CPP14_CONSTEXPR auto operator()(T&& t) const + -> decltype(!eastl::forward(t)) + { return !eastl::forward(t); } + }; + + + + /////////////////////////////////////////////////////////////////////// + // Dual type functions + /////////////////////////////////////////////////////////////////////// + + template + struct equal_to_2 : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const + { return a == b; } + EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const // If you are getting a 'operator() already defined' error related to on this line while compiling a + { return b == a; } // hashtable class (e.g. hash_map), it's likely that you are using hashtable::find_as when you should + }; // be using hashtable::find instead. The problem is that (const T, U) collide. To do: make this work. 
+ + template + struct equal_to_2 : public equal_to + { + }; + + + template + struct not_equal_to_2 : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const + { return a != b; } + EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const + { return b != a; } + }; + + template + struct not_equal_to_2 : public not_equal_to + { + }; + + + template + struct less_2 : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const U& b) const + { return a < b; } + EA_CPP14_CONSTEXPR bool operator()(const U& b, const T& a) const + { return b < a; } + }; + + template + struct less_2 : public less + { + }; + + + + + /// unary_negate + /// + template + class unary_negate : public unary_function + { + protected: + Predicate mPredicate; + public: + explicit unary_negate(const Predicate& a) + : mPredicate(a) {} + EA_CPP14_CONSTEXPR bool operator()(const typename Predicate::argument_type& a) const + { return !mPredicate(a); } + }; + + template + inline EA_CPP14_CONSTEXPR unary_negate not1(const Predicate& predicate) + { return unary_negate(predicate); } + + + + /// binary_negate + /// + template + class binary_negate : public binary_function + { + protected: + Predicate mPredicate; + public: + explicit binary_negate(const Predicate& a) + : mPredicate(a) { } + EA_CPP14_CONSTEXPR bool operator()(const typename Predicate::first_argument_type& a, const typename Predicate::second_argument_type& b) const + { return !mPredicate(a, b); } + }; + + template + inline EA_CPP14_CONSTEXPR binary_negate not2(const Predicate& predicate) + { return binary_negate(predicate); } + + + + /// unary_compose + /// + template + struct unary_compose : public unary_function + { + protected: + Operation1 op1; + Operation2 op2; + + public: + unary_compose(const Operation1& x, const Operation2& y) + : op1(x), op2(y) {} + + typename Operation1::result_type operator()(const typename Operation2::argument_type& x) const + { return op1(op2(x)); } + + 
typename Operation1::result_type operator()(typename Operation2::argument_type& x) const + { return op1(op2(x)); } + }; + + template + inline unary_compose + compose1(const Operation1& op1, const Operation2& op2) + { + return unary_compose(op1,op2); + } + + + /// binary_compose + /// + template + class binary_compose : public unary_function + { + protected: + Operation1 op1; + Operation2 op2; + Operation3 op3; + + public: + // Support binary functors too. + typedef typename Operation2::argument_type first_argument_type; + typedef typename Operation3::argument_type second_argument_type; + + binary_compose(const Operation1& x, const Operation2& y, const Operation3& z) + : op1(x), op2(y), op3(z) { } + + typename Operation1::result_type operator()(const typename Operation2::argument_type& x) const + { return op1(op2(x),op3(x)); } + + typename Operation1::result_type operator()(typename Operation2::argument_type& x) const + { return op1(op2(x),op3(x)); } + + typename Operation1::result_type operator()(const typename Operation2::argument_type& x,const typename Operation3::argument_type& y) const + { return op1(op2(x),op3(y)); } + + typename Operation1::result_type operator()(typename Operation2::argument_type& x, typename Operation3::argument_type& y) const + { return op1(op2(x),op3(y)); } + }; + + + template + inline binary_compose + compose2(const Operation1& op1, const Operation2& op2, const Operation3& op3) + { + return binary_compose(op1, op2, op3); + } + + + + /////////////////////////////////////////////////////////////////////// + // pointer_to_unary_function + /////////////////////////////////////////////////////////////////////// + + /// pointer_to_unary_function + /// + /// This is an adapter template which converts a pointer to a standalone + /// function to a function object. This allows standalone functions to + /// work in many cases where the system requires a function object. 
+ /// + /// Example usage: + /// ptrdiff_t Rand(ptrdiff_t n) { return rand() % n; } // Note: The C rand function is poor and slow. + /// pointer_to_unary_function randInstance(Rand); + /// random_shuffle(pArrayBegin, pArrayEnd, randInstance); + /// + template + class pointer_to_unary_function : public unary_function + { + protected: + Result (*mpFunction)(Arg); + + public: + pointer_to_unary_function() + { } + + explicit pointer_to_unary_function(Result (*pFunction)(Arg)) + : mpFunction(pFunction) { } + + Result operator()(Arg x) const + { return mpFunction(x); } + }; + + + /// ptr_fun + /// + /// This ptr_fun is simply shorthand for usage of pointer_to_unary_function. + /// + /// Example usage (actually, you don't need to use ptr_fun here, but it works anyway): + /// int factorial(int x) { return (x > 1) ? (x * factorial(x - 1)) : x; } + /// transform(pIntArrayBegin, pIntArrayEnd, pIntArrayBegin, ptr_fun(factorial)); + /// + template + inline pointer_to_unary_function + ptr_fun(Result (*pFunction)(Arg)) + { return pointer_to_unary_function(pFunction); } + + + + + + /////////////////////////////////////////////////////////////////////// + // pointer_to_binary_function + /////////////////////////////////////////////////////////////////////// + + /// pointer_to_binary_function + /// + /// This is an adapter template which converts a pointer to a standalone + /// function to a function object. This allows standalone functions to + /// work in many cases where the system requires a function object. + /// + template + class pointer_to_binary_function : public binary_function + { + protected: + Result (*mpFunction)(Arg1, Arg2); + + public: + pointer_to_binary_function() + { } + + explicit pointer_to_binary_function(Result (*pFunction)(Arg1, Arg2)) + : mpFunction(pFunction) {} + + Result operator()(Arg1 x, Arg2 y) const + { return mpFunction(x, y); } + }; + + + /// This ptr_fun is simply shorthand for usage of pointer_to_binary_function. 
+ /// + /// Example usage (actually, you don't need to use ptr_fun here, but it works anyway): + /// int multiply(int x, int y) { return x * y; } + /// transform(pIntArray1Begin, pIntArray1End, pIntArray2Begin, pIntArray1Begin, ptr_fun(multiply)); + /// + template + inline pointer_to_binary_function + ptr_fun(Result (*pFunction)(Arg1, Arg2)) + { return pointer_to_binary_function(pFunction); } + + + + + + + /////////////////////////////////////////////////////////////////////// + // mem_fun + // mem_fun1 + // + // Note that mem_fun calls member functions via *pointers* to classes + // and not instances of classes. mem_fun_ref is for calling functions + // via instances of classes or references to classes. + // + // NOTE: + // mem_fun was deprecated in C++11 and removed in C++17, in favor + // of the more general mem_fn and bind. + // + /////////////////////////////////////////////////////////////////////// + + /// mem_fun_t + /// + /// Member function with no arguments. + /// + template + class mem_fun_t : public unary_function + { + public: + typedef Result (T::*MemberFunction)(); + + inline explicit mem_fun_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(T* pT) const + { + return (pT->*mpMemberFunction)(); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// mem_fun1_t + /// + /// Member function with one argument. + /// + template + class mem_fun1_t : public binary_function + { + public: + typedef Result (T::*MemberFunction)(Argument); + + inline explicit mem_fun1_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(T* pT, Argument arg) const + { + return (pT->*mpMemberFunction)(arg); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// const_mem_fun_t + /// + /// Const member function with no arguments. 
+ /// Note that we inherit from unary_function + /// instead of what the C++ standard specifies: unary_function. + /// The C++ standard is in error and this has been recognized by the defect group. + /// + template + class const_mem_fun_t : public unary_function + { + public: + typedef Result (T::*MemberFunction)() const; + + inline explicit const_mem_fun_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(const T* pT) const + { + return (pT->*mpMemberFunction)(); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// const_mem_fun1_t + /// + /// Const member function with one argument. + /// Note that we inherit from unary_function + /// instead of what the C++ standard specifies: unary_function. + /// The C++ standard is in error and this has been recognized by the defect group. + /// + template + class const_mem_fun1_t : public binary_function + { + public: + typedef Result (T::*MemberFunction)(Argument) const; + + inline explicit const_mem_fun1_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(const T* pT, Argument arg) const + { + return (pT->*mpMemberFunction)(arg); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// mem_fun + /// + /// This is the high level interface to the mem_fun_t family. + /// + /// Example usage: + /// struct TestClass { void print() { puts("hello"); } } + /// TestClass* pTestClassArray[3] = { ... 
}; + /// for_each(pTestClassArray, pTestClassArray + 3, &TestClass::print); + /// + /// Note: using conventional inlining here to avoid issues on GCC/Linux + /// + template + inline mem_fun_t + mem_fun(Result (T::*MemberFunction)()) + { + return eastl::mem_fun_t(MemberFunction); + } + + template + inline mem_fun1_t + mem_fun(Result (T::*MemberFunction)(Argument)) + { + return eastl::mem_fun1_t(MemberFunction); + } + + template + inline const_mem_fun_t + mem_fun(Result (T::*MemberFunction)() const) + { + return eastl::const_mem_fun_t(MemberFunction); + } + + template + inline const_mem_fun1_t + mem_fun(Result (T::*MemberFunction)(Argument) const) + { + return eastl::const_mem_fun1_t(MemberFunction); + } + + + + + + /////////////////////////////////////////////////////////////////////// + // mem_fun_ref + // mem_fun1_ref + // + /////////////////////////////////////////////////////////////////////// + + /// mem_fun_ref_t + /// + template + class mem_fun_ref_t : public unary_function + { + public: + typedef Result (T::*MemberFunction)(); + + inline explicit mem_fun_ref_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(T& t) const + { + return (t.*mpMemberFunction)(); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// mem_fun1_ref_t + /// + template + class mem_fun1_ref_t : public binary_function + { + public: + typedef Result (T::*MemberFunction)(Argument); + + inline explicit mem_fun1_ref_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(T& t, Argument arg) const + { + return (t.*mpMemberFunction)(arg); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// const_mem_fun_ref_t + /// + template + class const_mem_fun_ref_t : public unary_function + { + public: + typedef Result (T::*MemberFunction)() const; + + inline explicit const_mem_fun_ref_t(MemberFunction pMemberFunction) + : 
mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(const T& t) const + { + return (t.*mpMemberFunction)(); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// const_mem_fun1_ref_t + /// + template + class const_mem_fun1_ref_t : public binary_function + { + public: + typedef Result (T::*MemberFunction)(Argument) const; + + inline explicit const_mem_fun1_ref_t(MemberFunction pMemberFunction) + : mpMemberFunction(pMemberFunction) + { + // Empty + } + + inline Result operator()(const T& t, Argument arg) const + { + return (t.*mpMemberFunction)(arg); + } + + protected: + MemberFunction mpMemberFunction; + }; + + + /// mem_fun_ref + /// Example usage: + /// struct TestClass { void print() { puts("hello"); } } + /// TestClass testClassArray[3]; + /// for_each(testClassArray, testClassArray + 3, &TestClass::print); + /// + /// Note: using conventional inlining here to avoid issues on GCC/Linux + /// + template + inline mem_fun_ref_t + mem_fun_ref(Result (T::*MemberFunction)()) + { + return eastl::mem_fun_ref_t(MemberFunction); + } + + template + inline mem_fun1_ref_t + mem_fun_ref(Result (T::*MemberFunction)(Argument)) + { + return eastl::mem_fun1_ref_t(MemberFunction); + } + + template + inline const_mem_fun_ref_t + mem_fun_ref(Result (T::*MemberFunction)() const) + { + return eastl::const_mem_fun_ref_t(MemberFunction); + } + + template + inline const_mem_fun1_ref_t + mem_fun_ref(Result (T::*MemberFunction)(Argument) const) + { + return eastl::const_mem_fun1_ref_t(MemberFunction); + } + + + // not_fn_ret + // not_fn_ret is a implementation specified return type of eastl::not_fn. + // The type name is not specified but it does have mandated functions that conforming implementations must support. 
+ // + // http://en.cppreference.com/w/cpp/utility/functional/not_fn + // + template + struct not_fn_ret + { + explicit not_fn_ret(F&& f) : mDecayF(eastl::forward(f)) {} + not_fn_ret(not_fn_ret&& f) = default; + not_fn_ret(const not_fn_ret& f) = default; + + // overloads for lvalues + template + auto operator()(Args&&... args) & + -> decltype(!eastl::declval&, Args...>>()) + { return !eastl::invoke(mDecayF, eastl::forward(args)...); } + + template + auto operator()(Args&&... args) const & + -> decltype(!eastl::declval const&, Args...>>()) + { return !eastl::invoke(mDecayF, eastl::forward(args)...); } + + // overloads for rvalues + template + auto operator()(Args&&... args) && + -> decltype(!eastl::declval, Args...>>()) + { return !eastl::invoke(eastl::move(mDecayF), eastl::forward(args)...); } + + template + auto operator()(Args&&... args) const && + -> decltype(!eastl::declval const, Args...>>()) + { return !eastl::invoke(eastl::move(mDecayF), eastl::forward(args)...); } + + eastl::decay_t mDecayF; + }; + + /// not_fn + /// + /// Creates an implementation specified functor that returns the complement of the callable object it was passed. + /// not_fn is intended to replace the C++03-era negators eastl::not1 and eastl::not2. + /// + /// http://en.cppreference.com/w/cpp/utility/functional/not_fn + /// + /// Example usage: + /// + /// auto nf = eastl::not_fn([]{ return false; }); + /// assert(nf()); // return true + /// + template + inline not_fn_ret not_fn(F&& f) + { + return not_fn_ret(eastl::forward(f)); + } + + + /////////////////////////////////////////////////////////////////////// + // hash + /////////////////////////////////////////////////////////////////////// + namespace Internal + { + // utility to disable the generic template specialization that is + // used for enum types only. 
+ template + struct EnableHashIf + { + }; + + template + struct EnableHashIf + { + size_t operator()(const T& p) const { return size_t(p); } + }; + } // namespace Internal + + + template struct hash; + + template + struct hash : Internal::EnableHashIf> + { + size_t operator()(T p) const { return size_t(p); } + }; + + template struct hash // Note that we use the pointer as-is and don't divide by sizeof(T*). This is because the table is of a prime size and this division doesn't benefit distribution. + { size_t operator()(T* p) const { return size_t(uintptr_t(p)); } }; + + template <> struct hash + { size_t operator()(bool val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(char val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(signed char val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(unsigned char val) const { return static_cast(val); } }; + + #if defined(EA_CHAR16_NATIVE) && EA_CHAR16_NATIVE + template <> struct hash + { size_t operator()(char16_t val) const { return static_cast(val); } }; + #endif + + #if defined(EA_CHAR32_NATIVE) && EA_CHAR32_NATIVE + template <> struct hash + { size_t operator()(char32_t val) const { return static_cast(val); } }; + #endif + + // If wchar_t is a native type instead of simply a define to an existing type... 
+ #if !defined(EA_WCHAR_T_NON_NATIVE) + template <> struct hash + { size_t operator()(wchar_t val) const { return static_cast(val); } }; + #endif + + template <> struct hash + { size_t operator()(signed short val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(unsigned short val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(signed int val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(unsigned int val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(signed long val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(unsigned long val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(signed long long val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(unsigned long long val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(float val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(double val) const { return static_cast(val); } }; + + template <> struct hash + { size_t operator()(long double val) const { return static_cast(val); } }; + + + /////////////////////////////////////////////////////////////////////////// + // string hashes + // + // Note that our string hashes here intentionally are slow for long strings. + // The reasoning for this is so: + // - The large majority of hashed strings are only a few bytes long. + // - The hash function is significantly more efficient if it can make this assumption. + // - The user is welcome to make a custom hash for those uncommon cases where + // long strings need to be hashed. Indeed, the user can probably make a + // special hash customized for such strings that's better than what we provide. 
+ /////////////////////////////////////////////////////////////////////////// + + template <> struct hash + { + size_t operator()(const char* p) const + { + uint32_t c, result = 2166136261U; // FNV1 hash. Perhaps the best string hash. Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint8_t)*p++) != 0) // Using '!=' disables compiler warnings. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template <> struct hash + { + size_t operator()(const char* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint8_t)*p++) != 0) // cast to unsigned 8 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + +#if EA_CHAR8_UNIQUE + template <> struct hash + { + size_t operator()(const char8_t* p) const + { + uint32_t c, result = 2166136261U; // FNV1 hash. Perhaps the best string hash. Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint8_t)*p++) != 0) // Using '!=' disables compiler warnings. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template <> struct hash + { + size_t operator()(const char8_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint8_t)*p++) != 0) // cast to unsigned 8 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; +#endif + + + template <> struct hash + { + size_t operator()(const char16_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint16_t)*p++) != 0) // cast to unsigned 16 bit. 
+ result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template <> struct hash + { + size_t operator()(const char16_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint16_t)*p++) != 0) // cast to unsigned 16 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template <> struct hash + { + size_t operator()(const char32_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template <> struct hash + { + size_t operator()(const char32_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + +#if defined(EA_WCHAR_UNIQUE) && EA_WCHAR_UNIQUE + template<> struct hash + { + size_t operator()(const wchar_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while ((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + template<> struct hash + { + size_t operator()(const wchar_t* p) const + { + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while ((c = (uint32_t)*p++) != 0) // cast to unsigned 32 bit. + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; +#endif + + /// string_hash + /// + /// Defines a generic string hash for an arbitrary EASTL basic_string container. 
+ /// + /// Example usage: + /// eastl::hash_set > hashSet; + /// + template + struct string_hash + { + typedef String string_type; + typedef typename String::value_type value_type; + typedef typename eastl::add_unsigned::type unsigned_value_type; + + size_t operator()(const string_type& s) const + { + const unsigned_value_type* p = (const unsigned_value_type*)s.c_str(); + uint32_t c, result = 2166136261U; // Intentionally uint32_t instead of size_t, so the behavior is the same regardless of size. + while((c = *p++) != 0) + result = (result * 16777619) ^ c; + return (size_t)result; + } + }; + + +} // namespace eastl + +#include + +#endif // Header include guard + + + + + + + diff --git a/include/EASTL/hash_map.h b/include/EASTL/hash_map.h new file mode 100644 index 0000000..c363597 --- /dev/null +++ b/include/EASTL/hash_map.h @@ -0,0 +1,580 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file is based on the TR1 (technical report 1) reference implementation +// of the unordered_set/unordered_map C++ classes as of about 4/2005. Most likely +// many or all C++ library vendors' implementations of this classes will be +// based off of the reference version and so will look pretty similar to this +// file as well as other vendors' versions. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_HASH_MAP_H +#define EASTL_HASH_MAP_H + + +#include +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. 
+#endif + + + +namespace eastl +{ + + /// EASTL_HASH_MAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_HASH_MAP_DEFAULT_NAME + #define EASTL_HASH_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_map" // Unless the user overrides something, this is "EASTL hash_map". + #endif + + + /// EASTL_HASH_MULTIMAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_HASH_MULTIMAP_DEFAULT_NAME + #define EASTL_HASH_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_multimap" // Unless the user overrides something, this is "EASTL hash_multimap". + #endif + + + /// EASTL_HASH_MAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_HASH_MAP_DEFAULT_ALLOCATOR + #define EASTL_HASH_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MAP_DEFAULT_NAME) + #endif + + /// EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR + #define EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MULTIMAP_DEFAULT_NAME) + #endif + + + + /// hash_map + /// + /// Implements a hash_map, which is a hashed associative container. + /// Lookups are O(1) (that is, they are fast) but the container is + /// not sorted. Note that lookups are only O(1) if the hash table + /// is well-distributed (non-colliding). The lookup approaches + /// O(n) behavior as the table becomes increasingly poorly distributed. + /// + /// set_max_load_factor + /// If you want to make a hashtable never increase its bucket usage, + /// call set_max_load_factor with a very high value such as 100000.f. + /// + /// bCacheHashCode + /// We provide the boolean bCacheHashCode template parameter in order + /// to allow the storing of the hash code of the key within the map. + /// When this option is disabled, the rehashing of the table will + /// call the hash function on the key. 
Setting bCacheHashCode to true + /// is useful for cases whereby the calculation of the hash value for + /// a contained object is very expensive. + /// + /// find_as + /// In order to support the ability to have a hashtable of strings but + /// be able to do efficiently lookups via char pointers (i.e. so they + /// aren't converted to string objects), we provide the find_as + /// function. This function allows you to do a find with a key of a + /// type other than the hashtable key type. + /// + /// Example find_as usage: + /// hash_map hashMap; + /// i = hashMap.find_as("hello"); // Use default hash and compare. + /// + /// Example find_as usage (namespaces omitted for brevity): + /// hash_map hashMap; + /// i = hashMap.find_as("hello", hash(), equal_to_2()); + /// + template , typename Predicate = eastl::equal_to, + typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false> + class hash_map + : public hashtable, Allocator, eastl::use_first >, Predicate, + Hash, mod_range_hashing, default_ranged_hash, prime_rehash_policy, bCacheHashCode, true, true> + { + public: + typedef hashtable, Allocator, + eastl::use_first >, + Predicate, Hash, mod_range_hashing, default_ranged_hash, + prime_rehash_policy, bCacheHashCode, true, true> base_type; + typedef hash_map this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::key_type key_type; + typedef T mapped_type; + typedef typename base_type::value_type value_type; // NOTE: 'value_type = pair'. + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::insert_return_type insert_return_type; + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + + using base_type::insert; + + public: + /// hash_map + /// + /// Default constructor. 
+ /// + explicit hash_map(const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR) + : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), + Predicate(), eastl::use_first >(), allocator) + { + // Empty + } + + + /// hash_map + /// + /// Constructor which creates an empty container, but start with nBucketCount buckets. + /// We default to a small nBucketCount value, though the user really should manually + /// specify an appropriate value in order to prevent memory from being reallocated. + /// + explicit hash_map(size_type nBucketCount, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR) + : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + + hash_map(const this_type& x) + : base_type(x) + { + } + + + hash_map(this_type&& x) + : base_type(eastl::move(x)) + { + } + + + hash_map(this_type&& x, const allocator_type& allocator) + : base_type(eastl::move(x), allocator) + { + } + + + /// hash_map + /// + /// initializer_list-based constructor. + /// Allows for initializing with brace values (e.g. hash_map hm = { {3,"c"}, {4,"d"}, {5,"e"} }; ) + /// + hash_map(std::initializer_list ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR) + : base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + + /// hash_map + /// + /// An input bucket count of <= 1 causes the bucket count to be equal to the number of + /// elements in the input range. 
+ /// + template + hash_map(ForwardIterator first, ForwardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR) + : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + + this_type& operator=(const this_type& x) + { + return static_cast(base_type::operator=(x)); + } + + + this_type& operator=(std::initializer_list ilist) + { + return static_cast(base_type::operator=(ilist)); + } + + + this_type& operator=(this_type&& x) + { + return static_cast(base_type::operator=(eastl::move(x))); + } + + + /// insert + /// + /// This is an extension to the C++ standard. We insert a default-constructed + /// element with the given key. The reason for this is that we can avoid the + /// potentially expensive operation of creating and/or copying a mapped_type + /// object on the stack. 
+ insert_return_type insert(const key_type& key) + { + return base_type::DoInsertKey(true_type(), key); + } + + T& at(const key_type& k) + { + iterator it = base_type::find(k); + + if (it == base_type::end()) + { + #if EASTL_EXCEPTIONS_ENABLED + // throw exeption if exceptions enabled + throw std::out_of_range("invalid hash_map key"); + #else + // assert false if asserts enabled + EASTL_ASSERT_MSG(false, "invalid hash_map key"); + #endif + } + // undefined behaviour if exceptions and asserts are disabled and it == end() + return it->second; + } + + + const T& at(const key_type& k) const + { + const_iterator it = base_type::find(k); + + if (it == base_type::end()) + { + #if EASTL_EXCEPTIONS_ENABLED + // throw exeption if exceptions enabled + throw std::out_of_range("invalid hash_map key"); + #else + // assert false if asserts enabled + EASTL_ASSERT_MSG(false, "invalid hash_map key"); + #endif + } + // undefined behaviour if exceptions and asserts are disabled and it == end() + return it->second; + } + + + insert_return_type insert(key_type&& key) + { + return base_type::DoInsertKey(true_type(), eastl::move(key)); + } + + + mapped_type& operator[](const key_type& key) + { + return (*base_type::DoInsertKey(true_type(), key).first).second; + + // Slower reference version: + //const typename base_type::iterator it = base_type::find(key); + //if(it != base_type::end()) + // return (*it).second; + //return (*base_type::insert(value_type(key, mapped_type())).first).second; + } + + mapped_type& operator[](key_type&& key) + { + // The Standard states that this function "inserts the value value_type(std::move(key), mapped_type())" + return (*base_type::DoInsertKey(true_type(), eastl::move(key)).first).second; + } + + + }; // hash_map + + /// hash_map erase_if + /// + /// https://en.cppreference.com/w/cpp/container/unordered_map/erase_if + template + void erase_if(eastl::hash_map& c, UserPredicate predicate) + { + // Erases all elements that satisfy the predicate from the 
container. + for (auto i = c.begin(), last = c.end(); i != last;) + { + if (predicate(*i)) + { + i = c.erase(i); + } + else + { + ++i; + } + } + } + + + /// hash_multimap + /// + /// Implements a hash_multimap, which is the same thing as a hash_map + /// except that contained elements need not be unique. See the + /// documentation for hash_set for details. + /// + template , typename Predicate = eastl::equal_to, + typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false> + class hash_multimap + : public hashtable, Allocator, eastl::use_first >, Predicate, + Hash, mod_range_hashing, default_ranged_hash, prime_rehash_policy, bCacheHashCode, true, false> + { + public: + typedef hashtable, Allocator, + eastl::use_first >, + Predicate, Hash, mod_range_hashing, default_ranged_hash, + prime_rehash_policy, bCacheHashCode, true, false> base_type; + typedef hash_multimap this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::key_type key_type; + typedef T mapped_type; + typedef typename base_type::value_type value_type; // Note that this is pair. + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::insert_return_type insert_return_type; + typedef typename base_type::iterator iterator; + + using base_type::insert; + + private: + using base_type::try_emplace; + using base_type::insert_or_assign; + + public: + /// hash_multimap + /// + /// Default constructor. + /// + explicit hash_multimap(const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR) + : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), + Predicate(), eastl::use_first >(), allocator) + { + // Empty + } + + + /// hash_multimap + /// + /// Constructor which creates an empty container, but start with nBucketCount buckets. 
+ /// We default to a small nBucketCount value, though the user really should manually + /// specify an appropriate value in order to prevent memory from being reallocated. + /// + explicit hash_multimap(size_type nBucketCount, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR) + : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + + hash_multimap(const this_type& x) + : base_type(x) + { + } + + + hash_multimap(this_type&& x) + : base_type(eastl::move(x)) + { + } + + + hash_multimap(this_type&& x, const allocator_type& allocator) + : base_type(eastl::move(x), allocator) + { + } + + + /// hash_multimap + /// + /// initializer_list-based constructor. + /// Allows for initializing with brace values (e.g. hash_multimap hm = { {3,"c"}, {3,"C"}, {4,"d"} }; ) + /// + hash_multimap(std::initializer_list ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR) + : base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + + /// hash_multimap + /// + /// An input bucket count of <= 1 causes the bucket count to be equal to the number of + /// elements in the input range. 
+ /// + template + hash_multimap(ForwardIterator first, ForwardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR) + : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), + predicate, eastl::use_first >(), allocator) + { + // Empty + } + + + this_type& operator=(const this_type& x) + { + return static_cast(base_type::operator=(x)); + } + + + this_type& operator=(std::initializer_list ilist) + { + return static_cast(base_type::operator=(ilist)); + } + + + this_type& operator=(this_type&& x) + { + return static_cast(base_type::operator=(eastl::move(x))); + } + + + /// insert + /// + /// This is an extension to the C++ standard. We insert a default-constructed + /// element with the given key. The reason for this is that we can avoid the + /// potentially expensive operation of creating and/or copying a mapped_type + /// object on the stack. + insert_return_type insert(const key_type& key) + { + return base_type::DoInsertKey(false_type(), key); + } + + + insert_return_type insert(key_type&& key) + { + return base_type::DoInsertKey(false_type(), eastl::move(key)); + } + + }; // hash_multimap + + /// hash_multimap erase_if + /// + /// https://en.cppreference.com/w/cpp/container/unordered_multimap/erase_if + template + void erase_if(eastl::hash_multimap& c, UserPredicate predicate) + { + // Erases all elements that satisfy the predicate from the container. 
+ for (auto i = c.begin(), last = c.end(); i != last;) + { + if (predicate(*i)) + { + i = c.erase(i); + } + else + { + ++i; + } + } + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const hash_map& a, + const hash_map& b) + { + typedef typename hash_map::const_iterator const_iterator; + + // We implement branching with the assumption that the return value is usually false. + if(a.size() != b.size()) + return false; + + // For map (with its unique keys), we need only test that each element in a can be found in b, + // as there can be only one such pairing per element. multimap needs to do a something more elaborate. + for(const_iterator ai = a.begin(), aiEnd = a.end(), biEnd = b.end(); ai != aiEnd; ++ai) + { + const_iterator bi = b.find(ai->first); + + if((bi == biEnd) || !(*ai == *bi)) // We have to compare the values, because lookups are done by keys alone but the full value_type of a map is a key/value pair. + return false; // It's possible that two elements in the two containers have identical keys but different values. + } + + return true; + } + + template + inline bool operator!=(const hash_map& a, + const hash_map& b) + { + return !(a == b); + } + + + template + inline bool operator==(const hash_multimap& a, + const hash_multimap& b) + { + typedef typename hash_multimap::const_iterator const_iterator; + typedef typename eastl::iterator_traits::difference_type difference_type; + + // We implement branching with the assumption that the return value is usually false. + if(a.size() != b.size()) + return false; + + // We can't simply search for each element of a in b, as it may be that the bucket for + // two elements in a has those same two elements in b but in different order (which should + // still result in equality). 
Also it's possible that one bucket in a has two elements which + // both match a solitary element in the equivalent bucket in b (which shouldn't result in equality). + eastl::pair aRange; + eastl::pair bRange; + + for(const_iterator ai = a.begin(), aiEnd = a.end(); ai != aiEnd; ai = aRange.second) // For each element in a... + { + aRange = a.equal_range(ai->first); // Get the range of elements in a that are equal to ai. + bRange = b.equal_range(ai->first); // Get the range of elements in b that are equal to ai. + + // We need to verify that aRange == bRange. First make sure the range sizes are equivalent... + const difference_type aDistance = eastl::distance(aRange.first, aRange.second); + const difference_type bDistance = eastl::distance(bRange.first, bRange.second); + + if(aDistance != bDistance) + return false; + + // At this point, aDistance > 0 and aDistance == bDistance. + // Implement a fast pathway for the case that there's just a single element. + if(aDistance == 1) + { + if(!(*aRange.first == *bRange.first)) // We have to compare the values, because lookups are done by keys alone but the full value_type of a map is a key/value pair. + return false; // It's possible that two elements in the two containers have identical keys but different values. Ditto for the permutation case below. + } + else + { + // Check to see if these aRange and bRange are any permutation of each other. + // This check gets slower as there are more elements in the range. 
+ if(!eastl::is_permutation(aRange.first, aRange.second, bRange.first)) + return false; + } + } + + return true; + } + + template + inline bool operator!=(const hash_multimap& a, + const hash_multimap& b) + { + return !(a == b); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + diff --git a/include/EASTL/hash_set.h b/include/EASTL/hash_set.h new file mode 100644 index 0000000..c075975 --- /dev/null +++ b/include/EASTL/hash_set.h @@ -0,0 +1,468 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file is based on the TR1 (technical report 1) reference implementation +// of the unordered_set/unordered_map C++ classes as of about 4/2005. Most likely +// many or all C++ library vendors' implementations of this classes will be +// based off of the reference version and so will look pretty similar to this +// file as well as other vendors' versions. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_HASH_SET_H +#define EASTL_HASH_SET_H + + +#include +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// EASTL_HASH_SET_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_HASH_SET_DEFAULT_NAME + #define EASTL_HASH_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_set" // Unless the user overrides something, this is "EASTL hash_set". + #endif + + + /// EASTL_HASH_MULTISET_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. 
+ /// + #ifndef EASTL_HASH_MULTISET_DEFAULT_NAME + #define EASTL_HASH_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_multiset" // Unless the user overrides something, this is "EASTL hash_multiset". + #endif + + + /// EASTL_HASH_SET_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_HASH_SET_DEFAULT_ALLOCATOR + #define EASTL_HASH_SET_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_SET_DEFAULT_NAME) + #endif + + /// EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR + #define EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MULTISET_DEFAULT_NAME) + #endif + + + + /// hash_set + /// + /// Implements a hash_set, which is a hashed unique-item container. + /// Lookups are O(1) (that is, they are fast) but the container is + /// not sorted. Note that lookups are only O(1) if the hash table + /// is well-distributed (non-colliding). The lookup approaches + /// O(n) behavior as the table becomes increasingly poorly distributed. + /// + /// set_max_load_factor + /// If you want to make a hashtable never increase its bucket usage, + /// call set_max_load_factor with a very high value such as 100000.f. + /// + /// bCacheHashCode + /// We provide the boolean bCacheHashCode template parameter in order + /// to allow the storing of the hash code of the key within the map. + /// When this option is disabled, the rehashing of the table will + /// call the hash function on the key. Setting bCacheHashCode to true + /// is useful for cases whereby the calculation of the hash value for + /// a contained object is very expensive. + /// + /// find_as + /// In order to support the ability to have a hashtable of strings but + /// be able to do efficiently lookups via char pointers (i.e. so they + /// aren't converted to string objects), we provide the find_as + /// function. This function allows you to do a find with a key of a + /// type other than the hashtable key type. 
+ /// + /// Example find_as usage: + /// hash_set hashSet; + /// i = hashSet.find_as("hello"); // Use default hash and compare. + /// + /// Example find_as usage (namespaces omitted for brevity): + /// hash_set hashSet; + /// i = hashSet.find_as("hello", hash(), equal_to_2()); + /// + template , typename Predicate = eastl::equal_to, + typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false> + class hash_set + : public hashtable, Predicate, + Hash, mod_range_hashing, default_ranged_hash, + prime_rehash_policy, bCacheHashCode, false, true> + { + public: + typedef hashtable, Predicate, + Hash, mod_range_hashing, default_ranged_hash, + prime_rehash_policy, bCacheHashCode, false, true> base_type; + typedef hash_set this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::node_type node_type; + + public: + /// hash_set + /// + /// Default constructor. + /// + explicit hash_set(const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR) + : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_self(), allocator) + { + // Empty + } + + + /// hash_set + /// + /// Constructor which creates an empty container, but start with nBucketCount buckets. + /// We default to a small nBucketCount value, though the user really should manually + /// specify an appropriate value in order to prevent memory from being reallocated. 
+ /// + explicit hash_set(size_type nBucketCount, const Hash& hashFunction = Hash(), const Predicate& predicate = Predicate(), + const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR) + : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self(), allocator) + { + // Empty + } + + + hash_set(const this_type& x) + : base_type(x) + { + } + + + hash_set(this_type&& x) + : base_type(eastl::move(x)) + { + } + + + hash_set(this_type&& x, const allocator_type& allocator) + : base_type(eastl::move(x), allocator) + { + } + + + /// hash_set + /// + /// initializer_list-based constructor. + /// Allows for initializing with brace values (e.g. hash_set hs = { 3, 4, 5, }; ) + /// + hash_set(std::initializer_list ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR) + : base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self(), allocator) + { + // Empty + } + + + /// hash_set + /// + /// An input bucket count of <= 1 causes the bucket count to be equal to the number of + /// elements in the input range. 
+ /// + template + hash_set(FowardIterator first, FowardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR) + : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self(), allocator) + { + // Empty + } + + + this_type& operator=(const this_type& x) + { + return static_cast(base_type::operator=(x)); + } + + + this_type& operator=(std::initializer_list ilist) + { + return static_cast(base_type::operator=(ilist)); + } + + + this_type& operator=(this_type&& x) + { + return static_cast(base_type::operator=(eastl::move(x))); + } + + }; // hash_set + + /// hash_set erase_if + /// + /// https://en.cppreference.com/w/cpp/container/unordered_set/erase_if + template + void erase_if(eastl::hash_set& c, UserPredicate predicate) + { + // Erases all elements that satisfy the predicate pred from the container. + for (auto i = c.begin(), last = c.end(); i != last;) + { + if (predicate(*i)) + { + i = c.erase(i); + } + else + { + ++i; + } + } + } + + + /// hash_multiset + /// + /// Implements a hash_multiset, which is the same thing as a hash_set + /// except that contained elements need not be unique. See the documentation + /// for hash_set for details. 
+ /// + template , typename Predicate = eastl::equal_to, + typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false> + class hash_multiset + : public hashtable, Predicate, + Hash, mod_range_hashing, default_ranged_hash, + prime_rehash_policy, bCacheHashCode, false, false> + { + public: + typedef hashtable, Predicate, + Hash, mod_range_hashing, default_ranged_hash, + prime_rehash_policy, bCacheHashCode, false, false> base_type; + typedef hash_multiset this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::node_type node_type; + + public: + /// hash_multiset + /// + /// Default constructor. + /// + explicit hash_multiset(const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR) + : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_self(), allocator) + { + // Empty + } + + + /// hash_multiset + /// + /// Constructor which creates an empty container, but start with nBucketCount buckets. + /// We default to a small nBucketCount value, though the user really should manually + /// specify an appropriate value in order to prevent memory from being reallocated. + /// + explicit hash_multiset(size_type nBucketCount, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR) + : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self(), allocator) + { + // Empty + } + + + hash_multiset(const this_type& x) + : base_type(x) + { + } + + + hash_multiset(this_type&& x) + : base_type(eastl::move(x)) + { + } + + + hash_multiset(this_type&& x, const allocator_type& allocator) + : base_type(eastl::move(x), allocator) + { + } + + + /// hash_multiset + /// + /// initializer_list-based constructor. 
+ /// Allows for initializing with brace values (e.g. hash_set hs = { 3, 3, 4, }; ) + /// + hash_multiset(std::initializer_list ilist, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR) + : base_type(ilist.begin(), ilist.end(), nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self(), allocator) + { + // Empty + } + + + /// hash_multiset + /// + /// An input bucket count of <= 1 causes the bucket count to be equal to the number of + /// elements in the input range. + /// + template + hash_multiset(FowardIterator first, FowardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(), + const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR) + : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self(), allocator) + { + // Empty + } + + + this_type& operator=(const this_type& x) + { + return static_cast(base_type::operator=(x)); + } + + + this_type& operator=(std::initializer_list ilist) + { + return static_cast(base_type::operator=(ilist)); + } + + + this_type& operator=(this_type&& x) + { + return static_cast(base_type::operator=(eastl::move(x))); + } + + }; // hash_multiset + + /// hash_multiset erase_if + /// + /// https://en.cppreference.com/w/cpp/container/unordered_multiset/erase_if + template + void erase_if(eastl::hash_multiset& c, UserPredicate predicate) + { + // Erases all elements that satisfy the predicate pred from the container. 
+ for (auto i = c.begin(), last = c.end(); i != last;) + { + if (predicate(*i)) + { + i = c.erase(i); + } + else + { + ++i; + } + } + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const hash_set& a, + const hash_set& b) + { + typedef typename hash_set::const_iterator const_iterator; + + // We implement branching with the assumption that the return value is usually false. + if(a.size() != b.size()) + return false; + + // For set (with its unique keys), we need only test that each element in a can be found in b, + // as there can be only one such pairing per element. multiset needs to do a something more elaborate. + for(const_iterator ai = a.begin(), aiEnd = a.end(), biEnd = b.end(); ai != aiEnd; ++ai) + { + const_iterator bi = b.find(*ai); + + if((bi == biEnd) || !(*ai == *bi)) // We have to compare values in addition to making sure the lookups succeeded. This is because the lookup is done via the user-supplised Predicate + return false; // which isn't strictly required to be identical to the Value operator==, though 99% of the time it will be so. + } + + return true; + } + + template + inline bool operator!=(const hash_set& a, + const hash_set& b) + { + return !(a == b); + } + + + template + inline bool operator==(const hash_multiset& a, + const hash_multiset& b) + { + typedef typename hash_multiset::const_iterator const_iterator; + typedef typename eastl::iterator_traits::difference_type difference_type; + + // We implement branching with the assumption that the return value is usually false. + if(a.size() != b.size()) + return false; + + // We can't simply search for each element of a in b, as it may be that the bucket for + // two elements in a has those same two elements in b but in different order (which should + // still result in equality). 
Also it's possible that one bucket in a has two elements which + // both match a solitary element in the equivalent bucket in b (which shouldn't result in equality). + eastl::pair aRange; + eastl::pair bRange; + + for(const_iterator ai = a.begin(), aiEnd = a.end(); ai != aiEnd; ai = aRange.second) // For each element in a... + { + aRange = a.equal_range(*ai); // Get the range of elements in a that are equal to ai. + bRange = b.equal_range(*ai); // Get the range of elements in b that are equal to ai. + + // We need to verify that aRange == bRange. First make sure the range sizes are equivalent... + const difference_type aDistance = eastl::distance(aRange.first, aRange.second); + const difference_type bDistance = eastl::distance(bRange.first, bRange.second); + + if(aDistance != bDistance) + return false; + + // At this point, aDistance > 0 and aDistance == bDistance. + // Implement a fast pathway for the case that there's just a single element. + if(aDistance == 1) + { + if(!(*aRange.first == *bRange.first)) // We have to compare values in addition to making sure the distance (element count) was equal. This is because the lookup is done via the user-supplised Predicate + return false; // which isn't strictly required to be identical to the Value operator==, though 99% of the time it will be so. Ditto for the is_permutation usage below. + } + else + { + // Check to see if these aRange and bRange are any permutation of each other. + // This check gets slower as there are more elements in the range. 
+ if(!eastl::is_permutation(aRange.first, aRange.second, bRange.first)) + return false; + } + } + + return true; + } + + template + inline bool operator!=(const hash_multiset& a, + const hash_multiset& b) + { + return !(a == b); + } + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + diff --git a/include/EASTL/heap.h b/include/EASTL/heap.h new file mode 100644 index 0000000..f0e770b --- /dev/null +++ b/include/EASTL/heap.h @@ -0,0 +1,685 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements heap functionality much like the std C++ heap algorithms. +// Such heaps are not the same thing as memory heaps or pools, but rather are +// semi-sorted random access containers which have the primary purpose of +// supporting the implementation of priority_queue and similar data structures. +// +// The primary distinctions between this heap functionality and std::heap are: +// - This heap exposes some extra functionality such as is_heap and change_heap. +// - This heap is more efficient than versions found in typical STL +// implementations such as STLPort, Microsoft, and Metrowerks. This comes +// about due to better use of array dereferencing and branch prediction. +// You should expect of 5-30%, depending on the usage and platform. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// The publicly usable functions we define are: +// push_heap -- Adds an entry to a heap. Same as C++ std::push_heap. +// pop_heap -- Removes the top entry from a heap. Same as C++ std::pop_heap. +// make_heap -- Converts an array to a heap. Same as C++ std::make_heap. 
+// sort_heap -- Sorts a heap in place. Same as C++ std::sort_heap. +// remove_heap -- Removes an arbitrary entry from a heap. +// change_heap -- Changes the priority of an entry in the heap. +// is_heap -- Returns true if an array appears is in heap format. Same as C++11 std::is_heap. +// is_heap_until -- Returns largest part of the range which is a heap. Same as C++11 std::is_heap_until. +/////////////////////////////////////////////////////////////////////////////// + + + +#ifndef EASTL_HEAP_H +#define EASTL_HEAP_H + + +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /////////////////////////////////////////////////////////////////////// + // promote_heap (internal function) + /////////////////////////////////////////////////////////////////////// + + template + inline void promote_heap_impl(RandomAccessIterator first, Distance topPosition, Distance position, T value) + { + for(Distance parentPosition = (position - 1) >> 1; // This formula assumes that (position > 0). // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>. + (position > topPosition) && (*(first + parentPosition) < value); + parentPosition = (position - 1) >> 1) + { + *(first + position) = eastl::forward(*(first + parentPosition)); // Swap the node with its parent. + position = parentPosition; + } + + *(first + position) = eastl::forward(value); + } + + /// promote_heap + /// + /// Moves a value in the heap from a given position upward until + /// it is sorted correctly. It's kind of like bubble-sort, except that + /// instead of moving linearly from the back of a list to the front, + /// it moves from the bottom of the tree up the branches towards the + /// top. But otherwise is just like bubble-sort. 
+ /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, const T& value) + { + typedef typename iterator_traits::value_type value_type; + promote_heap_impl(first, topPosition, position, value); + } + + + /// promote_heap + /// + /// Moves a value in the heap from a given position upward until + /// it is sorted correctly. It's kind of like bubble-sort, except that + /// instead of moving linearly from the back of a list to the front, + /// it moves from the bottom of the tree up the branches towards the + /// top. But otherwise is just like bubble-sort. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, T&& value) + { + typedef typename iterator_traits::value_type value_type; + promote_heap_impl(first, topPosition, position, eastl::forward(value)); + } + + + template + inline void promote_heap_impl(RandomAccessIterator first, Distance topPosition, Distance position, T value, Compare compare) + { + for(Distance parentPosition = (position - 1) >> 1; // This formula assumes that (position > 0). // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>. + (position > topPosition) && compare(*(first + parentPosition), value); + parentPosition = (position - 1) >> 1) + { + *(first + position) = eastl::forward(*(first + parentPosition)); // Swap the node with its parent. + position = parentPosition; + } + + *(first + position) = eastl::forward(value); + } + + + /// promote_heap + /// + /// Takes a Compare(a, b) function (or function object) which returns true if a < b. + /// For example, you could use the standard 'less' comparison object. 
+ /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, const T& value, Compare compare) + { + typedef typename iterator_traits::value_type value_type; + promote_heap_impl(first, topPosition, position, value, compare); + } + + + /// promote_heap + /// + /// Takes a Compare(a, b) function (or function object) which returns true if a < b. + /// For example, you could use the standard 'less' comparison object. + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, T&& value, Compare compare) + { + typedef typename iterator_traits::value_type value_type; + promote_heap_impl(first, topPosition, position, eastl::forward(value), compare); + } + + + + /////////////////////////////////////////////////////////////////////// + // adjust_heap (internal function) + /////////////////////////////////////////////////////////////////////// + + template + void adjust_heap_impl(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T value) + { + // We do the conventional approach of moving the position down to the + // bottom then inserting the value at the back and moving it up. + Distance childPosition = (2 * position) + 2; + + for(; childPosition < heapSize; childPosition = (2 * childPosition) + 2) + { + if(*(first + childPosition) < *(first + (childPosition - 1))) // Choose the larger of the two children. 
+ --childPosition; + *(first + position) = eastl::forward(*(first + childPosition)); // Swap positions with this child. + position = childPosition; + } + + if(childPosition == heapSize) // If we are at the very last index of the bottom... + { + *(first + position) = eastl::forward(*(first + (childPosition - 1))); + position = childPosition - 1; + } + + eastl::promote_heap(first, topPosition, position, eastl::forward(value)); + } + + /// adjust_heap + /// + /// Given a position that has just been vacated, this function moves + /// new values into that vacated position appropriately. The value + /// argument is an entry which will be inserted into the heap after + /// we move nodes into the positions that were vacated. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, const T& value) + { + typedef typename iterator_traits::value_type value_type; + adjust_heap_impl(first, topPosition, heapSize, position, eastl::forward(value)); + } + + + /// adjust_heap + /// + /// Given a position that has just been vacated, this function moves + /// new values into that vacated position appropriately. The value + /// argument is an entry which will be inserted into the heap after + /// we move nodes into the positions that were vacated. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. 
+ /// + template + void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T&& value) + { + typedef typename iterator_traits::value_type value_type; + adjust_heap_impl(first, topPosition, heapSize, position, eastl::forward(value)); + } + + + template + void adjust_heap_impl(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T value, Compare compare) + { + // We do the conventional approach of moving the position down to the + // bottom then inserting the value at the back and moving it up. + Distance childPosition = (2 * position) + 2; + + for(; childPosition < heapSize; childPosition = (2 * childPosition) + 2) + { + if(compare(*(first + childPosition), *(first + (childPosition - 1)))) // Choose the larger of the two children. + --childPosition; + *(first + position) = eastl::forward(*(first + childPosition)); // Swap positions with this child. + position = childPosition; + } + + if(childPosition == heapSize) // If we are at the bottom... + { + *(first + position) = eastl::forward(*(first + (childPosition - 1))); + position = childPosition - 1; + } + + eastl::promote_heap(first, topPosition, position, eastl::forward(value), compare); + } + + /// adjust_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, const T& value, Compare compare) + { + typedef typename iterator_traits::value_type value_type; + adjust_heap_impl(first, topPosition, heapSize, position, eastl::forward(value), compare); + } + + + /// adjust_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. 
+ /// + /// This function requires that the value argument refer to a value + /// that is currently not within the heap. + /// + template + void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, T&& value, Compare compare) + { + typedef typename iterator_traits::value_type value_type; + adjust_heap_impl(first, topPosition, heapSize, position, eastl::forward(value), compare); + } + + + /////////////////////////////////////////////////////////////////////// + // push_heap + /////////////////////////////////////////////////////////////////////// + + /// push_heap + /// + /// Adds an item to a heap (which is an array). The item necessarily + /// comes from the back of the heap (array). Thus, the insertion of a + /// new item in a heap is a two step process: push_back and push_heap. + /// + /// Example usage: + /// vector heap; + /// + /// heap.push_back(3); + /// push_heap(heap.begin(), heap.end()); // Places '3' appropriately. + /// + template + inline void push_heap(RandomAccessIterator first, RandomAccessIterator last) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + const value_type tempBottom(eastl::forward(*(last - 1))); + + eastl::promote_heap + (first, (difference_type)0, (difference_type)(last - first - 1), eastl::forward(tempBottom)); + } + + + /// push_heap + /// + /// This version is useful for cases where your object comparison is unusual + /// or where you want to have the heap store pointers to objects instead of + /// storing the objects themselves (often in order to improve cache coherency + /// while doing sorting). + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. 
+ /// + template + inline void push_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + const value_type tempBottom(*(last - 1)); + + eastl::promote_heap + (first, (difference_type)0, (difference_type)(last - first - 1), tempBottom, compare); + } + + + + + /////////////////////////////////////////////////////////////////////// + // pop_heap + /////////////////////////////////////////////////////////////////////// + + /// pop_heap + /// + /// Removes the first item from the heap (which is an array), and adjusts + /// the heap so that the highest priority item becomes the new first item. + /// + /// Example usage: + /// vector heap; + /// + /// heap.push_back(2); + /// heap.push_back(3); + /// heap.push_back(1); + /// + /// pop_heap(heap.begin(), heap.end()); // Moves heap[0] to the back of the heap and adjusts the heap. + /// heap.pop_back(); // Remove value that was just at the top of the heap + /// + template + inline void pop_heap(RandomAccessIterator first, RandomAccessIterator last) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + value_type tempBottom(eastl::forward(*(last - 1))); + *(last - 1) = eastl::forward(*first); + eastl::adjust_heap + (first, (difference_type)0, (difference_type)(last - first - 1), 0, eastl::forward(tempBottom)); + } + + + + /// pop_heap + /// + /// This version is useful for cases where your object comparison is unusual + /// or where you want to have the heap store pointers to objects instead of + /// storing the objects themselves (often in order to improve cache coherency + /// while doing sorting). + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. 
+ /// + template + inline void pop_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + value_type tempBottom(eastl::forward(*(last - 1))); + *(last - 1) = eastl::forward(*first); + eastl::adjust_heap + (first, (difference_type)0, (difference_type)(last - first - 1), 0, eastl::forward(tempBottom), compare); + } + + + /////////////////////////////////////////////////////////////////////// + // make_heap + /////////////////////////////////////////////////////////////////////// + + + /// make_heap + /// + /// Given an array, this function converts it into heap format. + /// The complexity is O(n), where n is count of the range. + /// The input range is not required to be in any order. + /// + template + void make_heap(RandomAccessIterator first, RandomAccessIterator last) + { + // We do bottom-up heap construction as per Sedgewick. Such construction is O(n). + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + const difference_type heapSize = last - first; + + if(heapSize >= 2) // If there is anything to do... (we need this check because otherwise the math fails below). + { + difference_type parentPosition = ((heapSize - 2) >> 1) + 1; // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>. 
+ + do{ + --parentPosition; + value_type temp(eastl::forward(*(first + parentPosition))); + eastl::adjust_heap + (first, parentPosition, heapSize, parentPosition, eastl::forward(temp)); + } while(parentPosition != 0); + } + } + + + template + void make_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + const difference_type heapSize = last - first; + + if(heapSize >= 2) // If there is anything to do... (we need this check because otherwise the math fails below). + { + difference_type parentPosition = ((heapSize - 2) >> 1) + 1; // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>. + + do{ + --parentPosition; + value_type temp(eastl::forward(*(first + parentPosition))); + eastl::adjust_heap + (first, parentPosition, heapSize, parentPosition, eastl::forward(temp), compare); + } while(parentPosition != 0); + } + } + + + /////////////////////////////////////////////////////////////////////// + // sort_heap + /////////////////////////////////////////////////////////////////////// + + /// sort_heap + /// + /// After the application if this algorithm, the range it was applied to + /// is no longer a heap, though it will be a reverse heap (smallest first). + /// The item with the lowest priority will be first, and the highest last. + /// This is not a stable sort because the relative order of equivalent + /// elements is not necessarily preserved. + /// The range referenced must be valid; all pointers must be dereferenceable + /// and within the sequence the last position is reachable from the first + /// by incrementation. + /// The complexity is at most O(n * log(n)), where n is count of the range. 
+ /// + template + inline void sort_heap(RandomAccessIterator first, RandomAccessIterator last) + { + for(; (last - first) > 1; --last) // We simply use the heap to sort itself. + eastl::pop_heap(first, last); + } + + + /// sort_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + template + inline void sort_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare) + { + for(; (last - first) > 1; --last) // We simply use the heap to sort itself. + eastl::pop_heap(first, last, compare); + } + + + + /////////////////////////////////////////////////////////////////////// + // remove_heap + /////////////////////////////////////////////////////////////////////// + + /// remove_heap + /// + /// Removes an arbitrary entry from the heap and adjusts the heap appropriately. + /// This function is unlike pop_heap in that pop_heap moves the top item + /// to the back of the heap, whereas remove_heap moves an arbitrary item to + /// the back of the heap. + /// + /// Note: Since this function moves the element to the back of the heap and + /// doesn't actually remove it from the given container, the user must call + /// the container erase function if the user wants to erase the element + /// from the container. + /// + template + inline void remove_heap(RandomAccessIterator first, Distance heapSize, Distance position) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + const value_type tempBottom(*(first + heapSize - 1)); + *(first + heapSize - 1) = *(first + position); + eastl::adjust_heap + (first, (difference_type)0, (difference_type)(heapSize - 1), (difference_type)position, tempBottom); + } + + + /// remove_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. 
+ /// + /// Note: Since this function moves the element to the back of the heap and + /// doesn't actually remove it from the given container, the user must call + /// the container erase function if the user wants to erase the element + /// from the container. + /// + template + inline void remove_heap(RandomAccessIterator first, Distance heapSize, Distance position, Compare compare) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + const value_type tempBottom(*(first + heapSize - 1)); + *(first + heapSize - 1) = *(first + position); + eastl::adjust_heap + (first, (difference_type)0, (difference_type)(heapSize - 1), (difference_type)position, tempBottom, compare); + } + + + + /////////////////////////////////////////////////////////////////////// + // change_heap + /////////////////////////////////////////////////////////////////////// + + /// change_heap + /// + /// Given a value in the heap that has changed in priority, this function + /// adjusts the heap appropriately. The heap size remains unchanged after + /// this operation. + /// + template + inline void change_heap(RandomAccessIterator first, Distance heapSize, Distance position) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + eastl::remove_heap(first, heapSize, position); + + value_type tempBottom(*(first + heapSize - 1)); + + eastl::promote_heap + (first, (difference_type)0, (difference_type)(heapSize - 1), tempBottom); + } + + + /// change_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. 
+ /// + template + inline void change_heap(RandomAccessIterator first, Distance heapSize, Distance position, Compare compare) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::value_type value_type; + + eastl::remove_heap(first, heapSize, position, compare); + + value_type tempBottom(*(first + heapSize - 1)); + + eastl::promote_heap + (first, (difference_type)0, (difference_type)(heapSize - 1), tempBottom, compare); + } + + + + /////////////////////////////////////////////////////////////////////// + // is_heap_until + /////////////////////////////////////////////////////////////////////// + + /// is_heap_until + /// + template + inline RandomAccessIterator is_heap_until(RandomAccessIterator first, RandomAccessIterator last) + { + int counter = 0; + + for(RandomAccessIterator child = first + 1; child < last; ++child, counter ^= 1) + { + if(*first < *child) // We must use operator <, and are not allowed to use > or >= here. + return child; + first += counter; // counter switches between 0 and 1 every time through. + } + + return last; + } + + + /// is_heap_until + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + template + inline RandomAccessIterator is_heap_until(RandomAccessIterator first, RandomAccessIterator last, Compare compare) + { + int counter = 0; + + for(RandomAccessIterator child = first + 1; child < last; ++child, counter ^= 1) + { + if(compare(*first, *child)) + return child; + first += counter; // counter switches between 0 and 1 every time through. + } + + return last; + } + + + + /////////////////////////////////////////////////////////////////////// + // is_heap + /////////////////////////////////////////////////////////////////////// + + /// is_heap + /// + /// This is a useful debugging algorithm for verifying that a random + /// access container is in heap format. 
+ /// + template + inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last) + { + return (eastl::is_heap_until(first, last) == last); + } + + + /// is_heap + /// + /// The Compare function must work equivalently to the compare function used + /// to make and maintain the heap. + /// + template + inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare) + { + return (eastl::is_heap_until(first, last, compare) == last); + } + + + // To consider: The following may be a faster implementation for most cases. + // + // template + // inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last) + // { + // if(((uintptr_t)(last - first) & 1) == 0) // If the range has an even number of elements... + // --last; + // + // RandomAccessIterator parent = first, child = (first + 1); + // + // for(; child < last; child += 2, ++parent) + // { + // if((*parent < *child) || (*parent < *(child + 1))) + // return false; + // } + // + // if((((uintptr_t)(last - first) & 1) == 0) && (*parent < *child)) + // return false; + // + // return true; + // } + + +} // namespace eastl + + +#endif // Header include guard + + + + diff --git a/include/EASTL/initializer_list.h b/include/EASTL/initializer_list.h new file mode 100644 index 0000000..028fb4f --- /dev/null +++ b/include/EASTL/initializer_list.h @@ -0,0 +1,96 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +// +// This file #includes if it's available, else it defines +// its own version of std::initializer_list. It does not define eastl::initializer_list +// because that would not provide any use, due to how the C++11 Standard works. 
+/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INITIALIZER_LIST_H +#define EASTL_INITIALIZER_LIST_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + +#if defined(EA_HAVE_CPP11_INITIALIZER_LIST) // If the compiler can generate calls to std::initializer_list... + + // The initializer_list type must be declared in the std namespace, as that's the + // namespace the compiler uses when generating code to use it. + EA_DISABLE_ALL_VC_WARNINGS() + #include + EA_RESTORE_ALL_VC_WARNINGS() + +#else + + // If you get an error here about initializer_list being already defined, then the EA_HAVE_CPP11_INITIALIZER_LIST define from needs to be updated. + namespace std + { + // See the C++11 Standard, section 18.9. + template + class initializer_list + { + public: + typedef E value_type; + typedef const E& reference; + typedef const E& const_reference; + typedef size_t size_type; + typedef const E* iterator; // Must be const, as initializer_list (and its mpArray) is an immutable temp object. + typedef const E* const_iterator; + + private: + iterator mpArray; + size_type mArraySize; + + // This constructor is private, but the C++ compiler has the ability to call it, as per the C++11 Standard. + initializer_list(const_iterator pArray, size_type arraySize) + : mpArray(pArray), mArraySize(arraySize) { } + + public: + initializer_list() EA_NOEXCEPT // EA_NOEXCEPT requires a recent version of EABase. + : mpArray(NULL), mArraySize(0) { } + + size_type size() const EA_NOEXCEPT { return mArraySize; } + const_iterator begin() const EA_NOEXCEPT { return mpArray; } // Must be const_iterator, as initializer_list (and its mpArray) is an immutable temp object. 
+ const_iterator end() const EA_NOEXCEPT { return mpArray + mArraySize; } + }; + + + template + const T* begin(std::initializer_list ilist) EA_NOEXCEPT + { + return ilist.begin(); + } + + template + const T* end(std::initializer_list ilist) EA_NOEXCEPT + { + return ilist.end(); + } + } + +#endif + + +#endif // Header include guard + + + + + + + + + + + + + + + diff --git a/include/EASTL/internal/allocator_traits.h b/include/EASTL/internal/allocator_traits.h new file mode 100644 index 0000000..87985f0 --- /dev/null +++ b/include/EASTL/internal/allocator_traits.h @@ -0,0 +1,347 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// +// The code in this file is a modification of the libcxx implementation. We copy +// the license information here as required. +//////////////////////////////////////////////////////////////////////////////// +//===------------------------ functional ----------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include +#include + +namespace eastl +{ + namespace Internal + { + // has_value_type + template + struct has_value_type + { + private: + template static eastl::no_type test(...); + template static eastl::yes_type test(typename U::value_type* = 0); + public: + static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); + }; + + template ::value> + struct value_type + { + typedef typename Alloc::value_type type; + }; + + template + struct value_type + { + typedef char type; + }; + + + // has_pointer_type + namespace has_pointer_type_imp + { + template static eastl::no_type test(...); + template static eastl::yes_type test(typename U::pointer* = 0); + } + + template + struct has_pointer_type + : public integral_constant(0)) == sizeof(eastl::yes_type)> + { + }; + + namespace PointerTypeInternal + { + template ::value> + struct pointer_type + { + typedef typename D::pointer type; + }; + + template + struct pointer_type + { + typedef T* type; + }; + } + + template + struct pointer_type + { + typedef typename PointerTypeInternal::pointer_type::type>::type type; + }; + + + // has_const_pointer + template + struct has_const_pointer + { + private: + template static eastl::no_type test(...); + template static eastl::yes_type test(typename U::const_pointer* = 0); + public: + static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); + }; + + template ::value> + struct const_pointer + { + typedef typename Alloc::const_pointer type; + }; + + template + struct const_pointer + { + #ifndef EA_COMPILER_NO_TEMPLATE_ALIASES + typedef typename pointer_traits::template rebind type; + #else + typedef typename pointer_traits::template rebind::other type; + #endif + }; + + + // has_void_pointer + template + struct has_void_pointer + { + private: + template static eastl::no_type test(...); + template static eastl::yes_type test(typename U::void_pointer* = 0); + public: + static 
const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); + }; + + template ::value> + struct void_pointer + { + typedef typename Alloc::void_pointer type; + }; + + template + struct void_pointer + { + #ifndef EA_COMPILER_NO_TEMPLATE_ALIASES + typedef typename pointer_traits::template rebind type; + #else + typedef typename pointer_traits::template rebind::other type; + #endif + }; + + + // has_const_void_pointer + template + struct has_const_void_pointer + { + private: + template static eastl::no_type test(...); + template static eastl::yes_type test(typename U::const_void_pointer* = 0); + public: + static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); + }; + + template ::value> + struct const_void_pointer + { + typedef typename Alloc::const_void_pointer type; + }; + + template + struct const_void_pointer + { + #ifndef EA_COMPILER_NO_TEMPLATE_ALIASES + typedef typename pointer_traits::template rebind type; + #else + typedef typename pointer_traits::template rebind::other type; + #endif + }; + + + // alloc_traits_difference_type + template ::value> + struct alloc_traits_difference_type + { + typedef typename pointer_traits::difference_type type; + }; + + template + struct alloc_traits_difference_type + { + typedef typename Alloc::difference_type type; + }; + + + // has_size_type + template + struct has_size_type + { + private: + template static eastl::no_type test(...); + template static char test(typename U::size_type* = 0); + public: + static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); + }; + + template ::value> + struct size_type + { + typedef typename make_unsigned::type type; + }; + + template + struct size_type + { + typedef typename Alloc::size_type type; + }; + + + // has_construct + template + decltype(eastl::declval().construct(eastl::declval(), eastl::declval()...), eastl::true_type()) + has_construct_test(Alloc&& a, T* p, Args&&... 
args); + + template + eastl::false_type has_construct_test(const Alloc& a, Pointer&& p, Args&&... args); + + template + struct has_construct + : public eastl::integral_constant< bool, + eastl::is_same(), eastl::declval(), + eastl::declval()...)), + eastl::true_type>::value> + { + }; + + + // has_destroy + template + auto has_destroy_test(Alloc&& a, Pointer&& p) -> decltype(a.destroy(p), eastl::true_type()); + + template + auto has_destroy_test(const Alloc& a, Pointer&& p) -> eastl::false_type; + + template + struct has_destroy + : public eastl::integral_constant< bool, + is_same(), eastl::declval())), eastl::true_type>::value> + { + }; + + + // has_max_size + template + auto has_max_size_test(Alloc&& a) -> decltype(a.max_size(), eastl::true_type()); + + template + auto has_max_size_test(const volatile Alloc& a) -> eastl::false_type; + + template + struct has_max_size + : public eastl::integral_constant())), eastl::true_type>::value> + { + }; + + } // namespace Internal + + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // allocator_traits + // + // C++11 Standard section 20.7.8 + // This Internal namespace holds the utility functions required for allocator_traits to do compile-time type + // inspection inorder to determine if needs to provide a default implementation or utilize the users allocator + // implementation. + // + // Reference: http://en.cppreference.com/w/cpp/memory/allocator_traits + // + // eastl::allocator_traits supplies a uniform interface to all allocator types. + // + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // eastl::allocator_traits is not a standards conforming implementation. 
Enough of the standard was implemented to + // make the eastl::function implementation possible. We must revisit this implementation before rolling out its + // usage fully in eastl::containers. + // + // NOTE: We do not recommend users directly code against eastl::allocator_traits until we have completed a full standards comforming implementation. + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + template + struct allocator_traits + { + typedef Alloc allocator_type; + + typedef typename Internal::value_type::type value_type; + typedef typename Internal::pointer_type::type pointer; + typedef typename Internal::const_pointer::type const_pointer; + typedef typename Internal::void_pointer::type void_pointer; + typedef typename Internal::const_void_pointer::type const_void_pointer; + typedef typename Internal::alloc_traits_difference_type::type difference_type; + typedef typename Internal::size_type::type size_type; + + // + // TODO: for full standards compliance implement the following: + // + // typedef typename Internal::propagate_on_container_copy_assignment::type propagate_on_container_copy_assignment; + // typedef typename Internal::propagate_on_container_move_assignment::type propagate_on_container_move_assignment; + // typedef typename Internal::propagate_on_container_swap::type propagate_on_container_swap; + // template using rebind_alloc = Alloc::rebind::other | Alloc; + // template using rebind_traits = allocator_traits>; + // static allocator_type select_on_container_copy_construction(const allocator_type& a); + + static size_type internal_max_size(true_type, const allocator_type& a) { return a.max_size(); } + static size_type internal_max_size(false_type, const allocator_type&) { return (eastl::numeric_limits::max)(); } // additional parenthesis disables the windows max macro from expanding. 
+ static size_type max_size(const allocator_type& a) EA_NOEXCEPT + { + return internal_max_size(Internal::has_max_size(), a); + } + + static pointer allocate(allocator_type& a, size_type n) { return static_cast(a.allocate(n)); } + + static pointer allocate(allocator_type& a, size_type n, const_void_pointer) + { + // return allocate(a, n, hint, Internal::has_allocate_hint()); + return allocate(a, n); + } + + static void deallocate(allocator_type& a, pointer p, size_type n) EA_NOEXCEPT { a.deallocate(p, n); } + + template + static void internal_construct(eastl::true_type, allocator_type& a, T* p, Args&&... args) + { + a.construct(p, eastl::forward(args)...); + } + + template + static void internal_construct(false_type, allocator_type&, T* p, Args&&... args) + { + ::new ((void*)p) T(eastl::forward(args)...); + } + + template + static void construct(allocator_type& a, T* p, Args&&... args) + { + internal_construct(Internal::has_construct(), a, p, eastl::forward(args)...); + } + + template + static void internal_destroy(eastl::true_type, allocator_type& a, T* p) { a.destroy(p); } + + template + static void internal_destroy(eastl::false_type, allocator_type&, T* p) { EA_UNUSED(p); p->~T(); } + + template + static void destroy(allocator_type& a, T* p) + { + internal_destroy(Internal::has_destroy(), a, p); + } + }; +} // namespace eastl diff --git a/include/EASTL/internal/allocator_traits_fwd_decls.h b/include/EASTL/internal/allocator_traits_fwd_decls.h new file mode 100644 index 0000000..d6283cf --- /dev/null +++ b/include/EASTL/internal/allocator_traits_fwd_decls.h @@ -0,0 +1,40 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_ALLOCATOR_TRAITS_H +#define EASTL_INTERNAL_ALLOCATOR_TRAITS_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include + +namespace eastl +{ + template + struct allocator_traits; + +} // namespace eastl + +#endif // Header include guard + + + + + + + + + + + + + + + diff --git a/include/EASTL/internal/char_traits.h b/include/EASTL/internal/char_traits.h new file mode 100644 index 0000000..62fe79b --- /dev/null +++ b/include/EASTL/internal/char_traits.h @@ -0,0 +1,464 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements similar functionality to char_traits which is part of +// the C++ standard STL library specification. This is intended for internal +// EASTL use only. Functionality can be accessed through the eastl::string or +// eastl::string_view types. +// +// http://en.cppreference.com/w/cpp/string/char_traits +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_CHAR_TRAITS_H +#define EASTL_CHAR_TRAITS_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS() +#include // toupper, etc. +#include // memset, etc. +EA_RESTORE_ALL_VC_WARNINGS() + +namespace eastl +{ + /////////////////////////////////////////////////////////////////////////////// + /// DecodePart + /// + /// These implement UTF8/UCS2/UCS4 encoding/decoding. 
+ /// + EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char*& pDest, char* pDestEnd); + EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd); + EASTL_API bool DecodePart(const char*& pSrc, const char* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd); + + EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char*& pDest, char* pDestEnd); + EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd); + EASTL_API bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd); + + EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char*& pDest, char* pDestEnd); + EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd); + EASTL_API bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd); + + EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char*& pDest, char* pDestEnd); + EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd); + EASTL_API bool DecodePart(const int*& pSrc, const int* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd); + + #if EA_CHAR8_UNIQUE + bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd); + + bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char*& pDest, char* pDestEnd); + bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd); + bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd); + + bool DecodePart(const char*& pSrc, const char* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd); + bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd); + bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, 
char8_t*& pDest, char8_t* pDestEnd); + #endif + + #if EA_WCHAR_UNIQUE + bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd); + + bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char*& pDest, char* pDestEnd); + bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd); + bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd); + + bool DecodePart(const char*& pSrc, const char* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd); + bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd); + bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd); + #endif + + #if EA_CHAR8_UNIQUE && EA_WCHAR_UNIQUE + bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd); + bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd); + #endif + + + #if EA_WCHAR_UNIQUE + inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd) + { + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + } + + inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char*& pDest, char* pDestEnd) + { + #if (EA_WCHAR_SIZE == 2) + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), pDest, pDestEnd); + #elif (EA_WCHAR_SIZE == 4) + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), pDest, pDestEnd); + #endif + } + + inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd) + { + #if (EA_WCHAR_SIZE == 2) + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), pDest, pDestEnd); + #elif (EA_WCHAR_SIZE == 4) + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), pDest, 
pDestEnd); + #endif + } + + inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd) + { + #if (EA_WCHAR_SIZE == 2) + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), pDest, pDestEnd); + #elif (EA_WCHAR_SIZE == 4) + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), pDest, pDestEnd); + #endif + } + + inline bool DecodePart(const char*& pSrc, const char* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd) + { + #if (EA_WCHAR_SIZE == 2) + return DecodePart(pSrc, pSrcEnd, reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + #elif (EA_WCHAR_SIZE == 4) + return DecodePart(pSrc, pSrcEnd, reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + #endif + } + + inline bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd) + { + #if (EA_WCHAR_SIZE == 2) + return DecodePart(pSrc, pSrcEnd, reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + #elif (EA_WCHAR_SIZE == 4) + return DecodePart(pSrc, pSrcEnd, reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + #endif + } + + inline bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd) + { + #if (EA_WCHAR_SIZE == 2) + return DecodePart(pSrc, pSrcEnd, reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + #elif (EA_WCHAR_SIZE == 4) + return DecodePart(pSrc, pSrcEnd, reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + #endif + } + #endif + + #if EA_CHAR8_UNIQUE + inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd) + { + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + } + + inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char*& pDest, char* pDestEnd) + { + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), pDest, pDestEnd); + } + + inline bool DecodePart(const char8_t*& pSrc, const 
char8_t* pSrcEnd, char16_t*& pDest, char16_t* pDestEnd) + { + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), pDest, pDestEnd); + } + + inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, char32_t*& pDest, char32_t* pDestEnd) + { + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), pDest, pDestEnd); + } + + inline bool DecodePart(const char*& pSrc, const char* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd) + { + return DecodePart(pSrc, pSrcEnd, reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + } + + inline bool DecodePart(const char16_t*& pSrc, const char16_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd) + { + return DecodePart(pSrc, pSrcEnd, reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + } + + inline bool DecodePart(const char32_t*& pSrc, const char32_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd) + { + return DecodePart(pSrc, pSrcEnd, reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + } + #endif + + #if EA_CHAR8_UNIQUE && EA_WCHAR_UNIQUE + inline bool DecodePart(const char8_t*& pSrc, const char8_t* pSrcEnd, wchar_t*& pDest, wchar_t* pDestEnd) + { + #if (EA_WCHAR_SIZE == 2) + return DecodePart(pSrc, pSrcEnd, reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + #elif (EA_WCHAR_SIZE == 4) + return DecodePart(pSrc, pSrcEnd, reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + #endif + } + + inline bool DecodePart(const wchar_t*& pSrc, const wchar_t* pSrcEnd, char8_t*& pDest, char8_t* pDestEnd) + { + #if (EA_WCHAR_SIZE == 2) + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + #elif (EA_WCHAR_SIZE == 4) + return DecodePart(reinterpret_cast(pSrc), reinterpret_cast(pSrcEnd), reinterpret_cast(pDest), reinterpret_cast(pDestEnd)); + #endif + } + #endif + + /////////////////////////////////////////////////////////////////////////////// + // 'char traits' functionality + // + inline char CharToLower(char c) + { return 
(char)tolower((uint8_t)c); } + + template + inline T CharToLower(T c) + { if((unsigned)c <= 0xff) return (T)tolower((uint8_t)c); return c; } + + + inline char CharToUpper(char c) + { return (char)toupper((uint8_t)c); } + + template + inline T CharToUpper(T c) + { if((unsigned)c <= 0xff) return (T)toupper((uint8_t)c); return c; } + + + template + int Compare(const T* p1, const T* p2, size_t n) + { + for(; n > 0; ++p1, ++p2, --n) + { + if(*p1 != *p2) + return (static_cast::type>(*p1) < + static_cast::type>(*p2)) ? -1 : 1; + } + return 0; + } + + inline int Compare(const char* p1, const char* p2, size_t n) + { + return memcmp(p1, p2, n); + } + + + template + inline int CompareI(const T* p1, const T* p2, size_t n) + { + for(; n > 0; ++p1, ++p2, --n) + { + const T c1 = CharToLower(*p1); + const T c2 = CharToLower(*p2); + + if(c1 != c2) + return (static_cast::type>(c1) < + static_cast::type>(c2)) ? -1 : 1; + } + return 0; + } + + + template + inline const T* Find(const T* p, T c, size_t n) + { + for(; n > 0; --n, ++p) + { + if(*p == c) + return p; + } + + return NULL; + } + + inline const char* Find(const char* p, char c, size_t n) + { + return (const char*)memchr(p, c, n); + } + + + template + inline EA_CPP14_CONSTEXPR size_t CharStrlen(const T* p) + { + const auto* pCurrent = p; + while(*pCurrent) + ++pCurrent; + return (size_t)(pCurrent - p); + } + + + template + inline T* CharStringUninitializedCopy(const T* pSource, const T* pSourceEnd, T* pDestination) + { + memmove(pDestination, pSource, (size_t)(pSourceEnd - pSource) * sizeof(T)); + return pDestination + (pSourceEnd - pSource); + } + + + template + const T* CharTypeStringFindEnd(const T* pBegin, const T* pEnd, T c) + { + const T* pTemp = pEnd; + while(--pTemp >= pBegin) + { + if(*pTemp == c) + return pTemp; + } + + return pEnd; + } + + + template + const T* CharTypeStringRSearch(const T* p1Begin, const T* p1End, + const T* p2Begin, const T* p2End) + { + // Test for zero length strings, in which case we have a 
match or a failure, + // but the return value is the same either way. + if((p1Begin == p1End) || (p2Begin == p2End)) + return p1Begin; + + // Test for a pattern of length 1. + if((p2Begin + 1) == p2End) + return CharTypeStringFindEnd(p1Begin, p1End, *p2Begin); + + // Test for search string length being longer than string length. + if((p2End - p2Begin) > (p1End - p1Begin)) + return p1End; + + // General case. + const T* pSearchEnd = (p1End - (p2End - p2Begin) + 1); + const T* pCurrent1; + const T* pCurrent2; + + while(pSearchEnd != p1Begin) + { + // Search for the last occurrence of *p2Begin. + pCurrent1 = CharTypeStringFindEnd(p1Begin, pSearchEnd, *p2Begin); + if(pCurrent1 == pSearchEnd) // If the first char of p2 wasn't found, + return p1End; // then we immediately have failure. + + // In this case, *pTemp == *p2Begin. So compare the rest. + pCurrent2 = p2Begin; + while(*pCurrent1++ == *pCurrent2++) + { + if(pCurrent2 == p2End) + return (pCurrent1 - (p2End - p2Begin)); + } + + // A smarter algorithm might know to subtract more than just one, + // but in most cases it won't make much difference anyway. 
+ --pSearchEnd; + } + + return p1End; + } + + + template + inline const T* CharTypeStringFindFirstOf(const T* p1Begin, const T* p1End, const T* p2Begin, const T* p2End) + { + for (; p1Begin != p1End; ++p1Begin) + { + for (const T* pTemp = p2Begin; pTemp != p2End; ++pTemp) + { + if (*p1Begin == *pTemp) + return p1Begin; + } + } + return p1End; + } + + + template + inline const T* CharTypeStringRFindFirstNotOf(const T* p1RBegin, const T* p1REnd, const T* p2Begin, const T* p2End) + { + for (; p1RBegin != p1REnd; --p1RBegin) + { + const T* pTemp; + for (pTemp = p2Begin; pTemp != p2End; ++pTemp) + { + if (*(p1RBegin - 1) == *pTemp) + break; + } + if (pTemp == p2End) + return p1RBegin; + } + return p1REnd; + } + + + template + inline const T* CharTypeStringFindFirstNotOf(const T* p1Begin, const T* p1End, const T* p2Begin, const T* p2End) + { + for (; p1Begin != p1End; ++p1Begin) + { + const T* pTemp; + for (pTemp = p2Begin; pTemp != p2End; ++pTemp) + { + if (*p1Begin == *pTemp) + break; + } + if (pTemp == p2End) + return p1Begin; + } + return p1End; + } + + + template + inline const T* CharTypeStringRFindFirstOf(const T* p1RBegin, const T* p1REnd, const T* p2Begin, const T* p2End) + { + for (; p1RBegin != p1REnd; --p1RBegin) + { + for (const T* pTemp = p2Begin; pTemp != p2End; ++pTemp) + { + if (*(p1RBegin - 1) == *pTemp) + return p1RBegin; + } + } + return p1REnd; + } + + + template + inline const T* CharTypeStringRFind(const T* pRBegin, const T* pREnd, const T c) + { + while (pRBegin > pREnd) + { + if (*(pRBegin - 1) == c) + return pRBegin; + --pRBegin; + } + return pREnd; + } + + + inline char* CharStringUninitializedFillN(char* pDestination, size_t n, const char c) + { + if(n) // Some compilers (e.g. GCC 4.3+) generate a warning (which can't be disabled) if you call memset with a size of 0. 
+ memset(pDestination, (uint8_t)c, (size_t)n); + return pDestination + n; + } + + template + inline T* CharStringUninitializedFillN(T* pDestination, size_t n, const T c) + { + T * pDest = pDestination; + const T* const pEnd = pDestination + n; + while(pDest < pEnd) + *pDest++ = c; + return pDestination + n; + } + + + inline char* CharTypeAssignN(char* pDestination, size_t n, char c) + { + if(n) // Some compilers (e.g. GCC 4.3+) generate a warning (which can't be disabled) if you call memset with a size of 0. + return (char*)memset(pDestination, c, (size_t)n); + return pDestination; + } + + template + inline T* CharTypeAssignN(T* pDestination, size_t n, T c) + { + T* pDest = pDestination; + const T* const pEnd = pDestination + n; + while(pDest < pEnd) + *pDest++ = c; + return pDestination; + } +} // namespace eastl + +#endif // EASTL_CHAR_TRAITS_H diff --git a/include/EASTL/internal/config.h b/include/EASTL/internal/config.h new file mode 100644 index 0000000..a824609 --- /dev/null +++ b/include/EASTL/internal/config.h @@ -0,0 +1,1866 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_CONFIG_H +#define EASTL_INTERNAL_CONFIG_H + + +/////////////////////////////////////////////////////////////////////////////// +// ReadMe +// +// This is the EASTL configuration file. All configurable parameters of EASTL +// are controlled through this file. However, all the settings here can be +// manually overridden by the user. There are three ways for a user to override +// the settings in this file: +// +// - Simply edit this file. +// - Define EASTL_USER_CONFIG_HEADER. +// - Predefine individual defines (e.g. EASTL_ASSERT). 
+// +/////////////////////////////////////////////////////////////////////////////// + + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_USER_CONFIG_HEADER +// +// This allows the user to define a header file to be #included before the +// EASTL config.h contents are compiled. A primary use of this is to override +// the contents of this config.h file. Note that all the settings below in +// this file are user-overridable. +// +// Example usage: +// #define EASTL_USER_CONFIG_HEADER "MyConfigOverrides.h" +// #include +// +/////////////////////////////////////////////////////////////////////////////// + +#ifdef EASTL_USER_CONFIG_HEADER + #include EASTL_USER_CONFIG_HEADER +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_EABASE_DISABLED +// +// The user can disable EABase usage and manually supply the configuration +// via defining EASTL_EABASE_DISABLED and defining the appropriate entities +// globally or via the above EASTL_USER_CONFIG_HEADER. +// +// Example usage: +// #define EASTL_EABASE_DISABLED +// #include +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_EABASE_DISABLED + #include +#endif +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_VERSION +// +// We more or less follow the conventional EA packaging approach to versioning +// here. A primary distinction here is that minor versions are defined as two +// digit entities (e.g. .03") instead of minimal digit entities ".3"). The logic +// here is that the value is a counter and not a floating point fraction. +// Note that the major version doesn't have leading zeros. +// +// Example version strings: +// "0.91.00" // Major version 0, minor version 91, patch version 0. +// "1.00.00" // Major version 1, minor and patch version 0. 
+// "3.10.02" // Major version 3, minor version 10, patch version 02. +// "12.03.01" // Major version 12, minor version 03, patch version +// +// Example usage: +// printf("EASTL version: %s", EASTL_VERSION); +// printf("EASTL version: %d.%d.%d", EASTL_VERSION_N / 10000 % 100, EASTL_VERSION_N / 100 % 100, EASTL_VERSION_N % 100); +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_VERSION + #define EASTL_VERSION "3.16.07" + #define EASTL_VERSION_N 31607 +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EA_COMPILER_NO_STANDARD_CPP_LIBRARY +// +// Defined as 1 or undefined. +// Implements support for the definition of EA_COMPILER_NO_STANDARD_CPP_LIBRARY for the case +// of using EABase versions prior to the addition of its EA_COMPILER_NO_STANDARD_CPP_LIBRARY support. +// +#if !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY) + #if defined(EA_PLATFORM_ANDROID) + // Disabled because EA's eaconfig/android_config/android_sdk packages currently + // don't support linking STL libraries. Perhaps we can figure out what linker arguments + // are needed for an app so we can manually specify them and then re-enable this code. + // + //#include + // + //#if (__ANDROID_API__ < 9) // Earlier versions of Android provide no std C++ STL implementation. + #define EA_COMPILER_NO_STANDARD_CPP_LIBRARY 1 + //#endif + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EA_NOEXCEPT +// +// Defined as a macro. Provided here for backward compatibility with older +// EABase versions prior to 2.00.40 that don't yet define it themselves. +// +#if !defined(EA_NOEXCEPT) + #define EA_NOEXCEPT + #define EA_NOEXCEPT_IF(predicate) + #define EA_NOEXCEPT_EXPR(expression) false +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EA_CPP14_CONSTEXPR +// +// Defined as constexpr when a C++14 compiler is present. 
Defines it as nothing +// when using a C++11 compiler. +// C++14 relaxes the specification for constexpr such that it allows more +// kinds of expressions. Since a C++11 compiler doesn't allow this, we need +// to make a unique define for C++14 constexpr. This macro should be used only +// when you are using it with code that specfically requires C++14 constexpr +// functionality beyond the regular C++11 constexpr functionality. +// http://en.wikipedia.org/wiki/C%2B%2B14#Relaxed_constexpr_restrictions +// +#if !defined(EA_CPP14_CONSTEXPR) + + #if defined(EA_COMPILER_MSVC_2015) + #define EA_CPP14_CONSTEXPR // not supported + #define EA_NO_CPP14_CONSTEXPR + #elif defined(__GNUC__) && (EA_COMPILER_VERSION < 9000) // Before GCC 9.0 + #define EA_CPP14_CONSTEXPR // not supported + #define EA_NO_CPP14_CONSTEXPR + #elif defined(EA_COMPILER_CPP14_ENABLED) + #define EA_CPP14_CONSTEXPR constexpr + #else + #define EA_CPP14_CONSTEXPR // not supported + #define EA_NO_CPP14_CONSTEXPR + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL namespace +// +// We define this so that users that #include this config file can reference +// these namespaces without seeing any other files that happen to use them. +/////////////////////////////////////////////////////////////////////////////// + +/// EA Standard Template Library +namespace eastl +{ + // Intentionally empty. +} + + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_DEBUG +// +// Defined as an integer >= 0. Default is 1 for debug builds and 0 for +// release builds. This define is also a master switch for the default value +// of some other settings. +// +// Example usage: +// #if EASTL_DEBUG +// ... 
+// #endif +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_DEBUG + #if defined(EA_DEBUG) || defined(_DEBUG) + #define EASTL_DEBUG 1 + #else + #define EASTL_DEBUG 0 + #endif +#endif + +// Developer debug. Helps EASTL developers assert EASTL is coded correctly. +// Normally disabled for users since it validates internal things and not user things. +#ifndef EASTL_DEV_DEBUG + #define EASTL_DEV_DEBUG 0 +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_DEBUGPARAMS_LEVEL +// +// EASTL_DEBUGPARAMS_LEVEL controls what debug information is passed through to +// the allocator by default. +// This value may be defined by the user ... if not it will default to 1 for +// EA_DEBUG builds, otherwise 0. +// +// 0 - no debug information is passed through to allocator calls. +// 1 - 'name' is passed through to allocator calls. +// 2 - 'name', __FILE__, and __LINE__ are passed through to allocator calls. +// +// This parameter mirrors the equivalent parameter in the CoreAllocator package. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_DEBUGPARAMS_LEVEL + #if EASTL_DEBUG + #define EASTL_DEBUGPARAMS_LEVEL 2 + #else + #define EASTL_DEBUGPARAMS_LEVEL 0 + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_DLL +// +// Defined as 0 or 1. The default is dependent on the definition of EA_DLL. +// If EA_DLL is defined, then EASTL_DLL is 1, else EASTL_DLL is 0. +// EA_DLL is a define that controls DLL builds within the EAConfig build system. +// EASTL_DLL controls whether EASTL is built and used as a DLL. +// Normally you wouldn't do such a thing, but there are use cases for such +// a thing, particularly in the case of embedding C++ into C# applications. 
+// +#ifndef EASTL_DLL + #if defined(EA_DLL) + #define EASTL_DLL 1 + #else + #define EASTL_DLL 0 + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_IF_NOT_DLL +// +// Utility to include expressions only for static builds. +// +#ifndef EASTL_IF_NOT_DLL + #if EASTL_DLL + #define EASTL_IF_NOT_DLL(x) + #else + #define EASTL_IF_NOT_DLL(x) x + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_API +// +// This is used to label functions as DLL exports under Microsoft platforms. +// If EA_DLL is defined, then the user is building EASTL as a DLL and EASTL's +// non-templated functions will be exported. EASTL template functions are not +// labelled as EASTL_API (and are thus not exported in a DLL build). This is +// because it's not possible (or at least unsafe) to implement inline templated +// functions in a DLL. +// +// Example usage of EASTL_API: +// EASTL_API int someVariable = 10; // Export someVariable in a DLL build. +// +// struct EASTL_API SomeClass{ // Export SomeClass and its member functions in a DLL build. +// EASTL_LOCAL void PrivateMethod(); // Not exported. +// }; +// +// EASTL_API void SomeFunction(); // Export SomeFunction in a DLL build. +// +// +#if defined(EA_DLL) && !defined(EASTL_DLL) + #define EASTL_DLL 1 +#endif + +#ifndef EASTL_API // If the build file hasn't already defined this to be dllexport... 
+ #if EASTL_DLL + #if defined(_MSC_VER) + #define EASTL_API __declspec(dllimport) + #define EASTL_LOCAL + #elif defined(__CYGWIN__) + #define EASTL_API __attribute__((dllimport)) + #define EASTL_LOCAL + #elif (defined(__GNUC__) && (__GNUC__ >= 4)) + #define EASTL_API __attribute__ ((visibility("default"))) + #define EASTL_LOCAL __attribute__ ((visibility("hidden"))) + #else + #define EASTL_API + #define EASTL_LOCAL + #endif + #else + #define EASTL_API + #define EASTL_LOCAL + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_EASTDC_API +// +// This is used for importing EAStdC functions into EASTL, possibly via a DLL import. +// +#ifndef EASTL_EASTDC_API + #if EASTL_DLL + #if defined(_MSC_VER) + #define EASTL_EASTDC_API __declspec(dllimport) + #define EASTL_EASTDC_LOCAL + #elif defined(__CYGWIN__) + #define EASTL_EASTDC_API __attribute__((dllimport)) + #define EASTL_EASTDC_LOCAL + #elif (defined(__GNUC__) && (__GNUC__ >= 4)) + #define EASTL_EASTDC_API __attribute__ ((visibility("default"))) + #define EASTL_EASTDC_LOCAL __attribute__ ((visibility("hidden"))) + #else + #define EASTL_EASTDC_API + #define EASTL_EASTDC_LOCAL + #endif + #else + #define EASTL_EASTDC_API + #define EASTL_EASTDC_LOCAL + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_EASTDC_VSNPRINTF +// +// Defined as 0 or 1. By default it is 1. +// +// When enabled EASTL uses EAStdC's Vsnprintf function directly instead of +// having the user provide a global Vsnprintf8/16/32 function. The benefit +// of this is that it will allow EASTL to just link to EAStdC's Vsnprintf +// without the user doing anything. The downside is that any users who aren't +// already using EAStdC will either need to now depend on EAStdC or globally +// define this property to be 0 and simply provide functions that have the same +// names. See the usage of EASTL_EASTDC_VSNPRINTF in string.h for more info. 
+// +#if !defined(EASTL_EASTDC_VSNPRINTF) + #define EASTL_EASTDC_VSNPRINTF 1 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_NAME_ENABLED / EASTL_NAME / EASTL_NAME_VAL +// +// Used to wrap debug string names. In a release build, the definition +// goes away. These are present to avoid release build compiler warnings +// and to make code simpler. +// +// Example usage of EASTL_NAME: +// // pName will defined away in a release build and thus prevent compiler warnings. +// void allocator::set_name(const char* EASTL_NAME(pName)) +// { +// #if EASTL_NAME_ENABLED +// mpName = pName; +// #endif +// } +// +// Example usage of EASTL_NAME_VAL: +// // "xxx" is defined to NULL in a release build. +// vector::vector(const allocator_type& allocator = allocator_type(EASTL_NAME_VAL("xxx"))); +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_NAME_ENABLED + #define EASTL_NAME_ENABLED EASTL_DEBUG +#endif + +#ifndef EASTL_NAME + #if EASTL_NAME_ENABLED + #define EASTL_NAME(x) x + #define EASTL_NAME_VAL(x) x + #else + #define EASTL_NAME(x) + #define EASTL_NAME_VAL(x) ((const char*)NULL) + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_DEFAULT_NAME_PREFIX +// +// Defined as a string literal. Defaults to "EASTL". +// This define is used as the default name for EASTL where such a thing is +// referenced in EASTL. For example, if the user doesn't specify an allocator +// name for their deque, it is named "EASTL deque". However, you can override +// this to say "SuperBaseball deque" by changing EASTL_DEFAULT_NAME_PREFIX. 
+// +// Example usage (which is simply taken from how deque.h uses this define): +// #ifndef EASTL_DEQUE_DEFAULT_NAME +// #define EASTL_DEQUE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " deque" +// #endif +// +#ifndef EASTL_DEFAULT_NAME_PREFIX + #define EASTL_DEFAULT_NAME_PREFIX "EASTL" +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_ASSERT_ENABLED +// +// Defined as 0 or non-zero. Default is same as EASTL_DEBUG. +// If EASTL_ASSERT_ENABLED is non-zero, then asserts will be executed via +// the assertion mechanism. +// +// Example usage: +// #if EASTL_ASSERT_ENABLED +// EASTL_ASSERT(v.size() > 17); +// #endif +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_ASSERT_ENABLED + #define EASTL_ASSERT_ENABLED EASTL_DEBUG +#endif + +// Developer assert. Helps EASTL developers assert EASTL is coded correctly. +// Normally disabled for users since it validates internal things and not user things. +#ifndef EASTL_DEV_ASSERT_ENABLED + #define EASTL_DEV_ASSERT_ENABLED EASTL_DEV_DEBUG +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_EMPTY_REFERENCE_ASSERT_ENABLED +// +// Defined as 0 or non-zero. Default is same as EASTL_ASSERT_ENABLED. +// This is like EASTL_ASSERT_ENABLED, except it is for empty container +// references. Sometime people like to be able to take a reference to +// the front of the container, but not use it if the container is empty. +// In practice it's often easier and more efficient to do this than to write +// extra code to check if the container is empty. 
+// +// NOTE: If this is enabled, EASTL_ASSERT_ENABLED must also be enabled +// +// Example usage: +// template +// inline typename vector::reference +// vector::front() +// { +// #if EASTL_ASSERT_ENABLED +// EASTL_ASSERT(mpEnd > mpBegin); +// #endif +// +// return *mpBegin; +// } +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + #define EASTL_EMPTY_REFERENCE_ASSERT_ENABLED EASTL_ASSERT_ENABLED +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// SetAssertionFailureFunction +// +// Allows the user to set a custom assertion failure mechanism. +// +// Example usage: +// void Assert(const char* pExpression, void* pContext); +// SetAssertionFailureFunction(Assert, this); +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_ASSERTION_FAILURE_DEFINED + #define EASTL_ASSERTION_FAILURE_DEFINED + + namespace eastl + { + typedef void (*EASTL_AssertionFailureFunction)(const char* pExpression, void* pContext); + EASTL_API void SetAssertionFailureFunction(EASTL_AssertionFailureFunction pFunction, void* pContext); + + // These are the internal default functions that implement asserts. + EASTL_API void AssertionFailure(const char* pExpression); + EASTL_API void AssertionFailureFunctionDefault(const char* pExpression, void* pContext); + } +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_ASSERT +// +// Assertion macro. Can be overridden by user with a different value. 
+// +// Example usage: +// EASTL_ASSERT(intVector.size() < 100); +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_ASSERT + #if EASTL_ASSERT_ENABLED + #define EASTL_ASSERT(expression) \ + EA_DISABLE_VC_WARNING(4127) \ + do { \ + EA_ANALYSIS_ASSUME(expression); \ + (void)((expression) || (eastl::AssertionFailure(#expression), 0)); \ + } while (0) \ + EA_RESTORE_VC_WARNING() + #else + #define EASTL_ASSERT(expression) + #endif +#endif + +// Developer assert. Helps EASTL developers assert EASTL is coded correctly. +// Normally disabled for users since it validates internal things and not user things. +#ifndef EASTL_DEV_ASSERT + #if EASTL_DEV_ASSERT_ENABLED + #define EASTL_DEV_ASSERT(expression) \ + EA_DISABLE_VC_WARNING(4127) \ + do { \ + EA_ANALYSIS_ASSUME(expression); \ + (void)((expression) || (eastl::AssertionFailure(#expression), 0)); \ + } while(0) \ + EA_RESTORE_VC_WARNING() + #else + #define EASTL_DEV_ASSERT(expression) + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_ASSERT_MSG +// +// Example usage: +// EASTL_ASSERT_MSG(false, "detected error condition!"); +// +/////////////////////////////////////////////////////////////////////////////// +#ifndef EASTL_ASSERT_MSG + #if EASTL_ASSERT_ENABLED + #define EASTL_ASSERT_MSG(expression, message) \ + EA_DISABLE_VC_WARNING(4127) \ + do { \ + EA_ANALYSIS_ASSUME(expression); \ + (void)((expression) || (eastl::AssertionFailure(message), 0)); \ + } while (0) \ + EA_RESTORE_VC_WARNING() + #else + #define EASTL_ASSERT_MSG(expression, message) + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_FAIL_MSG +// +// Failure macro. Can be overridden by user with a different value. 
+// +// Example usage: +// EASTL_FAIL("detected error condition!"); +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FAIL_MSG + #if EASTL_ASSERT_ENABLED + #define EASTL_FAIL_MSG(message) (eastl::AssertionFailure(message)) + #else + #define EASTL_FAIL_MSG(message) + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_CT_ASSERT / EASTL_CT_ASSERT_NAMED +// +// EASTL_CT_ASSERT is a macro for compile time assertion checks, useful for +// validating *constant* expressions. The advantage over using EASTL_ASSERT +// is that errors are caught at compile time instead of runtime. +// +// Example usage: +// EASTL_CT_ASSERT(sizeof(uint32_t) == 4); +// +/////////////////////////////////////////////////////////////////////////////// + +#define EASTL_CT_ASSERT(expression) static_assert(expression, #expression) + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_CT_ASSERT_MSG +// +// EASTL_CT_ASSERT_MSG is a macro for compile time assertion checks, useful for +// validating *constant* expressions. The advantage over using EASTL_ASSERT +// is that errors are caught at compile time instead of runtime. +// The message must be a string literal. +// +// Example usage: +// EASTL_CT_ASSERT_MSG(sizeof(uint32_t) == 4, "The size of uint32_t must be 4."); +// +/////////////////////////////////////////////////////////////////////////////// + +#define EASTL_CT_ASSERT_MSG(expression, message) static_assert(expression, message) + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_DEBUG_BREAK / EASTL_DEBUG_BREAK_OVERRIDE +// +// This function causes an app to immediately stop under the debugger. +// It is implemented as a macro in order to allow stopping at the site +// of the call. +// +// EASTL_DEBUG_BREAK_OVERRIDE allows one to define EASTL_DEBUG_BREAK directly. 
+// This is useful in cases where you desire to disable EASTL_DEBUG_BREAK +// but do not wish to (or cannot) define a custom void function() to replace +// EASTL_DEBUG_BREAK callsites. +// +// Example usage: +// EASTL_DEBUG_BREAK(); +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_DEBUG_BREAK_OVERRIDE + #ifndef EASTL_DEBUG_BREAK + #if defined(_MSC_VER) && (_MSC_VER >= 1300) + #define EASTL_DEBUG_BREAK() __debugbreak() // This is a compiler intrinsic which will map to appropriate inlined asm for the platform. + #elif (defined(EA_PROCESSOR_ARM) && !defined(EA_PROCESSOR_ARM64)) && defined(__APPLE__) + #define EASTL_DEBUG_BREAK() asm("trap") + #elif defined(EA_PROCESSOR_ARM64) && defined(__APPLE__) + #include + #include + #define EASTL_DEBUG_BREAK() kill( getpid(), SIGINT ) + #elif defined(EA_PROCESSOR_ARM64) && defined(__GNUC__) + #define EASTL_DEBUG_BREAK() asm("brk 10") + #elif defined(EA_PROCESSOR_ARM) && defined(__GNUC__) + #define EASTL_DEBUG_BREAK() asm("BKPT 10") // The 10 is arbitrary. It's just a unique id. + #elif defined(EA_PROCESSOR_ARM) && defined(__ARMCC_VERSION) + #define EASTL_DEBUG_BREAK() __breakpoint(10) + #elif defined(EA_PROCESSOR_POWERPC) // Generic PowerPC. + #define EASTL_DEBUG_BREAK() asm(".long 0") // This triggers an exception by executing opcode 0x00000000. + #elif (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) && defined(EA_ASM_STYLE_INTEL) + #define EASTL_DEBUG_BREAK() { __asm int 3 } + #elif (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) && (defined(EA_ASM_STYLE_ATT) || defined(__GNUC__)) + #define EASTL_DEBUG_BREAK() asm("int3") + #else + void EASTL_DEBUG_BREAK(); // User must define this externally. + #endif + #else + void EASTL_DEBUG_BREAK(); // User must define this externally. + #endif +#else + #ifndef EASTL_DEBUG_BREAK + #if EASTL_DEBUG_BREAK_OVERRIDE == 1 + // define an empty callable to satisfy the call site. 
+ #define EASTL_DEBUG_BREAK ([]{}) + #else + #define EASTL_DEBUG_BREAK EASTL_DEBUG_BREAK_OVERRIDE + #endif + #else + #error EASTL_DEBUG_BREAK is already defined yet you would like to override it. Please ensure no other headers are already defining EASTL_DEBUG_BREAK before this header (config.h) is included + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_ALLOCATOR_COPY_ENABLED +// +// Defined as 0 or 1. Default is 0 (disabled) until some future date. +// If enabled (1) then container operator= copies the allocator from the +// source container. It ideally should be set to enabled but for backwards +// compatibility with older versions of EASTL it is currently set to 0. +// Regardless of whether this value is 0 or 1, this container copy constructs +// or copy assigns allocators. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_ALLOCATOR_COPY_ENABLED + #define EASTL_ALLOCATOR_COPY_ENABLED 0 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_FIXED_SIZE_TRACKING_ENABLED +// +// Defined as an integer >= 0. Default is same as EASTL_DEBUG. +// If EASTL_FIXED_SIZE_TRACKING_ENABLED is enabled, then fixed +// containers in debug builds track the max count of objects +// that have been in the container. This allows for the tuning +// of fixed container sizes to their minimum required size. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FIXED_SIZE_TRACKING_ENABLED + #define EASTL_FIXED_SIZE_TRACKING_ENABLED EASTL_DEBUG +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_RTTI_ENABLED +// +// Defined as 0 or 1. Default is 1 if RTTI is supported by the compiler. +// This define exists so that we can use some dynamic_cast operations in the +// code without warning. 
dynamic_cast is only used if the user specifically refers
These are +// options whereby you can improve performance by avoiding operations that +// in practice may never occur for you. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_STRING_OPT_EXPLICIT_CTORS + // Defined as 0 or 1. Default is 0. + // Defines if we should implement explicity in constructors where the C++ + // standard string does not. The advantage of enabling explicit constructors + // is that you can do this: string s = "hello"; in addition to string s("hello"); + // The disadvantage of enabling explicity constructors is that there can be + // silent conversions done which impede performance if the user isn't paying + // attention. + // C++ standard string ctors are not explicit. + #define EASTL_STRING_OPT_EXPLICIT_CTORS 0 +#endif + +#ifndef EASTL_STRING_OPT_LENGTH_ERRORS + // Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED. + // Defines if we check for string values going beyond kMaxSize + // (a very large value) and throw exections if so. + // C++ standard strings are expected to do such checks. + #define EASTL_STRING_OPT_LENGTH_ERRORS EASTL_EXCEPTIONS_ENABLED +#endif + +#ifndef EASTL_STRING_OPT_RANGE_ERRORS + // Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED. + // Defines if we check for out-of-bounds references to string + // positions and throw exceptions if so. Well-behaved code shouldn't + // refence out-of-bounds positions and so shouldn't need these checks. + // C++ standard strings are expected to do such range checks. + #define EASTL_STRING_OPT_RANGE_ERRORS EASTL_EXCEPTIONS_ENABLED +#endif + +#ifndef EASTL_STRING_OPT_ARGUMENT_ERRORS + // Defined as 0 or 1. Default is 0. + // Defines if we check for NULL ptr arguments passed to string + // functions by the user and throw exceptions if so. Well-behaved code + // shouldn't pass bad arguments and so shouldn't need these checks. 
+ // Also, some users believe that strings should check for NULL pointers + // in all their arguments and do no-ops if so. This is very debatable. + // C++ standard strings are not required to check for such argument errors. + #define EASTL_STRING_OPT_ARGUMENT_ERRORS 0 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_BITSET_SIZE_T +// +// Defined as 0 or 1. Default is 1. +// Controls whether bitset uses size_t or eastl_size_t. +// +#ifndef EASTL_BITSET_SIZE_T + #define EASTL_BITSET_SIZE_T 1 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_INT128_SUPPORTED +// +// Defined as 0 or 1. +// +#ifndef EASTL_INT128_SUPPORTED + #if defined(EA_COMPILER_INTMAX_SIZE) && (EA_COMPILER_INTMAX_SIZE >= 16) // If the compiler supports int128_t (recent versions of GCC do)... + #define EASTL_INT128_SUPPORTED 1 + #else + #define EASTL_INT128_SUPPORTED 0 + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED +// +// Defined as 0 or 1. +// Tells if you can use the default EASTL allocator to do aligned allocations, +// which for most uses tells if you can store aligned objects in containers +// that use default allocators. It turns out that when built as a DLL for +// some platforms, EASTL doesn't have a way to do aligned allocations, as it +// doesn't have a heap that supports it. There is a way to work around this +// with dynamically defined allocators, but that's currently a to-do. +// +#ifndef EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED + #if EASTL_DLL + #define EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED 0 + #else + #define EASTL_DEFAULT_ALLOCATOR_ALIGNED_ALLOCATIONS_SUPPORTED 1 + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_INT128_DEFINED +// +// Defined as 0 or 1. 
+// Specifies whether eastl_int128_t/eastl_uint128_t have been typedef'd yet. +// +#ifndef EASTL_INT128_DEFINED + #if EASTL_INT128_SUPPORTED + #define EASTL_INT128_DEFINED 1 + + #if defined(__GNUC__) + typedef __int128_t eastl_int128_t; + typedef __uint128_t eastl_uint128_t; + #else + typedef int128_t eastl_int128_t; // The EAStdC package defines an EA::StdC::int128_t and uint128_t type, + typedef uint128_t eastl_uint128_t; // though they are currently within the EA::StdC namespace. + #endif + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_BITSET_WORD_TYPE_DEFAULT / EASTL_BITSET_WORD_SIZE_DEFAULT +// +// Defined as an integral power of two type, usually uint32_t or uint64_t. +// Specifies the word type that bitset should use internally to implement +// storage. By default this is the platform register word size, but there +// may be reasons to use a different value. +// +// Defines the integral data type used by bitset by default. +// You can override this default on a bitset-by-bitset case by supplying a +// custom bitset WordType template parameter. +// +// The C++ standard specifies that the std::bitset word type be unsigned long, +// but that isn't necessarily the most efficient data type for the given platform. +// We can follow the standard and be potentially less efficient or we can do what +// is more efficient but less like the C++ std::bitset. +// +#if !defined(EASTL_BITSET_WORD_TYPE_DEFAULT) + #if defined(EASTL_BITSET_WORD_SIZE) // EASTL_BITSET_WORD_SIZE is deprecated, but we temporarily support the ability for the user to specify it. Use EASTL_BITSET_WORD_TYPE_DEFAULT instead. 
+ #if (EASTL_BITSET_WORD_SIZE == 4) + #define EASTL_BITSET_WORD_TYPE_DEFAULT uint32_t + #define EASTL_BITSET_WORD_SIZE_DEFAULT 4 + #else + #define EASTL_BITSET_WORD_TYPE_DEFAULT uint64_t + #define EASTL_BITSET_WORD_SIZE_DEFAULT 8 + #endif + #elif (EA_PLATFORM_WORD_SIZE == 16) // EA_PLATFORM_WORD_SIZE is defined in EABase. + #define EASTL_BITSET_WORD_TYPE_DEFAULT uint128_t + #define EASTL_BITSET_WORD_SIZE_DEFAULT 16 + #elif (EA_PLATFORM_WORD_SIZE == 8) + #define EASTL_BITSET_WORD_TYPE_DEFAULT uint64_t + #define EASTL_BITSET_WORD_SIZE_DEFAULT 8 + #elif (EA_PLATFORM_WORD_SIZE == 4) + #define EASTL_BITSET_WORD_TYPE_DEFAULT uint32_t + #define EASTL_BITSET_WORD_SIZE_DEFAULT 4 + #else + #define EASTL_BITSET_WORD_TYPE_DEFAULT uint16_t + #define EASTL_BITSET_WORD_SIZE_DEFAULT 2 + #endif +#endif + + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_LIST_SIZE_CACHE +// +// Defined as 0 or 1. Default is 1. Changed from 0 in version 1.16.01. +// If defined as 1, the list and slist containers (and possibly any additional +// containers as well) keep a member mSize (or similar) variable which allows +// the size() member function to execute in constant time (a.k.a. O(1)). +// There are debates on both sides as to whether it is better to have this +// cached value or not, as having it entails some cost (memory and code). +// To consider: Make list size caching an optional template parameter. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_LIST_SIZE_CACHE + #define EASTL_LIST_SIZE_CACHE 1 +#endif + +#ifndef EASTL_SLIST_SIZE_CACHE + #define EASTL_SLIST_SIZE_CACHE 1 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_MAX_STACK_USAGE +// +// Defined as an integer greater than zero. Default is 4000. +// There are some places in EASTL where temporary objects are put on the +// stack. 
A common example of this is in the implementation of container +// swap functions whereby a temporary copy of the container is made. +// There is a problem, however, if the size of the item created on the stack +// is very large. This can happen with fixed-size containers, for example. +// The EASTL_MAX_STACK_USAGE define specifies the maximum amount of memory +// (in bytes) that the given platform/compiler will safely allow on the stack. +// Platforms such as Windows will generally allow larger values than embedded +// systems or console machines, but it is usually a good idea to stick with +// a max usage value that is portable across all platforms, lest the user be +// surprised when something breaks as it is ported to another platform. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_MAX_STACK_USAGE + #define EASTL_MAX_STACK_USAGE 4000 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_VA_COPY_ENABLED +// +// Defined as 0 or 1. Default is 1 for compilers that need it, 0 for others. +// Some compilers on some platforms implement va_list whereby its contents +// are destroyed upon usage, even if passed by value to another function. +// With these compilers you can use va_copy to save and restore a va_list. +// Known compiler/platforms that destroy va_list contents upon usage include: +// CodeWarrior on PowerPC +// GCC on x86-64 +// However, va_copy is part of the C99 standard and not part of earlier C and +// C++ standards. So not all compilers support it. VC++ doesn't support va_copy, +// but it turns out that VC++ doesn't usually need it on the platforms it supports, +// and va_copy can usually be implemented via memcpy(va_list, va_list) with VC++. 
+// +// Example usage: +// void Function(va_list arguments) +// { +// #if EASTL_VA_COPY_ENABLED +// va_list argumentsCopy; +// va_copy(argumentsCopy, arguments); +// #endif +// +// #if EASTL_VA_COPY_ENABLED +// va_end(argumentsCopy); +// #endif +// } +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_VA_COPY_ENABLED + #if ((defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__)) && (!defined(__i386__) || defined(__x86_64__)) && !defined(__ppc__) && !defined(__PPC__) && !defined(__PPC64__) + #define EASTL_VA_COPY_ENABLED 1 + #else + #define EASTL_VA_COPY_ENABLED 0 + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_OPERATOR_EQUALS_OTHER_ENABLED +// +// Defined as 0 or 1. Default is 0 until such day that it's deemeed safe. +// When enabled, enables operator= for other char types, e.g. for code +// like this: +// eastl::string8 s8; +// eastl::string16 s16; +// s8 = s16; +// This option is considered experimental, and may exist as such for an +// indefinite amount of time. +// +#if !defined(EASTL_OPERATOR_EQUALS_OTHER_ENABLED) + #define EASTL_OPERATOR_EQUALS_OTHER_ENABLED 0 +#endif +/////////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_LIST_PROXY_ENABLED +// +#if !defined(EASTL_LIST_PROXY_ENABLED) + // GCC with -fstrict-aliasing has bugs (or undocumented functionality in their + // __may_alias__ implementation. The compiler gets confused about function signatures. + // VC8 (1400) doesn't need the proxy because it has built-in smart debugging capabilities. 
+ #if defined(EASTL_DEBUG) && !defined(__GNUC__) && (!defined(_MSC_VER) || (_MSC_VER < 1400)) + #define EASTL_LIST_PROXY_ENABLED 1 + #define EASTL_LIST_PROXY_MAY_ALIAS EASTL_MAY_ALIAS + #else + #define EASTL_LIST_PROXY_ENABLED 0 + #define EASTL_LIST_PROXY_MAY_ALIAS + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_STD_ITERATOR_CATEGORY_ENABLED +// +// Defined as 0 or 1. Default is 0. +// If defined as non-zero, EASTL iterator categories (iterator.h's input_iterator_tag, +// forward_iterator_tag, etc.) are defined to be those from std C++ in the std +// namespace. The reason for wanting to enable such a feature is that it allows +// EASTL containers and algorithms to work with std STL containes and algorithms. +// The default value was changed from 1 to 0 in EASL 1.13.03, January 11, 2012. +// The reason for the change was that almost nobody was taking advantage of it and +// it was slowing down compile times for some compilers quite a bit due to them +// having a lot of headers behind . +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_STD_ITERATOR_CATEGORY_ENABLED + #define EASTL_STD_ITERATOR_CATEGORY_ENABLED 0 +#endif + +#if EASTL_STD_ITERATOR_CATEGORY_ENABLED + #define EASTL_ITC_NS std +#else + #define EASTL_ITC_NS eastl +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_VALIDATION_ENABLED +// +// Defined as an integer >= 0. Default is to be equal to EASTL_DEBUG. +// If nonzero, then a certain amount of automatic runtime validation is done. +// Runtime validation is not considered the same thing as asserting that user +// input values are valid. Validation refers to internal consistency checking +// of the validity of containers and their iterators. 
Validation checking is +// something that often involves significantly more than basic assertion +// checking, and it may sometimes be desirable to disable it. +// This macro would generally be used internally by EASTL. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_VALIDATION_ENABLED + #define EASTL_VALIDATION_ENABLED EASTL_DEBUG +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_VALIDATE_COMPARE +// +// Defined as EASTL_ASSERT or defined away. Default is EASTL_ASSERT if EASTL_VALIDATION_ENABLED is enabled. +// This is used to validate user-supplied comparison functions, particularly for sorting purposes. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_VALIDATE_COMPARE_ENABLED + #define EASTL_VALIDATE_COMPARE_ENABLED EASTL_VALIDATION_ENABLED +#endif + +#if EASTL_VALIDATE_COMPARE_ENABLED + #define EASTL_VALIDATE_COMPARE EASTL_ASSERT +#else + #define EASTL_VALIDATE_COMPARE(expression) +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_VALIDATE_INTRUSIVE_LIST +// +// Defined as an integral value >= 0. Controls the amount of automatic validation +// done by intrusive_list. A value of 0 means no automatic validation is done. +// As of this writing, EASTL_VALIDATE_INTRUSIVE_LIST defaults to 0, as it makes +// the intrusive_list_node become a non-POD, which may be an issue for some code. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_VALIDATE_INTRUSIVE_LIST + #define EASTL_VALIDATE_INTRUSIVE_LIST 0 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_FORCE_INLINE +// +// Defined as a "force inline" expression or defined away. 
+// You generally don't need to use forced inlining with the Microsoft and +// Metrowerks compilers, but you may need it with the GCC compiler (any version). +// +// Example usage: +// template +// EASTL_FORCE_INLINE typename vector::size_type +// vector::size() const +// { return mpEnd - mpBegin; } +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FORCE_INLINE + #define EASTL_FORCE_INLINE EA_FORCE_INLINE +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_MAY_ALIAS +// +// Defined as a macro that wraps the GCC may_alias attribute. This attribute +// has no significance for VC++ because VC++ doesn't support the concept of +// strict aliasing. Users should avoid writing code that breaks strict +// aliasing rules; EASTL_MAY_ALIAS is for cases with no alternative. +// +// Example usage: +// uint32_t value EASTL_MAY_ALIAS; +// +// Example usage: +// typedef uint32_t EASTL_MAY_ALIAS value_type; +// value_type value; +// +#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 303) && !defined(EA_COMPILER_RVCT) + #define EASTL_MAY_ALIAS __attribute__((__may_alias__)) +#else + #define EASTL_MAY_ALIAS +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_LIKELY / EASTL_UNLIKELY +// +// Defined as a macro which gives a hint to the compiler for branch +// prediction. GCC gives you the ability to manually give a hint to +// the compiler about the result of a comparison, though it's often +// best to compile shipping code with profiling feedback under both +// GCC (-fprofile-arcs) and VC++ (/LTCG:PGO, etc.). However, there +// are times when you feel very sure that a boolean expression will +// usually evaluate to either true or false and can help the compiler +// by using an explicity directive... +// +// Example usage: +// if(EASTL_LIKELY(a == 0)) // Tell the compiler that a will usually equal 0. +// { ... 
} +// +// Example usage: +// if(EASTL_UNLIKELY(a == 0)) // Tell the compiler that a will usually not equal 0. +// { ... } +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_LIKELY + #if defined(__GNUC__) && (__GNUC__ >= 3) + #define EASTL_LIKELY(x) __builtin_expect(!!(x), true) + #define EASTL_UNLIKELY(x) __builtin_expect(!!(x), false) + #else + #define EASTL_LIKELY(x) (x) + #define EASTL_UNLIKELY(x) (x) + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_STD_TYPE_TRAITS_AVAILABLE +// +// Defined as 0 or 1; default is based on auto-detection. +// Specifies whether Standard C++11 support exists. +// Sometimes the auto-detection below fails to work properly and the +// user needs to override it. Does not define whether the compiler provides +// built-in compiler type trait support (e.g. __is_abstract()), as some +// compilers will EASTL_STD_TYPE_TRAITS_AVAILABLE = 0, but have built +// in type trait support. +// +#ifndef EASTL_STD_TYPE_TRAITS_AVAILABLE + /* Disabled because we don't currently need it. + #if defined(_MSC_VER) && (_MSC_VER >= 1500) // VS2008 or later + #pragma warning(push, 0) + #include + #pragma warning(pop) + #if ((defined(_HAS_TR1) && _HAS_TR1) || _MSC_VER >= 1700) // VS2012 (1700) and later has built-in type traits support. + #define EASTL_STD_TYPE_TRAITS_AVAILABLE 1 + #include + #else + #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0 + #endif + + #elif defined(EA_COMPILER_CLANG) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003) && !defined(__GCCXML__)) && !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY) + #include // This will define __GLIBCXX__ if using GNU's libstdc++ and _LIBCPP_VERSION if using clang's libc++. + + #if defined(EA_COMPILER_CLANG) && !defined(EA_PLATFORM_APPLE) // As of v3.0.0, Apple's clang doesn't support type traits. 
+ // http://clang.llvm.org/docs/LanguageExtensions.html#checking_type_traits + // Clang has some built-in compiler trait support. This support doesn't currently + // directly cover all our type_traits, though the C++ Standard Library that's used + // with clang could fill that in. + #define EASTL_STD_TYPE_TRAITS_AVAILABLE 1 + #endif + + #if !defined(EASTL_STD_TYPE_TRAITS_AVAILABLE) + #if defined(_LIBCPP_VERSION) // This is defined by clang's libc++. + #include + + #elif defined(__GLIBCXX__) && (__GLIBCXX__ >= 20090124) // It's not clear if this is the oldest version that has type traits; probably it isn't. + #define EASTL_STD_TYPE_TRAITS_AVAILABLE 1 + + #if defined(__GXX_EXPERIMENTAL_CXX0X__) // To do: Update this test to include conforming C++11 implementations. + #include + #else + #include + #endif + #else + #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0 + #endif + #endif + + #elif defined(__MSL_CPP__) && (__MSL_CPP__ >= 0x8000) // CodeWarrior compiler. + #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0 + // To do: Implement support for this (via modifying the EASTL type + // traits headers, as CodeWarrior provides this. + #else + #define EASTL_STD_TYPE_TRAITS_AVAILABLE 0 + #endif + */ +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE +// +// Defined as 0 or 1; default is based on auto-detection. +// Specifies whether the compiler provides built-in compiler type trait support +// (e.g. __is_abstract()). Does not specify any details about which traits +// are available or what their standards-compliance is. Nevertheless this is a +// useful macro identifier for our type traits implementation. 
+// +#ifndef EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE + #if defined(_MSC_VER) && (_MSC_VER >= 1500) // VS2008 or later + #pragma warning(push, 0) + #include + #pragma warning(pop) + #if ((defined(_HAS_TR1) && _HAS_TR1) || _MSC_VER >= 1700) // VS2012 (1700) and later has built-in type traits support. + #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1 + #else + #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0 + #endif + #elif defined(EA_COMPILER_CLANG) && defined(__APPLE__) && defined(_CXXCONFIG) // Apple clang but with GCC's libstdc++. + #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0 + #elif defined(EA_COMPILER_CLANG) + #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1 + #elif defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003) && !defined(__GCCXML__) + #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1 + #elif defined(__MSL_CPP__) && (__MSL_CPP__ >= 0x8000) // CodeWarrior compiler. + #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 1 + #else + #define EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE 0 + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_RESET_ENABLED +// +// Defined as 0 or 1; default is 1 for the time being. +// The reset_lose_memory function works the same as reset, as described below. +// +// Specifies whether the container reset functionality is enabled. If enabled +// then ::reset forgets its memory, otherwise it acts as the clear +// function. The reset function is potentially dangerous, as it (by design) +// causes containers to not free their memory. +// This option has no applicability to the bitset::reset function, as bitset +// isn't really a container. Also it has no applicability to the smart pointer +// wrappers (e.g. intrusive_ptr). 
+// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_RESET_ENABLED + #define EASTL_RESET_ENABLED 0 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_MINMAX_ENABLED +// +// Defined as 0 or 1; default is 1. +// Specifies whether the min and max algorithms are available. +// It may be useful to disable the min and max algorithems because sometimes +// #defines for min and max exist which would collide with EASTL min and max. +// Note that there are already alternative versions of min and max in EASTL +// with the min_alt and max_alt functions. You can use these without colliding +// with min/max macros that may exist. +// +/////////////////////////////////////////////////////////////////////////////// +#ifndef EASTL_MINMAX_ENABLED + #define EASTL_MINMAX_ENABLED 1 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_NOMINMAX +// +// Defined as 0 or 1; default is 1. +// MSVC++ has #defines for min/max which collide with the min/max algorithm +// declarations. If EASTL_NOMINMAX is defined as 1, then we undefine min and +// max if they are #defined by an external library. This allows our min and +// max definitions in algorithm.h to work as expected. An alternative to +// the enabling of EASTL_NOMINMAX is to #define NOMINMAX in your project +// settings if you are compiling for Windows. +// Note that this does not control the availability of the EASTL min and max +// algorithms; the EASTL_MINMAX_ENABLED configuration parameter does that. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_NOMINMAX + #define EASTL_NOMINMAX 1 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_STD_CPP_ONLY +// +// Defined as 0 or 1; default is 0. +// Disables the use of compiler language extensions. 
We use compiler language +// extensions only in the case that they provide some benefit that can't be +// had any other practical way. But sometimes the compiler is set to disable +// language extensions or sometimes one compiler's preprocesor is used to generate +// code for another compiler, and so it's necessary to disable language extension usage. +// +// Example usage: +// #if defined(_MSC_VER) && !EASTL_STD_CPP_ONLY +// enum : size_type { npos = container_type::npos }; // Microsoft extension which results in significantly smaller debug symbols. +// #else +// static const size_type npos = container_type::npos; +// #endif +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_STD_CPP_ONLY + #define EASTL_STD_CPP_ONLY 0 +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_NO_RVALUE_REFERENCES +// +// Defined as 0 or 1. +// This is the same as EABase EA_COMPILER_NO_RVALUE_REFERENCES except that it +// follows the convention of being always defined, as 0 or 1. +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_NO_RVALUE_REFERENCES) + #if defined(EA_COMPILER_NO_RVALUE_REFERENCES) + #define EASTL_NO_RVALUE_REFERENCES 1 + #else + #define EASTL_NO_RVALUE_REFERENCES 0 + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_MOVE_SEMANTICS_ENABLED +// +// Defined as 0 or 1. +// If enabled then C++11-like functionality with rvalue references and move +// operations is enabled. +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_MOVE_SEMANTICS_ENABLED) + #if EASTL_NO_RVALUE_REFERENCES // If the compiler doesn't support rvalue references or EASTL is configured to disable them... 
+ #define EASTL_MOVE_SEMANTICS_ENABLED 0 + #else + #define EASTL_MOVE_SEMANTICS_ENABLED 1 + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_VARIADIC_TEMPLATES_ENABLED +// +// Defined as 0 or 1. +// If enabled then C++11-like functionality with variadic templates is enabled. +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_VARIADIC_TEMPLATES_ENABLED) + #if defined(EA_COMPILER_NO_VARIADIC_TEMPLATES) // If the compiler doesn't support variadic templates + #define EASTL_VARIADIC_TEMPLATES_ENABLED 0 + #else + #define EASTL_VARIADIC_TEMPLATES_ENABLED 1 + #endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_VARIABLE_TEMPLATES_ENABLED +// +// Defined as 0 or 1. +// If enabled then C++11-like functionality with variable templates is enabled. +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_VARIABLE_TEMPLATES_ENABLED) + #if((EABASE_VERSION_N < 20605) || defined(EA_COMPILER_NO_VARIABLE_TEMPLATES)) + #define EASTL_VARIABLE_TEMPLATES_ENABLED 0 + #else + #define EASTL_VARIABLE_TEMPLATES_ENABLED 1 + #endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_INLINE_VARIABLE_ENABLED +// +// Defined as 0 or 1. +// If enabled then C++17-like functionality with inline variable is enabled. 
+/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_INLINE_VARIABLE_ENABLED) + #if((EABASE_VERSION_N < 20707) || defined(EA_COMPILER_NO_INLINE_VARIABLES)) + #define EASTL_INLINE_VARIABLE_ENABLED 0 + #else + #define EASTL_INLINE_VARIABLE_ENABLED 1 + #endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_CPP17_INLINE_VARIABLE +// +// Used to prefix a variable as inline when C++17 inline variables are available +// Usage: EASTL_CPP17_INLINE_VARIABLE constexpr bool type_trait_v = type_trait::value +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_CPP17_INLINE_VARIABLE) + #if EASTL_INLINE_VARIABLE_ENABLED + #define EASTL_CPP17_INLINE_VARIABLE inline + #else + #define EASTL_CPP17_INLINE_VARIABLE + #endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_HAVE_CPP11_TYPE_TRAITS +// +// Defined as 0 or 1. +// This is the same as EABase EA_HAVE_CPP11_TYPE_TRAITS except that it +// follows the convention of being always defined, as 0 or 1. Note that this +// identifies if the Standard Library has C++11 type traits and not if EASTL +// has its equivalents to C++11 type traits. +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_HAVE_CPP11_TYPE_TRAITS) + // To do: Change this to use the EABase implementation once we have a few months of testing + // of this and we are sure it works right. Do this at some point after ~January 2014. + #if defined(EA_HAVE_DINKUMWARE_CPP_LIBRARY) && (_CPPLIB_VER >= 540) // Dinkumware. VS2012+ + #define EASTL_HAVE_CPP11_TYPE_TRAITS 1 + #elif defined(EA_COMPILER_CPP11_ENABLED) && defined(EA_HAVE_LIBSTDCPP_LIBRARY) && defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007) // Prior versions of libstdc++ have incomplete support for C++11 type traits. 
+ #define EASTL_HAVE_CPP11_TYPE_TRAITS 1 + #elif defined(EA_HAVE_LIBCPP_LIBRARY) && (_LIBCPP_VERSION >= 1) + #define EASTL_HAVE_CPP11_TYPE_TRAITS 1 + #else + #define EASTL_HAVE_CPP11_TYPE_TRAITS 0 + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS undef +// +// We need revise this macro to be undefined in some cases, in case the user +// isn't using an updated EABase. +/////////////////////////////////////////////////////////////////////////////// +#if defined(__EDG_VERSION__) && (__EDG_VERSION__ >= 403) // It may in fact be supported by 4.01 or 4.02 but we don't have compilers to test with. + #if defined(EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS) + #undef EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_NO_RANGE_BASED_FOR_LOOP +// +// Defined as 0 or 1. +// This is the same as EABase EA_COMPILER_NO_RANGE_BASED_FOR_LOOP except that it +// follows the convention of being always defined, as 0 or 1. +/////////////////////////////////////////////////////////////////////////////// +#if !defined(EASTL_NO_RANGE_BASED_FOR_LOOP) + #if defined(EA_COMPILER_NO_RANGE_BASED_FOR_LOOP) + #define EASTL_NO_RANGE_BASED_FOR_LOOP 1 + #else + #define EASTL_NO_RANGE_BASED_FOR_LOOP 0 + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_ALIGN_OF +// +// Determines the alignment of a type. +// +// Example usage: +// size_t alignment = EASTL_ALIGN_OF(int); +// +/////////////////////////////////////////////////////////////////////////////// +#ifndef EASTL_ALIGN_OF + #define EASTL_ALIGN_OF alignof +#endif + + + + +/////////////////////////////////////////////////////////////////////////////// +// eastl_size_t +// +// Defined as an unsigned integer type, usually either size_t or uint32_t. 
+// Defaults to size_t to match std STL unless the user specifies to use +// uint32_t explicitly via the EASTL_SIZE_T_32BIT define +// +// Example usage: +// eastl_size_t n = intVector.size(); +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_SIZE_T_32BIT // Defines whether EASTL_SIZE_T uses uint32_t/int32_t as opposed to size_t/ssize_t. + #define EASTL_SIZE_T_32BIT 0 // This makes a difference on 64 bit platforms because they use a 64 bit size_t. +#endif // By default we do the same thing as std STL and use size_t. + +#ifndef EASTL_SIZE_T + #if (EASTL_SIZE_T_32BIT == 0) || (EA_PLATFORM_WORD_SIZE == 4) + #include + #define EASTL_SIZE_T size_t + #define EASTL_SSIZE_T intptr_t + #else + #define EASTL_SIZE_T uint32_t + #define EASTL_SSIZE_T int32_t + #endif +#endif + +typedef EASTL_SIZE_T eastl_size_t; // Same concept as std::size_t. +typedef EASTL_SSIZE_T eastl_ssize_t; // Signed version of eastl_size_t. Concept is similar to Posix's ssize_t. + + + + +/////////////////////////////////////////////////////////////////////////////// +// AddRef / Release +// +// AddRef and Release are used for "intrusive" reference counting. By the term +// "intrusive", we mean that the reference count is maintained by the object +// and not by the user of the object. Given that an object implements referencing +// counting, the user of the object needs to be able to increment and decrement +// that reference count. We do that via the venerable AddRef and Release functions +// which the object must supply. These defines here allow us to specify the name +// of the functions. They could just as well be defined to addref and delref or +// IncRef and DecRef. 
+// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTLAddRef + #define EASTLAddRef AddRef +#endif + +#ifndef EASTLRelease + #define EASTLRelease Release +#endif + + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_ALLOCATOR_EXPLICIT_ENABLED +// +// Defined as 0 or 1. Default is 0 for now but ideally would be changed to +// 1 some day. It's 0 because setting it to 1 breaks some existing code. +// This option enables the allocator ctor to be explicit, which avoids +// some undesirable silent conversions, especially with the string class. +// +// Example usage: +// class allocator +// { +// public: +// EASTL_ALLOCATOR_EXPLICIT allocator(const char* pName); +// }; +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_ALLOCATOR_EXPLICIT_ENABLED + #define EASTL_ALLOCATOR_EXPLICIT_ENABLED 0 +#endif + +#if EASTL_ALLOCATOR_EXPLICIT_ENABLED + #define EASTL_ALLOCATOR_EXPLICIT explicit +#else + #define EASTL_ALLOCATOR_EXPLICIT +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_ALLOCATOR_MIN_ALIGNMENT +// +// Defined as an integral power-of-2 that's >= 1. +// Identifies the minimum alignment that EASTL should assume its allocators +// use. There is code within EASTL that decides whether to do a Malloc or +// MallocAligned call and it's typically better if it can use the Malloc call. +// But this requires knowing what the minimum possible alignment is. +#if !defined(EASTL_ALLOCATOR_MIN_ALIGNMENT) + #define EASTL_ALLOCATOR_MIN_ALIGNMENT EA_PLATFORM_MIN_MALLOC_ALIGNMENT +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT +// +// Identifies the minimum alignment that EASTL should assume system allocations +// from malloc and new will have. 
+#if !defined(EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT) + #if defined(EA_PLATFORM_MICROSOFT) || defined(EA_PLATFORM_APPLE) + #define EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT 16 + #else + #define EASTL_SYSTEM_ALLOCATOR_MIN_ALIGNMENT (EA_PLATFORM_PTR_SIZE * 2) + #endif +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL allocator +// +// The EASTL allocator system allows you to redefine how memory is allocated +// via some defines that are set up here. In the container code, memory is +// allocated via macros which expand to whatever the user has them set to +// expand to. Given that there are multiple allocator systems available, +// this system allows you to configure it to use whatever system you want, +// provided your system meets the requirements of this library. +// The requirements are: +// +// - Must be constructable via a const char* (name) parameter. +// Some uses of allocators won't require this, however. +// - Allocate a block of memory of size n and debug name string. +// - Allocate a block of memory of size n, debug name string, +// alignment a, and offset o. +// - Free memory allocated via either of the allocation functions above. +// - Provide a default allocator instance which can be used if the user +// doesn't provide a specific one. +// +/////////////////////////////////////////////////////////////////////////////// + +// namespace eastl +// { +// class allocator +// { +// allocator(const char* pName = NULL); +// +// void* allocate(size_t n, int flags = 0); +// void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0); +// void deallocate(void* p, size_t n); +// +// const char* get_name() const; +// void set_name(const char* pName); +// }; +// +// allocator* GetDefaultAllocator(); // This is used for anonymous allocations. +// } + +#ifndef EASTLAlloc // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does. 
+ #define EASTLAlloc(allocator, n) (allocator).allocate(n); +#endif + +#ifndef EASTLAllocFlags // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does. + #define EASTLAllocFlags(allocator, n, flags) (allocator).allocate(n, flags); +#endif + +#ifndef EASTLAllocAligned + #define EASTLAllocAligned(allocator, n, alignment, offset) (allocator).allocate((n), (alignment), (offset)) +#endif + +#ifndef EASTLAllocAlignedFlags + #define EASTLAllocAlignedFlags(allocator, n, alignment, offset, flags) (allocator).allocate((n), (alignment), (offset), (flags)) +#endif + +#ifndef EASTLFree + #define EASTLFree(allocator, p, size) (allocator).deallocate((void*)(p), (size)) // Important to cast to void* as p may be non-const. +#endif + +#ifndef EASTLAllocatorType + #define EASTLAllocatorType eastl::allocator +#endif + +#ifndef EASTLDummyAllocatorType + #define EASTLDummyAllocatorType eastl::dummy_allocator +#endif + +#ifndef EASTLAllocatorDefault + // EASTLAllocatorDefault returns the default allocator instance. This is not a global + // allocator which implements all container allocations but is the allocator that is + // used when EASTL needs to allocate memory internally. There are very few cases where + // EASTL allocates memory internally, and in each of these it is for a sensible reason + // that is documented to behave as such. + #define EASTLAllocatorDefault eastl::GetDefaultAllocator +#endif + + +/// EASTL_ALLOCATOR_DEFAULT_NAME +/// +/// Defines a default allocator name in the absence of a user-provided name. +/// +#ifndef EASTL_ALLOCATOR_DEFAULT_NAME + #define EASTL_ALLOCATOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX // Unless the user overrides something, this is "EASTL". +#endif + +/// EASTL_USE_FORWARD_WORKAROUND +/// +/// This is to workaround a compiler bug that we found in VS2013. Update 1 did not fix it. 
+/// This should be fixed in a future release of VS2013 http://accentuable4.rssing.com/browser.php?indx=3511740&item=15696 +/// +#ifndef EASTL_USE_FORWARD_WORKAROUND + #if defined(_MSC_FULL_VER) && _MSC_FULL_VER == 180021005 || (defined(__EDG_VERSION__) && (__EDG_VERSION__ < 405))// VS2013 initial release + #define EASTL_USE_FORWARD_WORKAROUND 1 + #else + #define EASTL_USE_FORWARD_WORKAROUND 0 + #endif +#endif + + +/// EASTL_TUPLE_ENABLED +/// EASTL tuple implementation depends on variadic template support +#if EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + #define EASTL_TUPLE_ENABLED 1 +#else + #define EASTL_TUPLE_ENABLED 0 +#endif + + +/// EASTL_FUNCTION_ENABLED +/// +#ifndef EASTL_FUNCTION_ENABLED + #define EASTL_FUNCTION_ENABLED 1 +#endif + + +/// EASTL_USER_LITERALS_ENABLED +#ifndef EASTL_USER_LITERALS_ENABLED + #if defined(EA_COMPILER_CPP14_ENABLED) + #define EASTL_USER_LITERALS_ENABLED 1 + + // Disabling the Clang/GCC/MSVC warning about using user defined literals without a leading '_' as they are + // reserved for standard libary usage. + EA_DISABLE_CLANG_WARNING(-Wuser-defined-literals) + EA_DISABLE_CLANG_WARNING(-Wreserved-user-defined-literal) + EA_DISABLE_GCC_WARNING(-Wliteral-suffix) + #ifdef _MSC_VER + #pragma warning(disable: 4455) // disable warning C4455: literal suffix identifiers that do not start with an underscore are reserved + #endif + #else + #define EASTL_USER_LITERALS_ENABLED 0 + #endif +#endif + + +/// EASTL_INLINE_NAMESPACES_ENABLED +#ifndef EASTL_INLINE_NAMESPACES_ENABLED + #if defined(EA_COMPILER_CPP14_ENABLED) + #define EASTL_INLINE_NAMESPACES_ENABLED 1 + #else + #define EASTL_INLINE_NAMESPACES_ENABLED 0 + #endif +#endif + + +/// EASTL_CORE_ALLOCATOR_ENABLED +#ifndef EASTL_CORE_ALLOCATOR_ENABLED + #define EASTL_CORE_ALLOCATOR_ENABLED 0 +#endif + +/// EASTL_OPENSOURCE +/// This is enabled when EASTL is building built in an "open source" mode. 
Which is a mode that eliminates code +/// dependencies on other technologies that have not been released publically. +/// EASTL_OPENSOURCE = 0, is the default. +/// EASTL_OPENSOURCE = 1, utilizes technologies that not publically available. +/// +#ifndef EASTL_OPENSOURCE + #define EASTL_OPENSOURCE 0 +#endif + + +/// EASTL_OPTIONAL_ENABLED +#if defined(EA_COMPILER_MSVC_2012) + #define EASTL_OPTIONAL_ENABLED 0 +#elif defined(EA_COMPILER_MSVC_2013) + #define EASTL_OPTIONAL_ENABLED 0 +#elif defined(EA_COMPILER_MSVC_2015) + #define EASTL_OPTIONAL_ENABLED 1 +#elif EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) && !defined(EA_COMPILER_NO_DEFAULTED_FUNCTIONS) && defined(EA_COMPILER_CPP11_ENABLED) + #define EASTL_OPTIONAL_ENABLED 1 +#else + #define EASTL_OPTIONAL_ENABLED 0 +#endif + + +/// EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE +#if defined(_MSC_VER) && (_MSC_VER >= 1913) // VS2017+ + #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 1 +#elif defined(EA_COMPILER_CLANG) + #if !__is_identifier(__has_unique_object_representations) + #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 1 + #else + #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 0 + #endif +#else + #define EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE 0 +#endif + + + +/// EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR +/// This feature define allows users to toggle the problematic eastl::pair implicit +/// single element constructor. +#ifndef EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR + #define EASTL_ENABLE_PAIR_FIRST_ELEMENT_CONSTRUCTOR 0 +#endif + +#endif // Header include guard diff --git a/include/EASTL/internal/copy_help.h b/include/EASTL/internal/copy_help.h new file mode 100644 index 0000000..e5fb2ab --- /dev/null +++ b/include/EASTL/internal/copy_help.h @@ -0,0 +1,215 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_COPY_HELP_H +#define EASTL_INTERNAL_COPY_HELP_H + + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include +#include // memcpy, memcmp, memmove + + +namespace eastl +{ + /// move / move_n / move_backward + /// copy / copy_n / copy_backward + /// + /// We want to optimize move, move_n, move_backward, copy, copy_backward, copy_n to do memmove operations + /// when possible. + /// + /// We could possibly use memcpy, though it has stricter overlap requirements than the move and copy + /// algorithms and would require a runtime if/else to choose it over memmove. In particular, memcpy + /// allows no range overlap at all, whereas move/copy allow output end overlap and move_backward/copy_backward + /// allow output begin overlap. Despite this it might be useful to use memcpy for any platforms where + /// memcpy is significantly faster than memmove, and since in most cases the copy/move operation in fact + /// doesn't target overlapping memory and so memcpy would be usable. + /// + /// We can use memmove/memcpy if the following hold true: + /// InputIterator and OutputIterator are of the same type. + /// InputIterator and OutputIterator are of type contiguous_iterator_tag or simply are pointers (the two are virtually synonymous). + /// is_trivially_copyable::value is true. i.e. the constructor T(const T& t) (or T(T&& t) if present) can be replaced by memmove(this, &t, sizeof(T)) + /// + /// copy normally differs from move, but there is a case where copy is the same as move: when copy is + /// used with a move_iterator. We handle that case here by detecting that copy is being done with a + /// move_iterator and redirect it to move (which can take advantage of memmove/memcpy). + /// + /// The generic_iterator class is typically used for wrapping raw memory pointers so they can act like + /// formal iterators. 
Since pointers provide an opportunity for memmove/memcpy operations, we can + /// detect a generic iterator and use it's wrapped type as a pointer if it happens to be one. + + // Implementation moving copying both trivial and non-trivial data via a lesser iterator than random-access. + template + struct move_and_copy_helper + { + template + static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result) + { + for(; first != last; ++result, ++first) + *result = *first; + return result; + } + }; + + // Specialization for copying non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when its a compile-time const. + // This specialization converts the random access InputIterator last-first to an integral type. There's simple way for us to take advantage of a random access output iterator, + // as the range is specified by the input instead of the output, and distance(first, last) for a non-random-access iterator is potentially slow. + template <> + struct move_and_copy_helper + { + template + static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + + for(difference_type n = (last - first); n > 0; --n, ++first, ++result) + *result = *first; + + return result; + } + }; + + // Specialization for moving non-trivial data via a lesser iterator than random-access. + template + struct move_and_copy_helper + { + template + static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result) + { + for(; first != last; ++result, ++first) + *result = eastl::move(*first); + return result; + } + }; + + // Specialization for moving non-trivial data via a random-access iterator. It's theoretically faster because the compiler can see the count when its a compile-time const. 
+ template <> + struct move_and_copy_helper + { + template + static OutputIterator move_or_copy(InputIterator first, InputIterator last, OutputIterator result) + { + typedef typename eastl::iterator_traits::difference_type difference_type; + + for(difference_type n = (last - first); n > 0; --n, ++first, ++result) + *result = eastl::move(*first); + + return result; + } + }; + + // Specialization for when we can use memmove/memcpy. See the notes above for what conditions allow this. + template + struct move_and_copy_helper + { + template + static T* move_or_copy(const T* first, const T* last, T* result) + { + if (EASTL_UNLIKELY(first == last)) + return result; + + // We could use memcpy here if there's no range overlap, but memcpy is rarely much faster than memmove. + return (T*)memmove(result, first, (size_t)((uintptr_t)last - (uintptr_t)first)) + (last - first); + } + }; + + + + template + inline OutputIterator move_and_copy_chooser(InputIterator first, InputIterator last, OutputIterator result) + { + typedef typename eastl::iterator_traits::iterator_category IIC; + typedef typename eastl::iterator_traits::iterator_category OIC; + typedef typename eastl::iterator_traits::value_type value_type_input; + typedef typename eastl::iterator_traits::value_type value_type_output; + + const bool canBeMemmoved = eastl::is_trivially_copyable::value && + eastl::is_same::value && + (eastl::is_pointer::value || eastl::is_same::value) && + (eastl::is_pointer::value || eastl::is_same::value); + + return eastl::move_and_copy_helper::move_or_copy(first, last, result); // Need to chose based on the input iterator tag and not the output iterator tag, because containers accept input ranges of iterator types different than self. + } + + + // We have a second layer of unwrap_iterator calls because the original iterator might be something like move_iterator > (i.e. doubly-wrapped). 
+ template + inline OutputIterator move_and_copy_unwrapper(InputIterator first, InputIterator last, OutputIterator result) + { + return OutputIterator(eastl::move_and_copy_chooser(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), eastl::unwrap_iterator(result))); // Have to convert to OutputIterator because result.base() could be a T* + } + + + /// move + /// + /// After this operation the elements in the moved-from range will still contain valid values of the + /// appropriate type, but not necessarily the same values as before the move. + /// Returns the end of the result range. + /// Note: When moving between containers, the dest range must be valid; this function doesn't resize containers. + /// Note: if result is within [first, last), move_backward must be used instead of move. + /// + /// Example usage: + /// eastl::move(myArray.begin(), myArray.end(), myDestArray.begin()); + /// + /// Reference implementation: + /// template + /// OutputIterator move(InputIterator first, InputIterator last, OutputIterator result) + /// { + /// while(first != last) + /// *result++ = eastl::move(*first++); + /// return result; + /// } + + template + inline OutputIterator move(InputIterator first, InputIterator last, OutputIterator result) + { + return eastl::move_and_copy_unwrapper(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), result); + } + + + /// copy + /// + /// Effects: Copies elements in the range [first, last) into the range [result, result + (last - first)) + /// starting from first and proceeding to last. For each nonnegative integer n < (last - first), + /// performs *(result + n) = *(first + n). + /// + /// Returns: result + (last - first). That is, returns the end of the result. Note that this + /// is different from how memmove/memcpy work, as they return the beginning of the result. + /// + /// Requires: result shall not be in the range [first, last). But the end of the result range + /// may in fact be within the input rante. 
+ /// + /// Complexity: Exactly 'last - first' assignments. + /// + template + inline OutputIterator copy(InputIterator first, InputIterator last, OutputIterator result) + { + const bool isMove = eastl::is_move_iterator::value; EA_UNUSED(isMove); + + return eastl::move_and_copy_unwrapper(eastl::unwrap_iterator(first), eastl::unwrap_iterator(last), result); + } +} // namespace eastl + +#endif // Header include guard + + + + + + + + + + + + + + + diff --git a/include/EASTL/internal/enable_shared.h b/include/EASTL/internal/enable_shared.h new file mode 100644 index 0000000..ac5f072 --- /dev/null +++ b/include/EASTL/internal/enable_shared.h @@ -0,0 +1,83 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_ENABLE_SHARED_H +#define EASTL_INTERNAL_ENABLE_SHARED_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +namespace eastl +{ + + /// enable_shared_from_this + /// + /// This is a helper mixin class that allows you to make any class + /// export a shared_ptr instance that is associated with the class + /// instance. Any class that inherits from this class gets two functions: + /// shared_ptr shared_from_this(); + /// shared_ptr shared_from_this() const; + /// If you call shared_from_this, you get back a shared_ptr that + /// refers to the class. A second call to shared_from_this returns + /// another shared_ptr that is shared with the first one. + /// + /// The trick that happens which is not so obvious here (and which is + /// not mentioned at all in the Boost documentation of their version + /// of this) is that the shared_ptr constructor detects that the + /// class has an enable_shared_from_this mixin and sets up this system + /// automatically for the user. This is done with template tricks. 
+ /// + /// For some additional explanation, see the Boost documentation for + /// their description of their version of enable_shared_from_this. + /// + template + class enable_shared_from_this + { + public: + shared_ptr shared_from_this() + { return shared_ptr(mWeakPtr); } + + shared_ptr shared_from_this() const + { return shared_ptr(mWeakPtr); } + + weak_ptr weak_from_this() + { return mWeakPtr; } + + weak_ptr weak_from_this() const + { return mWeakPtr; } + + public: // This is public because the alternative fails on some compilers that we need to support. + mutable weak_ptr mWeakPtr; + + protected: + template friend class shared_ptr; + + EA_CONSTEXPR enable_shared_from_this() EA_NOEXCEPT + { } + + enable_shared_from_this(const enable_shared_from_this&) EA_NOEXCEPT + { } + + enable_shared_from_this& operator=(const enable_shared_from_this&) EA_NOEXCEPT + { return *this; } + + ~enable_shared_from_this() + { } + + }; // enable_shared_from_this + +} // namespace eastl + + +#endif // Header include guard + + + + + + diff --git a/include/EASTL/internal/fill_help.h b/include/EASTL/internal/fill_help.h new file mode 100644 index 0000000..235a24e --- /dev/null +++ b/include/EASTL/internal/fill_help.h @@ -0,0 +1,484 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_FILL_HELP_H +#define EASTL_INTERNAL_FILL_HELP_H + + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include + +#if defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) +#include +#endif + +namespace eastl +{ + // fill + // + // We implement some fill helper functions in order to allow us to optimize it + // where possible. 
+ // + template + struct fill_imp + { + template + static void do_fill(ForwardIterator first, ForwardIterator last, const T& value) + { + // The C++ standard doesn't specify whether we need to create a temporary + // or not, but all std STL implementations are written like what we have here. + for(; first != last; ++first) + *first = value; + } + }; + + template <> + struct fill_imp + { + template + static void do_fill(ForwardIterator first, ForwardIterator last, const T& value) + { + typedef typename eastl::iterator_traits::value_type value_type; + // We create a temp and fill from that because value might alias to the + // destination range and so the compiler would be forced into generating + // less efficient code. + for(const T temp = value; first != last; ++first) + { + EA_UNUSED(temp); + *first = static_cast(temp); + } + } + }; + + /// fill + /// + /// fill is like memset in that it assigns a single value repeatedly to a + /// destination range. It allows for any type of iterator (not just an array) + /// and the source value can be any type, not just a byte. + /// Note that the source value (which is a reference) can come from within + /// the destination range. + /// + /// Effects: Assigns value through all the iterators in the range [first, last). + /// + /// Complexity: Exactly 'last - first' assignments. + /// + /// Note: The C++ standard doesn't specify anything about the value parameter + /// coming from within the first-last range. All std STL implementations act + /// as if the standard specifies that value must not come from within this range. + /// + template + inline void fill(ForwardIterator first, ForwardIterator last, const T& value) + { + eastl::fill_imp< is_scalar::value >::do_fill(first, last, value); + + // Possibly better implementation, as it will deal with small PODs as well as scalars: + // bEasyCopy is true if the type has a trivial constructor (e.g. is a POD) and if + // it is small. 
Thus any built-in type or any small user-defined struct will qualify. + //const bool bEasyCopy = eastl::type_and::value, + // eastl::integral_constant::value; + //eastl::fill_imp::do_fill(first, last, value); + + } + + #if(defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) + #if defined(EA_PROCESSOR_X86_64) + template + inline void fill(uint64_t* first, uint64_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + uint64_t value = (uint64_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosq\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + + + template + inline void fill(int64_t* first, int64_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + int64_t value = (int64_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosq\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + #endif + + template + inline void fill(uint32_t* first, uint32_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + uint32_t value = (uint32_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosl\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + + + template + inline void fill(int32_t* first, int32_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + int32_t value = (int32_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosl\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + + + template + inline void fill(uint16_t* first, uint16_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + uint16_t value = (uint16_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosw\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + + + template + inline void fill(int16_t* first, int16_t* last, Value c) + { + uintptr_t count = (uintptr_t)(last - first); + int16_t value = (int16_t)(c); + + 
__asm__ __volatile__ ("cld\n\t" + "rep stosw\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + } + + #elif defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) + #if defined(EA_PROCESSOR_X86_64) + template + inline void fill(uint64_t* first, uint64_t* last, Value c) + { + __stosq(first, (uint64_t)c, (size_t)(last - first)); + } + + template + inline void fill(int64_t* first, int64_t* last, Value c) + { + __stosq((uint64_t*)first, (uint64_t)c, (size_t)(last - first)); + } + #endif + + template + inline void fill(uint32_t* first, uint32_t* last, Value c) + { + __stosd((unsigned long*)first, (unsigned long)c, (size_t)(last - first)); + } + + template + inline void fill(int32_t* first, int32_t* last, Value c) + { + __stosd((unsigned long*)first, (unsigned long)c, (size_t)(last - first)); + } + + template + inline void fill(uint16_t* first, uint16_t* last, Value c) + { + __stosw(first, (uint16_t)c, (size_t)(last - first)); + } + + template + inline void fill(int16_t* first, int16_t* last, Value c) + { + __stosw((uint16_t*)first, (uint16_t)c, (size_t)(last - first)); + } + #endif + + + inline void fill(char* first, char* last, const char& c) // It's debateable whether we should use 'char& c' or 'char c' here. + { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + inline void fill(char* first, char* last, const int c) // This is used for cases like 'fill(first, last, 0)'. 
+ { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + inline void fill(unsigned char* first, unsigned char* last, const unsigned char& c) + { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + inline void fill(unsigned char* first, unsigned char* last, const int c) + { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + inline void fill(signed char* first, signed char* last, const signed char& c) + { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + inline void fill(signed char* first, signed char* last, const int c) + { + memset(first, (unsigned char)c, (size_t)(last - first)); + } + + #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__ICL) // ICL = Intel compiler + inline void fill(bool* first, bool* last, const bool& b) + { + memset(first, (char)b, (size_t)(last - first)); + } + #endif + + + + + // fill_n + // + // We implement some fill helper functions in order to allow us to optimize it + // where possible. + // + template + struct fill_n_imp + { + template + static OutputIterator do_fill(OutputIterator first, Size n, const T& value) + { + for(; n-- > 0; ++first) + *first = value; + return first; + } + }; + + template <> + struct fill_n_imp + { + template + static OutputIterator do_fill(OutputIterator first, Size n, const T& value) + { + typedef typename eastl::iterator_traits::value_type value_type; + + // We create a temp and fill from that because value might alias to + // the destination range and so the compiler would be forced into + // generating less efficient code. + for(const T temp = value; n-- > 0; ++first) + *first = static_cast(temp); + return first; + } + }; + + /// fill_n + /// + /// The fill_n function is very much like memset in that a copies a source value + /// n times into a destination range. The source value may come from within + /// the destination range. + /// + /// Effects: Assigns value through all the iterators in the range [first, first + n). 
+ /// + /// Complexity: Exactly n assignments. + /// + template + OutputIterator fill_n(OutputIterator first, Size n, const T& value) + { + return eastl::fill_n_imp::value>::do_fill(first, n, value); + } + + template + inline char* fill_n(char* first, Size n, const char& c) + { + return (char*)memset(first, (char)c, (size_t)n) + n; + } + + template + inline unsigned char* fill_n(unsigned char* first, Size n, const unsigned char& c) + { + return (unsigned char*)memset(first, (unsigned char)c, (size_t)n) + n; + } + + template + inline signed char* fill_n(signed char* first, Size n, const signed char& c) + { + return (signed char*)memset(first, (signed char)c, n) + (size_t)n; + } + + #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__ICL) // ICL = Intel compiler + template + inline bool* fill_n(bool* first, Size n, const bool& b) + { + return (bool*)memset(first, (char)b, n) + (size_t)n; + } + #endif + + #if(defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) + #if defined(EA_PROCESSOR_X86_64) + template + inline uint64_t* fill_n(uint64_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + uint64_t value = (uint64_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosq\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. + } + + + template + inline int64_t* fill_n(int64_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + int64_t value = (int64_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosq\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. 
+ } + #endif + + template + inline uint32_t* fill_n(uint32_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + uint32_t value = (uint32_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosl\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. + } + + + template + inline int32_t* fill_n(int32_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + int32_t value = (int32_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosl\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. + } + + + template + inline uint16_t* fill_n(uint16_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + uint16_t value = (uint16_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosw\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. + } + + + template + inline int16_t* fill_n(int16_t* first, Size n, Value c) + { + uintptr_t count = (uintptr_t)(n); + int16_t value = (int16_t)(c); + + __asm__ __volatile__ ("cld\n\t" + "rep stosw\n\t" + : "+c" (count), "+D" (first), "=m" (first) + : "a" (value) + : "cc" ); + return first; // first is updated by the code above. 
+ } + + #elif defined(EA_COMPILER_MICROSOFT) && (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) + #if defined(EA_PROCESSOR_X86_64) + template + inline uint64_t* fill_n(uint64_t* first, Size n, Value c) + { + __stosq(first, (uint64_t)c, (size_t)n); + return first + n; + } + + template + inline int64_t* fill_n(int64_t* first, Size n, Value c) + { + __stosq((uint64_t*)first, (uint64_t)c, (size_t)n); + return first + n; + } + #endif + + template + inline uint32_t* fill_n(uint32_t* first, Size n, Value c) + { + __stosd((unsigned long*)first, (unsigned long)c, (size_t)n); + return first + n; + } + + template + inline int32_t* fill_n(int32_t* first, Size n, Value c) + { + __stosd((unsigned long*)first, (unsigned long)c, (size_t)n); + return first + n; + } + + template + inline uint16_t* fill_n(uint16_t* first, Size n, Value c) + { + __stosw(first, (uint16_t)c, (size_t)n); + return first + n; + } + + template + inline int16_t* fill_n(int16_t* first, Size n, Value c) + { + __stosw((uint16_t*)first, (uint16_t)c, (size_t)n); + return first + n; + } + #endif + +} // namespace eastl + +#endif // Header include guard + + + + + + + + + + + + + + + diff --git a/include/EASTL/internal/fixed_pool.h b/include/EASTL/internal/fixed_pool.h new file mode 100644 index 0000000..0b610ed --- /dev/null +++ b/include/EASTL/internal/fixed_pool.h @@ -0,0 +1,1639 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements the following +// aligned_buffer +// fixed_pool_base +// fixed_pool +// fixed_pool_with_overflow +// fixed_hashtable_allocator +// fixed_vector_allocator +// fixed_swap +// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_FIXED_POOL_H +#define EASTL_INTERNAL_FIXED_POOL_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(push, 0) + #include + #pragma warning(pop) +#else + #include +#endif + +#if defined(_MSC_VER) + #pragma warning(push) + #pragma warning(disable: 4275) // non dll-interface class used as base for DLL-interface classkey 'identifier' +#endif + + +namespace eastl +{ + + /// EASTL_FIXED_POOL_DEFAULT_NAME + /// + /// Defines a default allocator name in the absence of a user-provided name. + /// + #ifndef EASTL_FIXED_POOL_DEFAULT_NAME + #define EASTL_FIXED_POOL_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_pool" // Unless the user overrides something, this is "EASTL fixed_pool". + #endif + + + + /////////////////////////////////////////////////////////////////////////// + // aligned_buffer + /////////////////////////////////////////////////////////////////////////// + + /// aligned_buffer + /// + /// This is useful for creating a buffer of the same size and alignment + /// of a given struct or class. This is useful for creating memory pools + /// that support both size and alignment requirements of stored objects + /// but without wasting space in over-allocating. + /// + /// Note that we implement this via struct specializations, as some + /// compilers such as VC++ do not support specification of alignments + /// in any way other than via an integral constant. 
+ /// + /// Example usage: + /// struct Widget{ }; // This class has a given size and alignment. + /// + /// Declare a char buffer of equal size and alignment to Widget. + /// aligned_buffer mWidgetBuffer; + /// + /// Declare an array this time. + /// aligned_buffer mWidgetArray[15]; + /// + typedef char EASTL_MAY_ALIAS aligned_buffer_char; + + template + struct aligned_buffer { aligned_buffer_char buffer[size]; }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(2) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2); }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(4) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4); }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(8) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(8); }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(16) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(16); }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(32) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(32); }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(64) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(64); }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(128) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(128); }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(256) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(256); }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(512) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(512); }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(1024) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(1024); }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(2048) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2048); }; + + template + struct aligned_buffer { EA_PREFIX_ALIGN(4096) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4096); }; + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_pool_base + 
/////////////////////////////////////////////////////////////////////////// + + /// fixed_pool_base + /// + /// This is a base class for the implementation of fixed-size pools. + /// In particular, the fixed_pool and fixed_pool_with_overflow classes + /// are based on fixed_pool_base. + /// + struct fixed_pool_base + { + public: + /// fixed_pool_base + /// + fixed_pool_base(void* pMemory = NULL) + : mpHead((Link*)pMemory) + , mpNext((Link*)pMemory) + , mpCapacity((Link*)pMemory) + , mnNodeSize(0) // This is normally set in the init function. + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + mnCurrentSize = 0; + mnPeakSize = 0; + #endif + } + + + /// fixed_pool_base + /// + // Disabled because the default is sufficient. While it normally makes no sense to deep copy + // this data, our usage of this class is such that this is OK and wanted. + // + // fixed_pool_base(const fixed_pool_base& x) + // { + // } + + + /// operator= + /// + fixed_pool_base& operator=(const fixed_pool_base&) + { + // By design we do nothing. We don't attempt to deep-copy member data. + return *this; + } + + + /// init + /// + /// Initializes a fixed_pool with a given set of parameters. + /// You cannot call this function twice else the resulting + /// behaviour will be undefined. You can only call this function + /// after constructing the fixed_pool with the default constructor. + /// + EASTL_API void init(void* pMemory, size_t memorySize, size_t nodeSize, + size_t alignment, size_t alignmentOffset = 0); + + + /// peak_size + /// + /// Returns the maximum number of outstanding allocations there have been + /// at any one time. This represents a high water mark for the allocation count. + /// + size_t peak_size() const + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + return mnPeakSize; + #else + return 0; + #endif + } + + + /// can_allocate + /// + /// Returns true if there are any free links. 
+ /// + bool can_allocate() const + { + return (mpHead != NULL) || (mpNext != mpCapacity); + } + + public: + /// Link + /// Implements a singly-linked list. + struct Link + { + Link* mpNext; + }; + + Link* mpHead; + Link* mpNext; + Link* mpCapacity; + size_t mnNodeSize; + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + uint32_t mnCurrentSize; /// Current number of allocated nodes. + uint32_t mnPeakSize; /// Max number of allocated nodes at any one time. + #endif + + }; // fixed_pool_base + + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_pool + /////////////////////////////////////////////////////////////////////////// + + /// fixed_pool + /// + /// Implements a simple fixed pool allocator for use by fixed-size containers. + /// This is not a generic eastl allocator which can be plugged into an arbitrary + /// eastl container, as it simplifies some functions are arguments for the + /// purpose of efficiency. + /// + class EASTL_API fixed_pool : public fixed_pool_base + { + public: + /// fixed_pool + /// + /// Default constructor. User usually will want to call init() after + /// constructing via this constructor. The pMemory argument is for the + /// purposes of temporarily storing a pointer to the buffer to be used. + /// Even though init may have a pMemory argument, this arg is useful + /// for temporary storage, as per copy construction. + /// + fixed_pool(void* pMemory = NULL) + : fixed_pool_base(pMemory) + { + } + + + /// fixed_pool + /// + /// Constructs a fixed_pool with a given set of parameters. + /// + fixed_pool(void* pMemory, size_t memorySize, size_t nodeSize, + size_t alignment, size_t alignmentOffset = 0) + { + init(pMemory, memorySize, nodeSize, alignment, alignmentOffset); + } + + + /// fixed_pool + /// + // Disabled because the default is sufficient. While it normally makes no sense to deep copy + // this data, our usage of this class is such that this is OK and wanted. 
+ // + // fixed_pool(const fixed_pool& x) + // { + // } + + + /// operator= + /// + fixed_pool& operator=(const fixed_pool&) + { + // By design we do nothing. We don't attempt to deep-copy member data. + return *this; + } + + + /// allocate + /// + /// Allocates a new object of the size specified upon class initialization. + /// Returns NULL if there is no more memory. + /// + void* allocate() + { + Link* pLink = mpHead; + + if(pLink) // If we have space... + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(++mnCurrentSize > mnPeakSize) + mnPeakSize = mnCurrentSize; + #endif + + mpHead = pLink->mpNext; + return pLink; + } + else + { + // If there's no free node in the free list, just + // allocate another from the reserved memory area + + if(mpNext != mpCapacity) + { + pLink = mpNext; + + mpNext = reinterpret_cast(reinterpret_cast(mpNext) + mnNodeSize); + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(++mnCurrentSize > mnPeakSize) + mnPeakSize = mnCurrentSize; + #endif + + return pLink; + } + + return NULL; + } + } + + void* allocate(size_t /*alignment*/, size_t /*offset*/) + { + return allocate(); + } + + /// deallocate + /// + /// Frees the given object which was allocated by allocate(). + /// If the given node was not allocated by allocate() then the behaviour + /// is undefined. + /// + void deallocate(void* p) + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + --mnCurrentSize; + #endif + + ((Link*)p)->mpNext = mpHead; + mpHead = ((Link*)p); + } + + + using fixed_pool_base::can_allocate; + + + const char* get_name() const + { + return EASTL_FIXED_POOL_DEFAULT_NAME; + } + + + void set_name(const char*) + { + // Nothing to do. We don't allocate memory. 
+ } + + }; // fixed_pool + + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_pool_with_overflow + /////////////////////////////////////////////////////////////////////////// + + /// fixed_pool_with_overflow + /// + template + class fixed_pool_with_overflow : public fixed_pool_base + { + public: + typedef OverflowAllocator overflow_allocator_type; + + + fixed_pool_with_overflow(void* pMemory = NULL) + : fixed_pool_base(pMemory), + mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME) + { + // Leave mpPoolBegin, mpPoolEnd uninitialized. + } + + + fixed_pool_with_overflow(void* pMemory, const overflow_allocator_type& allocator) + : fixed_pool_base(pMemory), + mOverflowAllocator(allocator) + { + // Leave mpPoolBegin, mpPoolEnd uninitialized. + } + + + fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize, + size_t alignment, size_t alignmentOffset = 0) + : mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME) + { + fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset); + + mpPoolBegin = pMemory; + } + + + fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize, + size_t alignment, size_t alignmentOffset, + const overflow_allocator_type& allocator) + : mOverflowAllocator(allocator) + { + fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset); + + mpPoolBegin = pMemory; + } + + + // Disabled because the default is sufficient. While it normally makes no sense to deep copy + // this data, our usage of this class is such that this is OK and wanted. + // + //fixed_pool_with_overflow(const fixed_pool_with_overflow& x) + //{ + // ... 
+ //} + + + fixed_pool_with_overflow& operator=(const fixed_pool_with_overflow& x) + { + #if EASTL_ALLOCATOR_COPY_ENABLED + mOverflowAllocator = x.mOverflowAllocator; + #else + (void)x; + #endif + + return *this; + } + + + void init(void* pMemory, size_t memorySize, size_t nodeSize, + size_t alignment, size_t alignmentOffset = 0) + { + fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset); + + mpPoolBegin = pMemory; + } + + + void* allocate() + { + void* p = NULL; + Link* pLink = mpHead; + + if(pLink) + { + // Unlink from chain + p = pLink; + mpHead = pLink->mpNext; + } + else + { + // If there's no free node in the free list, just + // allocate another from the reserved memory area + + if(mpNext != mpCapacity) + { + p = pLink = mpNext; + mpNext = reinterpret_cast(reinterpret_cast(mpNext) + mnNodeSize); + } + else + p = mOverflowAllocator.allocate(mnNodeSize); + } + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if(p && (++mnCurrentSize > mnPeakSize)) + mnPeakSize = mnCurrentSize; + #endif + + return p; + } + + + void* allocate(size_t alignment, size_t alignmentOffset) + { + void* p = NULL; + Link* pLink = mpHead; + + if (pLink) + { + // Unlink from chain + p = pLink; + mpHead = pLink->mpNext; + } + else + { + // If there's no free node in the free list, just + // allocate another from the reserved memory area + + if (mpNext != mpCapacity) + { + p = pLink = mpNext; + mpNext = reinterpret_cast(reinterpret_cast(mpNext)+mnNodeSize); + } + else + { + p = allocate_memory(mOverflowAllocator, mnNodeSize, alignment, alignmentOffset); + EASTL_ASSERT_MSG(p != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + } + + } + + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + if (p && (++mnCurrentSize > mnPeakSize)) + mnPeakSize = mnCurrentSize; + #endif + + return p; + } + + void deallocate(void* p) + { + #if EASTL_FIXED_SIZE_TRACKING_ENABLED + --mnCurrentSize; + #endif + + if((p >= mpPoolBegin) && (p < mpCapacity)) + { + 
((Link*)p)->mpNext = mpHead; + mpHead = ((Link*)p); + } + else + mOverflowAllocator.deallocate(p, (size_t)mnNodeSize); + } + + + using fixed_pool_base::can_allocate; + + + const char* get_name() const + { + return mOverflowAllocator.get_name(); + } + + + void set_name(const char* pName) + { + mOverflowAllocator.set_name(pName); + } + + + const overflow_allocator_type& get_overflow_allocator() const + { + return mOverflowAllocator; + } + + + overflow_allocator_type& get_overflow_allocator() + { + return mOverflowAllocator; + } + + + void set_overflow_allocator(const overflow_allocator_type& overflowAllocator) + { + mOverflowAllocator = overflowAllocator; + } + public: + OverflowAllocator mOverflowAllocator; + void* mpPoolBegin; // Ideally we wouldn't need this member variable. he problem is that the information about the pool buffer and object size is stored in the owning container and we can't have access to it without increasing the amount of code we need and by templating more code. It may turn out that simply storing data here is smaller in the end. + + }; // fixed_pool_with_overflow + + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_node_allocator + /////////////////////////////////////////////////////////////////////////// + + /// fixed_node_allocator + /// + /// Note: This class was previously named fixed_node_pool, but was changed because this name + /// was inconsistent with the other allocators here which ended with _allocator. + /// + /// Implements a fixed_pool with a given node count, alignment, and alignment offset. + /// fixed_node_allocator is like fixed_pool except it is templated on the node type instead + /// of being a generic allocator. All it does is pass allocations through to + /// the fixed_pool base. This functionality is separate from fixed_pool because there + /// are other uses for fixed_pool. 
+ /// + /// We template on kNodeSize instead of node_type because the former allows for the + /// two different node_types of the same size to use the same template implementation. + /// + /// Template parameters: + /// nodeSize The size of the object to allocate. + /// nodeCount The number of objects the pool contains. + /// nodeAlignment The alignment of the objects to allocate. + /// nodeAlignmentOffset The alignment offset of the objects to allocate. + /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template + class fixed_node_allocator + { + public: + typedef typename type_select, fixed_pool>::type pool_type; + typedef fixed_node_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, + kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset + }; + + public: + pool_type mPool; + + public: + //fixed_node_allocator(const char* pName) + //{ + // mPool.set_name(pName); + //} + + + fixed_node_allocator(void* pNodeBuffer) + : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset) + { + } + + + fixed_node_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator) + : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator) + { + } + + + /// fixed_node_allocator + /// + /// Note that we are copying x.mpHead to our own fixed_pool. This at first may seem + /// broken, as fixed pools cannot take over ownership of other fixed pools' memory. 
+ /// However, we declare that this copy ctor can only ever be safely called when + /// the user has intentionally pre-seeded the source with the destination pointer. + /// This is somewhat playing with fire, but it allows us to get around chicken-and-egg + /// problems with containers being their own allocators, without incurring any memory + /// costs or extra code costs. There's another reason for this: we very strongly want + /// to avoid full copying of instances of fixed_pool around, especially via the stack. + /// Larger pools won't even be able to fit on many machine's stacks. So this solution + /// is also a mechanism to prevent that situation from existing and being used. + /// Perhaps some day we'll find a more elegant yet costless way around this. + /// + fixed_node_allocator(const this_type& x) + : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, x.mPool.mOverflowAllocator) + { + } + + + this_type& operator=(const this_type& x) + { + mPool = x.mPool; + return *this; + } + + + void* allocate(size_t n, int /*flags*/ = 0) + { + (void)n; + EASTL_ASSERT(n == kNodeSize); + return mPool.allocate(); + } + + + void* allocate(size_t n, size_t alignment, size_t offset, int /*flags*/ = 0) + { + (void)n; + EASTL_ASSERT(n == kNodeSize); + return mPool.allocate(alignment, offset); + } + + + void deallocate(void* p, size_t) + { + mPool.deallocate(p); + } + + + /// can_allocate + /// + /// Returns true if there are any free links. + /// + bool can_allocate() const + { + return mPool.can_allocate(); + } + + + /// reset + /// + /// This function unilaterally resets the fixed pool back to a newly initialized + /// state. This is useful for using in tandem with container reset functionality. 
+ /// + void reset(void* pNodeBuffer) + { + mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset); + } + + + const char* get_name() const + { + return mPool.get_name(); + } + + + void set_name(const char* pName) + { + mPool.set_name(pName); + } + + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT + { + return mPool.mOverflowAllocator; + } + + + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT + { + return mPool.mOverflowAllocator; + } + + + void set_overflow_allocator(const overflow_allocator_type& allocator) + { + mPool.mOverflowAllocator = allocator; + } + + + void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators. + { + mPool.mOverflowAllocator = x.mPool.mOverflowAllocator; + } + + }; // fixed_node_allocator + + + // This is a near copy of the code above, with the only difference being + // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs, + // and the get_overflow_allocator / set_overflow_allocator functions. + template + class fixed_node_allocator + { + public: + typedef fixed_pool pool_type; + typedef fixed_node_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? 
nodeSize-1 : 0) + nodeAlignmentOffset, + kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset + }; + + public: + pool_type mPool; + + public: + fixed_node_allocator(void* pNodeBuffer) + : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset) + { + } + + + fixed_node_allocator(void* pNodeBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization. + : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset) + { + } + + + /// fixed_node_allocator + /// + /// Note that we are copying x.mpHead to our own fixed_pool. This at first may seem + /// broken, as fixed pools cannot take over ownership of other fixed pools' memory. + /// However, we declare that this copy ctor can only ever be safely called when + /// the user has intentionally pre-seeded the source with the destination pointer. + /// This is somewhat playing with fire, but it allows us to get around chicken-and-egg + /// problems with containers being their own allocators, without incurring any memory + /// costs or extra code costs. There's another reason for this: we very strongly want + /// to avoid full copying of instances of fixed_pool around, especially via the stack. + /// Larger pools won't even be able to fit on many machine's stacks. So this solution + /// is also a mechanism to prevent that situation from existing and being used. + /// Perhaps some day we'll find a more elegant yet costless way around this. + /// + fixed_node_allocator(const this_type& x) // No need to copy the overflow allocator, because bEnableOverflow is false in this specialization. 
+ : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset) + { + } + + + this_type& operator=(const this_type& x) + { + mPool = x.mPool; + return *this; + } + + + void* allocate(size_t n, int /*flags*/ = 0) + { + (void)n; + EASTL_ASSERT(n == kNodeSize); + return mPool.allocate(); + } + + + void* allocate(size_t n, size_t alignment, size_t offset, int /*flags*/ = 0) + { + (void)n; + EASTL_ASSERT(n == kNodeSize); + return mPool.allocate(alignment, offset); + } + + + void deallocate(void* p, size_t) + { + mPool.deallocate(p); + } + + + bool can_allocate() const + { + return mPool.can_allocate(); + } + + + void reset(void* pNodeBuffer) + { + mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset); + } + + + const char* get_name() const + { + return mPool.get_name(); + } + + + void set_name(const char* pName) + { + mPool.set_name(pName); + } + + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT + { + EASTL_ASSERT(false); + overflow_allocator_type* pNULL = NULL; + return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile. + } + + + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT + { + EASTL_ASSERT(false); + overflow_allocator_type* pNULL = NULL; + return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile. + } + + + void set_overflow_allocator(const overflow_allocator_type& /*allocator*/) + { + // We don't have an overflow allocator. + EASTL_ASSERT(false); + } + + + void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators. + { + // We don't have an overflow allocator. 
+ } + + }; // fixed_node_allocator + + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const fixed_node_allocator& a, + const fixed_node_allocator& b) + { + return (&a == &b); // They are only equal if they are the same object. + } + + + template + inline bool operator!=(const fixed_node_allocator& a, + const fixed_node_allocator& b) + { + return (&a != &b); // They are only equal if they are the same object. + } + + + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_hashtable_allocator + /////////////////////////////////////////////////////////////////////////// + + /// fixed_hashtable_allocator + /// + /// Provides a base class for fixed hashtable allocations. + /// To consider: Have this inherit from fixed_node_allocator. + /// + /// Template parameters: + /// bucketCount The fixed number of hashtable buckets to provide. + /// nodeCount The number of objects the pool contains. + /// nodeAlignment The alignment of the objects to allocate. + /// nodeAlignmentOffset The alignment offset of the objects to allocate. + /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template + class fixed_hashtable_allocator + { + public: + typedef typename type_select, fixed_pool>::type pool_type; + typedef fixed_hashtable_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kBucketCount = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket. 
+ kBucketsSize = bucketCount * sizeof(void*), + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets. + kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset, + kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes. + }; + + protected: + pool_type mPool; + void* mpBucketBuffer; + + public: + // Disabled because it causes compile conflicts. + //fixed_hashtable_allocator(const char* pName) + //{ + // mPool.set_name(pName); + //} + + fixed_hashtable_allocator(void* pNodeBuffer) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(NULL) + { + // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called. + } + + + fixed_hashtable_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator), + mpBucketBuffer(NULL) + { + // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called. 
+ } + + + fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(pBucketBuffer) + { + } + + + fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer, const overflow_allocator_type& allocator) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, allocator), + mpBucketBuffer(pBucketBuffer) + { + } + + + /// fixed_hashtable_allocator + /// + /// Note that we are copying x.mpHead and mpBucketBuffer to our own fixed_pool. + /// See the discussion above in fixed_node_allocator for important information about this. + /// + fixed_hashtable_allocator(const this_type& x) + : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset, x.mPool.mOverflowAllocator), + mpBucketBuffer(x.mpBucketBuffer) + { + } + + + fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x) + { + mPool = x.mPool; + return *this; + } + + + void* allocate(size_t n, int flags = 0) + { + // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes. + EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum. + + if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets... + { + EASTL_ASSERT(n == kNodeSize); EA_UNUSED(n); + return mPool.allocate(); + } + + // If bucket size no longer fits within local buffer... + if ((flags & kAllocFlagBuckets) == kAllocFlagBuckets && (n > kBucketsSize)) + return get_overflow_allocator().allocate(n); + + EASTL_ASSERT(n <= kBucketsSize); + return mpBucketBuffer; + } + + + void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0) + { + // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes. 
+ if ((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets... + { + EASTL_ASSERT(n == kNodeSize); EA_UNUSED(n); + return mPool.allocate(alignment, offset); + } + + // If bucket size no longer fits within local buffer... + if ((flags & kAllocFlagBuckets) == kAllocFlagBuckets && (n > kBucketsSize)) + return get_overflow_allocator().allocate(n, alignment, offset); + + EASTL_ASSERT(n <= kBucketsSize); + return mpBucketBuffer; + } + + + void deallocate(void* p, size_t) + { + if(p != mpBucketBuffer) // If we are freeing a node and not buckets... + mPool.deallocate(p); + } + + + bool can_allocate() const + { + return mPool.can_allocate(); + } + + + void reset(void* pNodeBuffer) + { + // No need to modify mpBucketBuffer, as that is constant. + mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset); + } + + + const char* get_name() const + { + return mPool.get_name(); + } + + + void set_name(const char* pName) + { + mPool.set_name(pName); + } + + + const overflow_allocator_type& get_overflow_allocator() const + { + return mPool.mOverflowAllocator; + } + + + overflow_allocator_type& get_overflow_allocator() + { + return mPool.mOverflowAllocator; + } + + + void set_overflow_allocator(const overflow_allocator_type& allocator) + { + mPool.mOverflowAllocator = allocator; + } + + + void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators. + { + mPool.mOverflowAllocator = x.mPool.mOverflowAllocator; + } + + }; // fixed_hashtable_allocator + + + // This is a near copy of the code above, with the only difference being + // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs, + // and the get_overflow_allocator / set_overflow_allocator functions. 
+ template + class fixed_hashtable_allocator + { + public: + typedef fixed_pool pool_type; + typedef fixed_hashtable_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kBucketCount = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket. + kBucketsSize = bucketCount * sizeof(void*), + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets. + kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset, + kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes. + }; + + protected: + pool_type mPool; + void* mpBucketBuffer; + + public: + // Disabled because it causes compile conflicts. + //fixed_hashtable_allocator(const char* pName) + //{ + // mPool.set_name(pName); + //} + + fixed_hashtable_allocator(void* pNodeBuffer) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(NULL) + { + // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called. + } + + fixed_hashtable_allocator(void* pNodeBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization. + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(NULL) + { + // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called. 
+ } + + + fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer) + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(pBucketBuffer) + { + } + + + fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization. + : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(pBucketBuffer) + { + } + + + /// fixed_hashtable_allocator + /// + /// Note that we are copying x.mpHead and mpBucketBuffer to our own fixed_pool. + /// See the discussion above in fixed_node_allocator for important information about this. + /// + fixed_hashtable_allocator(const this_type& x) // No need to copy the overflow allocator, because bEnableOverflow is false in this specialization. + : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset), + mpBucketBuffer(x.mpBucketBuffer) + { + } + + + fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x) + { + mPool = x.mPool; + return *this; + } + + + void* allocate(size_t n, int flags = 0) + { + // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes. + EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum. + if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets... + { + EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away. + return mPool.allocate(); + } + + // Don't allow hashtable buckets to overflow in this case. + EASTL_ASSERT(n <= kBucketsSize); + return mpBucketBuffer; + } + + + void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0) + { + // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes. 
+ if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets... + { + EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away. + return mPool.allocate(alignment, offset); + } + + // Don't allow hashtable buckets to overflow in this case. + EASTL_ASSERT(n <= kBucketsSize); + return mpBucketBuffer; + } + + + void deallocate(void* p, size_t) + { + if(p != mpBucketBuffer) // If we are freeing a node and not buckets... + mPool.deallocate(p); + } + + + bool can_allocate() const + { + return mPool.can_allocate(); + } + + + void reset(void* pNodeBuffer) + { + // No need to modify mpBucketBuffer, as that is constant. + mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset); + } + + + const char* get_name() const + { + return mPool.get_name(); + } + + + void set_name(const char* pName) + { + mPool.set_name(pName); + } + + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT + { + EASTL_ASSERT(false); + overflow_allocator_type* pNULL = NULL; + return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile. + } + + + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT + { + EASTL_ASSERT(false); + overflow_allocator_type* pNULL = NULL; + return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile. + } + + void set_overflow_allocator(const overflow_allocator_type& /*allocator*/) + { + // We don't have an overflow allocator. + EASTL_ASSERT(false); + } + + void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators. + { + // We don't have an overflow allocator. 
+ } + + }; // fixed_hashtable_allocator + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const fixed_hashtable_allocator& a, + const fixed_hashtable_allocator& b) + { + return (&a == &b); // They are only equal if they are the same object. + } + + + template + inline bool operator!=(const fixed_hashtable_allocator& a, + const fixed_hashtable_allocator& b) + { + return (&a != &b); // They are only equal if they are the same object. + } + + + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_vector_allocator + /////////////////////////////////////////////////////////////////////////// + + /// fixed_vector_allocator + /// + /// Template parameters: + /// nodeSize The size of individual objects. + /// nodeCount The number of objects the pool contains. + /// nodeAlignment The alignment of the objects to allocate. + /// nodeAlignmentOffset The alignment offset of the objects to allocate. + /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted. + /// OverflowAllocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap. + /// + template + class fixed_vector_allocator + { + public: + typedef fixed_vector_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? 
nodeSize-1 : 0) + nodeAlignmentOffset, + kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset + }; + + public: + overflow_allocator_type mOverflowAllocator; + void* mpPoolBegin; // To consider: Find some way to make this data unnecessary, without increasing template proliferation. + + public: + // Disabled because it causes compile conflicts. + //fixed_vector_allocator(const char* pName = NULL) + //{ + // mOverflowAllocator.set_name(pName); + //} + + fixed_vector_allocator(void* pNodeBuffer = nullptr) + : mpPoolBegin(pNodeBuffer) + { + } + + fixed_vector_allocator(void* pNodeBuffer, const overflow_allocator_type& allocator) + : mOverflowAllocator(allocator), mpPoolBegin(pNodeBuffer) + { + } + + // Disabled because the default is sufficient. + //fixed_vector_allocator(const fixed_vector_allocator& x) + //{ + // mpPoolBegin = x.mpPoolBegin; + // mOverflowAllocator = x.mOverflowAllocator; + //} + + fixed_vector_allocator& operator=(const fixed_vector_allocator& x) + { + // We leave our mpPoolBegin variable alone. + + #if EASTL_ALLOCATOR_COPY_ENABLED + mOverflowAllocator = x.mOverflowAllocator; + #else + (void)x; + #endif + + return *this; + } + + void* allocate(size_t n, int flags = 0) + { + return mOverflowAllocator.allocate(n, flags); + } + + void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0) + { + return mOverflowAllocator.allocate(n, alignment, offset, flags); + } + + void deallocate(void* p, size_t n) + { + if(p != mpPoolBegin) + mOverflowAllocator.deallocate(p, n); // Can't do this to our own allocation. 
+ } + + const char* get_name() const + { + return mOverflowAllocator.get_name(); + } + + void set_name(const char* pName) + { + mOverflowAllocator.set_name(pName); + } + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT + { + return mOverflowAllocator; + } + + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT + { + return mOverflowAllocator; + } + + void set_overflow_allocator(const overflow_allocator_type& allocator) + { + mOverflowAllocator = allocator; + } + + void copy_overflow_allocator(const this_type& x) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators. + { + mOverflowAllocator = x.mOverflowAllocator; + } + + }; // fixed_vector_allocator + + + template + class fixed_vector_allocator + { + public: + typedef fixed_vector_allocator this_type; + typedef OverflowAllocator overflow_allocator_type; + + enum + { + kNodeSize = nodeSize, + kNodeCount = nodeCount, + kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T). + kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, + kNodeAlignment = nodeAlignment, + kNodeAlignmentOffset = nodeAlignmentOffset + }; + + // Disabled because it causes compile conflicts. + //fixed_vector_allocator(const char* = NULL) // This char* parameter is present so that this class can be like the other version. + //{ + //} + + fixed_vector_allocator() + { + } + + fixed_vector_allocator(void* /*pNodeBuffer*/) + { + } + + fixed_vector_allocator(void* /*pNodeBuffer*/, const overflow_allocator_type& /*allocator*/) // allocator is unused because bEnableOverflow is false in this specialization. + { + } + + /// fixed_vector_allocator + /// + // Disabled because there is nothing to do. No member data. And the default for this is sufficient. 
+ // fixed_vector_allocator(const fixed_vector_allocator&) + // { + // } + + // Disabled because there is nothing to do. No member data. + //fixed_vector_allocator& operator=(const fixed_vector_allocator& x) + //{ + // return *this; + //} + + void* allocate(size_t /*n*/, int /*flags*/ = 0) + { + EASTL_ASSERT(false); // A fixed_vector should not reallocate, else the user has exhausted its space. + return NULL; + } + + void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0) + { + EASTL_ASSERT(false); + return NULL; + } + + void deallocate(void* /*p*/, size_t /*n*/) + { + } + + const char* get_name() const + { + return EASTL_FIXED_POOL_DEFAULT_NAME; + } + + void set_name(const char* /*pName*/) + { + } + + const overflow_allocator_type& get_overflow_allocator() const EA_NOEXCEPT + { + EASTL_ASSERT(false); + overflow_allocator_type* pNULL = NULL; + return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile. + } + + overflow_allocator_type& get_overflow_allocator() EA_NOEXCEPT + { + EASTL_ASSERT(false); + overflow_allocator_type* pNULL = NULL; + return *pNULL; // This is not pretty, but it should never execute. This is here only to allow this to compile. + } + + void set_overflow_allocator(const overflow_allocator_type& /*allocator*/) + { + // We don't have an overflow allocator. + EASTL_ASSERT(false); + } + + void copy_overflow_allocator(const this_type&) // This function exists so we can write generic code that works for allocators that do and don't have overflow allocators. + { + // We don't have an overflow allocator. 
+ } + + }; // fixed_vector_allocator + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const fixed_vector_allocator& a, + const fixed_vector_allocator& b) + { + return (&a == &b); // They are only equal if they are the same object. + } + + + template + inline bool operator!=(const fixed_vector_allocator& a, + const fixed_vector_allocator& b) + { + return (&a != &b); // They are only equal if they are the same object. + } + + + + + + /////////////////////////////////////////////////////////////////////////// + // fixed_swap + /////////////////////////////////////////////////////////////////////////// + + /// fixed_swap + /// + /// This function implements a swap suitable for fixed containers. + /// This is an issue because the size of fixed containers can be very + /// large, due to their having the container buffer within themselves. + /// Note that we are referring to sizeof(container) and not the total + /// sum of memory allocated by the container from the heap. + /// + /// + /// This implementation switches at compile time whether or not the + /// temporary is allocated on the stack or the heap as some compilers + /// will allocate the (large) stack frame regardless of which code + /// path is picked. + template + class fixed_swap_impl + { + public: + static void swap(Container& a, Container& b); + }; + + + template + class fixed_swap_impl + { + public: + static void swap(Container& a, Container& b) + { + Container temp(EASTL_MOVE(a)); // Can't use global swap because that could + a = EASTL_MOVE(b); // itself call this swap function in return. 
+ b = EASTL_MOVE(temp); + } + }; + + + template + class fixed_swap_impl + { + public: + static void swap(Container& a, Container& b) + { + EASTLAllocatorType allocator(*EASTLAllocatorDefault(), EASTL_TEMP_DEFAULT_NAME); + void* const pMemory = allocator.allocate(sizeof(a)); + + if(pMemory) + { + Container* pTemp = ::new(pMemory) Container(EASTL_MOVE(a)); + a = EASTL_MOVE(b); + b = EASTL_MOVE(*pTemp); + + pTemp->~Container(); + allocator.deallocate(pMemory, sizeof(a)); + } + } + }; + + + template + void fixed_swap(Container& a, Container& b) + { + return fixed_swap_impl= EASTL_MAX_STACK_USAGE>::swap(a, b); + } + + + +} // namespace eastl + + +#if defined(_MSC_VER) + #pragma warning(pop) +#endif + + +#endif // Header include guard + diff --git a/include/EASTL/internal/function.h b/include/EASTL/internal/function.h new file mode 100644 index 0000000..6e857f0 --- /dev/null +++ b/include/EASTL/internal/function.h @@ -0,0 +1,161 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FUNCTION_H +#define EASTL_FUNCTION_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include + +namespace eastl +{ + + /// EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE + /// + /// Defines the size of the SSO buffer which is used to hold the specified capture state of the callable. 
+ /// + #ifndef EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE + #define EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE (2 * sizeof(void*)) + #endif + + static_assert(EASTL_FUNCTION_DEFAULT_CAPTURE_SSO_SIZE >= sizeof(void*), "functor storage must be able to hold at least a pointer!"); + + template + class function; + + template + class function : public internal::function_detail + { + private: + using Base = internal::function_detail; + public: + using typename Base::result_type; + + function() EA_NOEXCEPT = default; + function(std::nullptr_t p) EA_NOEXCEPT + : Base(p) + { + } + + function(const function& other) + : Base(other) + { + } + + function(function&& other) + : Base(eastl::move(other)) + { + } + + template + function(Functor functor) + : Base(eastl::move(functor)) + { + } + + ~function() EA_NOEXCEPT = default; + + function& operator=(const function& other) + { + Base::operator=(other); + return *this; + } + + function& operator=(function&& other) + { + Base::operator=(eastl::move(other)); + return *this; + } + + function& operator=(std::nullptr_t p) EA_NOEXCEPT + { + Base::operator=(p); + return *this; + } + + template + function& operator=(Functor&& functor) + { + Base::operator=(eastl::forward(functor)); + return *this; + } + + template + function& operator=(eastl::reference_wrapper f) EA_NOEXCEPT + { + Base::operator=(f); + return *this; + } + + void swap(function& other) EA_NOEXCEPT + { + Base::swap(other); + } + + explicit operator bool() const EA_NOEXCEPT + { + return Base::operator bool(); + } + + R operator ()(Args... 
args) const + { + return Base::operator ()(eastl::forward(args)...); + } + + #if EASTL_RTTI_ENABLED + const std::type_info& target_type() const EA_NOEXCEPT + { + return Base::target_type(); + } + + template + Functor* target() EA_NOEXCEPT + { + return Base::target(); + } + + template + const Functor* target() const EA_NOEXCEPT + { + return Base::target(); + } + #endif // EASTL_RTTI_ENABLED + }; + + template + bool operator==(const function& f, std::nullptr_t) EA_NOEXCEPT + { + return !f; + } + + template + bool operator==(std::nullptr_t, const function& f) EA_NOEXCEPT + { + return !f; + } + + template + bool operator!=(const function& f, std::nullptr_t) EA_NOEXCEPT + { + return !!f; + } + + template + bool operator!=(std::nullptr_t, const function& f) EA_NOEXCEPT + { + return !!f; + } + + template + void swap(function& lhs, function& rhs) + { + lhs.swap(rhs); + } + +} // namespace eastl + +#endif // EASTL_FUNCTION_H diff --git a/include/EASTL/internal/function_detail.h b/include/EASTL/internal/function_detail.h new file mode 100644 index 0000000..17f281d --- /dev/null +++ b/include/EASTL/internal/function_detail.h @@ -0,0 +1,584 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_FUNCTION_DETAIL_H +#define EASTL_FUNCTION_DETAIL_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#if EASTL_RTTI_ENABLED + #include +#endif + +#if EASTL_EXCEPTIONS_ENABLED + EA_DISABLE_ALL_VC_WARNINGS() + #include + #include + EA_RESTORE_ALL_VC_WARNINGS() +#endif + +namespace eastl +{ + #if EASTL_EXCEPTIONS_ENABLED + class bad_function_call : public std::exception + { + public: + bad_function_call() EA_NOEXCEPT = default; + + const char* what() const EA_NOEXCEPT EA_OVERRIDE + { + return "bad function_detail call"; + } + }; + #endif + + namespace internal + { + class unused_class {}; + + union functor_storage_alignment + { + void (*unused_func_ptr)(void); + void (unused_class::*unused_func_mem_ptr)(void); + void* unused_ptr; + }; + + template + struct functor_storage + { + static_assert(SIZE_IN_BYTES >= 0, "local buffer storage cannot have a negative size!"); + template + Ret& GetStorageTypeRef() const + { + return *reinterpret_cast(const_cast(&storage[0])); + } + + union + { + functor_storage_alignment align; + char storage[SIZE_IN_BYTES]; + }; + }; + + template <> + struct functor_storage<0> + { + template + Ret& GetStorageTypeRef() const + { + return *reinterpret_cast(const_cast(&storage[0])); + } + + union + { + functor_storage_alignment align; + char storage[sizeof(functor_storage_alignment)]; + }; + }; + + template + struct is_functor_inplace_allocatable + { + static constexpr bool value = + sizeof(Functor) <= sizeof(functor_storage) && + (eastl::alignment_of_v> % eastl::alignment_of_v) == 0; + }; + + + /// function_base_detail + /// + template + class function_base_detail + { + public: + using FunctorStorageType = functor_storage; + FunctorStorageType mStorage; + + enum ManagerOperations : int + { + MGROPS_DESTRUCT_FUNCTOR = 0, + 
MGROPS_COPY_FUNCTOR = 1, + MGROPS_MOVE_FUNCTOR = 2, + #if EASTL_RTTI_ENABLED + MGROPS_GET_TYPE_INFO = 3, + MGROPS_GET_FUNC_PTR = 4, + #endif + }; + + // Functor can be allocated inplace + template + class function_manager_base + { + public: + + static Functor* GetFunctorPtr(const FunctorStorageType& storage) EA_NOEXCEPT + { + return &(storage.template GetStorageTypeRef()); + } + + template + static void CreateFunctor(FunctorStorageType& storage, T&& functor) + { + ::new (GetFunctorPtr(storage)) Functor(eastl::forward(functor)); + } + + static void DestructFunctor(FunctorStorageType& storage) + { + GetFunctorPtr(storage)->~Functor(); + } + + static void CopyFunctor(FunctorStorageType& to, const FunctorStorageType& from) + { + ::new (GetFunctorPtr(to)) Functor(*GetFunctorPtr(from)); + } + + static void MoveFunctor(FunctorStorageType& to, FunctorStorageType& from) EA_NOEXCEPT + { + ::new (GetFunctorPtr(to)) Functor(eastl::move(*GetFunctorPtr(from))); + } + + static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT + { + switch (ops) + { + case MGROPS_DESTRUCT_FUNCTOR: + { + DestructFunctor(*static_cast(to)); + } + break; + case MGROPS_COPY_FUNCTOR: + { + CopyFunctor(*static_cast(to), + *static_cast(from)); + } + break; + case MGROPS_MOVE_FUNCTOR: + { + MoveFunctor(*static_cast(to), *static_cast(from)); + DestructFunctor(*static_cast(from)); + } + break; + default: + break; + } + return nullptr; + } + }; + + // Functor is allocated on the heap + template + class function_manager_base::value>::type> + { + public: + static Functor* GetFunctorPtr(const FunctorStorageType& storage) EA_NOEXCEPT + { + return storage.template GetStorageTypeRef(); + } + + static Functor*& GetFunctorPtrRef(const FunctorStorageType& storage) EA_NOEXCEPT + { + return storage.template GetStorageTypeRef(); + } + + template + static void CreateFunctor(FunctorStorageType& storage, T&& functor) + { + auto& allocator = *EASTLAllocatorDefault(); + 
Functor* func = static_cast(allocator.allocate(sizeof(Functor), alignof(Functor), 0)); + + #if EASTL_EXCEPTIONS_ENABLED + if (!func) + { + throw std::bad_alloc(); + } + #else + EASTL_ASSERT_MSG(func != nullptr, "Allocation failed!"); + #endif + + ::new (static_cast(func)) Functor(eastl::forward(functor)); + GetFunctorPtrRef(storage) = func; + } + + static void DestructFunctor(FunctorStorageType& storage) + { + Functor* func = GetFunctorPtr(storage); + if (func) + { + auto& allocator = *EASTLAllocatorDefault(); + func->~Functor(); + allocator.deallocate(static_cast(func), sizeof(Functor)); + } + } + + static void CopyFunctor(FunctorStorageType& to, const FunctorStorageType& from) + { + auto& allocator = *EASTLAllocatorDefault(); + Functor* func = static_cast(allocator.allocate(sizeof(Functor), alignof(Functor), 0)); + #if EASTL_EXCEPTIONS_ENABLED + if (!func) + { + throw std::bad_alloc(); + } + #else + EASTL_ASSERT_MSG(func != nullptr, "Allocation failed!"); + #endif + ::new (static_cast(func)) Functor(*GetFunctorPtr(from)); + GetFunctorPtrRef(to) = func; + } + + static void MoveFunctor(FunctorStorageType& to, FunctorStorageType& from) EA_NOEXCEPT + { + Functor* func = GetFunctorPtr(from); + GetFunctorPtrRef(to) = func; + GetFunctorPtrRef(from) = nullptr; + } + + static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT + { + switch (ops) + { + case MGROPS_DESTRUCT_FUNCTOR: + { + DestructFunctor(*static_cast(to)); + } + break; + case MGROPS_COPY_FUNCTOR: + { + CopyFunctor(*static_cast(to), + *static_cast(from)); + } + break; + case MGROPS_MOVE_FUNCTOR: + { + MoveFunctor(*static_cast(to), *static_cast(from)); + // Moved ptr, no need to destruct ourselves + } + break; + default: + break; + } + return nullptr; + } + }; + + template + class function_manager final : public function_manager_base + { + public: + using Base = function_manager_base; + + #if EASTL_RTTI_ENABLED + static void* GetTypeInfo() EA_NOEXCEPT + { + 
return reinterpret_cast(const_cast(&typeid(Functor))); + } + + static void* Manager(void* to, void* from, typename function_base_detail::ManagerOperations ops) EA_NOEXCEPT + { + switch (ops) + { + case MGROPS_GET_TYPE_INFO: + { + return GetTypeInfo(); + } + break; + case MGROPS_GET_FUNC_PTR: + { + return static_cast(Base::GetFunctorPtr(*static_cast(to))); + } + break; + default: + { + return Base::Manager(to, from, ops); + } + break; + } + } + #endif // EASTL_RTTI_ENABLED + + static R Invoker(const FunctorStorageType& functor, Args... args) + { + return eastl::invoke(*Base::GetFunctorPtr(functor), eastl::forward(args)...); + } + }; + + function_base_detail() EA_NOEXCEPT = default; + ~function_base_detail() EA_NOEXCEPT = default; + }; + + #define EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, BASE, MYSELF) \ + typename eastl::enable_if_t && \ + !eastl::is_base_of_v> && \ + !eastl::is_same_v, MYSELF>> + + #define EASTL_INTERNAL_FUNCTION_DETAIL_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, MYSELF) \ + EASTL_INTERNAL_FUNCTION_VALID_FUNCTION_ARGS(FUNCTOR, RET, ARGS, MYSELF, MYSELF) + + + /// function_detail + /// + template + class function_detail; + + template + class function_detail : public function_base_detail + { + public: + using result_type = R; + + protected: + using Base = function_base_detail; + using FunctorStorageType = typename function_base_detail::FunctorStorageType; + using Base::mStorage; + + public: + function_detail() EA_NOEXCEPT = default; + function_detail(std::nullptr_t) EA_NOEXCEPT {} + + function_detail(const function_detail& other) + { + if (this != &other) + { + Copy(other); + } + } + + function_detail(function_detail&& other) + { + if (this != &other) + { + Move(eastl::move(other)); + } + } + + template + function_detail(Functor functor) + { + CreateForwardFunctor(eastl::move(functor)); + } + + ~function_detail() EA_NOEXCEPT + { + Destroy(); + } + + function_detail& operator=(const function_detail& other) + { + if (this != &other) + 
{ + Destroy(); + Copy(other); + } + + return *this; + } + + function_detail& operator=(function_detail&& other) + { + if(this != &other) + { + Destroy(); + Move(eastl::move(other)); + } + + return *this; + } + + function_detail& operator=(std::nullptr_t) EA_NOEXCEPT + { + Destroy(); + mMgrFuncPtr = nullptr; + mInvokeFuncPtr = nullptr; + + return *this; + } + + template + function_detail& operator=(Functor&& functor) + { + Destroy(); + CreateForwardFunctor(eastl::forward(functor)); + return *this; + } + + template + function_detail& operator=(eastl::reference_wrapper f) EA_NOEXCEPT + { + Destroy(); + CreateForwardFunctor(f); + return *this; + } + + void swap(function_detail& other) EA_NOEXCEPT + { + if(this == &other) + return; + + FunctorStorageType tempStorage; + if (other.HaveManager()) + { + (void)(*other.mMgrFuncPtr)(static_cast(&tempStorage), static_cast(&other.mStorage), + Base::ManagerOperations::MGROPS_MOVE_FUNCTOR); + } + + if (HaveManager()) + { + (void)(*mMgrFuncPtr)(static_cast(&other.mStorage), static_cast(&mStorage), + Base::ManagerOperations::MGROPS_MOVE_FUNCTOR); + } + + if (other.HaveManager()) + { + (void)(*other.mMgrFuncPtr)(static_cast(&mStorage), static_cast(&tempStorage), + Base::ManagerOperations::MGROPS_MOVE_FUNCTOR); + } + + eastl::swap(mMgrFuncPtr, other.mMgrFuncPtr); + eastl::swap(mInvokeFuncPtr, other.mInvokeFuncPtr); + } + + explicit operator bool() const EA_NOEXCEPT + { + return HaveManager(); + } + + R operator ()(Args... 
args) const + { + #if EASTL_EXCEPTIONS_ENABLED + if (!HaveManager()) + { + throw eastl::bad_function_call(); + } + #else + EASTL_ASSERT_MSG(HaveManager(), "function_detail call on an empty function_detail"); + #endif + return (*mInvokeFuncPtr)(mStorage, eastl::forward(args)...); + } + + #if EASTL_RTTI_ENABLED + const std::type_info& target_type() const EA_NOEXCEPT + { + if (HaveManager()) + { + void* ret = (*mMgrFuncPtr)(nullptr, nullptr, Base::ManagerOperations::MGROPS_GET_TYPE_INFO); + return *(static_cast(ret)); + } + return typeid(void); + } + + template + Functor* target() EA_NOEXCEPT + { + if (HaveManager() && target_type() == typeid(Functor)) + { + void* ret = (*mMgrFuncPtr)(static_cast(&mStorage), nullptr, + Base::ManagerOperations::MGROPS_GET_FUNC_PTR); + return ret ? static_cast(ret) : nullptr; + } + return nullptr; + } + + template + const Functor* target() const EA_NOEXCEPT + { + if (HaveManager() && target_type() == typeid(Functor)) + { + void* ret = (*mMgrFuncPtr)(static_cast(&mStorage), nullptr, + Base::ManagerOperations::MGROPS_GET_FUNC_PTR); + return ret ? 
static_cast(ret) : nullptr; + } + return nullptr; + } + #endif // EASTL_RTTI_ENABLED + + private: + bool HaveManager() const EA_NOEXCEPT + { + return (mMgrFuncPtr != nullptr); + } + + void Destroy() EA_NOEXCEPT + { + if (HaveManager()) + { + (void)(*mMgrFuncPtr)(static_cast(&mStorage), nullptr, + Base::ManagerOperations::MGROPS_DESTRUCT_FUNCTOR); + } + } + + void Copy(const function_detail& other) + { + if (other.HaveManager()) + { + (void)(*other.mMgrFuncPtr)(static_cast(&mStorage), + const_cast(static_cast(&other.mStorage)), + Base::ManagerOperations::MGROPS_COPY_FUNCTOR); + } + + mMgrFuncPtr = other.mMgrFuncPtr; + mInvokeFuncPtr = other.mInvokeFuncPtr; + } + + void Move(function_detail&& other) + { + if (other.HaveManager()) + { + (void)(*other.mMgrFuncPtr)(static_cast(&mStorage), static_cast(&other.mStorage), + Base::ManagerOperations::MGROPS_MOVE_FUNCTOR); + } + + mMgrFuncPtr = other.mMgrFuncPtr; + mInvokeFuncPtr = other.mInvokeFuncPtr; + other.mMgrFuncPtr = nullptr; + other.mInvokeFuncPtr = nullptr; + } + + template + void CreateForwardFunctor(Functor&& functor) + { + using DecayedFunctorType = typename eastl::decay::type; + using FunctionManagerType = typename Base::template function_manager; + + if (internal::is_null(functor)) + { + mMgrFuncPtr = nullptr; + mInvokeFuncPtr = nullptr; + } + else + { + mMgrFuncPtr = &FunctionManagerType::Manager; + mInvokeFuncPtr = &FunctionManagerType::Invoker; + FunctionManagerType::CreateFunctor(mStorage, eastl::forward(functor)); + } + } + + private: + typedef void* (*ManagerFuncPtr)(void*, void*, typename Base::ManagerOperations); + typedef R (*InvokeFuncPtr)(const FunctorStorageType&, Args...); + + ManagerFuncPtr mMgrFuncPtr = nullptr; + InvokeFuncPtr mInvokeFuncPtr = nullptr; + }; + + } // namespace internal + +} // namespace eastl + +#endif // EASTL_FUNCTION_DETAIL_H diff --git a/include/EASTL/internal/function_help.h b/include/EASTL/internal/function_help.h new file mode 100644 index 0000000..04481d3 --- /dev/null +++ 
b/include/EASTL/internal/function_help.h @@ -0,0 +1,51 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_INTERNAL_FUNCTION_HELP_H +#define EASTL_INTERNAL_FUNCTION_HELP_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include + +namespace eastl +{ + namespace internal + { + + ////////////////////////////////////////////////////////////////////// + // is_null + // + template + bool is_null(const T&) + { + return false; + } + + template + bool is_null(Result (*const& function_pointer)(Arguments...)) + { + return function_pointer == nullptr; + } + + template + bool is_null(Result (Class::*const& function_pointer)(Arguments...)) + { + return function_pointer == nullptr; + } + + template + bool is_null(Result (Class::*const& function_pointer)(Arguments...) const) + { + return function_pointer == nullptr; + } + + } // namespace internal +} // namespace eastl + +#endif // Header include guard + diff --git a/include/EASTL/internal/functional_base.h b/include/EASTL/internal/functional_base.h new file mode 100644 index 0000000..669e5fc --- /dev/null +++ b/include/EASTL/internal/functional_base.h @@ -0,0 +1,389 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_FUNCTIONAL_BASE_H +#define EASTL_INTERNAL_FUNCTIONAL_BASE_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include +#include + +namespace eastl +{ + // foward declaration for swap + template + inline void swap(T& a, T& b) + EA_NOEXCEPT_IF(eastl::is_nothrow_move_constructible::value&& eastl::is_nothrow_move_assignable::value); + + + /// invoke + /// + /// invoke is a generalized function-call operator which works on function pointers, member function + /// pointers, callable objects and member pointers. + /// + /// For (member/non-member) function pointers and callable objects, it returns the result of calling + /// the function/object with the specified arguments. For member data pointers, it simply returns + /// the member. + /// + /// Note that there are also reference_wrapper specializations of invoke, which need to be defined + /// later since reference_wrapper uses invoke in its implementation. Those are defined immediately + /// after the definition of reference_wrapper. + /// + /// http://en.cppreference.com/w/cpp/utility/functional/invoke + /// + template + auto invoke_impl(R C::*func, T&& obj, Args&&... args) -> + typename enable_if>::value, + decltype((eastl::forward(obj).*func)(eastl::forward(args)...))>::type + { + return (eastl::forward(obj).*func)(eastl::forward(args)...); + } + + template + auto invoke_impl(F&& func, Args&&... args) -> decltype(eastl::forward(func)(eastl::forward(args)...)) + { + return eastl::forward(func)(eastl::forward(args)...); + } + + + template + auto invoke_impl(R C::*func, T&& obj, Args&&... 
args) -> decltype(((*eastl::forward(obj)).*func)(eastl::forward(args)...)) + { + return ((*eastl::forward(obj)).*func)(eastl::forward(args)...); + } + + template + auto invoke_impl(M C::*member, T&& obj) -> + typename enable_if< + is_base_of>::value, + decltype(obj.*member) + >::type + { + return obj.*member; + } + + template + auto invoke_impl(M C::*member, T&& obj) -> decltype((*eastl::forward(obj)).*member) + { + return (*eastl::forward(obj)).*member; + } + + template + inline decltype(auto) invoke(F&& func, Args&&... args) + { + return invoke_impl(eastl::forward(func), eastl::forward(args)...); + } + + template + struct invoke_result_impl { + }; + + template + struct invoke_result_impl>(), eastl::declval()...))>, Args...> + { + typedef decltype(invoke_impl(eastl::declval>(), eastl::declval()...)) type; + }; + + template + struct invoke_result : public invoke_result_impl {}; + + #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + using invoke_result_t = typename invoke_result::type; + #endif + + template + struct is_invocable_impl : public eastl::false_type {}; + + template + struct is_invocable_impl::type>, Args...> : public eastl::true_type {}; + + template + struct is_invocable : public is_invocable_impl {}; + + template + struct is_invocable_r_impl : public eastl::false_type {}; + + template + struct is_invocable_r_impl::type>, Args...> + : public is_convertible::type, R> {}; + + template + struct is_invocable_r : public is_invocable_r_impl {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_v = is_invocable::value; + + template + EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_invocable_r_v = is_invocable_r::value; + #endif + + /// allocator_arg_t + /// + /// allocator_arg_t is an empty class type used to disambiguate the overloads of + /// constructors and member functions of allocator-aware objects, including tuple, + /// function, promise, and packaged_task. 
+ /// http://en.cppreference.com/w/cpp/memory/allocator_arg_t + /// + struct allocator_arg_t + {}; + + + /// allocator_arg + /// + /// allocator_arg is a constant of type allocator_arg_t used to disambiguate, at call site, + /// the overloads of the constructors and member functions of allocator-aware objects, + /// such as tuple, function, promise, and packaged_task. + /// http://en.cppreference.com/w/cpp/memory/allocator_arg + /// + #if !defined(EA_COMPILER_NO_CONSTEXPR) + EA_CONSTEXPR allocator_arg_t allocator_arg = allocator_arg_t(); + #endif + + + template + struct unary_function + { + typedef Argument argument_type; + typedef Result result_type; + }; + + + template + struct binary_function + { + typedef Argument1 first_argument_type; + typedef Argument2 second_argument_type; + typedef Result result_type; + }; + + + /// less + template + struct less : public binary_function + { + EA_CPP14_CONSTEXPR bool operator()(const T& a, const T& b) const + { return a < b; } + }; + + // http://en.cppreference.com/w/cpp/utility/functional/less_void + template <> + struct less + { + template + EA_CPP14_CONSTEXPR auto operator()(A&& a, B&& b) const + -> decltype(eastl::forward(a) < eastl::forward(b)) + { return eastl::forward(a) < eastl::forward(b); } + }; + + + /// reference_wrapper + template + class reference_wrapper + { + public: + typedef T type; + + reference_wrapper(T&) EA_NOEXCEPT; + reference_wrapper(T&&) = delete; + reference_wrapper(const reference_wrapper& x) EA_NOEXCEPT; + + reference_wrapper& operator=(const reference_wrapper& x) EA_NOEXCEPT; + + operator T& () const EA_NOEXCEPT; + T& get() const EA_NOEXCEPT; + + template + typename eastl::result_of::type operator() (ArgTypes&&...) 
const; + + private: + T* val; + }; + + template + reference_wrapper::reference_wrapper(T &v) EA_NOEXCEPT + : val(addressof(v)) + {} + + template + reference_wrapper::reference_wrapper(const reference_wrapper& other) EA_NOEXCEPT + : val(other.val) + {} + + template + reference_wrapper& reference_wrapper::operator=(const reference_wrapper& other) EA_NOEXCEPT + { + val = other.val; + return *this; + } + + template + reference_wrapper::operator T&() const EA_NOEXCEPT + { + return *val; + } + + template + T& reference_wrapper::get() const EA_NOEXCEPT + { + return *val; + } + + template + template + typename eastl::result_of::type reference_wrapper::operator() (ArgTypes&&... args) const + { + return eastl::invoke(*val, eastl::forward(args)...); + } + + // reference_wrapper-specific utilties + template + reference_wrapper ref(T& t) EA_NOEXCEPT + { + return eastl::reference_wrapper(t); + } + + template + void ref(const T&&) = delete; + + template + reference_wrapper ref(reference_wrappert) EA_NOEXCEPT + { + return eastl::ref(t.get()); + } + + template + reference_wrapper cref(const T& t) EA_NOEXCEPT + { + return eastl::reference_wrapper(t); + } + + template + void cref(const T&&) = delete; + + template + reference_wrapper cref(reference_wrapper t) EA_NOEXCEPT + { + return eastl::cref(t.get()); + } + + + // reference_wrapper-specific type traits + template + struct is_reference_wrapper_helper + : public eastl::false_type {}; + + template + struct is_reference_wrapper_helper > + : public eastl::true_type {}; + + template + struct is_reference_wrapper + : public eastl::is_reference_wrapper_helper::type> {}; + + + // Helper which adds a reference to a type when given a reference_wrapper of that type. 
+ template + struct remove_reference_wrapper + { typedef T type; }; + + template + struct remove_reference_wrapper< eastl::reference_wrapper > + { typedef T& type; }; + + template + struct remove_reference_wrapper< const eastl::reference_wrapper > + { typedef T& type; }; + + // reference_wrapper specializations of invoke + // These have to come after reference_wrapper is defined, but reference_wrapper needs to have a + // definition of invoke, so these specializations need to come after everything else has been defined. + template + auto invoke_impl(R (C::*func)(Args...), T&& obj, Args&&... args) -> + typename enable_if::type>::value, + decltype((obj.get().*func)(eastl::forward(args)...))>::type + { + return (obj.get().*func)(eastl::forward(args)...); + } + + template + auto invoke_impl(M(C::*member), T&& obj) -> + typename enable_if::type>::value, + decltype(obj.get().*member)>::type + { + return obj.get().*member; + } + + + /////////////////////////////////////////////////////////////////////// + // bind + /////////////////////////////////////////////////////////////////////// + + /// bind1st + /// + template + class binder1st : public unary_function + { + protected: + typename Operation::first_argument_type value; + Operation op; + + public: + binder1st(const Operation& x, const typename Operation::first_argument_type& y) + : value(y), op(x) { } + + typename Operation::result_type operator()(const typename Operation::second_argument_type& x) const + { return op(value, x); } + + typename Operation::result_type operator()(typename Operation::second_argument_type& x) const + { return op(value, x); } + }; + + + template + inline binder1st bind1st(const Operation& op, const T& x) + { + typedef typename Operation::first_argument_type value; + return binder1st(op, value(x)); + } + + + /// bind2nd + /// + template + class binder2nd : public unary_function + { + protected: + Operation op; + typename Operation::second_argument_type value; + + public: + binder2nd(const 
Operation& x, const typename Operation::second_argument_type& y) + : op(x), value(y) { } + + typename Operation::result_type operator()(const typename Operation::first_argument_type& x) const + { return op(x, value); } + + typename Operation::result_type operator()(typename Operation::first_argument_type& x) const + { return op(x, value); } + }; + + + template + inline binder2nd bind2nd(const Operation& op, const T& x) + { + typedef typename Operation::second_argument_type value; + return binder2nd(op, value(x)); + } + +} // namespace eastl + +#endif // Header include guard diff --git a/include/EASTL/internal/generic_iterator.h b/include/EASTL/internal/generic_iterator.h new file mode 100644 index 0000000..8aa630f --- /dev/null +++ b/include/EASTL/internal/generic_iterator.h @@ -0,0 +1,229 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// Implements a generic iterator from a given iteratable type, such as a pointer. +// We cannot put this file into our own iterator.h file because we need to +// still be able to use this file when we have our iterator.h disabled. +// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_GENERIC_ITERATOR_H +#define EASTL_INTERNAL_GENERIC_ITERATOR_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include + + +#ifdef _MSC_VER + #pragma warning(push) // VC++ generates a bogus warning that you cannot code away. + #pragma warning(disable: 4619) // There is no warning number 'number'. + #pragma warning(disable: 4217) // Member template functions cannot be used for copy-assignment or copy-construction. 
+#endif + + +namespace eastl +{ + + /// generic_iterator + /// + /// Converts something which can be iterated into a formal iterator. + /// While this class' primary purpose is to allow the conversion of + /// a pointer to an iterator, you can convert anything else to an + /// iterator by defining an iterator_traits<> specialization for that + /// object type. See EASTL iterator.h for this. + /// + /// Example usage: + /// typedef generic_iterator IntArrayIterator; + /// typedef generic_iterator IntArrayIteratorOther; + /// + template + class generic_iterator + { + protected: + Iterator mIterator; + + public: + typedef typename eastl::iterator_traits::iterator_category iterator_category; + typedef typename eastl::iterator_traits::value_type value_type; + typedef typename eastl::iterator_traits::difference_type difference_type; + typedef typename eastl::iterator_traits::reference reference; + typedef typename eastl::iterator_traits::pointer pointer; + typedef Iterator iterator_type; + typedef iterator_type wrapped_iterator_type; // This is not in the C++ Standard; it's used by use to identify it as a wrapping iterator type. 
+ typedef Container container_type; + typedef generic_iterator this_type; + + generic_iterator() + : mIterator(iterator_type()) { } + + explicit generic_iterator(const iterator_type& x) + : mIterator(x) { } + + this_type& operator=(const iterator_type& x) + { mIterator = x; return *this; } + + template + generic_iterator(const generic_iterator& x) + : mIterator(x.base()) { } + + reference operator*() const + { return *mIterator; } + + pointer operator->() const + { return mIterator; } + + this_type& operator++() + { ++mIterator; return *this; } + + this_type operator++(int) + { return this_type(mIterator++); } + + this_type& operator--() + { --mIterator; return *this; } + + this_type operator--(int) + { return this_type(mIterator--); } + + reference operator[](const difference_type& n) const + { return mIterator[n]; } + + this_type& operator+=(const difference_type& n) + { mIterator += n; return *this; } + + this_type operator+(const difference_type& n) const + { return this_type(mIterator + n); } + + this_type& operator-=(const difference_type& n) + { mIterator -= n; return *this; } + + this_type operator-(const difference_type& n) const + { return this_type(mIterator - n); } + + const iterator_type& base() const + { return mIterator; } + + }; // class generic_iterator + + + template + inline bool operator==(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() == rhs.base(); } + + template + inline bool operator==(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() == rhs.base(); } + + template + inline bool operator!=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() != rhs.base(); } + + template + inline bool operator!=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() != rhs.base(); } + + template + inline bool operator<(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() < rhs.base(); } + + template + inline bool 
operator<(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() < rhs.base(); } + + template + inline bool operator>(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() > rhs.base(); } + + template + inline bool operator>(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() > rhs.base(); } + + template + inline bool operator<=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() <= rhs.base(); } + + template + inline bool operator<=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() <= rhs.base(); } + + template + inline bool operator>=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() >= rhs.base(); } + + template + inline bool operator>=(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() >= rhs.base(); } + + template + inline typename generic_iterator::difference_type + operator-(const generic_iterator& lhs, const generic_iterator& rhs) + { return lhs.base() - rhs.base(); } + + template + inline generic_iterator + operator+(typename generic_iterator::difference_type n, const generic_iterator& x) + { return generic_iterator(x.base() + n); } + + + + /// is_generic_iterator + /// + /// Tells if an iterator is one of these generic_iterators. This is useful if you want to + /// write code that uses miscellaneous iterators but wants to tell if they are generic_iterators. + /// A primary reason to do so is that you can get at the pointer within the generic_iterator. + /// + template + struct is_generic_iterator : public false_type { }; + + template + struct is_generic_iterator > : public true_type { }; + + + /// unwrap_generic_iterator + /// + /// Returns Iterator::get_base() if it's a generic_iterator, else returns Iterator as-is. 
+ /// + /// Example usage: + /// vector intVector; + /// eastl::generic_iterator::iterator> genericIterator(intVector.begin()); + /// vector::iterator it = unwrap_generic_iterator(genericIterator); + /// + template + inline typename eastl::is_iterator_wrapper_helper::value>::iterator_type unwrap_generic_iterator(Iterator it) + { return eastl::is_iterator_wrapper_helper::value>::get_base(it); } + + +} // namespace eastl + + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +#endif // Header include guard + + + + + + + + + + + + + + + + diff --git a/include/EASTL/internal/hashtable.h b/include/EASTL/internal/hashtable.h new file mode 100644 index 0000000..d45c432 --- /dev/null +++ b/include/EASTL/internal/hashtable.h @@ -0,0 +1,3235 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a hashtable, much like the C++11 unordered_set/unordered_map. +// proposed classes. +// The primary distinctions between this hashtable and C++11 unordered containers are: +// - hashtable is savvy to an environment that doesn't have exception handling, +// as is sometimes the case with console or embedded environments. +// - hashtable is slightly more space-efficient than a conventional std hashtable +// implementation on platforms with 64 bit size_t. This is +// because std STL uses size_t (64 bits) in data structures whereby 32 bits +// of data would be fine. +// - hashtable can contain objects with alignment requirements. TR1 hash tables +// cannot do so without a bit of tedious non-portable effort. +// - hashtable supports debug memory naming natively. +// - hashtable provides a find function that lets you specify a type that is +// different from the hash table key type. 
This is particularly useful for +// the storing of string objects but finding them by char pointers. +// - hashtable provides a lower level insert function which lets the caller +// specify the hash code and optionally the node instance. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_HASHTABLE_H +#define EASTL_INTERNAL_HASHTABLE_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS() + #include + #include +EA_RESTORE_ALL_VC_WARNINGS() + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable: 4512) // 'class' : assignment operator could not be generated. + #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc + #pragma warning(disable: 4571) // catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. +#endif + + +namespace eastl +{ + + /// EASTL_HASHTABLE_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_HASHTABLE_DEFAULT_NAME + #define EASTL_HASHTABLE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hashtable" // Unless the user overrides something, this is "EASTL hashtable". + #endif + + + /// EASTL_HASHTABLE_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_HASHTABLE_DEFAULT_ALLOCATOR + #define EASTL_HASHTABLE_DEFAULT_ALLOCATOR allocator_type(EASTL_HASHTABLE_DEFAULT_NAME) + #endif + + + /// kHashtableAllocFlagBuckets + /// Flag to allocator which indicates that we are allocating buckets and not nodes. + enum { kHashtableAllocFlagBuckets = 0x00400000 }; + + + /// gpEmptyBucketArray + /// + /// A shared representation of an empty hash table. This is present so that + /// a new empty hashtable allocates no memory. 
It has two entries, one for + /// the first lone empty (NULL) bucket, and one for the non-NULL trailing sentinel. + /// + extern EASTL_API void* gpEmptyBucketArray[2]; + + + /// EASTL_MACRO_SWAP + /// + /// Use EASTL_MACRO_SWAP because GCC (at least v4.6-4.8) has a bug where it fails to compile eastl::swap(mpBucketArray, x.mpBucketArray). + /// + #define EASTL_MACRO_SWAP(Type, a, b) \ + { Type temp = a; a = b; b = temp; } + + + /// hash_node + /// + /// A hash_node stores an element in a hash table, much like a + /// linked list node stores an element in a linked list. + /// A hash_node additionally can, via template parameter, + /// store a hash code in the node to speed up hash calculations + /// and comparisons in some cases. + /// + template + struct hash_node; + + EA_DISABLE_VC_WARNING(4625 4626) // "copy constructor / assignment operator could not be generated because a base class copy constructor is inaccessible or deleted" + #ifdef EA_COMPILER_MSVC_2015 + EA_DISABLE_VC_WARNING(5026) // disable warning: "move constructor was implicitly defined as deleted" + #endif + template + struct hash_node + { + hash_node() = default; + hash_node(const hash_node&) = default; + hash_node(hash_node&&) = default; + + Value mValue; + hash_node* mpNext; + eastl_size_t mnHashCode; // See config.h for the definition of eastl_size_t, which defaults to size_t. + } EASTL_MAY_ALIAS; + + template + struct hash_node + { + hash_node() = default; + hash_node(const hash_node&) = default; + hash_node(hash_node&&) = default; + + Value mValue; + hash_node* mpNext; + } EASTL_MAY_ALIAS; + + #ifdef EA_COMPILER_MSVC_2015 + EA_RESTORE_VC_WARNING() + #endif + EA_RESTORE_VC_WARNING() + + + // has_hashcode_member + // + // Custom type-trait that checks for the existence of a class data member 'mnHashCode'. 
+ // + // In order to explicitly instantiate the hashtable without error we need to SFINAE away the functions that will + // fail to compile based on if the 'hash_node' contains a 'mnHashCode' member dictated by the hashtable template + // parameters. The hashtable support this level of configuration to allow users to choose which between the space vs. + // time optimization. + // + namespace Internal + { + template + struct has_hashcode_member + { + private: + template static eastl::no_type test(...); + template static eastl::yes_type test(decltype(U::mnHashCode)* = 0); + public: + static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); + }; + } + + static_assert(Internal::has_hashcode_member>::value, "contains a mnHashCode member"); + static_assert(!Internal::has_hashcode_member>::value, "doesn't contain a mnHashCode member"); + + // convenience macros to increase the readability of the code paths that must SFINAE on if the 'hash_node' + // contains the cached hashed value or not. + #define ENABLE_IF_HAS_HASHCODE(T, RT) typename eastl::enable_if::value, RT>::type* + #define ENABLE_IF_HASHCODE_EASTLSIZET(T, RT) typename eastl::enable_if::value, RT>::type + #define ENABLE_IF_TRUETYPE(T) typename eastl::enable_if::type* + #define DISABLE_IF_TRUETYPE(T) typename eastl::enable_if::type* + + + /// node_iterator_base + /// + /// Node iterators iterate nodes within a given bucket. + /// + /// We define a base class here because it is shared by both const and + /// non-const iterators. + /// + template + struct node_iterator_base + { + typedef hash_node node_type; + + node_type* mpNode; + + node_iterator_base(node_type* pNode) + : mpNode(pNode) { } + + void increment() + { mpNode = mpNode->mpNext; } + }; + + + + /// node_iterator + /// + /// Node iterators iterate nodes within a given bucket. + /// + /// The bConst parameter defines if the iterator is a const_iterator + /// or an iterator. 
+ /// + template + struct node_iterator : public node_iterator_base + { + public: + typedef node_iterator_base base_type; + typedef node_iterator this_type; + typedef typename base_type::node_type node_type; + typedef Value value_type; + typedef typename type_select::type pointer; + typedef typename type_select::type reference; + typedef ptrdiff_t difference_type; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + explicit node_iterator(node_type* pNode = NULL) + : base_type(pNode) { } + + node_iterator(const node_iterator& x) + : base_type(x.mpNode) { } + + reference operator*() const + { return base_type::mpNode->mValue; } + + pointer operator->() const + { return &(base_type::mpNode->mValue); } + + node_iterator& operator++() + { base_type::increment(); return *this; } + + node_iterator operator++(int) + { node_iterator temp(*this); base_type::increment(); return temp; } + + }; // node_iterator + + + + /// hashtable_iterator_base + /// + /// A hashtable_iterator iterates the entire hash table and not just + /// nodes within a single bucket. Users in general will use a hash + /// table iterator much more often, as it is much like other container + /// iterators (e.g. vector::iterator). + /// + /// We define a base class here because it is shared by both const and + /// non-const iterators. + /// + template + struct hashtable_iterator_base + { + public: + typedef hashtable_iterator_base this_type; + typedef hash_node node_type; + + protected: + template + friend class hashtable; + + template + friend struct hashtable_iterator; + + template + friend bool operator==(const hashtable_iterator_base&, const hashtable_iterator_base&); + + template + friend bool operator!=(const hashtable_iterator_base&, const hashtable_iterator_base&); + + node_type* mpNode; // Current node within current bucket. + node_type** mpBucket; // Current bucket. 
+ + public: + hashtable_iterator_base(node_type* pNode, node_type** pBucket) + : mpNode(pNode), mpBucket(pBucket) { } + + void increment_bucket() + { + ++mpBucket; + while(*mpBucket == NULL) // We store an extra bucket with some non-NULL value at the end + ++mpBucket; // of the bucket array so that finding the end of the bucket + mpNode = *mpBucket; // array is quick and simple. + } + + void increment() + { + mpNode = mpNode->mpNext; + + while(mpNode == NULL) + mpNode = *++mpBucket; + } + + }; // hashtable_iterator_base + + + + + /// hashtable_iterator + /// + /// A hashtable_iterator iterates the entire hash table and not just + /// nodes within a single bucket. Users in general will use a hash + /// table iterator much more often, as it is much like other container + /// iterators (e.g. vector::iterator). + /// + /// The bConst parameter defines if the iterator is a const_iterator + /// or an iterator. + /// + template + struct hashtable_iterator : public hashtable_iterator_base + { + public: + typedef hashtable_iterator_base base_type; + typedef hashtable_iterator this_type; + typedef hashtable_iterator this_type_non_const; + typedef typename base_type::node_type node_type; + typedef Value value_type; + typedef typename type_select::type pointer; + typedef typename type_select::type reference; + typedef ptrdiff_t difference_type; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + hashtable_iterator(node_type* pNode = NULL, node_type** pBucket = NULL) + : base_type(pNode, pBucket) { } + + hashtable_iterator(node_type** pBucket) + : base_type(*pBucket, pBucket) { } + + hashtable_iterator(const this_type_non_const& x) + : base_type(x.mpNode, x.mpBucket) { } + + reference operator*() const + { return base_type::mpNode->mValue; } + + pointer operator->() const + { return &(base_type::mpNode->mValue); } + + hashtable_iterator& operator++() + { base_type::increment(); return *this; } + + hashtable_iterator operator++(int) + { 
hashtable_iterator temp(*this); base_type::increment(); return temp; } + + const node_type* get_node() const + { return base_type::mpNode; } + + }; // hashtable_iterator + + + + + /// ht_distance + /// + /// This function returns the same thing as distance() for + /// forward iterators but returns zero for input iterators. + /// The reason why is that input iterators can only be read + /// once, and calling distance() on an input iterator destroys + /// the ability to read it. This ht_distance is used only for + /// optimization and so the code will merely work better with + /// forward iterators that input iterators. + /// + template + inline typename eastl::iterator_traits::difference_type + distance_fw_impl(Iterator /*first*/, Iterator /*last*/, EASTL_ITC_NS::input_iterator_tag) + { + return 0; + } + + template + inline typename eastl::iterator_traits::difference_type + distance_fw_impl(Iterator first, Iterator last, EASTL_ITC_NS::forward_iterator_tag) + { return eastl::distance(first, last); } + + template + inline typename eastl::iterator_traits::difference_type + ht_distance(Iterator first, Iterator last) + { + typedef typename eastl::iterator_traits::iterator_category IC; + return distance_fw_impl(first, last, IC()); + } + + + + + /// mod_range_hashing + /// + /// Implements the algorithm for conversion of a number in the range of + /// [0, SIZE_T_MAX] to the range of [0, BucketCount). + /// + struct mod_range_hashing + { + uint32_t operator()(size_t r, uint32_t n) const + { return r % n; } + }; + + + /// default_ranged_hash + /// + /// Default ranged hash function H. In principle it should be a + /// function object composed from objects of type H1 and H2 such that + /// h(k, n) = h2(h1(k), n), but that would mean making extra copies of + /// h1 and h2. So instead we'll just use a tag to tell class template + /// hashtable to do that composition. 
+ /// + struct default_ranged_hash{ }; + + + /// prime_rehash_policy + /// + /// Default value for rehash policy. Bucket size is (usually) the + /// smallest prime that keeps the load factor small enough. + /// + struct EASTL_API prime_rehash_policy + { + public: + float mfMaxLoadFactor; + float mfGrowthFactor; + mutable uint32_t mnNextResize; + + public: + prime_rehash_policy(float fMaxLoadFactor = 1.f) + : mfMaxLoadFactor(fMaxLoadFactor), mfGrowthFactor(2.f), mnNextResize(0) { } + + float GetMaxLoadFactor() const + { return mfMaxLoadFactor; } + + /// Return a bucket count no greater than nBucketCountHint, + /// Don't update member variables while at it. + static uint32_t GetPrevBucketCountOnly(uint32_t nBucketCountHint); + + /// Return a bucket count no greater than nBucketCountHint. + /// This function has a side effect of updating mnNextResize. + uint32_t GetPrevBucketCount(uint32_t nBucketCountHint) const; + + /// Return a bucket count no smaller than nBucketCountHint. + /// This function has a side effect of updating mnNextResize. + uint32_t GetNextBucketCount(uint32_t nBucketCountHint) const; + + /// Return a bucket count appropriate for nElementCount elements. + /// This function has a side effect of updating mnNextResize. + uint32_t GetBucketCount(uint32_t nElementCount) const; + + /// nBucketCount is current bucket count, nElementCount is current element count, + /// and nElementAdd is number of elements to be inserted. Do we need + /// to increase bucket count? If so, return pair(true, n), where + /// n is the new bucket count. If not, return pair(false, 0). + eastl::pair + GetRehashRequired(uint32_t nBucketCount, uint32_t nElementCount, uint32_t nElementAdd) const; + }; + + + + + + /////////////////////////////////////////////////////////////////////// + // Base classes for hashtable. We define these base classes because + // in some cases we want to do different things depending on the + // value of a policy class. 
In some cases the policy class affects + // which member functions and nested typedefs are defined; we handle that + // by specializing base class templates. Several of the base class templates + // need to access other members of class template hashtable, so we use + // the "curiously recurring template pattern" (parent class is templated + // on type of child class) for them. + /////////////////////////////////////////////////////////////////////// + + + /// rehash_base + /// + /// Give hashtable the get_max_load_factor functions if the rehash + /// policy is prime_rehash_policy. + /// + template + struct rehash_base { }; + + template + struct rehash_base + { + // Returns the max load factor, which is the load factor beyond + // which we rebuild the container with a new bucket count. + float get_max_load_factor() const + { + const Hashtable* const pThis = static_cast(this); + return pThis->rehash_policy().GetMaxLoadFactor(); + } + + // If you want to make the hashtable never rehash (resize), + // set the max load factor to be a very high number (e.g. 100000.f). + void set_max_load_factor(float fMaxLoadFactor) + { + Hashtable* const pThis = static_cast(this); + pThis->rehash_policy(prime_rehash_policy(fMaxLoadFactor)); + } + }; + + + + + /// hash_code_base + /// + /// Encapsulates two policy issues that aren't quite orthogonal. + /// (1) The difference between using a ranged hash function and using + /// the combination of a hash function and a range-hashing function. + /// In the former case we don't have such things as hash codes, so + /// we have a dummy type as placeholder. + /// (2) Whether or not we cache hash codes. Caching hash codes is + /// meaningless if we have a ranged hash function. This is because + /// a ranged hash function converts an object directly to its + /// bucket index without ostensibly using a hash code. + /// We also put the key extraction and equality comparison function + /// objects here, for convenience. 
+ /// + template + struct hash_code_base; + + + /// hash_code_base + /// + /// Specialization: ranged hash function, no caching hash codes. + /// H1 and H2 are provided but ignored. We define a dummy hash code type. + /// + template + struct hash_code_base + { + protected: + ExtractKey mExtractKey; // To do: Make this member go away entirely, as it never has any data. + Equal mEqual; // To do: Make this instance use zero space when it is zero size. + H mRangedHash; // To do: Make this instance use zero space when it is zero size + + public: + H1 hash_function() const + { return H1(); } + + Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard + { return mEqual; } // has specified in its hashtable (unordered_*) proposal. + + const Equal& key_eq() const + { return mEqual; } + + Equal& key_eq() + { return mEqual; } + + protected: + typedef void* hash_code_t; + typedef uint32_t bucket_index_t; + + hash_code_base(const ExtractKey& extractKey, const Equal& eq, const H1&, const H2&, const H& h) + : mExtractKey(extractKey), mEqual(eq), mRangedHash(h) { } + + hash_code_t get_hash_code(const Key& key) const + { + EA_UNUSED(key); + return NULL; + } + + bucket_index_t bucket_index(hash_code_t, uint32_t) const + { return (bucket_index_t)0; } + + bucket_index_t bucket_index(const Key& key, hash_code_t, uint32_t nBucketCount) const + { return (bucket_index_t)mRangedHash(key, nBucketCount); } + + bucket_index_t bucket_index(const hash_node* pNode, uint32_t nBucketCount) const + { return (bucket_index_t)mRangedHash(mExtractKey(pNode->mValue), nBucketCount); } + + bool compare(const Key& key, hash_code_t, hash_node* pNode) const + { return mEqual(key, mExtractKey(pNode->mValue)); } + + void copy_code(hash_node*, const hash_node*) const + { } // Nothing to do. 
+ + void set_code(hash_node* pDest, hash_code_t c) const + { + EA_UNUSED(pDest); + EA_UNUSED(c); + } + + void base_swap(hash_code_base& x) + { + eastl::swap(mExtractKey, x.mExtractKey); + eastl::swap(mEqual, x.mEqual); + eastl::swap(mRangedHash, x.mRangedHash); + } + + }; // hash_code_base + + + + // No specialization for ranged hash function while caching hash codes. + // That combination is meaningless, and trying to do it is an error. + + + /// hash_code_base + /// + /// Specialization: ranged hash function, cache hash codes. + /// This combination is meaningless, so we provide only a declaration + /// and no definition. + /// + template + struct hash_code_base; + + + + /// hash_code_base + /// + /// Specialization: hash function and range-hashing function, + /// no caching of hash codes. H is provided but ignored. + /// Provides typedef and accessor required by TR1. + /// + template + struct hash_code_base + { + protected: + ExtractKey mExtractKey; + Equal mEqual; + H1 m_h1; + H2 m_h2; + + public: + typedef H1 hasher; + + H1 hash_function() const + { return m_h1; } + + Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard + { return mEqual; } // has specified in its hashtable (unordered_*) proposal. 
+ + const Equal& key_eq() const + { return mEqual; } + + Equal& key_eq() + { return mEqual; } + + protected: + typedef size_t hash_code_t; + typedef uint32_t bucket_index_t; + typedef hash_node node_type; + + hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&) + : mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { } + + hash_code_t get_hash_code(const Key& key) const + { return (hash_code_t)m_h1(key); } + + bucket_index_t bucket_index(hash_code_t c, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2(c, nBucketCount); } + + bucket_index_t bucket_index(const Key&, hash_code_t c, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2(c, nBucketCount); } + + bucket_index_t bucket_index(const node_type* pNode, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2((hash_code_t)m_h1(mExtractKey(pNode->mValue)), nBucketCount); } + + bool compare(const Key& key, hash_code_t, node_type* pNode) const + { return mEqual(key, mExtractKey(pNode->mValue)); } + + void copy_code(node_type*, const node_type*) const + { } // Nothing to do. + + void set_code(node_type*, hash_code_t) const + { } // Nothing to do. + + void base_swap(hash_code_base& x) + { + eastl::swap(mExtractKey, x.mExtractKey); + eastl::swap(mEqual, x.mEqual); + eastl::swap(m_h1, x.m_h1); + eastl::swap(m_h2, x.m_h2); + } + + }; // hash_code_base + + + + /// hash_code_base + /// + /// Specialization: hash function and range-hashing function, + /// caching hash codes. H is provided but ignored. + /// Provides typedef and accessor required by TR1. + /// + template + struct hash_code_base + { + protected: + ExtractKey mExtractKey; + Equal mEqual; + H1 m_h1; + H2 m_h2; + + public: + typedef H1 hasher; + + H1 hash_function() const + { return m_h1; } + + Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard + { return mEqual; } // has specified in its hashtable (unordered_*) proposal. 
+ + const Equal& key_eq() const + { return mEqual; } + + Equal& key_eq() + { return mEqual; } + + protected: + typedef uint32_t hash_code_t; + typedef uint32_t bucket_index_t; + typedef hash_node node_type; + + hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&) + : mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { } + + hash_code_t get_hash_code(const Key& key) const + { return (hash_code_t)m_h1(key); } + + bucket_index_t bucket_index(hash_code_t c, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2(c, nBucketCount); } + + bucket_index_t bucket_index(const Key&, hash_code_t c, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2(c, nBucketCount); } + + bucket_index_t bucket_index(const node_type* pNode, uint32_t nBucketCount) const + { return (bucket_index_t)m_h2((uint32_t)pNode->mnHashCode, nBucketCount); } + + bool compare(const Key& key, hash_code_t c, node_type* pNode) const + { return (pNode->mnHashCode == c) && mEqual(key, mExtractKey(pNode->mValue)); } + + void copy_code(node_type* pDest, const node_type* pSource) const + { pDest->mnHashCode = pSource->mnHashCode; } + + void set_code(node_type* pDest, hash_code_t c) const + { pDest->mnHashCode = c; } + + void base_swap(hash_code_base& x) + { + eastl::swap(mExtractKey, x.mExtractKey); + eastl::swap(mEqual, x.mEqual); + eastl::swap(m_h1, x.m_h1); + eastl::swap(m_h2, x.m_h2); + } + + }; // hash_code_base + + + + + + /////////////////////////////////////////////////////////////////////////// + /// hashtable + /// + /// Key and Value: arbitrary CopyConstructible types. + /// + /// ExtractKey: function object that takes a object of type Value + /// and returns a value of type Key. + /// + /// Equal: function object that takes two objects of type k and returns + /// a bool-like value that is true if the two objects are considered equal. + /// + /// H1: a hash function. 
A unary function object with argument type + /// Key and result type size_t. Return values should be distributed + /// over the entire range [0, numeric_limits::max()]. + /// + /// H2: a range-hashing function (in the terminology of Tavori and + /// Dreizin). This is a function which takes the output of H1 and + /// converts it to the range of [0, n]. Usually it merely takes the + /// output of H1 and mods it to n. + /// + /// H: a ranged hash function (Tavori and Dreizin). This is merely + /// a class that combines the functionality of H1 and H2 together, + /// possibly in some way that is somehow improved over H1 and H2 + /// It is a binary function whose argument types are Key and size_t + /// and whose result type is uint32_t. Given arguments k and n, the + /// return value is in the range [0, n). Default: h(k, n) = h2(h1(k), n). + /// If H is anything other than the default, H1 and H2 are ignored, + /// as H is thus overriding H1 and H2. + /// + /// RehashPolicy: Policy class with three members, all of which govern + /// the bucket count. nBucket(n) returns a bucket count no smaller + /// than n. GetBucketCount(n) returns a bucket count appropriate + /// for an element count of n. GetRehashRequired(nBucketCount, nElementCount, nElementAdd) + /// determines whether, if the current bucket count is nBucket and the + /// current element count is nElementCount, we need to increase the bucket + /// count. If so, returns pair(true, n), where n is the new + /// bucket count. If not, returns pair(false, ). + /// + /// Currently it is hard-wired that the number of buckets never + /// shrinks. Should we allow RehashPolicy to change that? + /// + /// bCacheHashCode: true if we store the value of the hash + /// function along with the value. This is a time-space tradeoff. + /// Storing it may improve lookup speed by reducing the number of + /// times we need to call the Equal function. 
+ /// + /// bMutableIterators: true if hashtable::iterator is a mutable + /// iterator, false if iterator and const_iterator are both const + /// iterators. This is true for hash_map and hash_multimap, + /// false for hash_set and hash_multiset. + /// + /// bUniqueKeys: true if the return value of hashtable::count(k) + /// is always at most one, false if it may be an arbitrary number. + /// This is true for hash_set and hash_map and is false for + /// hash_multiset and hash_multimap. + /// + /////////////////////////////////////////////////////////////////////// + /// Note: + /// If you want to make a hashtable never increase its bucket usage, + /// call set_max_load_factor with a very high value such as 100000.f. + /// + /// find_as + /// In order to support the ability to have a hashtable of strings but + /// be able to do efficiently lookups via char pointers (i.e. so they + /// aren't converted to string objects), we provide the find_as + /// function. This function allows you to do a find with a key of a + /// type other than the hashtable key type. See the find_as function + /// for more documentation on this. + /// + /// find_by_hash + /// In the interest of supporting fast operations wherever possible, + /// we provide a find_by_hash function which finds a node using its + /// hash code. This is useful for cases where the node's hash is + /// already known, allowing us to avoid a redundant hash operation + /// in the normal find path. + /// + template + class hashtable + : public rehash_base >, + public hash_code_base + { + public: + typedef Key key_type; + typedef Value value_type; + typedef typename ExtractKey::result_type mapped_type; + typedef hash_code_base hash_code_base_type; + typedef typename hash_code_base_type::hash_code_t hash_code_t; + typedef Allocator allocator_type; + typedef Equal key_equal; + typedef ptrdiff_t difference_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. 
+ typedef value_type& reference; + typedef const value_type& const_reference; + typedef node_iterator local_iterator; + typedef node_iterator const_local_iterator; + typedef hashtable_iterator iterator; + typedef hashtable_iterator const_iterator; + typedef hash_node node_type; + typedef typename type_select, iterator>::type insert_return_type; + typedef hashtable this_type; + typedef RehashPolicy rehash_policy_type; + typedef ExtractKey extract_key_type; + typedef H1 h1_type; + typedef H2 h2_type; + typedef H h_type; + typedef integral_constant has_unique_keys_type; + + using hash_code_base_type::key_eq; + using hash_code_base_type::hash_function; + using hash_code_base_type::mExtractKey; + using hash_code_base_type::get_hash_code; + using hash_code_base_type::bucket_index; + using hash_code_base_type::compare; + using hash_code_base_type::set_code; + using hash_code_base_type::copy_code; + + static const bool kCacheHashCode = bCacheHashCode; + + enum + { + // This enumeration is deprecated in favor of eastl::kHashtableAllocFlagBuckets. + kAllocFlagBuckets = eastl::kHashtableAllocFlagBuckets // Flag to allocator which indicates that we are allocating buckets and not nodes. + }; + + protected: + node_type** mpBucketArray; + size_type mnBucketCount; + size_type mnElementCount; + RehashPolicy mRehashPolicy; // To do: Use base class optimization to make this go away. + allocator_type mAllocator; // To do: Use base class optimization to make this go away. + + public: + hashtable(size_type nBucketCount, const H1&, const H2&, const H&, const Equal&, const ExtractKey&, + const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR); + + template + hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount, + const H1&, const H2&, const H&, const Equal&, const ExtractKey&, + const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR); + + hashtable(const hashtable& x); + + // initializer_list ctor support is implemented in subclasses (e.g. 
hash_set). + // hashtable(initializer_list, size_type nBucketCount, const H1&, const H2&, const H&, + // const Equal&, const ExtractKey&, const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR); + + hashtable(this_type&& x); + hashtable(this_type&& x, const allocator_type& allocator); + ~hashtable(); + + const allocator_type& get_allocator() const EA_NOEXCEPT; + allocator_type& get_allocator() EA_NOEXCEPT; + void set_allocator(const allocator_type& allocator); + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + iterator begin() EA_NOEXCEPT + { + iterator i(mpBucketArray); + if(!i.mpNode) + i.increment_bucket(); + return i; + } + + const_iterator begin() const EA_NOEXCEPT + { + const_iterator i(mpBucketArray); + if(!i.mpNode) + i.increment_bucket(); + return i; + } + + const_iterator cbegin() const EA_NOEXCEPT + { return begin(); } + + iterator end() EA_NOEXCEPT + { return iterator(mpBucketArray + mnBucketCount); } + + const_iterator end() const EA_NOEXCEPT + { return const_iterator(mpBucketArray + mnBucketCount); } + + const_iterator cend() const EA_NOEXCEPT + { return const_iterator(mpBucketArray + mnBucketCount); } + + // Returns an iterator to the first item in bucket n. + local_iterator begin(size_type n) EA_NOEXCEPT + { return local_iterator(mpBucketArray[n]); } + + const_local_iterator begin(size_type n) const EA_NOEXCEPT + { return const_local_iterator(mpBucketArray[n]); } + + const_local_iterator cbegin(size_type n) const EA_NOEXCEPT + { return const_local_iterator(mpBucketArray[n]); } + + // Returns an iterator to the last item in a bucket returned by begin(n). 
+ local_iterator end(size_type) EA_NOEXCEPT + { return local_iterator(NULL); } + + const_local_iterator end(size_type) const EA_NOEXCEPT + { return const_local_iterator(NULL); } + + const_local_iterator cend(size_type) const EA_NOEXCEPT + { return const_local_iterator(NULL); } + + bool empty() const EA_NOEXCEPT + { return mnElementCount == 0; } + + size_type size() const EA_NOEXCEPT + { return mnElementCount; } + + size_type bucket_count() const EA_NOEXCEPT + { return mnBucketCount; } + + size_type bucket_size(size_type n) const EA_NOEXCEPT + { return (size_type)eastl::distance(begin(n), end(n)); } + + //size_type bucket(const key_type& k) const EA_NOEXCEPT + // { return bucket_index(k, (hash code here), (uint32_t)mnBucketCount); } + + // Returns the ratio of element count to bucket count. A return value of 1 means + // there's an optimal 1 bucket for each element. + float load_factor() const EA_NOEXCEPT + { return (float)mnElementCount / (float)mnBucketCount; } + + // Inherited from the base class. + // Returns the max load factor, which is the load factor beyond + // which we rebuild the container with a new bucket count. + // get_max_load_factor comes from rehash_base. + // float get_max_load_factor() const; + + // Inherited from the base class. + // If you want to make the hashtable never rehash (resize), + // set the max load factor to be a very high number (e.g. 100000.f). + // set_max_load_factor comes from rehash_base. + // void set_max_load_factor(float fMaxLoadFactor); + + /// Generalization of get_max_load_factor. This is an extension that's + /// not present in C++ hash tables (unordered containers). + const rehash_policy_type& rehash_policy() const EA_NOEXCEPT + { return mRehashPolicy; } + + /// Generalization of set_max_load_factor. This is an extension that's + /// not present in C++ hash tables (unordered containers). + void rehash_policy(const rehash_policy_type& rehashPolicy); + + template + insert_return_type emplace(Args&&... 
args); + + template + iterator emplace_hint(const_iterator position, Args&&... args); + + template insert_return_type try_emplace(const key_type& k, Args&&... args); + template insert_return_type try_emplace(key_type&& k, Args&&... args); + template iterator try_emplace(const_iterator position, const key_type& k, Args&&... args); + template iterator try_emplace(const_iterator position, key_type&& k, Args&&... args); + + insert_return_type insert(const value_type& value); + insert_return_type insert(value_type&& otherValue); + iterator insert(const_iterator hint, const value_type& value); + iterator insert(const_iterator hint, value_type&& value); + void insert(std::initializer_list ilist); + template void insert(InputIterator first, InputIterator last); + //insert_return_type insert(node_type&& nh); + //iterator insert(const_iterator hint, node_type&& nh); + + // This overload attempts to mitigate the overhead associated with mismatched cv-quality elements of + // the hashtable pair. It can avoid copy overhead because it will perfect forward the user provided pair types + // until it can constructed in-place in the allocated hashtable node. + // + // Ideally we would remove this overload as it deprecated and removed in C++17 but it currently causes + // performance regressions for hashtables with complex keys (keys that allocate resources). + template , key_type> && + #endif + !eastl::is_literal_type_v

&& + eastl::is_constructible_v>> + insert_return_type insert(P&& otherValue); + + // Non-standard extension + template // See comments below for the const value_type& equivalent to this function. + insert_return_type insert(hash_code_t c, node_type* pNodeNew, P&& otherValue); + + // We provide a version of insert which lets the caller directly specify the hash value and + // a potential node to insert if needed. This allows for less thread contention in the case + // of a thread-shared hash table that's accessed during a mutex lock, because the hash calculation + // and node creation is done outside of the lock. If pNodeNew is supplied by the user (i.e. non-NULL) + // then it must be freeable via the hash table's allocator. If the return value is true then this function + // took over ownership of pNodeNew, else pNodeNew is still owned by the caller to free or to pass + // to another call to insert. pNodeNew need not be assigned the value by the caller, as the insert + // function will assign value to pNodeNew upon insertion into the hash table. pNodeNew may be + // created by the user with the allocate_uninitialized_node function, and freed by the free_uninitialized_node function. + insert_return_type insert(hash_code_t c, node_type* pNodeNew, const value_type& value); + + template eastl::pair insert_or_assign(const key_type& k, M&& obj); + template eastl::pair insert_or_assign(key_type&& k, M&& obj); + template iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj); + template iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj); + + // Used to allocate and free memory used by insert(const value_type& value, hash_code_t c, node_type* pNodeNew). 
+ node_type* allocate_uninitialized_node(); + void free_uninitialized_node(node_type* pNode); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + size_type erase(const key_type& k); + + void clear(); + void clear(bool clearBuckets); // If clearBuckets is true, we free the bucket memory and set the bucket count back to the newly constructed count. + void reset_lose_memory() EA_NOEXCEPT; // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + void rehash(size_type nBucketCount); + void reserve(size_type nElementCount); + + iterator find(const key_type& key); + const_iterator find(const key_type& key) const; + + /// Implements a find whereby the user supplies a comparison of a different type + /// than the hashtable value_type. A useful case of this is one whereby you have + /// a container of string objects but want to do searches via passing in char pointers. + /// The problem is that without this kind of find, you need to do the expensive operation + /// of converting the char pointer to a string so it can be used as the argument to the + /// find function. + /// + /// Example usage (namespaces omitted for brevity): + /// hash_set hashSet; + /// hashSet.find_as("hello"); // Use default hash and compare. + /// + /// Example usage (note that the predicate uses string as first type and char* as second): + /// hash_set hashSet; + /// hashSet.find_as("hello", hash(), equal_to_2()); + /// + template + iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate); + + template + const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const; + + template + iterator find_as(const U& u); + + template + const_iterator find_as(const U& u) const; + + // Note: find_by_hash and find_range_by_hash both perform a search based on a hash value. 
+ // It is important to note that multiple hash values may map to the same hash bucket, so + // it would be incorrect to assume all items returned match the hash value that + // was searched for. + + /// Implements a find whereby the user supplies the node's hash code. + /// It returns an iterator to the first element that matches the given hash. However, there may be multiple elements that match the given hash. + + template + ENABLE_IF_HASHCODE_EASTLSIZET(HashCodeT, iterator) find_by_hash(HashCodeT c) + { + EASTL_CT_ASSERT_MSG(bCacheHashCode, + "find_by_hash(hash_code_t c) is designed to avoid recomputing hashes, " + "so it requires cached hash codes. Consider setting template parameter " + "bCacheHashCode to true or using find_by_hash(const key_type& k, hash_code_t c) instead."); + + const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], c); + + return pNode ? iterator(pNode, mpBucketArray + n) : + iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + template + ENABLE_IF_HASHCODE_EASTLSIZET(HashCodeT, const_iterator) find_by_hash(HashCodeT c) const + { + EASTL_CT_ASSERT_MSG(bCacheHashCode, + "find_by_hash(hash_code_t c) is designed to avoid recomputing hashes, " + "so it requires cached hash codes. Consider setting template parameter " + "bCacheHashCode to true or using find_by_hash(const key_type& k, hash_code_t c) instead."); + + const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], c); + + return pNode ? 
+ const_iterator(pNode, mpBucketArray + n) : + const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + iterator find_by_hash(const key_type& k, hash_code_t c) + { + const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + const_iterator find_by_hash(const key_type& k, hash_code_t c) const + { + const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + // Returns a pair that allows iterating over all nodes in a hash bucket + // first in the pair returned holds the iterator for the beginning of the bucket, + // second in the pair returned holds the iterator for the end of the bucket, + // If no bucket is found, both values in the pair are set to end(). + // + // See also the note above. + eastl::pair find_range_by_hash(hash_code_t c); + eastl::pair find_range_by_hash(hash_code_t c) const; + + size_type count(const key_type& k) const EA_NOEXCEPT; + + eastl::pair equal_range(const key_type& k); + eastl::pair equal_range(const key_type& k) const; + + bool validate() const; + int validate_iterator(const_iterator i) const; + + protected: + // We must remove one of the 'DoGetResultIterator' overloads from the overload-set (via SFINAE) because both can + // not compile successfully at the same time. The 'bUniqueKeys' template parameter chooses at compile-time the + // type of 'insert_return_type' between a pair and a raw iterator. 
We must pick between the two + // overloads that unpacks the iterator from the pair or simply passes the provided iterator to the caller based + // on the class template parameter. + template + iterator DoGetResultIterator(BoolConstantT, + const insert_return_type& irt, + ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr) const EA_NOEXCEPT + { + return irt.first; + } + + template + iterator DoGetResultIterator(BoolConstantT, + const insert_return_type& irt, + DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr) const EA_NOEXCEPT + { + return irt; + } + + node_type* DoAllocateNodeFromKey(const key_type& key); + node_type* DoAllocateNodeFromKey(key_type&& key); + void DoFreeNode(node_type* pNode); + void DoFreeNodes(node_type** pBucketArray, size_type); + + node_type** DoAllocateBuckets(size_type n); + void DoFreeBuckets(node_type** pBucketArray, size_type n); + + template + eastl::pair DoInsertValue(BoolConstantT, Args&&... args); + + template + iterator DoInsertValue(BoolConstantT, Args&&... args); + + + template + eastl::pair DoInsertValueExtra(BoolConstantT, + const key_type& k, + hash_code_t c, + node_type* pNodeNew, + value_type&& value, + ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + eastl::pair DoInsertValue(BoolConstantT, + value_type&& value, + ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + iterator DoInsertValueExtra(BoolConstantT, + const key_type& k, + hash_code_t c, + node_type* pNodeNew, + value_type&& value, + DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + iterator DoInsertValue(BoolConstantT, value_type&& value, DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + + template + eastl::pair DoInsertValueExtra(BoolConstantT, + const key_type& k, + hash_code_t c, + node_type* pNodeNew, + const value_type& value, + ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + eastl::pair DoInsertValue(BoolConstantT, + const value_type& value, + ENABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + iterator 
DoInsertValueExtra(BoolConstantT, + const key_type& k, + hash_code_t c, + node_type* pNodeNew, + const value_type& value, + DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + iterator DoInsertValue(BoolConstantT, const value_type& value, DISABLE_IF_TRUETYPE(BoolConstantT) = nullptr); + + template + node_type* DoAllocateNode(Args&&... args); + node_type* DoAllocateNode(value_type&& value); + node_type* DoAllocateNode(const value_type& value); + + // DoInsertKey is supposed to get hash_code_t c = get_hash_code(key). + // it is done in case application has it's own hashset/hashmap-like containter, where hash code is for some reason known prior the insert + // this allows to save some performance, especially with heavy hash functions + eastl::pair DoInsertKey(true_type, const key_type& key, hash_code_t c); + iterator DoInsertKey(false_type, const key_type& key, hash_code_t c); + eastl::pair DoInsertKey(true_type, key_type&& key, hash_code_t c); + iterator DoInsertKey(false_type, key_type&& key, hash_code_t c); + + // We keep DoInsertKey overload without third parameter, for compatibility with older revisions of EASTL (3.12.07 and earlier) + // It used to call get_hash_code as a first call inside the DoInsertKey. 
+ eastl::pair DoInsertKey(true_type, const key_type& key) { return DoInsertKey(true_type(), key, get_hash_code(key)); } + iterator DoInsertKey(false_type, const key_type& key) { return DoInsertKey(false_type(), key, get_hash_code(key)); } + eastl::pair DoInsertKey(true_type, key_type&& key) { return DoInsertKey(true_type(), eastl::move(key), get_hash_code(key)); } + iterator DoInsertKey(false_type, key_type&& key) { return DoInsertKey(false_type(), eastl::move(key), get_hash_code(key)); } + + void DoRehash(size_type nBucketCount); + node_type* DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const; + + template + ENABLE_IF_HAS_HASHCODE(T, node_type) DoFindNode(T* pNode, hash_code_t c) const + { + for (; pNode; pNode = pNode->mpNext) + { + if (pNode->mnHashCode == c) + return pNode; + } + return NULL; + } + + template + node_type* DoFindNodeT(node_type* pNode, const U& u, BinaryPredicate predicate) const; + + }; // class hashtable + + + + + + /////////////////////////////////////////////////////////////////////// + // node_iterator_base + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const node_iterator_base& a, const node_iterator_base& b) + { return a.mpNode == b.mpNode; } + + template + inline bool operator!=(const node_iterator_base& a, const node_iterator_base& b) + { return a.mpNode != b.mpNode; } + + + + + /////////////////////////////////////////////////////////////////////// + // hashtable_iterator_base + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const hashtable_iterator_base& a, const hashtable_iterator_base& b) + { return a.mpNode == b.mpNode; } + + template + inline bool operator!=(const hashtable_iterator_base& a, const hashtable_iterator_base& b) + { return a.mpNode != b.mpNode; } + + + + + /////////////////////////////////////////////////////////////////////// + // hashtable + 
/////////////////////////////////////////////////////////////////////// + + template + hashtable + ::hashtable(size_type nBucketCount, const H1& h1, const H2& h2, const H& h, + const Eq& eq, const EK& ek, const allocator_type& allocator) + : rehash_base(), + hash_code_base(ek, eq, h1, h2, h), + mnBucketCount(0), + mnElementCount(0), + mRehashPolicy(), + mAllocator(allocator) + { + if(nBucketCount < 2) // If we are starting in an initially empty state, with no memory allocation done. + reset_lose_memory(); + else // Else we are creating a potentially non-empty hashtable... + { + EASTL_ASSERT(nBucketCount < 10000000); + mnBucketCount = (size_type)mRehashPolicy.GetNextBucketCount((uint32_t)nBucketCount); + mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will always be at least 2. + } + } + + + + template + template + hashtable::hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount, + const H1& h1, const H2& h2, const H& h, + const Eq& eq, const EK& ek, const allocator_type& allocator) + : rehash_base(), + hash_code_base(ek, eq, h1, h2, h), + //mnBucketCount(0), // This gets re-assigned below. + mnElementCount(0), + mRehashPolicy(), + mAllocator(allocator) + { + if(nBucketCount < 2) + { + const size_type nElementCount = (size_type)eastl::ht_distance(first, last); + mnBucketCount = (size_type)mRehashPolicy.GetBucketCount((uint32_t)nElementCount); + } + else + { + EASTL_ASSERT(nBucketCount < 10000000); + mnBucketCount = nBucketCount; + } + + mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will always be at least 2. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(; first != last; ++first) + insert(*first); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) 
+ { + clear(); + DoFreeBuckets(mpBucketArray, mnBucketCount); + throw; + } + #endif + } + + + + template + hashtable::hashtable(const this_type& x) + : rehash_base(x), + hash_code_base(x), + mnBucketCount(x.mnBucketCount), + mnElementCount(x.mnElementCount), + mRehashPolicy(x.mRehashPolicy), + mAllocator(x.mAllocator) + { + if(mnElementCount) // If there is anything to copy... + { + mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will be at least 2. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(size_type i = 0; i < x.mnBucketCount; ++i) + { + node_type* pNodeSource = x.mpBucketArray[i]; + node_type** ppNodeDest = mpBucketArray + i; + + while(pNodeSource) + { + *ppNodeDest = DoAllocateNode(pNodeSource->mValue); + copy_code(*ppNodeDest, pNodeSource); + ppNodeDest = &(*ppNodeDest)->mpNext; + pNodeSource = pNodeSource->mpNext; + } + } + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + clear(); + DoFreeBuckets(mpBucketArray, mnBucketCount); + throw; + } + #endif + } + else + { + // In this case, instead of allocate memory and copy nothing from x, + // we reset ourselves to a zero allocation state. + reset_lose_memory(); + } + } + + + template + hashtable::hashtable(this_type&& x) + : rehash_base(x), + hash_code_base(x), + mnBucketCount(0), + mnElementCount(0), + mRehashPolicy(x.mRehashPolicy), + mAllocator(x.mAllocator) + { + reset_lose_memory(); // We do this here the same as we do it in the default ctor because it puts the container in a proper initial empty state. This code would be cleaner if we could rely on being able to use C++11 delegating constructors and just call the default ctor here. 
+ swap(x); + } + + + template + hashtable::hashtable(this_type&& x, const allocator_type& allocator) + : rehash_base(x), + hash_code_base(x), + mnBucketCount(0), + mnElementCount(0), + mRehashPolicy(x.mRehashPolicy), + mAllocator(allocator) + { + reset_lose_memory(); // We do this here the same as we do it in the default ctor because it puts the container in a proper initial empty state. This code would be cleaner if we could rely on being able to use C++11 delegating constructors and just call the default ctor here. + swap(x); // swap will directly or indirectly handle the possibility that mAllocator != x.mAllocator. + } + + + template + inline const typename hashtable::allocator_type& + hashtable::get_allocator() const EA_NOEXCEPT + { + return mAllocator; + } + + + + template + inline typename hashtable::allocator_type& + hashtable::get_allocator() EA_NOEXCEPT + { + return mAllocator; + } + + + + template + inline void hashtable::set_allocator(const allocator_type& allocator) + { + mAllocator = allocator; + } + + + + template + inline typename hashtable::this_type& + hashtable::operator=(const this_type& x) + { + if(this != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + mAllocator = x.mAllocator; + #endif + + insert(x.begin(), x.end()); + } + return *this; + } + + + template + inline typename hashtable::this_type& + hashtable::operator=(this_type&& x) + { + if(this != &x) + { + clear(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor. + swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy. + } + return *this; + } + + + template + inline typename hashtable::this_type& + hashtable::operator=(std::initializer_list ilist) + { + // The simplest means of doing this is to clear and insert. There probably isn't a generic + // solution that's any more efficient without having prior knowledge of the ilist contents. 
+ clear(); + insert(ilist.begin(), ilist.end()); + return *this; + } + + + + template + inline hashtable::~hashtable() + { + clear(); + DoFreeBuckets(mpBucketArray, mnBucketCount); + } + + + template + typename hashtable::node_type* + hashtable::DoAllocateNodeFromKey(const key_type& key) + { + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(pair_first_construct, key); + pNode->mpNext = NULL; + return pNode; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + EASTLFree(mAllocator, pNode, sizeof(node_type)); + throw; + } + #endif + } + + + template + typename hashtable::node_type* + hashtable::DoAllocateNodeFromKey(key_type&& key) + { + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(pair_first_construct, eastl::move(key)); + pNode->mpNext = NULL; + return pNode; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) 
+ { + EASTLFree(mAllocator, pNode, sizeof(node_type)); + throw; + } + #endif + } + + + template + inline void hashtable::DoFreeNode(node_type* pNode) + { + pNode->~node_type(); + EASTLFree(mAllocator, pNode, sizeof(node_type)); + } + + + + template + void hashtable::DoFreeNodes(node_type** pNodeArray, size_type n) + { + for(size_type i = 0; i < n; ++i) + { + node_type* pNode = pNodeArray[i]; + while(pNode) + { + node_type* const pTempNode = pNode; + pNode = pNode->mpNext; + DoFreeNode(pTempNode); + } + pNodeArray[i] = NULL; + } + } + + + + template + typename hashtable::node_type** + hashtable::DoAllocateBuckets(size_type n) + { + // We allocate one extra bucket to hold a sentinel, an arbitrary + // non-null pointer. Iterator increment relies on this. + EASTL_ASSERT(n > 1); // We reserve an mnBucketCount of 1 for the shared gpEmptyBucketArray. + EASTL_CT_ASSERT(kHashtableAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the allocator has a copy of this enum. + node_type** const pBucketArray = (node_type**)EASTLAllocAlignedFlags(mAllocator, (n + 1) * sizeof(node_type*), EASTL_ALIGN_OF(node_type*), 0, kHashtableAllocFlagBuckets); + //eastl::fill(pBucketArray, pBucketArray + n, (node_type*)NULL); + memset(pBucketArray, 0, n * sizeof(node_type*)); + pBucketArray[n] = reinterpret_cast((uintptr_t)~0); + return pBucketArray; + } + + + + template + inline void hashtable::DoFreeBuckets(node_type** pBucketArray, size_type n) + { + // If n <= 1, then pBucketArray is from the shared gpEmptyBucketArray. We don't test + // for pBucketArray == &gpEmptyBucketArray because one library have a different gpEmptyBucketArray + // than another but pass a hashtable to another. So we go by the size. + if(n > 1) + EASTLFree(mAllocator, pBucketArray, (n + 1) * sizeof(node_type*)); // '+1' because DoAllocateBuckets allocates nBucketCount + 1 buckets in order to have a NULL sentinel at the end. 
+ } + + + template + void hashtable::swap(this_type& x) + { + hash_code_base::base_swap(x); // hash_code_base has multiple implementations, so we let them handle the swap. + eastl::swap(mRehashPolicy, x.mRehashPolicy); + EASTL_MACRO_SWAP(node_type**, mpBucketArray, x.mpBucketArray); + eastl::swap(mnBucketCount, x.mnBucketCount); + eastl::swap(mnElementCount, x.mnElementCount); + + if (mAllocator != x.mAllocator) // If allocators are not equivalent... + { + eastl::swap(mAllocator, x.mAllocator); + } + } + + + template + inline void hashtable::rehash_policy(const rehash_policy_type& rehashPolicy) + { + mRehashPolicy = rehashPolicy; + + const size_type nBuckets = rehashPolicy.GetBucketCount((uint32_t)mnElementCount); + + if(nBuckets > mnBucketCount) + DoRehash(nBuckets); + } + + + + template + inline typename hashtable::iterator + hashtable::find(const key_type& k) + { + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + + + template + inline typename hashtable::const_iterator + hashtable::find(const key_type& k) const + { + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + + + template + template + inline typename hashtable::iterator + hashtable::find_as(const U& other, UHash uhash, BinaryPredicate predicate) + { + const hash_code_t c = (hash_code_t)uhash(other); + const size_type n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy. 
+ + node_type* const pNode = DoFindNodeT(mpBucketArray[n], other, predicate); + return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + + + template + template + inline typename hashtable::const_iterator + hashtable::find_as(const U& other, UHash uhash, BinaryPredicate predicate) const + { + const hash_code_t c = (hash_code_t)uhash(other); + const size_type n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy. + + node_type* const pNode = DoFindNodeT(mpBucketArray[n], other, predicate); + return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end() + } + + + /// hashtable_find + /// + /// Helper function that defaults to using hash and equal_to_2. + /// This makes it so that by default you don't need to provide these. + /// Note that the default hash functions may not be what you want, though. + /// + /// Example usage. Instead of this: + /// hash_set hashSet; + /// hashSet.find("hello", hash(), equal_to_2()); + /// + /// You can use this: + /// hash_set hashSet; + /// hashtable_find(hashSet, "hello"); + /// + template + inline typename H::iterator hashtable_find(H& hashTable, U u) + { return hashTable.find_as(u, eastl::hash(), eastl::equal_to_2()); } + + template + inline typename H::const_iterator hashtable_find(const H& hashTable, U u) + { return hashTable.find_as(u, eastl::hash(), eastl::equal_to_2()); } + + + + template + template + inline typename hashtable::iterator + hashtable::find_as(const U& other) + { return eastl::hashtable_find(*this, other); } + // VC++ doesn't appear to like the following, though it seems correct to me. + // So we implement the workaround above until we can straighten this out. 
+ //{ return find_as(other, eastl::hash(), eastl::equal_to_2()); } + + + template + template + inline typename hashtable::const_iterator + hashtable::find_as(const U& other) const + { return eastl::hashtable_find(*this, other); } + // VC++ doesn't appear to like the following, though it seems correct to me. + // So we implement the workaround above until we can straighten this out. + //{ return find_as(other, eastl::hash(), eastl::equal_to_2()); } + + + + template + eastl::pair::const_iterator, + typename hashtable::const_iterator> + hashtable::find_range_by_hash(hash_code_t c) const + { + const size_type start = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + node_type* const pNodeStart = mpBucketArray[start]; + + if (pNodeStart) + { + eastl::pair pair(const_iterator(pNodeStart, mpBucketArray + start), + const_iterator(pNodeStart, mpBucketArray + start)); + pair.second.increment_bucket(); + return pair; + } + + return eastl::pair(const_iterator(mpBucketArray + mnBucketCount), + const_iterator(mpBucketArray + mnBucketCount)); + } + + + + template + eastl::pair::iterator, + typename hashtable::iterator> + hashtable::find_range_by_hash(hash_code_t c) + { + const size_type start = (size_type)bucket_index(c, (uint32_t)mnBucketCount); + node_type* const pNodeStart = mpBucketArray[start]; + + if (pNodeStart) + { + eastl::pair pair(iterator(pNodeStart, mpBucketArray + start), + iterator(pNodeStart, mpBucketArray + start)); + pair.second.increment_bucket(); + return pair; + + } + + return eastl::pair(iterator(mpBucketArray + mnBucketCount), + iterator(mpBucketArray + mnBucketCount)); + } + + + + template + typename hashtable::size_type + hashtable::count(const key_type& k) const EA_NOEXCEPT + { + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + size_type result = 0; + + // To do: Make a specialization for bU (unique keys) == true and take + // advantage of the fact that the count will always be 
zero or one in that case. + for(node_type* pNode = mpBucketArray[n]; pNode; pNode = pNode->mpNext) + { + if(compare(k, c, pNode)) + ++result; + } + return result; + } + + + + template + eastl::pair::iterator, + typename hashtable::iterator> + hashtable::equal_range(const key_type& k) + { + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + node_type** head = mpBucketArray + n; + node_type* pNode = DoFindNode(*head, k, c); + + if(pNode) + { + node_type* p1 = pNode->mpNext; + + for(; p1; p1 = p1->mpNext) + { + if(!compare(k, c, p1)) + break; + } + + iterator first(pNode, head); + iterator last(p1, head); + + if(!p1) + last.increment_bucket(); + + return eastl::pair(first, last); + } + + return eastl::pair(iterator(mpBucketArray + mnBucketCount), // iterator(mpBucketArray + mnBucketCount) == end() + iterator(mpBucketArray + mnBucketCount)); + } + + + + + template + eastl::pair::const_iterator, + typename hashtable::const_iterator> + hashtable::equal_range(const key_type& k) const + { + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + node_type** head = mpBucketArray + n; + node_type* pNode = DoFindNode(*head, k, c); + + if(pNode) + { + node_type* p1 = pNode->mpNext; + + for(; p1; p1 = p1->mpNext) + { + if(!compare(k, c, p1)) + break; + } + + const_iterator first(pNode, head); + const_iterator last(p1, head); + + if(!p1) + last.increment_bucket(); + + return eastl::pair(first, last); + } + + return eastl::pair(const_iterator(mpBucketArray + mnBucketCount), // iterator(mpBucketArray + mnBucketCount) == end() + const_iterator(mpBucketArray + mnBucketCount)); + } + + + + template + inline typename hashtable::node_type* + hashtable::DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const + { + for(; pNode; pNode = pNode->mpNext) + { + if(compare(k, c, pNode)) + return pNode; + } + return NULL; + } + + + + template + 
template + inline typename hashtable::node_type* + hashtable::DoFindNodeT(node_type* pNode, const U& other, BinaryPredicate predicate) const + { + for(; pNode; pNode = pNode->mpNext) + { + if(predicate(mExtractKey(pNode->mValue), other)) // Intentionally compare with key as first arg and other as second arg. + return pNode; + } + return NULL; + } + + + + template + template + eastl::pair::iterator, bool> + hashtable::DoInsertValue(BoolConstantT, Args&&... args) // true_type means bUniqueKeys is true. + { + // Adds the value to the hash table if not already present. + // If already present then the existing value is returned via an iterator/bool pair. + + // We have a chicken-and-egg problem here. In order to know if and where to insert the value, we need to get the + // hashtable key for the value. But we don't explicitly have a value argument, we have a templated Args&&... argument. + // We need the value_type in order to proceed, but that entails getting an instance of a value_type from the args. + // And it may turn out that the value is already present in the hashtable and we need to cancel the insertion, + // despite having obtained a value_type to put into the hashtable. We have mitigated this problem somewhat by providing + // specializations of the insert function for const value_type& and value_type&&, and so the only time this function + // should get called is when args refers to arguments to construct a value_type. + + node_type* const pNodeNew = DoAllocateNode(eastl::forward(args)...); + const key_type& k = mExtractKey(pNodeNew->mValue); + const hash_code_t c = get_hash_code(k); + size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + + if(pNode == NULL) // If value is not present... add it. 
+ { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + if(bRehash.first) + { + n = (size_type)bucket_index(k, c, (uint32_t)bRehash.second); + DoRehash(bRehash.second); + } + + EASTL_ASSERT((uintptr_t)mpBucketArray != (uintptr_t)&gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + ++mnElementCount; + + return eastl::pair(iterator(pNodeNew, mpBucketArray + n), true); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeNode(pNodeNew); + throw; + } + #endif + } + else + { + // To do: We have an inefficiency to deal with here. We allocated a node above but we are freeing it here because + // it turned out it wasn't needed. But we needed to create the node in order to get the hashtable key for + // the node. One possible resolution is to create specializations: DoInsertValue(true_type, value_type&&) and + // DoInsertValue(true_type, const value_type&) which don't need to create a node up front in order to get the + // hashtable key. Probably most users would end up using these pathways instead of this Args... pathway. + // While we should considering handling this to-do item, a lot of the performance limitations of maps and sets + // in practice is with finding elements rather than adding (potentially redundant) new elements. + DoFreeNode(pNodeNew); + } + + return eastl::pair(iterator(pNode, mpBucketArray + n), false); + } + + + template + template + typename hashtable::iterator + hashtable::DoInsertValue(BoolConstantT, Args&&... args) // false_type means bUniqueKeys is false. 
+ { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + if(bRehash.first) + DoRehash(bRehash.second); + + node_type* pNodeNew = DoAllocateNode(eastl::forward(args)...); + const key_type& k = mExtractKey(pNodeNew->mValue); + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + // To consider: Possibly make this insertion not make equal elements contiguous. + // As it stands now, we insert equal values contiguously in the hashtable. + // The benefit is that equal_range can work in a sensible manner and that + // erase(value) can more quickly find equal values. The downside is that + // this insertion operation taking some extra time. How important is it to + // us that equal_range span all equal items? + node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c); + + if(pNodePrev == NULL) + { + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + } + else + { + pNodeNew->mpNext = pNodePrev->mpNext; + pNodePrev->mpNext = pNodeNew; + } + + ++mnElementCount; + + return iterator(pNodeNew, mpBucketArray + n); + } + + + template + template + typename hashtable::node_type* + hashtable::DoAllocateNode(Args&&... args) + { + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(eastl::forward(args)...); + pNode->mpNext = NULL; + return pNode; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) 
+ { + EASTLFree(mAllocator, pNode, sizeof(node_type)); + throw; + } + #endif + } + + + //////////////////////////////////////////////////////////////////////////////////////////////////// + // Note: The following insertion-related functions are nearly copies of the above three functions, + // but are for value_type&& and const value_type& arguments. It's useful for us to have the functions + // below, even when using a fully compliant C++11 compiler that supports the above functions. + // The reason is because the specializations below are slightly more efficient because they can delay + // the creation of a node until it's known that it will be needed. + //////////////////////////////////////////////////////////////////////////////////////////////////// + + template + template + eastl::pair::iterator, bool> + hashtable::DoInsertValueExtra(BoolConstantT, const key_type& k, + hash_code_t c, node_type* pNodeNew, value_type&& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true. + { + // Adds the value to the hash table if not already present. + // If already present then the existing value is returned via an iterator/bool pair. + size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + + if(pNode == NULL) // If value is not present... add it. + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + // Allocate the new node before doing the rehash so that we don't + // do a rehash if the allocation throws. + #if EASTL_EXCEPTIONS_ENABLED + bool nodeAllocated; // If exceptions are enabled then we we need to track if we allocated the node so we can free it in the catch block. + #endif + + if(pNodeNew) + { + ::new(eastl::addressof(pNodeNew->mValue)) value_type(eastl::move(value)); // It's expected that pNodeNew was allocated with allocate_uninitialized_node. 
+ #if EASTL_EXCEPTIONS_ENABLED + nodeAllocated = false; + #endif + } + else + { + pNodeNew = DoAllocateNode(eastl::move(value)); + #if EASTL_EXCEPTIONS_ENABLED + nodeAllocated = true; + #endif + } + + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + if(bRehash.first) + { + n = (size_type)bucket_index(k, c, (uint32_t)bRehash.second); + DoRehash(bRehash.second); + } + + EASTL_ASSERT((uintptr_t)mpBucketArray != (uintptr_t)&gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + ++mnElementCount; + + return eastl::pair(iterator(pNodeNew, mpBucketArray + n), true); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + if(nodeAllocated) // If we allocated the node within this function, free it. Else let the caller retain ownership of it. + DoFreeNode(pNodeNew); + throw; + } + #endif + } + // Else the value is already present, so don't add a new node. And don't free pNodeNew. + + return eastl::pair(iterator(pNode, mpBucketArray + n), false); + } + + + template + template + eastl::pair::iterator, bool> + hashtable::DoInsertValue(BoolConstantT, value_type&& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true. + { + const key_type& k = mExtractKey(value); + const hash_code_t c = get_hash_code(k); + + return DoInsertValueExtra(true_type(), k, c, NULL, eastl::move(value)); + } + + + template + template + typename hashtable::iterator + hashtable::DoInsertValueExtra(BoolConstantT, const key_type& k, hash_code_t c, node_type* pNodeNew, value_type&& value, + DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false. + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + if(bRehash.first) + DoRehash(bRehash.second); // Note: We don't need to wrap this call with try/catch because there's nothing we would need to do in the catch. 
+ + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + + if(pNodeNew) + ::new(eastl::addressof(pNodeNew->mValue)) value_type(eastl::move(value)); // It's expected that pNodeNew was allocated with allocate_uninitialized_node. + else + pNodeNew = DoAllocateNode(eastl::move(value)); + + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + // To consider: Possibly make this insertion not make equal elements contiguous. + // As it stands now, we insert equal values contiguously in the hashtable. + // The benefit is that equal_range can work in a sensible manner and that + // erase(value) can more quickly find equal values. The downside is that + // this insertion operation taking some extra time. How important is it to + // us that equal_range span all equal items? + node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c); + + if(pNodePrev == NULL) + { + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + } + else + { + pNodeNew->mpNext = pNodePrev->mpNext; + pNodePrev->mpNext = pNodeNew; + } + + ++mnElementCount; + + return iterator(pNodeNew, mpBucketArray + n); + } + + + template + template + typename hashtable::iterator + hashtable::DoInsertValue(BoolConstantT, value_type&& value, DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false. 
+ { + const key_type& k = mExtractKey(value); + const hash_code_t c = get_hash_code(k); + + return DoInsertValueExtra(false_type(), k, c, NULL, eastl::move(value)); + } + + + template + typename hashtable::node_type* + hashtable::DoAllocateNode(value_type&& value) + { + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(eastl::move(value)); + pNode->mpNext = NULL; + return pNode; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + EASTLFree(mAllocator, pNode, sizeof(node_type)); + throw; + } + #endif + } + + + template + template + eastl::pair::iterator, bool> + hashtable::DoInsertValueExtra(BoolConstantT, const key_type& k, hash_code_t c, node_type* pNodeNew, const value_type& value, + ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true. + { + // Adds the value to the hash table if not already present. + // If already present then the existing value is returned via an iterator/bool pair. + size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + node_type* const pNode = DoFindNode(mpBucketArray[n], k, c); + + if(pNode == NULL) // If value is not present... add it. + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + // Allocate the new node before doing the rehash so that we don't + // do a rehash if the allocation throws. + #if EASTL_EXCEPTIONS_ENABLED + bool nodeAllocated; // If exceptions are enabled then we we need to track if we allocated the node so we can free it in the catch block. + #endif + + if(pNodeNew) + { + ::new(eastl::addressof(pNodeNew->mValue)) value_type(value); // It's expected that pNodeNew was allocated with allocate_uninitialized_node. 
+ #if EASTL_EXCEPTIONS_ENABLED + nodeAllocated = false; + #endif + } + else + { + pNodeNew = DoAllocateNode(value); + #if EASTL_EXCEPTIONS_ENABLED + nodeAllocated = true; + #endif + } + + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + if(bRehash.first) + { + n = (size_type)bucket_index(k, c, (uint32_t)bRehash.second); + DoRehash(bRehash.second); + } + + EASTL_ASSERT((uintptr_t)mpBucketArray != (uintptr_t)&gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + ++mnElementCount; + + return eastl::pair(iterator(pNodeNew, mpBucketArray + n), true); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + if(nodeAllocated) // If we allocated the node within this function, free it. Else let the caller retain ownership of it. + DoFreeNode(pNodeNew); + throw; + } + #endif + } + // Else the value is already present, so don't add a new node. And don't free pNodeNew. + + return eastl::pair(iterator(pNode, mpBucketArray + n), false); + } + + + template + template + eastl::pair::iterator, bool> + hashtable::DoInsertValue(BoolConstantT, const value_type& value, ENABLE_IF_TRUETYPE(BoolConstantT)) // true_type means bUniqueKeys is true. + { + const key_type& k = mExtractKey(value); + const hash_code_t c = get_hash_code(k); + + return DoInsertValueExtra(true_type(), k, c, NULL, value); + } + + + template + template + typename hashtable::iterator + hashtable::DoInsertValueExtra(BoolConstantT, const key_type& k, hash_code_t c, node_type* pNodeNew, const value_type& value, + DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false. + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + if(bRehash.first) + DoRehash(bRehash.second); // Note: We don't need to wrap this call with try/catch because there's nothing we would need to do in the catch. 
+ + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + + if(pNodeNew) + ::new(eastl::addressof(pNodeNew->mValue)) value_type(value); // It's expected that pNodeNew was allocated with allocate_uninitialized_node. + else + pNodeNew = DoAllocateNode(value); + + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + // To consider: Possibly make this insertion not make equal elements contiguous. + // As it stands now, we insert equal values contiguously in the hashtable. + // The benefit is that equal_range can work in a sensible manner and that + // erase(value) can more quickly find equal values. The downside is that + // this insertion operation taking some extra time. How important is it to + // us that equal_range span all equal items? + node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c); + + if(pNodePrev == NULL) + { + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + } + else + { + pNodeNew->mpNext = pNodePrev->mpNext; + pNodePrev->mpNext = pNodeNew; + } + + ++mnElementCount; + + return iterator(pNodeNew, mpBucketArray + n); + } + + + template + template + typename hashtable::iterator + hashtable::DoInsertValue(BoolConstantT, const value_type& value, DISABLE_IF_TRUETYPE(BoolConstantT)) // false_type means bUniqueKeys is false. 
+ { + const key_type& k = mExtractKey(value); + const hash_code_t c = get_hash_code(k); + + return DoInsertValueExtra(false_type(), k, c, NULL, value); + } + + + template + typename hashtable::node_type* + hashtable::DoAllocateNode(const value_type& value) + { + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(value); + pNode->mpNext = NULL; + return pNode; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + EASTLFree(mAllocator, pNode, sizeof(node_type)); + throw; + } + #endif + } + + + template + typename hashtable::node_type* + hashtable::allocate_uninitialized_node() + { + // We don't wrap this in try/catch because users of this function are expected to do that themselves as needed. + node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + // Leave pNode->mValue uninitialized. + pNode->mpNext = NULL; + return pNode; + } + + + template + void hashtable::free_uninitialized_node(node_type* pNode) + { + // pNode->mValue is expected to be uninitialized. + EASTLFree(mAllocator, pNode, sizeof(node_type)); + } + + + template + eastl::pair::iterator, bool> + hashtable::DoInsertKey(true_type, const key_type& key, const hash_code_t c) // true_type means bUniqueKeys is true. 
+ { + size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount); + node_type* const pNode = DoFindNode(mpBucketArray[n], key, c); + + if(pNode == NULL) + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + // Allocate the new node before doing the rehash so that we don't + // do a rehash if the allocation throws. + node_type* const pNodeNew = DoAllocateNodeFromKey(key); + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + if(bRehash.first) + { + n = (size_type)bucket_index(key, c, (uint32_t)bRehash.second); + DoRehash(bRehash.second); + } + + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + ++mnElementCount; + + return eastl::pair(iterator(pNodeNew, mpBucketArray + n), true); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeNode(pNodeNew); + throw; + } + #endif + } + + return eastl::pair(iterator(pNode, mpBucketArray + n), false); + } + + + + template + typename hashtable::iterator + hashtable::DoInsertKey(false_type, const key_type& key, const hash_code_t c) // false_type means bUniqueKeys is false. + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + if(bRehash.first) + DoRehash(bRehash.second); + + const size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount); + + node_type* const pNodeNew = DoAllocateNodeFromKey(key); + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + // To consider: Possibly make this insertion not make equal elements contiguous. + // As it stands now, we insert equal values contiguously in the hashtable. + // The benefit is that equal_range can work in a sensible manner and that + // erase(value) can more quickly find equal values. 
The downside is that + // this insertion operation taking some extra time. How important is it to + // us that equal_range span all equal items? + node_type* const pNodePrev = DoFindNode(mpBucketArray[n], key, c); + + if(pNodePrev == NULL) + { + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + } + else + { + pNodeNew->mpNext = pNodePrev->mpNext; + pNodePrev->mpNext = pNodeNew; + } + + ++mnElementCount; + + return iterator(pNodeNew, mpBucketArray + n); + } + + + template + eastl::pair::iterator, bool> + hashtable::DoInsertKey(true_type, key_type&& key, const hash_code_t c) // true_type means bUniqueKeys is true. + { + size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount); + node_type* const pNode = DoFindNode(mpBucketArray[n], key, c); + + if(pNode == NULL) + { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + // Allocate the new node before doing the rehash so that we don't + // do a rehash if the allocation throws. + node_type* const pNodeNew = DoAllocateNodeFromKey(eastl::move(key)); + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + if(bRehash.first) + { + n = (size_type)bucket_index(key, c, (uint32_t)bRehash.second); + DoRehash(bRehash.second); + } + + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + ++mnElementCount; + + return eastl::pair(iterator(pNodeNew, mpBucketArray + n), true); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeNode(pNodeNew); + throw; + } + #endif + } + + return eastl::pair(iterator(pNode, mpBucketArray + n), false); + } + + + template + typename hashtable::iterator + hashtable::DoInsertKey(false_type, key_type&& key, const hash_code_t c) // false_type means bUniqueKeys is false. 
+ { + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1); + + if(bRehash.first) + DoRehash(bRehash.second); + + const size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount); + + node_type* const pNodeNew = DoAllocateNodeFromKey(eastl::move(key)); + set_code(pNodeNew, c); // This is a no-op for most hashtables. + + // To consider: Possibly make this insertion not make equal elements contiguous. + // As it stands now, we insert equal values contiguously in the hashtable. + // The benefit is that equal_range can work in a sensible manner and that + // erase(value) can more quickly find equal values. The downside is that + // this insertion operation taking some extra time. How important is it to + // us that equal_range span all equal items? + node_type* const pNodePrev = DoFindNode(mpBucketArray[n], key, c); + + if(pNodePrev == NULL) + { + EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]); + pNodeNew->mpNext = mpBucketArray[n]; + mpBucketArray[n] = pNodeNew; + } + else + { + pNodeNew->mpNext = pNodePrev->mpNext; + pNodePrev->mpNext = pNodeNew; + } + + ++mnElementCount; + + return iterator(pNodeNew, mpBucketArray + n); + } + + + template + template + typename hashtable::insert_return_type + hashtable::emplace(Args&&... args) + { + return DoInsertValue(has_unique_keys_type(), eastl::forward(args)...); // Need to use forward instead of move because Args&& is a "universal reference" instead of an rvalue reference. + } + + template + template + typename hashtable::iterator + hashtable::emplace_hint(const_iterator, Args&&... args) + { + // We currently ignore the iterator argument as a hint. 
+ insert_return_type result = DoInsertValue(has_unique_keys_type(), eastl::forward(args)...); + return DoGetResultIterator(has_unique_keys_type(), result); + } + + template + template + // inline eastl::pair::iterator, bool> + inline typename hashtable::insert_return_type + hashtable::try_emplace(const key_type& key, Args&&... args) + { + return DoInsertValue(has_unique_keys_type(), piecewise_construct, forward_as_tuple(key), + forward_as_tuple(forward(args)...)); + } + + template + template + // inline eastl::pair::iterator, bool> + inline typename hashtable::insert_return_type + hashtable::try_emplace(key_type&& key, Args&&... args) + { + return DoInsertValue(has_unique_keys_type(), piecewise_construct, forward_as_tuple(eastl::move(key)), + forward_as_tuple(forward(args)...)); + } + + template + template + inline typename hashtable::iterator + hashtable::try_emplace(const_iterator, const key_type& key, Args&&... args) + { + insert_return_type result = DoInsertValue( + has_unique_keys_type(), + value_type(piecewise_construct, forward_as_tuple(key), forward_as_tuple(forward(args)...))); + + return DoGetResultIterator(has_unique_keys_type(), result); + } + + template + template + inline typename hashtable::iterator + hashtable::try_emplace(const_iterator, key_type&& key, Args&&... args) + { + insert_return_type result = + DoInsertValue(has_unique_keys_type(), value_type(piecewise_construct, forward_as_tuple(eastl::move(key)), + forward_as_tuple(forward(args)...))); + + return DoGetResultIterator(has_unique_keys_type(), result); + } + + template + typename hashtable::insert_return_type + hashtable::insert(value_type&& otherValue) + { + return DoInsertValue(has_unique_keys_type(), eastl::move(otherValue)); + } + + + template + template + typename hashtable::insert_return_type + hashtable::insert(hash_code_t c, node_type* pNodeNew, P&& otherValue) + { + // pNodeNew->mValue is expected to be uninitialized. + value_type value(eastl::forward

(otherValue)); // Need to use forward instead of move because P&& is a "universal reference" instead of an rvalue reference. + const key_type& k = mExtractKey(value); + return DoInsertValueExtra(has_unique_keys_type(), k, c, pNodeNew, eastl::move(value)); + } + + + template + typename hashtable::iterator + hashtable::insert(const_iterator, value_type&& value) + { + // We currently ignore the iterator argument as a hint. + insert_return_type result = DoInsertValue(has_unique_keys_type(), value_type(eastl::move(value))); + return DoGetResultIterator(has_unique_keys_type(), result); + } + + + template + typename hashtable::insert_return_type + hashtable::insert(const value_type& value) + { + return DoInsertValue(has_unique_keys_type(), value); + } + + + template + typename hashtable::insert_return_type + hashtable::insert(hash_code_t c, node_type* pNodeNew, const value_type& value) + { + // pNodeNew->mValue is expected to be uninitialized. + const key_type& k = mExtractKey(value); + return DoInsertValueExtra(has_unique_keys_type(), k, c, pNodeNew, value); + } + + + template + template + typename hashtable::insert_return_type + hashtable::insert(P&& otherValue) + { + return emplace(eastl::forward

(otherValue)); + } + + + template + typename hashtable::iterator + hashtable::insert(const_iterator, const value_type& value) + { + // We ignore the first argument (hint iterator). It's not likely to be useful for hashtable containers. + insert_return_type result = DoInsertValue(has_unique_keys_type(), value); + return result.first; // Note by Paul Pedriana while perusing this code: This code will fail to compile when bU is false (i.e. for multiset, multimap). + } + + + template + void hashtable::insert(std::initializer_list ilist) + { + insert(ilist.begin(), ilist.end()); + } + + + template + template + void + hashtable::insert(InputIterator first, InputIterator last) + { + const uint32_t nElementAdd = (uint32_t)eastl::ht_distance(first, last); + const eastl::pair bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, nElementAdd); + + if(bRehash.first) + DoRehash(bRehash.second); + + for(; first != last; ++first) + DoInsertValue(has_unique_keys_type(), *first); + } + + + template + template + eastl::pair::iterator, bool> + hashtable::insert_or_assign(const key_type& k, M&& obj) + { + auto iter = find(k); + if(iter == end()) + { + return insert(value_type(piecewise_construct, forward_as_tuple(k), forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return {iter, false}; + } + } + + template + template + eastl::pair::iterator, bool> + hashtable::insert_or_assign(key_type&& k, M&& obj) + { + auto iter = find(k); + if(iter == end()) + { + return insert(value_type(piecewise_construct, forward_as_tuple(eastl::move(k)), forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return {iter, false}; + } + } + + template + template + typename hashtable::iterator + hashtable::insert_or_assign(const_iterator, const key_type& k, M&& obj) + { + return insert_or_assign(k, eastl::forward(obj)).first; // we ignore the iterator hint + } + + template + template + 
typename hashtable::iterator + hashtable::insert_or_assign(const_iterator, key_type&& k, M&& obj) + { + return insert_or_assign(eastl::move(k), eastl::forward(obj)).first; // we ignore the iterator hint + } + + + template + typename hashtable::iterator + hashtable::erase(const_iterator i) + { + iterator iNext(i.mpNode, i.mpBucket); // Convert from const_iterator to iterator while constructing. + ++iNext; + + node_type* pNode = i.mpNode; + node_type* pNodeCurrent = *i.mpBucket; + + if(pNodeCurrent == pNode) + *i.mpBucket = pNodeCurrent->mpNext; + else + { + // We have a singly-linked list, so we have no choice but to + // walk down it till we find the node before the node at 'i'. + node_type* pNodeNext = pNodeCurrent->mpNext; + + while(pNodeNext != pNode) + { + pNodeCurrent = pNodeNext; + pNodeNext = pNodeCurrent->mpNext; + } + + pNodeCurrent->mpNext = pNodeNext->mpNext; + } + + DoFreeNode(pNode); + --mnElementCount; + + return iNext; + } + + + + template + inline typename hashtable::iterator + hashtable::erase(const_iterator first, const_iterator last) + { + while(first != last) + first = erase(first); + return iterator(first.mpNode, first.mpBucket); + } + + + + template + typename hashtable::size_type + hashtable::erase(const key_type& k) + { + // To do: Reimplement this function to do a single loop and not try to be + // smart about element contiguity. The mechanism here is only a benefit if the + // buckets are heavily overloaded; otherwise this mechanism may be slightly slower. 
+ + const hash_code_t c = get_hash_code(k); + const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount); + const size_type nElementCountSaved = mnElementCount; + + node_type** pBucketArray = mpBucketArray + n; + + while(*pBucketArray && !compare(k, c, *pBucketArray)) + pBucketArray = &(*pBucketArray)->mpNext; + + while(*pBucketArray && compare(k, c, *pBucketArray)) + { + node_type* const pNode = *pBucketArray; + *pBucketArray = pNode->mpNext; + DoFreeNode(pNode); + --mnElementCount; + } + + return nElementCountSaved - mnElementCount; + } + + + + template + inline void hashtable::clear() + { + DoFreeNodes(mpBucketArray, mnBucketCount); + mnElementCount = 0; + } + + + + template + inline void hashtable::clear(bool clearBuckets) + { + DoFreeNodes(mpBucketArray, mnBucketCount); + if(clearBuckets) + { + DoFreeBuckets(mpBucketArray, mnBucketCount); + reset_lose_memory(); + } + mnElementCount = 0; + } + + + + template + inline void hashtable::reset_lose_memory() EA_NOEXCEPT + { + // The reset function is a special extension function which unilaterally + // resets the container to an empty state without freeing the memory of + // the contained objects. This is useful for very quickly tearing down a + // container built into scratch memory. + mnBucketCount = 1; + + #ifdef _MSC_VER + mpBucketArray = (node_type**)&gpEmptyBucketArray[0]; + #else + void* p = &gpEmptyBucketArray[0]; + memcpy(&mpBucketArray, &p, sizeof(mpBucketArray)); // Other compilers implement strict aliasing and casting is thus unsafe. + #endif + + mnElementCount = 0; + mRehashPolicy.mnNextResize = 0; + } + + + template + inline void hashtable::reserve(size_type nElementCount) + { + rehash(mRehashPolicy.GetBucketCount(uint32_t(nElementCount))); + } + + + + template + inline void hashtable::rehash(size_type nBucketCount) + { + // Note that we unilaterally use the passed in bucket count; we do not attempt migrate it + // up to the next prime number. 
We leave it at the user's discretion to do such a thing. + DoRehash(nBucketCount); + } + + + + template + void hashtable::DoRehash(size_type nNewBucketCount) + { + node_type** const pBucketArray = DoAllocateBuckets(nNewBucketCount); // nNewBucketCount should always be >= 2. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + node_type* pNode; + + for(size_type i = 0; i < mnBucketCount; ++i) + { + while((pNode = mpBucketArray[i]) != NULL) // Using '!=' disables compiler warnings. + { + const size_type nNewBucketIndex = (size_type)bucket_index(pNode, (uint32_t)nNewBucketCount); + + mpBucketArray[i] = pNode->mpNext; + pNode->mpNext = pBucketArray[nNewBucketIndex]; + pBucketArray[nNewBucketIndex] = pNode; + } + } + + DoFreeBuckets(mpBucketArray, mnBucketCount); + mnBucketCount = nNewBucketCount; + mpBucketArray = pBucketArray; + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + // A failure here means that a hash function threw an exception. + // We can't restore the previous state without calling the hash + // function again, so the only sensible recovery is to delete everything. + DoFreeNodes(pBucketArray, nNewBucketCount); + DoFreeBuckets(pBucketArray, nNewBucketCount); + DoFreeNodes(mpBucketArray, mnBucketCount); + mnElementCount = 0; + throw; + } + #endif + } + + + template + inline bool hashtable::validate() const + { + // Verify our empty bucket array is unmodified. + if(gpEmptyBucketArray[0] != NULL) + return false; + + if(gpEmptyBucketArray[1] != (void*)uintptr_t(~0)) + return false; + + // Verify that we have at least one bucket. Calculations can + // trigger division by zero exceptions otherwise. + if(mnBucketCount == 0) + return false; + + // Verify that gpEmptyBucketArray is used correctly. + // gpEmptyBucketArray is only used when initially empty. + if((void**)mpBucketArray == &gpEmptyBucketArray[0]) + { + if(mnElementCount) // gpEmptyBucketArray is used only for empty hash tables. 
+ return false; + + if(mnBucketCount != 1) // gpEmptyBucketArray is used exactly an only for mnBucketCount == 1. + return false; + } + else + { + if(mnBucketCount < 2) // Small bucket counts *must* use gpEmptyBucketArray. + return false; + } + + // Verify that the element count matches mnElementCount. + size_type nElementCount = 0; + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + ++nElementCount; + + if(nElementCount != mnElementCount) + return false; + + // To do: Verify that individual elements are in the expected buckets. + + return true; + } + + + template + int hashtable::validate_iterator(const_iterator i) const + { + // To do: Come up with a more efficient mechanism of doing this. + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if(temp == i) + return (isf_valid | isf_current | isf_can_dereference); + } + + if(i == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + // operator==, != have been moved to the specific container subclasses (e.g. hash_map). + + // The following comparison operators are deprecated and will likely be removed in a + // future version of this package. + // + // Comparing hash tables for less-ness is an odd thing to do. We provide it for + // completeness, though the user is advised to be wary of how they use this. + // + template + inline bool operator<(const hashtable& a, + const hashtable& b) + { + // This requires hash table elements to support operator<. Since the hash table + // doesn't compare elements via less (it does so via equals), we must use the + // globally defined operator less for the elements. 
+ return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + + template + inline bool operator>(const hashtable& a, + const hashtable& b) + { + return b < a; + } + + + template + inline bool operator<=(const hashtable& a, + const hashtable& b) + { + return !(b < a); + } + + + template + inline bool operator>=(const hashtable& a, + const hashtable& b) + { + return !(a < b); + } + + + template + inline void swap(const hashtable& a, + const hashtable& b) + { + a.swap(b); + } + + +} // namespace eastl + + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +#endif // Header include guard + + + + + + + + + diff --git a/include/EASTL/internal/in_place_t.h b/include/EASTL/internal/in_place_t.h new file mode 100644 index 0000000..79acd18 --- /dev/null +++ b/include/EASTL/internal/in_place_t.h @@ -0,0 +1,82 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_IN_PLACE_T_H +#define EASTL_INTERNAL_IN_PLACE_T_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +namespace eastl +{ + namespace Internal + { + struct in_place_tag {}; + template struct in_place_type_tag {}; + template struct in_place_index_tag {}; + } + + /////////////////////////////////////////////////////////////////////////////// + /// in_place_tag + /// + /// http://en.cppreference.com/w/cpp/utility/in_place_tag + /// + struct in_place_tag + { + in_place_tag() = delete; + + private: + explicit in_place_tag(Internal::in_place_tag) {} + friend inline in_place_tag Internal_ConstructInPlaceTag(); + }; + + // internal factory function for in_place_tag + inline in_place_tag Internal_ConstructInPlaceTag() { return in_place_tag(Internal::in_place_tag{}); } + + + /////////////////////////////////////////////////////////////////////////////// + /// in_place_t / 
in_place_type_t / in_place_index_t + /// + /// used to disambiguate overloads that take arguments (possibly a parameter + /// pack) for in-place construction of some value. + /// + /// http://en.cppreference.com/w/cpp/utility/optional/in_place_t + /// + using in_place_t = in_place_tag(&)(Internal::in_place_tag); + + template + using in_place_type_t = in_place_tag(&)(Internal::in_place_type_tag); + + template + using in_place_index_t = in_place_tag(&)(Internal::in_place_index_tag); + + + /////////////////////////////////////////////////////////////////////////////// + /// in_place / in_place / in_place + /// + /// http://en.cppreference.com/w/cpp/utility/in_place + /// + inline in_place_tag in_place(Internal::in_place_tag) { return Internal_ConstructInPlaceTag(); } + + template + inline in_place_tag in_place(Internal::in_place_type_tag) { return Internal_ConstructInPlaceTag(); } + + template + inline in_place_tag in_place(Internal::in_place_index_tag) { return Internal_ConstructInPlaceTag(); } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + diff --git a/include/EASTL/internal/integer_sequence.h b/include/EASTL/internal/integer_sequence.h new file mode 100644 index 0000000..88cf1b1 --- /dev/null +++ b/include/EASTL/internal/integer_sequence.h @@ -0,0 +1,74 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_INTEGER_SEQUENCE_H +#define EASTL_INTEGER_SEQUENCE_H + +#include +#include +#include + +namespace eastl +{ + +#if EASTL_VARIADIC_TEMPLATES_ENABLED && !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + +// integer_sequence +template +class integer_sequence +{ +public: + typedef T value_type; + static_assert(is_integral::value, "eastl::integer_sequence can only be instantiated with an integral type"); + static EA_CONSTEXPR size_t size() EA_NOEXCEPT { return sizeof...(Ints); } +}; + +template +struct make_index_sequence_impl; + +template +struct make_index_sequence_impl> +{ + typedef typename make_index_sequence_impl>::type type; +}; + +template +struct make_index_sequence_impl<0, integer_sequence> +{ + typedef integer_sequence type; +}; + +template +using index_sequence = integer_sequence; + +template +using make_index_sequence = typename make_index_sequence_impl>::type; + +template +struct integer_sequence_convert_impl; + +template +struct integer_sequence_convert_impl> +{ + typedef integer_sequence type; +}; + +template +struct make_integer_sequence_impl +{ + typedef typename integer_sequence_convert_impl>::type type; +}; + +template +using make_integer_sequence = typename make_integer_sequence_impl::type; + +// Helper alias template that converts any type parameter pack into an index sequence of the same length +template +using index_sequence_for = make_index_sequence; + +#endif // EASTL_VARIADIC_TEMPLATES_ENABLED + +} // namespace eastl + +#endif // EASTL_INTEGER_SEQUENCE_H diff --git a/include/EASTL/internal/intrusive_hashtable.h b/include/EASTL/internal/intrusive_hashtable.h new file mode 100644 index 0000000..269a672 --- /dev/null +++ b/include/EASTL/internal/intrusive_hashtable.h @@ -0,0 +1,1005 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements an intrusive hash table, which is a hash table whereby +// the container nodes are the hash table objects themselves. This has benefits +// primarily in terms of memory management. There are some minor limitations +// that result from this. +// +/////////////////////////////////////////////////////////////////////////////// + + + +#ifndef EASTL_INTERNAL_INTRUSIVE_HASHTABLE_H +#define EASTL_INTERNAL_INTRUSIVE_HASHTABLE_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(push, 0) + #include + #include + #include + #pragma warning(pop) +#else + #include + #include + #include +#endif + + + +namespace eastl +{ + + /// intrusive_hash_node + /// + /// A hash_node stores an element in a hash table, much like a + /// linked list node stores an element in a linked list. + /// An intrusive_hash_node additionally can, via template parameter, + /// store a hash code in the node to speed up hash calculations + /// and comparisons in some cases. + /// + /// To consider: Make a version of intrusive_hash_node which is + /// templated on the container type. This would allow for the + /// mpNext pointer to be the container itself and thus allow + /// for easier debugging. + /// + /// Example usage: + /// struct Widget : public intrusive_hash_node{ ... }; + /// + /// struct Dagget : public intrusive_hash_node_key{ ... }; + /// + struct intrusive_hash_node + { + intrusive_hash_node* mpNext; + }; + + + template + struct intrusive_hash_node_key : public intrusive_hash_node + { + typedef Key key_type; + Key mKey; + }; + + + + /// intrusive_node_iterator + /// + /// Node iterators iterate nodes within a given bucket. 
+ /// + /// The bConst parameter defines if the iterator is a const_iterator + /// or an iterator. + /// + template + struct intrusive_node_iterator + { + public: + typedef intrusive_node_iterator this_type; + typedef Value value_type; + typedef Value node_type; + typedef ptrdiff_t difference_type; + typedef typename type_select::type pointer; + typedef typename type_select::type reference; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + node_type* mpNode; + + public: + intrusive_node_iterator() + : mpNode(NULL) { } + + explicit intrusive_node_iterator(value_type* pNode) + : mpNode(pNode) { } + + intrusive_node_iterator(const intrusive_node_iterator& x) + : mpNode(x.mpNode) { } + + reference operator*() const + { return *mpNode; } + + pointer operator->() const + { return mpNode; } + + this_type& operator++() + { mpNode = static_cast(mpNode->mpNext); return *this; } + + this_type operator++(int) + { this_type temp(*this); mpNode = static_cast(mpNode->mpNext); return temp; } + + }; // intrusive_node_iterator + + + + + /// intrusive_hashtable_iterator_base + /// + /// An intrusive_hashtable_iterator_base iterates the entire hash table and + /// not just nodes within a single bucket. Users in general will use a hash + /// table iterator much more often, as it is much like other container + /// iterators (e.g. vector::iterator). + /// + /// We define a base class here because it is shared by both const and + /// non-const iterators. 
+ /// + template + struct intrusive_hashtable_iterator_base + { + public: + typedef Value value_type; + + protected: + template + friend class intrusive_hashtable; + + template + friend struct intrusive_hashtable_iterator; + + template + friend bool operator==(const intrusive_hashtable_iterator_base&, const intrusive_hashtable_iterator_base&); + + template + friend bool operator!=(const intrusive_hashtable_iterator_base&, const intrusive_hashtable_iterator_base&); + + value_type* mpNode; // Current node within current bucket. + value_type** mpBucket; // Current bucket. + + public: + intrusive_hashtable_iterator_base(value_type* pNode, value_type** pBucket) + : mpNode(pNode), mpBucket(pBucket) { } + + void increment_bucket() + { + ++mpBucket; + while(*mpBucket == NULL) // We store an extra bucket with some non-NULL value at the end + ++mpBucket; // of the bucket array so that finding the end of the bucket + mpNode = *mpBucket; // array is quick and simple. + } + + void increment() + { + mpNode = static_cast(mpNode->mpNext); + + while(mpNode == NULL) + mpNode = *++mpBucket; + } + + }; // intrusive_hashtable_iterator_base + + + + + /// intrusive_hashtable_iterator + /// + /// An intrusive_hashtable_iterator iterates the entire hash table and not + /// just nodes within a single bucket. Users in general will use a hash + /// table iterator much more often, as it is much like other container + /// iterators (e.g. vector::iterator). + /// + /// The bConst parameter defines if the iterator is a const_iterator + /// or an iterator. 
+ /// + template + struct intrusive_hashtable_iterator : public intrusive_hashtable_iterator_base + { + public: + typedef intrusive_hashtable_iterator_base base_type; + typedef intrusive_hashtable_iterator this_type; + typedef intrusive_hashtable_iterator this_type_non_const; + typedef typename base_type::value_type value_type; + typedef typename type_select::type pointer; + typedef typename type_select::type reference; + typedef ptrdiff_t difference_type; + typedef EASTL_ITC_NS::forward_iterator_tag iterator_category; + + public: + intrusive_hashtable_iterator() + : base_type(NULL, NULL) { } + + explicit intrusive_hashtable_iterator(value_type* pNode, value_type** pBucket) + : base_type(pNode, pBucket) { } + + explicit intrusive_hashtable_iterator(value_type** pBucket) + : base_type(*pBucket, pBucket) { } + + intrusive_hashtable_iterator(const this_type_non_const& x) + : base_type(x.mpNode, x.mpBucket) { } + + reference operator*() const + { return *base_type::mpNode; } + + pointer operator->() const + { return base_type::mpNode; } + + this_type& operator++() + { base_type::increment(); return *this; } + + this_type operator++(int) + { this_type temp(*this); base_type::increment(); return temp; } + + }; // intrusive_hashtable_iterator + + + + /// use_intrusive_key + /// + /// operator()(x) returns x.mKey. Used in maps, as opposed to sets. + /// This is a template policy implementation; it is an alternative to + /// the use_self template implementation, which is used for sets. + /// + template + struct use_intrusive_key // : public unary_function // Perhaps we want to make it a subclass of unary_function. 
+ { + typedef Key result_type; + + const result_type& operator()(const Node& x) const + { return x.mKey; } + }; + + + + /////////////////////////////////////////////////////////////////////////// + /// intrusive_hashtable + /// + template + class intrusive_hashtable + { + public: + typedef intrusive_hashtable this_type; + typedef Key key_type; + typedef Value value_type; + typedef Value mapped_type; + typedef Value node_type; + typedef uint32_t hash_code_t; + typedef Equal key_equal; + typedef ptrdiff_t difference_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef value_type& reference; + typedef const value_type& const_reference; + typedef intrusive_node_iterator local_iterator; + typedef intrusive_node_iterator const_local_iterator; + typedef intrusive_hashtable_iterator iterator; + typedef intrusive_hashtable_iterator const_iterator; + typedef typename type_select, iterator>::type insert_return_type; + typedef typename type_select, + eastl::use_intrusive_key >::type extract_key; + + enum + { + kBucketCount = bucketCount + }; + + protected: + node_type* mBucketArray[kBucketCount + 1]; // '+1' because we have an end bucket which is non-NULL so iterators always stop on it. + size_type mnElementCount; + Hash mHash; // To do: Use base class optimization to make this go away when it is of zero size. + Equal mEqual; // To do: Use base class optimization to make this go away when it is of zero size. 
+ + public: + intrusive_hashtable(const Hash&, const Equal&); + + void swap(this_type& x); + + iterator begin() EA_NOEXCEPT + { + iterator i(mBucketArray); + if(!i.mpNode) + i.increment_bucket(); + return i; + } + + const_iterator begin() const EA_NOEXCEPT + { + const_iterator i(const_cast(mBucketArray)); + if(!i.mpNode) + i.increment_bucket(); + return i; + } + + const_iterator cbegin() const EA_NOEXCEPT + { + return begin(); + } + + iterator end() EA_NOEXCEPT + { return iterator(mBucketArray + kBucketCount); } + + const_iterator end() const EA_NOEXCEPT + { return const_iterator(const_cast(mBucketArray) + kBucketCount); } + + const_iterator cend() const EA_NOEXCEPT + { return const_iterator(const_cast(mBucketArray) + kBucketCount); } + + local_iterator begin(size_type n) EA_NOEXCEPT + { return local_iterator(mBucketArray[n]); } + + const_local_iterator begin(size_type n) const EA_NOEXCEPT + { return const_local_iterator(mBucketArray[n]); } + + const_local_iterator cbegin(size_type n) const EA_NOEXCEPT + { return const_local_iterator(mBucketArray[n]); } + + local_iterator end(size_type) EA_NOEXCEPT + { return local_iterator(NULL); } + + const_local_iterator end(size_type) const EA_NOEXCEPT + { return const_local_iterator(NULL); } + + const_local_iterator cend(size_type) const EA_NOEXCEPT + { return const_local_iterator(NULL); } + + size_type size() const EA_NOEXCEPT + { return mnElementCount; } + + bool empty() const EA_NOEXCEPT + { return mnElementCount == 0; } + + size_type bucket_count() const EA_NOEXCEPT // This function is unnecessary, as the user can directly reference + { return kBucketCount; } // intrusive_hashtable::kBucketCount as a constant. 
+ + size_type bucket_size(size_type n) const EA_NOEXCEPT + { return (size_type)eastl::distance(begin(n), end(n)); } + + size_type bucket(const key_type& k) const EA_NOEXCEPT + { return (size_type)(mHash(k) % kBucketCount); } + + public: + float load_factor() const EA_NOEXCEPT + { return (float)mnElementCount / (float)kBucketCount; } + + public: + insert_return_type insert(value_type& value) + { return DoInsertValue(value, integral_constant()); } + + insert_return_type insert(const_iterator, value_type& value) + { return insert(value); } // To consider: We might be able to use the iterator argument to specify a specific insertion location. + + template + void insert(InputIterator first, InputIterator last); + + public: + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + size_type erase(const key_type& k); + iterator remove(value_type& value); // Removes by value instead of by iterator. This is an O(1) operation, due to this hashtable being 'intrusive'. + + void clear(); + + public: + iterator find(const key_type& k); + const_iterator find(const key_type& k) const; + + /// Implements a find whereby the user supplies a comparison of a different type + /// than the hashtable value_type. A useful case of this is one whereby you have + /// a container of string objects but want to do searches via passing in char pointers. + /// The problem is that without this kind of find, you need to do the expensive operation + /// of converting the char pointer to a string so it can be used as the argument to the + /// find function. + /// + /// Example usage: + /// hash_set hashSet; + /// hashSet.find_as("hello"); // Use default hash and compare. 
+ /// + /// Example usage (namespaces omitted for brevity): + /// hash_set hashSet; + /// hashSet.find_as("hello", hash(), equal_to_2()); + /// + template + iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate); + + template + const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const; + + template + iterator find_as(const U& u); + + template + const_iterator find_as(const U& u) const; + + size_type count(const key_type& k) const; + + // The use for equal_range in a hash_table seems somewhat questionable. + // The primary reason for its existence is to replicate the interface of set/map. + eastl::pair equal_range(const key_type& k); + eastl::pair equal_range(const key_type& k) const; + + public: + bool validate() const; + int validate_iterator(const_iterator i) const; + + public: + Hash hash_function() const + { return mHash; } + + Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard + { return mEqual; } // has specified in its hashtable (unordered_*) proposal. + + const key_equal& key_eq() const + { return mEqual; } + + key_equal& key_eq() + { return mEqual; } + + protected: + eastl::pair DoInsertValue(value_type&, true_type); // true_type means bUniqueKeys is true. + iterator DoInsertValue(value_type&, false_type); // false_type means bUniqueKeys is false. 
+ + node_type* DoFindNode(node_type* pNode, const key_type& k) const; + + template + node_type* DoFindNode(node_type* pNode, const U& u, BinaryPredicate predicate) const; + + }; // class intrusive_hashtable + + + + + + /////////////////////////////////////////////////////////////////////// + // node_iterator_base + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const intrusive_node_iterator& a, + const intrusive_node_iterator& b) + { return a.mpNode == b.mpNode; } + + template + inline bool operator!=(const intrusive_node_iterator& a, + const intrusive_node_iterator& b) + { return a.mpNode != b.mpNode; } + + + + + /////////////////////////////////////////////////////////////////////// + // hashtable_iterator_base + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const intrusive_hashtable_iterator_base& a, + const intrusive_hashtable_iterator_base& b) + { return a.mpNode == b.mpNode; } + + + template + inline bool operator!=(const intrusive_hashtable_iterator_base& a, + const intrusive_hashtable_iterator_base& b) + { return a.mpNode != b.mpNode; } + + + + + /////////////////////////////////////////////////////////////////////// + // intrusive_hashtable + /////////////////////////////////////////////////////////////////////// + + template + inline intrusive_hashtable::intrusive_hashtable(const H& h, const Eq& eq) + : mnElementCount(0), + mHash(h), + mEqual(eq) + { + memset(mBucketArray, 0, kBucketCount * sizeof(mBucketArray[0])); + mBucketArray[kBucketCount] = reinterpret_cast((uintptr_t)~0); + } + + + template + void intrusive_hashtable::swap(this_type& x) + { + for(size_t i = 0; i < kBucketCount; i++) + eastl::swap(mBucketArray[i], x.mBucketArray[i]); + + eastl::swap(mnElementCount, x.mnElementCount); + eastl::swap(mHash, x.mHash); + eastl::swap(mEqual, x.mEqual); + } + + + template + inline typename intrusive_hashtable::iterator + 
intrusive_hashtable::find(const key_type& k) + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + node_type* const pNode = DoFindNode(mBucketArray[n], k); + return pNode ? iterator(pNode, mBucketArray + n) : iterator(mBucketArray + kBucketCount); + } + + + template + inline typename intrusive_hashtable::const_iterator + intrusive_hashtable::find(const key_type& k) const + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + node_type* const pNode = DoFindNode(mBucketArray[n], k); + return pNode ? const_iterator(pNode, const_cast(mBucketArray) + n) : const_iterator(const_cast(mBucketArray) + kBucketCount); + } + + + template + template + inline typename intrusive_hashtable::iterator + intrusive_hashtable::find_as(const U& other, UHash uhash, BinaryPredicate predicate) + { + const size_type n = (size_type)(uhash(other) % kBucketCount); + node_type* const pNode = DoFindNode(mBucketArray[n], other, predicate); + return pNode ? iterator(pNode, mBucketArray + n) : iterator(mBucketArray + kBucketCount); + } + + + template + template + inline typename intrusive_hashtable::const_iterator + intrusive_hashtable::find_as(const U& other, UHash uhash, BinaryPredicate predicate) const + { + const size_type n = (size_type)(uhash(other) % kBucketCount); + node_type* const pNode = DoFindNode(mBucketArray[n], other, predicate); + return pNode ? const_iterator(pNode, const_cast(mBucketArray) + n) : const_iterator(const_cast(mBucketArray) + kBucketCount); + } + + + /// intrusive_hashtable_find + /// + /// Helper function that defaults to using hash and equal_to_2. + /// This makes it so that by default you don't need to provide these. + /// Note that the default hash functions may not be what you want, though. + /// + /// Example usage. 
Instead of this: + /// hash_set hashSet; + /// hashSet.find("hello", hash(), equal_to_2()); + /// + /// You can use this: + /// hash_set hashSet; + /// hashtable_find(hashSet, "hello"); + /// + template + inline typename H::iterator intrusive_hashtable_find(H& hashTable, const U& u) + { return hashTable.find_as(u, eastl::hash(), eastl::equal_to_2()); } + + template + inline typename H::const_iterator intrusive_hashtable_find(const H& hashTable, const U& u) + { return hashTable.find_as(u, eastl::hash(), eastl::equal_to_2()); } + + + + template + template + inline typename intrusive_hashtable::iterator + intrusive_hashtable::find_as(const U& other) + { return eastl::intrusive_hashtable_find(*this, other); } + // VC++ doesn't appear to like the following, though it seems correct to me. + // So we implement the workaround above until we can straighten this out. + //{ return find_as(other, eastl::hash(), eastl::equal_to_2()); } + + + template + template + inline typename intrusive_hashtable::const_iterator + intrusive_hashtable::find_as(const U& other) const + { return eastl::intrusive_hashtable_find(*this, other); } + // VC++ doesn't appear to like the following, though it seems correct to me. + // So we implement the workaround above until we can straighten this out. + //{ return find_as(other, eastl::hash(), eastl::equal_to_2()); } + + + template + typename intrusive_hashtable::size_type + intrusive_hashtable::count(const key_type& k) const + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + size_type result = 0; + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + + // To do: Make a specialization for bU (unique keys) == true and take + // advantage of the fact that the count will always be zero or one in that case. 
+ for(node_type* pNode = mBucketArray[n]; pNode; pNode = static_cast(pNode->mpNext)) + { + if(mEqual(k, extractKey(*pNode))) + ++result; + } + return result; + } + + + template + eastl::pair::iterator, + typename intrusive_hashtable::iterator> + intrusive_hashtable::equal_range(const key_type& k) + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + node_type** head = mBucketArray + n; + node_type* pNode = DoFindNode(*head, k); + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + + if(pNode) + { + node_type* p1 = static_cast(pNode->mpNext); + + for(; p1; p1 = static_cast(p1->mpNext)) + { + if(!mEqual(k, extractKey(*p1))) + break; + } + + iterator first(pNode, head); + iterator last(p1, head); + + if(!p1) + last.increment_bucket(); + + return eastl::pair(first, last); + } + + return eastl::pair(iterator(mBucketArray + kBucketCount), + iterator(mBucketArray + kBucketCount)); + } + + + + + template + eastl::pair::const_iterator, + typename intrusive_hashtable::const_iterator> + intrusive_hashtable::equal_range(const key_type& k) const + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + node_type** head = const_cast(mBucketArray + n); + node_type* pNode = DoFindNode(*head, k); + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. 
+ + if(pNode) + { + node_type* p1 = static_cast(pNode->mpNext); + + for(; p1; p1 = static_cast(p1->mpNext)) + { + if(!mEqual(k, extractKey(*p1))) + break; + } + + const_iterator first(pNode, head); + const_iterator last(p1, head); + + if(!p1) + last.increment_bucket(); + + return eastl::pair(first, last); + } + + return eastl::pair(const_iterator(const_cast(mBucketArray) + kBucketCount), + const_iterator(const_cast(mBucketArray) + kBucketCount)); + } + + + template + inline typename intrusive_hashtable::node_type* + intrusive_hashtable::DoFindNode(node_type* pNode, const key_type& k) const + { + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + + for(; pNode; pNode = static_cast(pNode->mpNext)) + { + if(mEqual(k, extractKey(*pNode))) + return pNode; + } + return NULL; + } + + + template + template + inline typename intrusive_hashtable::node_type* + intrusive_hashtable::DoFindNode(node_type* pNode, const U& other, BinaryPredicate predicate) const + { + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + + for(; pNode; pNode = static_cast(pNode->mpNext)) + { + if(predicate(extractKey(*pNode), other)) // Intentionally compare with key as first arg and other as second arg. + return pNode; + } + return NULL; + } + + + template + eastl::pair::iterator, bool> + intrusive_hashtable::DoInsertValue(value_type& value, true_type) // true_type means bUniqueKeys is true. + { + // For sets (as opposed to maps), one could argue that all insertions are successful, + // as all elements are unique. However, the equal function might not think so. + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. 
+ const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount); + node_type* const pNode = DoFindNode(mBucketArray[n], extractKey(value)); + + if(pNode == NULL) + { + value.mpNext = mBucketArray[n]; + mBucketArray[n] = &value; + ++mnElementCount; + + return eastl::pair(iterator(&value, mBucketArray + n), true); + } + + return eastl::pair(iterator(pNode, mBucketArray + n), false); + } + + + template + typename intrusive_hashtable::iterator + intrusive_hashtable::DoInsertValue(value_type& value, false_type) // false_type means bUniqueKeys is false. + { + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount); + node_type* const pNodePrev = DoFindNode(mBucketArray[n], extractKey(value)); + + if(pNodePrev == NULL) + { + value.mpNext = mBucketArray[n]; + mBucketArray[n] = &value; + } + else + { + value.mpNext = pNodePrev->mpNext; + pNodePrev->mpNext = &value; + } + + ++mnElementCount; + + return iterator(&value, mBucketArray + n); + } + + + + template + template + inline void intrusive_hashtable::insert(InputIterator first, InputIterator last) + { + for(; first != last; ++first) + insert(*first); + } + + + template + typename intrusive_hashtable::iterator + intrusive_hashtable::erase(const_iterator i) + { + iterator iNext(i.mpNode, i.mpBucket); + ++iNext; + + node_type* pNode = i.mpNode; + node_type* pNodeCurrent = *i.mpBucket; + + if(pNodeCurrent == pNode) + *i.mpBucket = static_cast(pNodeCurrent->mpNext); + else + { + // We have a singly-linked list, so we have no choice but to + // walk down it till we find the node before the node at 'i'. + node_type* pNodeNext = static_cast(pNodeCurrent->mpNext); + + while(pNodeNext != pNode) + { + pNodeCurrent = pNodeNext; + pNodeNext = static_cast(pNodeCurrent->mpNext); + } + + pNodeCurrent->mpNext = static_cast(pNodeNext->mpNext); + } + + // To consider: In debug builds set the node mpNext to NULL. 
+ --mnElementCount; + + return iNext; + } + + + template + inline typename intrusive_hashtable::iterator + intrusive_hashtable::erase(const_iterator first, const_iterator last) + { + while(first != last) + first = erase(first); + return iterator(first.mpNode, first.mpBucket); + } + + + template + typename intrusive_hashtable::size_type + intrusive_hashtable::erase(const key_type& k) + { + const size_type n = (size_type)(mHash(k) % kBucketCount); + const size_type nElementCountSaved = mnElementCount; + node_type*& pNodeBase = mBucketArray[n]; + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. + + // Note by Paul Pedriana: + // We have two loops here, and I'm not finding any easy way to having just one + // loop without changing the requirements of the hashtable node definition. + // It's a problem of taking an address of a variable and converting it to the + // address of another type without knowing what that type is. Perhaps I'm a + // little overly tired, so if there is a simple solution I am probably missing it. + + while(pNodeBase && mEqual(k, extractKey(*pNodeBase))) + { + pNodeBase = static_cast(pNodeBase->mpNext); + --mnElementCount; + } + + node_type* pNodePrev = pNodeBase; + + if(pNodePrev) + { + node_type* pNodeCur; + + while((pNodeCur = static_cast(pNodePrev->mpNext)) != NULL) + { + if(mEqual(k, extractKey(*pNodeCur))) + { + pNodePrev->mpNext = static_cast(pNodeCur->mpNext); + --mnElementCount; // To consider: In debug builds set the node mpNext to NULL. + } + else + pNodePrev = static_cast(pNodePrev->mpNext); + } + } + + return nElementCountSaved - mnElementCount; + } + + + template + inline typename intrusive_hashtable::iterator + intrusive_hashtable::remove(value_type& value) + { + extract_key extractKey; // extract_key is empty and thus this ctor is a no-op. 
+ const size_type n = (size_type)(mHash(extractKey(value)) % kBucketCount); + + return erase(iterator(&value, &mBucketArray[n])); + } + + + template + inline void intrusive_hashtable::clear() + { + // To consider: In debug builds set the node mpNext to NULL. + memset(mBucketArray, 0, kBucketCount * sizeof(mBucketArray[0])); + mnElementCount = 0; + } + + + template + inline bool intrusive_hashtable::validate() const + { + // Verify that the element count matches mnElementCount. + size_type nElementCount = 0; + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + ++nElementCount; + + if(nElementCount != mnElementCount) + return false; + + // To do: Verify that individual elements are in the expected buckets. + + return true; + } + + + template + int intrusive_hashtable::validate_iterator(const_iterator i) const + { + // To do: Come up with a more efficient mechanism of doing this. + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if(temp == i) + return (isf_valid | isf_current | isf_can_dereference); + } + + if(i == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + inline bool operator==(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin()); + } + + + template + inline bool operator!=(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + return !(a == b); + } + + + // Comparing hash tables for less-ness is an odd thing to do. We provide it for + // completeness, though the user is advised to be wary of how they use this. + template + inline bool operator<(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + // This requires hash table elements to support operator<. 
Since the hash table + // doesn't compare elements via less (it does so via equals), we must use the + // globally defined operator less for the elements. + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + + template + inline bool operator>(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + return b < a; + } + + + template + inline bool operator<=(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + return !(b < a); + } + + + template + inline bool operator>=(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + return !(a < b); + } + + + template + inline void swap(const intrusive_hashtable& a, + const intrusive_hashtable& b) + { + a.swap(b); + } + + +} // namespace eastl + + + +#endif // Header include guard + + + + + + + + + diff --git a/include/EASTL/internal/mem_fn.h b/include/EASTL/internal/mem_fn.h new file mode 100644 index 0000000..1d3e7b3 --- /dev/null +++ b/include/EASTL/internal/mem_fn.h @@ -0,0 +1,304 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_MEM_FN_H +#define EASTL_INTERNAL_MEM_FN_H + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) +#pragma once +#endif + +//////////////////////////////////////////////////////////////////////////////// +// The code in this file is a modification of the libcxx implementation. We copy +// the license information here as required. +// +// We implement only enough of mem_fn to implement eastl::function. +//////////////////////////////////////////////////////////////////////////////// + +//===------------------------ functional ----------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + + +namespace eastl +{ + // + // apply_cv + // + template ::type>::value, + bool = is_volatile::type>::value> + struct apply_cv { typedef U type; }; + + template struct apply_cv { typedef const U type; }; + template struct apply_cv { typedef volatile U type; }; + template struct apply_cv { typedef const volatile U type; }; + template struct apply_cv { typedef U& type; }; + template struct apply_cv { typedef const U& type; }; + template struct apply_cv { typedef volatile U& type; }; + template struct apply_cv { typedef const volatile U& type; }; + + + + // + // has_result_type + // + template + struct has_result_type + { + private: + template + static eastl::no_type test(...); + + template + static eastl::yes_type test(typename U::result_type* = 0); + + public: + static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); + }; + + + + // + // derives_from_unary_function + // derives_from_binary_function + // + template + struct derives_from_unary_function + { + private: + static eastl::no_type test(...); + + template + static unary_function test(const volatile unary_function*); + + public: + static const bool value = !is_same::value; + typedef decltype(test((T*)0)) type; + }; + + template + struct derives_from_binary_function + { + private: + static eastl::no_type test(...); + template + static binary_function test(const volatile binary_function*); + + public: + static const bool value = !is_same::value; + typedef decltype(test((T*)0)) type; + }; + + + + // + // maybe_derives_from_unary_function + // maybe_derives_from_binary_function + // + template ::value> + struct maybe_derive_from_unary_function // bool is true + : public derives_from_unary_function::type { }; + + template + struct maybe_derive_from_unary_function { }; + + template ::value> + struct maybe_derive_from_binary_function // bool is true + : public derives_from_binary_function::type { }; + + template + 
struct maybe_derive_from_binary_function { }; + + + + // + // weak_result_type_imp + // + template ::value> + struct weak_result_type_imp // bool is true + : public maybe_derive_from_unary_function, + public maybe_derive_from_binary_function + { + typedef typename T::result_type result_type; + }; + + template + struct weak_result_type_imp : public maybe_derive_from_unary_function, + public maybe_derive_from_binary_function { }; + + + + // + // weak_result_type + // + template + struct weak_result_type : public weak_result_type_imp { }; + + // 0 argument case + template struct weak_result_type { typedef R result_type; }; + template struct weak_result_type { typedef R result_type; }; + template struct weak_result_type { typedef R result_type; }; + + // 1 argument case + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + template struct weak_result_type : public unary_function { }; + + // 2 argument case + template struct weak_result_type : public binary_function { }; + template struct weak_result_type : public binary_function { }; + template struct weak_result_type : public binary_function { }; + template struct weak_result_type : public binary_function { }; + template struct weak_result_type : public binary_function { }; + template struct weak_result_type : public binary_function { }; + template struct weak_result_type : public binary_function { }; + + // 3 or more arguments +#if EASTL_VARIADIC_TEMPLATES_ENABLED + template struct weak_result_type { typedef R result_type; }; + template struct weak_result_type { typedef R result_type; }; + template struct weak_result_type { typedef R result_type; }; + template struct weak_result_type { 
typedef R result_type; }; + template struct weak_result_type { typedef R result_type; }; + template struct weak_result_type { typedef R result_type; }; + template struct weak_result_type { typedef R result_type; }; +#endif + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // mem_fn_impl + // + template + class mem_fn_impl +#if defined(_MSC_VER) && (_MSC_VER >= 1900) // VS2015 or later + // Due to a (seemingly random) internal compiler error on VS2013 we disable eastl::unary_function and + // binary_function support for eastl::mem_fn as its not widely (if at all) used. If you require this support + // on VS2013 or below please contact us. + : public weak_result_type +#endif + { + public: + typedef T type; + + private: + type func; + + public: + EASTL_FORCE_INLINE mem_fn_impl(type _func) : func(_func) {} + +#if EASTL_VARIADIC_TEMPLATES_ENABLED + template + typename invoke_result::type operator()(ArgTypes&&... args) const + { + return invoke(func, eastl::forward(args)...); + } +#else + typename invoke_result::type operator()() const { return invoke_impl(func); } + + template + typename invoke_result0::type operator()(A0& a0) const + { + return invoke(func, a0); + } + + template + typename invoke_result1::type operator()(A0& a0, A1& a1) const + { + return invoke(func, a0, a1); + } + + template + typename invoke_result2::type operator()(A0& a0, A1& a1, A2& a2) const + { + return invoke(func, a0, a1, a2); + } +#endif + }; // mem_fn_impl + + + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // mem_fn -> mem_fn_impl adapters + // + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R T::*pm) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)()) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0)) + { return mem_fn_impl(pm); } + + template + 
EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1)) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1, A2)) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)() const) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0) const) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1) const) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1, A2) const) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)() volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0) volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1) volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1, A2) volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)() const volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0) const volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1) const volatile) + { return mem_fn_impl(pm); } + + template + EASTL_FORCE_INLINE mem_fn_impl mem_fn(R (T::*pm)(A0, A1, A2) const volatile) + { return mem_fn_impl(pm); } + +} // namespace eastl + +#endif // EASTL_INTERNAL_MEM_FN_H diff --git a/include/EASTL/internal/memory_base.h b/include/EASTL/internal/memory_base.h new file mode 100644 index 0000000..b1c3490 --- /dev/null +++ b/include/EASTL/internal/memory_base.h @@ -0,0 +1,37 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_INTERNAL_MEMORY_BASE_H +#define EASTL_INTERNAL_MEMORY_BASE_H + +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + +//////////////////////////////////////////////////////////////////////////////////////////// +// This file contains basic functionality found in the standard library 'memory' header that +// have limited or no dependencies. This allows us to utilize these utilize these functions +// in other EASTL code while avoid circular dependencies. +//////////////////////////////////////////////////////////////////////////////////////////// + +namespace eastl +{ + /// addressof + /// + /// From the C++11 Standard, section 20.6.12.1 + /// Returns the actual address of the object or function referenced by r, even in the presence of an overloaded operator&. + /// + template + T* addressof(T& value) EA_NOEXCEPT + { + return reinterpret_cast(&const_cast(reinterpret_cast(value))); + } + +} // namespace eastl + +#endif // EASTL_INTERNAL_MEMORY_BASE_H + diff --git a/include/EASTL/internal/move_help.h b/include/EASTL/internal/move_help.h new file mode 100644 index 0000000..44c4dec --- /dev/null +++ b/include/EASTL/internal/move_help.h @@ -0,0 +1,162 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_MOVE_HELP_H +#define EASTL_INTERNAL_MOVE_HELP_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include + + +// C++11's rvalue references aren't supported by earlier versions of C++. 
+// It turns out that in a number of cases under earlier C++ versions we can +// write code that uses rvalues similar to lvalues. We have macros below for +// such cases. For example, eastl::move (same as std::move) can be treated +// as a no-op under C++03, though with the consequence that move functionality +// isn't taken advantage of. + + +/// EASTL_MOVE +/// Acts like eastl::move when possible. Same as C++11 std::move. +/// +/// EASTL_MOVE_INLINE +/// Acts like eastl::move but is implemented inline instead of a function call. +/// This allows code to be faster in debug builds in particular. +/// Depends on C++ compiler decltype support or a similar extension. +/// +/// EASTL_FORWARD +/// Acts like eastl::forward when possible. Same as C++11 std::forward. +/// +/// EASTL_FORWARD_INLINE +/// Acts like eastl::forward but is implemented inline instead of a function call. +/// This allows code to be faster in debug builds in particular. +/// +#define EASTL_MOVE(x) eastl::move(x) +#if !defined(EA_COMPILER_NO_DECLTYPE) + #define EASTL_MOVE_INLINE(x) static_cast::type&&>(x) +#elif defined(__GNUC__) + #define EASTL_MOVE_INLINE(x) static_cast::type&&>(x) +#else + #define EASTL_MOVE_INLINE(x) eastl::move(x) +#endif + +#define EASTL_FORWARD(T, x) eastl::forward(x) +#define EASTL_FORWARD_INLINE(T, x) eastl::forward(x) // Need to investigate how to properly make a macro for this. (eastl::is_reference::value ? static_cast(static_cast(x)) : static_cast(x)) + + + + +/// EASTL_MOVE_RANGE +/// Acts like the eastl::move algorithm when possible. Same as C++11 std::move. 
+/// Note to be confused with the single argument move: (typename remove_reference::type&& move(T&& x)) +/// http://en.cppreference.com/w/cpp/algorithm/move +/// http://en.cppreference.com/w/cpp/algorithm/move_backward +/// +#define EASTL_MOVE_RANGE(first, last, result) eastl::move(first, last, result) +#define EASTL_MOVE_BACKWARD_RANGE(first, last, resultEnd) eastl::move_backward(first, last, resultEnd) + + +namespace eastl +{ + // forward + // + // forwards the argument to another function exactly as it was passed to the calling function. + // Not to be confused with move, this is specifically for echoing templated argument types + // to another function. move is specifically about making a type be an rvalue reference (i.e. movable) type. + // + // Example usage: + // template + // void WrapperFunction(T&& arg) + // { foo(eastl::forward(arg)); } + // + // template + // void WrapperFunction(Args&&... args) + // { foo(eastl::forward(args)...); } + // + // See the C++ Standard, section 20.2.3 + // http://en.cppreference.com/w/cpp/utility/forward + // + template + EA_CPP14_CONSTEXPR T&& forward(typename eastl::remove_reference::type& x) EA_NOEXCEPT + { + return static_cast(x); + } + + + template + EA_CPP14_CONSTEXPR T&& forward(typename eastl::remove_reference::type&& x) EA_NOEXCEPT + { + static_assert(!is_lvalue_reference::value, "forward T isn't lvalue reference"); + return static_cast(x); + } + + + // move + // + // move obtains an rvalue reference to its argument and converts it to an xvalue. + // Returns, by definition: static_cast::type&&>(t). + // The primary use of this is to pass a move'd type to a function which takes T&&, + // and thus select that function instead of (e.g.) a function which takes T or T&. 
+ // See the C++ Standard, section 20.2.3 + // http://en.cppreference.com/w/cpp/utility/move + // + template + EA_CPP14_CONSTEXPR typename eastl::remove_reference::type&& + move(T&& x) EA_NOEXCEPT + { + return ((typename eastl::remove_reference::type&&)x); + } + + + // move_if_noexcept + // + // Returns T&& if move-constructing T throws no exceptions. Instead returns const T& if + // move-constructing T throws exceptions or has no accessible copy constructor. + // The purpose of this is to use automatically use copy construction instead of move + // construction when the move may possible throw an exception. + // See the C++ Standard, section 20.2.3 + // http://en.cppreference.com/w/cpp/utility/move_if_noexcept + // + #if EASTL_EXCEPTIONS_ENABLED + template + EA_CPP14_CONSTEXPR typename eastl::conditional::value && + eastl::is_copy_constructible::value, const T&, T&&>::type + move_if_noexcept(T& x) EA_NOEXCEPT + { + return eastl::move(x); + } + #else + template + EA_CPP14_CONSTEXPR T&& + move_if_noexcept(T& x) EA_NOEXCEPT + { + return eastl::move(x); + } + #endif + +} // namespace eastl + +#endif // Header include guard + + + + + + + + + + + + + + + diff --git a/include/EASTL/internal/pair_fwd_decls.h b/include/EASTL/internal/pair_fwd_decls.h new file mode 100644 index 0000000..a716482 --- /dev/null +++ b/include/EASTL/internal/pair_fwd_decls.h @@ -0,0 +1,16 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_PAIR_FWD_DECLS_H +#define EASTL_PAIR_FWD_DECLS_H + +#include + +namespace eastl +{ + template + struct pair; +} + +#endif // EASTL_PAIR_FWD_DECLS_H diff --git a/include/EASTL/internal/piecewise_construct_t.h b/include/EASTL/internal/piecewise_construct_t.h new file mode 100644 index 0000000..d853f0e --- /dev/null +++ b/include/EASTL/internal/piecewise_construct_t.h @@ -0,0 +1,46 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_PIECEWISE_CONSTRUCT_T_H +#define EASTL_INTERNAL_PIECEWISE_CONSTRUCT_T_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +namespace eastl +{ + /////////////////////////////////////////////////////////////////////////////// + /// piecewise_construct_t + /// + /// http://en.cppreference.com/w/cpp/utility/piecewise_construct_t + /// + struct piecewise_construct_t + { + explicit piecewise_construct_t() = default; + }; + + + /////////////////////////////////////////////////////////////////////////////// + /// piecewise_construct + /// + /// A tag type used to disambiguate between function overloads that take two tuple arguments. + /// + /// http://en.cppreference.com/w/cpp/utility/piecewise_construct + /// + EA_CONSTEXPR piecewise_construct_t piecewise_construct = eastl::piecewise_construct_t(); + +} // namespace eastl + + +#endif // Header include guard + + + + + + diff --git a/include/EASTL/internal/red_black_tree.h b/include/EASTL/internal/red_black_tree.h new file mode 100644 index 0000000..cc198fe --- /dev/null +++ b/include/EASTL/internal/red_black_tree.h @@ -0,0 +1,2352 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_RED_BLACK_TREE_H +#define EASTL_RED_BLACK_TREE_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS() +#include +#include +EA_RESTORE_ALL_VC_WARNINGS() + + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable: 4512) // 'class' : assignment operator could not be generated + #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc + #pragma warning(disable: 4571) // catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. +#endif + + +namespace eastl +{ + + /// EASTL_RBTREE_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_RBTREE_DEFAULT_NAME + #define EASTL_RBTREE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " rbtree" // Unless the user overrides something, this is "EASTL rbtree". 
+ #endif + + + /// EASTL_RBTREE_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_RBTREE_DEFAULT_ALLOCATOR + #define EASTL_RBTREE_DEFAULT_ALLOCATOR allocator_type(EASTL_RBTREE_DEFAULT_NAME) + #endif + + + /// EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR + /// + #ifndef EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR + #define EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR 0 + #endif + + + /// RBTreeColor + /// + enum RBTreeColor + { + kRBTreeColorRed, + kRBTreeColorBlack + }; + + + + /// RBTreeColor + /// + enum RBTreeSide + { + kRBTreeSideLeft, + kRBTreeSideRight + }; + + + + /// rbtree_node_base + /// + /// We define a rbtree_node_base separately from rbtree_node (below), because it + /// allows us to have non-templated operations, and it makes it so that the + /// rbtree anchor node doesn't carry a T with it, which would waste space and + /// possibly lead to surprising the user due to extra Ts existing that the user + /// didn't explicitly create. The downside to all of this is that it makes debug + /// viewing of an rbtree harder, given that the node pointers are of type + /// rbtree_node_base and not rbtree_node. + /// + struct rbtree_node_base + { + typedef rbtree_node_base this_type; + + public: + this_type* mpNodeRight; // Declared first because it is used most often. + this_type* mpNodeLeft; + this_type* mpNodeParent; + char mColor; // We only need one bit here, would be nice if we could stuff that bit somewhere else. + }; + + + /// rbtree_node + /// + template + struct rbtree_node : public rbtree_node_base + { + Value mValue; // For set and multiset, this is the user's value, for map and multimap, this is a pair of key/value. + + // This type is never constructed, so to avoid a MSVC warning we "delete" the copy constructor. + // + // Potentially we could provide a constructor that would satisfy the compiler and change the code to use this constructor + // instead of constructing mValue in place within an unconstructed rbtree_node. 
+ #if defined(_MSC_VER) + rbtree_node(const rbtree_node&) = delete; + #endif + }; + + + + + // rbtree_node_base functions + // + // These are the fundamental functions that we use to maintain the + // tree. The bulk of the work of the tree maintenance is done in + // these functions. + // + EASTL_API rbtree_node_base* RBTreeIncrement (const rbtree_node_base* pNode); + EASTL_API rbtree_node_base* RBTreeDecrement (const rbtree_node_base* pNode); + EASTL_API rbtree_node_base* RBTreeGetMinChild (const rbtree_node_base* pNode); + EASTL_API rbtree_node_base* RBTreeGetMaxChild (const rbtree_node_base* pNode); + EASTL_API size_t RBTreeGetBlackCount(const rbtree_node_base* pNodeTop, + const rbtree_node_base* pNodeBottom); + EASTL_API void RBTreeInsert ( rbtree_node_base* pNode, + rbtree_node_base* pNodeParent, + rbtree_node_base* pNodeAnchor, + RBTreeSide insertionSide); + EASTL_API void RBTreeErase ( rbtree_node_base* pNode, + rbtree_node_base* pNodeAnchor); + + + + + + + + /// rbtree_iterator + /// + template + struct rbtree_iterator + { + typedef rbtree_iterator this_type; + typedef rbtree_iterator iterator; + typedef rbtree_iterator const_iterator; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. 
+ typedef ptrdiff_t difference_type; + typedef T value_type; + typedef rbtree_node_base base_node_type; + typedef rbtree_node node_type; + typedef Pointer pointer; + typedef Reference reference; + typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category; + + public: + node_type* mpNode; + + public: + rbtree_iterator(); + explicit rbtree_iterator(const node_type* pNode); + rbtree_iterator(const iterator& x); + + reference operator*() const; + pointer operator->() const; + + rbtree_iterator& operator++(); + rbtree_iterator operator++(int); + + rbtree_iterator& operator--(); + rbtree_iterator operator--(int); + + }; // rbtree_iterator + + + /////////////////////////////////////////////////////////////////////////////// + // rb_base_compare_ebo + // + // Utilizes the "empty base-class optimization" to reduce the size of the rbtree + // when its Compare template argument is an empty class. + /////////////////////////////////////////////////////////////////////////////// + + template ::value> + struct rb_base_compare_ebo + { + protected: + rb_base_compare_ebo() : mCompare() {} + rb_base_compare_ebo(const Compare& compare) : mCompare(compare) {} + + Compare& get_compare() { return mCompare; } + const Compare& get_compare() const { return mCompare; } + + template + bool compare(const T& lhs, const T& rhs) + { + return mCompare(lhs, rhs); + } + + template + bool compare(const T& lhs, const T& rhs) const + { + return mCompare(lhs, rhs); + } + + private: + Compare mCompare; + }; + + template + struct rb_base_compare_ebo : private Compare + { + protected: + rb_base_compare_ebo() {} + rb_base_compare_ebo(const Compare& compare) : Compare(compare) {} + + Compare& get_compare() { return *this; } + const Compare& get_compare() const { return *this; } + + template + bool compare(const T& lhs, const T& rhs) + { + return Compare::operator()(lhs, rhs); + } + + template + bool compare(const T& lhs, const T& rhs) const + { + return Compare::operator()(lhs, rhs); + } + }; + + + 
+ /////////////////////////////////////////////////////////////////////////////// + // rb_base + // + // This class allows us to use a generic rbtree as the basis of map, multimap, + // set, and multiset transparently. The vital template parameters for this are + // the ExtractKey and the bUniqueKeys parameters. + // + // If the rbtree has a value type of the form pair (i.e. it is a map or + // multimap and not a set or multiset) and a key extraction policy that returns + // the first part of the pair, the rbtree gets a mapped_type typedef. + // If it satisfies those criteria and also has unique keys, then it also gets an + // operator[] (which only map and set have and multimap and multiset don't have). + // + /////////////////////////////////////////////////////////////////////////////// + + + + /// rb_base + /// This specialization is used for 'set'. In this case, Key and Value + /// will be the same as each other and ExtractKey will be eastl::use_self. + /// + template + struct rb_base : public rb_base_compare_ebo + { + typedef ExtractKey extract_key; + + protected: + using rb_base_compare_ebo::compare; + using rb_base_compare_ebo::get_compare; + + public: + rb_base() {} + rb_base(const Compare& compare) : rb_base_compare_ebo(compare) {} + }; + + + /// rb_base + /// This class is used for 'multiset'. + /// In this case, Key and Value will be the same as each + /// other and ExtractKey will be eastl::use_self. + /// + template + struct rb_base : public rb_base_compare_ebo + { + typedef ExtractKey extract_key; + + protected: + using rb_base_compare_ebo::compare; + using rb_base_compare_ebo::get_compare; + + public: + rb_base() {} + rb_base(const Compare& compare) : rb_base_compare_ebo(compare) {} + }; + + + /// rb_base + /// This specialization is used for 'map'. 
+ /// + template + struct rb_base, true, RBTree> : public rb_base_compare_ebo + { + typedef eastl::use_first extract_key; + + using rb_base_compare_ebo::compare; + using rb_base_compare_ebo::get_compare; + + public: + rb_base() {} + rb_base(const Compare& compare) : rb_base_compare_ebo(compare) {} + }; + + + /// rb_base + /// This specialization is used for 'multimap'. + /// + template + struct rb_base, false, RBTree> : public rb_base_compare_ebo + { + typedef eastl::use_first extract_key; + + using rb_base_compare_ebo::compare; + using rb_base_compare_ebo::get_compare; + + public: + rb_base() {} + rb_base(const Compare& compare) : rb_base_compare_ebo(compare) {} + }; + + + /// rbtree + /// + /// rbtree is the red-black tree basis for the map, multimap, set, and multiset + /// containers. Just about all the work of those containers is done here, and + /// they are merely a shell which sets template policies that govern the code + /// generation for this rbtree. + /// + /// This rbtree implementation is pretty much the same as all other modern + /// rbtree implementations, as the topic is well known and researched. We may + /// choose to implement a "relaxed balancing" option at some point in the + /// future if it is deemed worthwhile. Most rbtree implementations don't do this. + /// + /// The primary rbtree member variable is mAnchor, which is a node_type and + /// acts as the end node. However, like any other node, it has mpNodeLeft, + /// mpNodeRight, and mpNodeParent members. We do the conventional trick of + /// assigning begin() (left-most rbtree node) to mpNodeLeft, assigning + /// 'end() - 1' (a.k.a. rbegin()) to mpNodeRight, and assigning the tree root + /// node to mpNodeParent. + /// + /// Compare (functor): This is a comparison class which defaults to 'less'. + /// It is a common STL thing which takes two arguments and returns true if + /// the first is less than the second. 
+ /// + /// ExtractKey (functor): This is a class which gets the key from a stored + /// node. With map and set, the node is a pair, whereas with set and multiset + /// the node is just the value. ExtractKey will be either eastl::use_first (map and multimap) + /// or eastl::use_self (set and multiset). + /// + /// bMutableIterators (bool): true if rbtree::iterator is a mutable + /// iterator, false if iterator and const_iterator are both const iterators. + /// It will be true for map and multimap and false for set and multiset. + /// + /// bUniqueKeys (bool): true if the keys are to be unique, and false if there + /// can be multiple instances of a given key. It will be true for set and map + /// and false for multiset and multimap. + /// + /// To consider: Add an option for relaxed tree balancing. This could result + /// in performance improvements but would require a more complicated implementation. + /// + /////////////////////////////////////////////////////////////////////// + /// find_as + /// In order to support the ability to have a tree of strings but + /// be able to do efficiently lookups via char pointers (i.e. so they + /// aren't converted to string objects), we provide the find_as + /// function. This function allows you to do a find with a key of a + /// type other than the tree's key type. See the find_as function + /// for more documentation on this. + /// + template + class rbtree + : public rb_base > + { + public: + typedef ptrdiff_t difference_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. 
+ typedef Key key_type; + typedef Value value_type; + typedef rbtree_node node_type; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef value_type* pointer; + typedef const value_type* const_pointer; + + typedef typename type_select, + rbtree_iterator >::type iterator; + typedef rbtree_iterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + + typedef Allocator allocator_type; + typedef Compare key_compare; + typedef typename type_select, iterator>::type insert_return_type; // map/set::insert return a pair, multimap/multiset::iterator return an iterator. + typedef rbtree this_type; + typedef rb_base base_type; + typedef integral_constant has_unique_keys_type; + typedef typename base_type::extract_key extract_key; + + protected: + using base_type::compare; + using base_type::get_compare; + + public: + rbtree_node_base mAnchor; /// This node acts as end() and its mpLeft points to begin(), and mpRight points to rbegin() (the last node on the right). + size_type mnSize; /// Stores the count of nodes in the tree (not counting the anchor node). + allocator_type mAllocator; // To do: Use base class optimization to make this go away. 
+ + public: + // ctor/dtor + rbtree(); + rbtree(const allocator_type& allocator); + rbtree(const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR); + rbtree(const this_type& x); + rbtree(this_type&& x); + rbtree(this_type&& x, const allocator_type& allocator); + + template + rbtree(InputIterator first, InputIterator last, const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR); + + ~rbtree(); + + public: + // properties + const allocator_type& get_allocator() const EA_NOEXCEPT; + allocator_type& get_allocator() EA_NOEXCEPT; + void set_allocator(const allocator_type& allocator); + + const key_compare& key_comp() const { return get_compare(); } + key_compare& key_comp() { return get_compare(); } + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + void swap(this_type& x); + + public: + // iterators + iterator begin() EA_NOEXCEPT; + const_iterator begin() const EA_NOEXCEPT; + const_iterator cbegin() const EA_NOEXCEPT; + + iterator end() EA_NOEXCEPT; + const_iterator end() const EA_NOEXCEPT; + const_iterator cend() const EA_NOEXCEPT; + + reverse_iterator rbegin() EA_NOEXCEPT; + const_reverse_iterator rbegin() const EA_NOEXCEPT; + const_reverse_iterator crbegin() const EA_NOEXCEPT; + + reverse_iterator rend() EA_NOEXCEPT; + const_reverse_iterator rend() const EA_NOEXCEPT; + const_reverse_iterator crend() const EA_NOEXCEPT; + + public: + bool empty() const EA_NOEXCEPT; + size_type size() const EA_NOEXCEPT; + + template + insert_return_type emplace(Args&&... args); + + template + iterator emplace_hint(const_iterator position, Args&&... args); + + template eastl::pair try_emplace(const key_type& k, Args&&... args); + template eastl::pair try_emplace(key_type&& k, Args&&... args); + template iterator try_emplace(const_iterator position, const key_type& k, Args&&... 
args); + template iterator try_emplace(const_iterator position, key_type&& k, Args&&... args); + + // Standard conversion overload to avoid the overhead of mismatched 'pair' types. + template ::value>::type> + insert_return_type insert(P&& otherValue); + + // Currently limited to value_type instead of P because it collides with insert(InputIterator, InputIterator). + // To allow this to work with templated P we need to implement a compile-time specialization for the + // case that P&& is const_iterator and have that specialization handle insert(InputIterator, InputIterator) + // instead of insert(InputIterator, InputIterator). Curiously, neither libstdc++ nor libc++ + // implement this function either, which suggests they ran into the same problem I did here + // and haven't yet resolved it (at least as of March 2014, GCC 4.8.1). + iterator insert(const_iterator hint, value_type&& value); + + /// map::insert and set::insert return a pair, while multimap::insert and + /// multiset::insert return an iterator. + insert_return_type insert(const value_type& value); + + // C++ standard: inserts value if and only if there is no element with + // key equivalent to the key of t in containers with unique keys; always + // inserts value in containers with equivalent keys. Always returns the + // iterator pointing to the element with key equivalent to the key of value. + // iterator position is a hint pointing to where the insert should start + // to search. However, there is a potential defect/improvement report on this behaviour: + // LWG issue #233 (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1780.html) + // We follow the same approach as SGI STL/STLPort and use the position as + // a forced insertion position for the value when possible. 
+ iterator insert(const_iterator position, const value_type& value); + + void insert(std::initializer_list ilist); + + template + void insert(InputIterator first, InputIterator last); + + // TODO(rparolin): + // insert_return_type insert(node_type&& nh); + // iterator insert(const_iterator hint, node_type&& nh); + + template pair insert_or_assign(const key_type& k, M&& obj); + template pair insert_or_assign(key_type&& k, M&& obj); + template iterator insert_or_assign(const_iterator hint, const key_type& k, M&& obj); + template iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + reverse_iterator erase(const_reverse_iterator position); + reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last); + + // For some reason, multiple STL versions make a specialization + // for erasing an array of key_types. I'm pretty sure we don't + // need this, but just to be safe we will follow suit. + // The implementation is trivial. Returns void because the values + // could well be randomly distributed throughout the tree and thus + // a return value would be nearly meaningless. + void erase(const key_type* first, const key_type* last); + + void clear(); + void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + iterator find(const key_type& key); + const_iterator find(const key_type& key) const; + + /// Implements a find whereby the user supplies a comparison of a different type + /// than the tree's value_type. A useful case of this is one whereby you have + /// a container of string objects but want to do searches via passing in char pointers. + /// The problem is that without this kind of find, you need to do the expensive operation + /// of converting the char pointer to a string so it can be used as the argument to the + /// find function. 
+ /// + /// Example usage (note that the compare uses string as first type and char* as second): + /// set strings; + /// strings.find_as("hello", less_2()); + /// + template iterator find_as(const U& u, Compare2 compare2); + template const_iterator find_as(const U& u, Compare2 compare2) const; + + iterator lower_bound(const key_type& key); + const_iterator lower_bound(const key_type& key) const; + + iterator upper_bound(const key_type& key); + const_iterator upper_bound(const key_type& key) const; + + bool validate() const; + int validate_iterator(const_iterator i) const; + + protected: + node_type* DoAllocateNode(); + void DoFreeNode(node_type* pNode); + + node_type* DoCreateNodeFromKey(const key_type& key); + + template + node_type* DoCreateNode(Args&&... args); + node_type* DoCreateNode(const value_type& value); + node_type* DoCreateNode(value_type&& value); + node_type* DoCreateNode(const node_type* pNodeSource, node_type* pNodeParent); + + node_type* DoCopySubtree(const node_type* pNodeSource, node_type* pNodeDest); + void DoNukeSubtree(node_type* pNode); + + template + eastl::pair DoInsertValue(true_type, Args&&... args); + + template + iterator DoInsertValue(false_type, Args&&... args); + + eastl::pair DoInsertValue(true_type, value_type&& value); + iterator DoInsertValue(false_type, value_type&& value); + + template + iterator DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, Args&&... 
args); + iterator DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, node_type* pNodeNew); + + eastl::pair DoInsertKey(true_type, const key_type& key); + iterator DoInsertKey(false_type, const key_type& key); + + iterator DoInsertValueHint(true_type, const_iterator position, const value_type& value); + iterator DoInsertValueHint(false_type, const_iterator position, const value_type& value); + + iterator DoInsertKey(true_type, const_iterator position, const key_type& key); // By design we return iterator and not a pair. + iterator DoInsertKey(false_type, const_iterator position, const key_type& key); + iterator DoInsertKeyImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key); + + node_type* DoGetKeyInsertionPositionUniqueKeys(bool& canInsert, const key_type& key); + node_type* DoGetKeyInsertionPositionNonuniqueKeys(const key_type& key); + + node_type* DoGetKeyInsertionPositionUniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key); + node_type* DoGetKeyInsertionPositionNonuniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key); + + }; // rbtree + + + + + + /////////////////////////////////////////////////////////////////////// + // rbtree_node_base functions + /////////////////////////////////////////////////////////////////////// + + EASTL_API inline rbtree_node_base* RBTreeGetMinChild(const rbtree_node_base* pNodeBase) + { + while(pNodeBase->mpNodeLeft) + pNodeBase = pNodeBase->mpNodeLeft; + return const_cast(pNodeBase); + } + + EASTL_API inline rbtree_node_base* RBTreeGetMaxChild(const rbtree_node_base* pNodeBase) + { + while(pNodeBase->mpNodeRight) + pNodeBase = pNodeBase->mpNodeRight; + return const_cast(pNodeBase); + } + + // The rest of the functions are non-trivial and are found in + // the corresponding .cpp file to this file. 
+ + + + /////////////////////////////////////////////////////////////////////// + // rbtree_iterator functions + /////////////////////////////////////////////////////////////////////// + + template + rbtree_iterator::rbtree_iterator() + : mpNode(NULL) { } + + + template + rbtree_iterator::rbtree_iterator(const node_type* pNode) + : mpNode(static_cast(const_cast(pNode))) { } + + + template + rbtree_iterator::rbtree_iterator(const iterator& x) + : mpNode(x.mpNode) { } + + + template + typename rbtree_iterator::reference + rbtree_iterator::operator*() const + { return mpNode->mValue; } + + + template + typename rbtree_iterator::pointer + rbtree_iterator::operator->() const + { return &mpNode->mValue; } + + + template + typename rbtree_iterator::this_type& + rbtree_iterator::operator++() + { + mpNode = static_cast(RBTreeIncrement(mpNode)); + return *this; + } + + + template + typename rbtree_iterator::this_type + rbtree_iterator::operator++(int) + { + this_type temp(*this); + mpNode = static_cast(RBTreeIncrement(mpNode)); + return temp; + } + + + template + typename rbtree_iterator::this_type& + rbtree_iterator::operator--() + { + mpNode = static_cast(RBTreeDecrement(mpNode)); + return *this; + } + + + template + typename rbtree_iterator::this_type + rbtree_iterator::operator--(int) + { + this_type temp(*this); + mpNode = static_cast(RBTreeDecrement(mpNode)); + return temp; + } + + + // The C++ defect report #179 requires that we support comparisons between const and non-const iterators. + // Thus we provide additional template paremeters here to support this. The defect report does not + // require us to support comparisons between reverse_iterators and const_reverse_iterators. 
+ template + inline bool operator==(const rbtree_iterator& a, + const rbtree_iterator& b) + { + return a.mpNode == b.mpNode; + } + + + template + inline bool operator!=(const rbtree_iterator& a, + const rbtree_iterator& b) + { + return a.mpNode != b.mpNode; + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const rbtree_iterator& a, + const rbtree_iterator& b) + { + return a.mpNode != b.mpNode; + } + + + + + /////////////////////////////////////////////////////////////////////// + // rbtree functions + /////////////////////////////////////////////////////////////////////// + + template + inline rbtree::rbtree() + : mAnchor(), + mnSize(0), + mAllocator(EASTL_RBTREE_DEFAULT_NAME) + { + reset_lose_memory(); + } + + + template + inline rbtree::rbtree(const allocator_type& allocator) + : mAnchor(), + mnSize(0), + mAllocator(allocator) + { + reset_lose_memory(); + } + + + template + inline rbtree::rbtree(const C& compare, const allocator_type& allocator) + : base_type(compare), + mAnchor(), + mnSize(0), + mAllocator(allocator) + { + reset_lose_memory(); + } + + + template + inline rbtree::rbtree(const this_type& x) + : base_type(x.get_compare()), + mAnchor(), + mnSize(0), + mAllocator(x.mAllocator) + { + reset_lose_memory(); + + if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node. 
+ { + mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, (node_type*)&mAnchor); + mAnchor.mpNodeRight = RBTreeGetMaxChild(mAnchor.mpNodeParent); + mAnchor.mpNodeLeft = RBTreeGetMinChild(mAnchor.mpNodeParent); + mnSize = x.mnSize; + } + } + + + template + inline rbtree::rbtree(this_type&& x) + : base_type(x.get_compare()), + mAnchor(), + mnSize(0), + mAllocator(x.mAllocator) + { + reset_lose_memory(); + swap(x); + } + + template + inline rbtree::rbtree(this_type&& x, const allocator_type& allocator) + : base_type(x.get_compare()), + mAnchor(), + mnSize(0), + mAllocator(allocator) + { + reset_lose_memory(); + swap(x); // swap will directly or indirectly handle the possibility that mAllocator != x.mAllocator. + } + + + template + template + inline rbtree::rbtree(InputIterator first, InputIterator last, const C& compare, const allocator_type& allocator) + : base_type(compare), + mAnchor(), + mnSize(0), + mAllocator(allocator) + { + reset_lose_memory(); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(; first != last; ++first) + insert(eastl::move(*first)); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + clear(); + throw; + } + #endif + } + + + template + inline rbtree::~rbtree() + { + // Erase the entire tree. DoNukeSubtree is not a + // conventional erase function, as it does no rebalancing. 
+ DoNukeSubtree((node_type*)mAnchor.mpNodeParent); + } + + + template + inline const typename rbtree::allocator_type& + rbtree::get_allocator() const EA_NOEXCEPT + { + return mAllocator; + } + + + template + inline typename rbtree::allocator_type& + rbtree::get_allocator() EA_NOEXCEPT + { + return mAllocator; + } + + + template + inline void rbtree::set_allocator(const allocator_type& allocator) + { + mAllocator = allocator; + } + + + template + inline typename rbtree::size_type + rbtree::size() const EA_NOEXCEPT + { return mnSize; } + + + template + inline bool rbtree::empty() const EA_NOEXCEPT + { return (mnSize == 0); } + + + template + inline typename rbtree::iterator + rbtree::begin() EA_NOEXCEPT + { return iterator(static_cast(mAnchor.mpNodeLeft)); } + + + template + inline typename rbtree::const_iterator + rbtree::begin() const EA_NOEXCEPT + { return const_iterator(static_cast(const_cast(mAnchor.mpNodeLeft))); } + + + template + inline typename rbtree::const_iterator + rbtree::cbegin() const EA_NOEXCEPT + { return const_iterator(static_cast(const_cast(mAnchor.mpNodeLeft))); } + + + template + inline typename rbtree::iterator + rbtree::end() EA_NOEXCEPT + { return iterator(static_cast(&mAnchor)); } + + + template + inline typename rbtree::const_iterator + rbtree::end() const EA_NOEXCEPT + { return const_iterator(static_cast(const_cast(&mAnchor))); } + + + template + inline typename rbtree::const_iterator + rbtree::cend() const EA_NOEXCEPT + { return const_iterator(static_cast(const_cast(&mAnchor))); } + + + template + inline typename rbtree::reverse_iterator + rbtree::rbegin() EA_NOEXCEPT + { return reverse_iterator(end()); } + + + template + inline typename rbtree::const_reverse_iterator + rbtree::rbegin() const EA_NOEXCEPT + { return const_reverse_iterator(end()); } + + + template + inline typename rbtree::const_reverse_iterator + rbtree::crbegin() const EA_NOEXCEPT + { return const_reverse_iterator(end()); } + + + template + inline typename 
rbtree::reverse_iterator + rbtree::rend() EA_NOEXCEPT + { return reverse_iterator(begin()); } + + + template + inline typename rbtree::const_reverse_iterator + rbtree::rend() const EA_NOEXCEPT + { return const_reverse_iterator(begin()); } + + + template + inline typename rbtree::const_reverse_iterator + rbtree::crend() const EA_NOEXCEPT + { return const_reverse_iterator(begin()); } + + + template + inline typename rbtree::this_type& + rbtree::operator=(const this_type& x) + { + if(this != &x) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + mAllocator = x.mAllocator; + #endif + + get_compare() = x.get_compare(); + + if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node. + { + mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, (node_type*)&mAnchor); + mAnchor.mpNodeRight = RBTreeGetMaxChild(mAnchor.mpNodeParent); + mAnchor.mpNodeLeft = RBTreeGetMinChild(mAnchor.mpNodeParent); + mnSize = x.mnSize; + } + } + return *this; + } + + template + inline typename rbtree::this_type& + rbtree::operator=(this_type&& x) + { + if(this != &x) + { + clear(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor. + swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy. + } + return *this; + } + + template + inline typename rbtree::this_type& + rbtree::operator=(std::initializer_list ilist) + { + // The simplest means of doing this is to clear and insert. There probably isn't a generic + // solution that's any more efficient without having prior knowledge of the ilist contents. 
+ clear(); + + for(typename std::initializer_list::iterator it = ilist.begin(), itEnd = ilist.end(); it != itEnd; ++it) + DoInsertValue(has_unique_keys_type(), eastl::move(*it)); + + return *this; + } + + + template + void rbtree::swap(this_type& x) + { + #if EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR + if(mAllocator == x.mAllocator) // If allocators are equivalent... + #endif + { + // Most of our members can be exchaged by a basic swap: + // We leave mAllocator as-is. + eastl::swap(mnSize, x.mnSize); + eastl::swap(get_compare(), x.get_compare()); + #if !EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR + eastl::swap(mAllocator, x.mAllocator); + #endif + + + // However, because our anchor node is a part of our class instance and not + // dynamically allocated, we can't do a swap of it but must do a more elaborate + // procedure. This is the downside to having the mAnchor be like this, but + // otherwise we consider it a good idea to avoid allocating memory for a + // nominal container instance. + + // We optimize for the expected most common case: both pointers being non-null. + if(mAnchor.mpNodeParent && x.mAnchor.mpNodeParent) // If both pointers are non-null... + { + eastl::swap(mAnchor.mpNodeRight, x.mAnchor.mpNodeRight); + eastl::swap(mAnchor.mpNodeLeft, x.mAnchor.mpNodeLeft); + eastl::swap(mAnchor.mpNodeParent, x.mAnchor.mpNodeParent); + + // We need to fix up the anchors to point to themselves (we can't just swap them). + mAnchor.mpNodeParent->mpNodeParent = &mAnchor; + x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor; + } + else if(mAnchor.mpNodeParent) + { + x.mAnchor.mpNodeRight = mAnchor.mpNodeRight; + x.mAnchor.mpNodeLeft = mAnchor.mpNodeLeft; + x.mAnchor.mpNodeParent = mAnchor.mpNodeParent; + x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor; + + // We need to fix up our anchor to point it itself (we can't have it swap with x). 
+ mAnchor.mpNodeRight = &mAnchor; + mAnchor.mpNodeLeft = &mAnchor; + mAnchor.mpNodeParent = NULL; + } + else if(x.mAnchor.mpNodeParent) + { + mAnchor.mpNodeRight = x.mAnchor.mpNodeRight; + mAnchor.mpNodeLeft = x.mAnchor.mpNodeLeft; + mAnchor.mpNodeParent = x.mAnchor.mpNodeParent; + mAnchor.mpNodeParent->mpNodeParent = &mAnchor; + + // We need to fix up x's anchor to point it itself (we can't have it swap with us). + x.mAnchor.mpNodeRight = &x.mAnchor; + x.mAnchor.mpNodeLeft = &x.mAnchor; + x.mAnchor.mpNodeParent = NULL; + } // Else both are NULL and there is nothing to do. + } + #if EASTL_RBTREE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR + else + { + const this_type temp(*this); // Can't call eastl::swap because that would + *this = x; // itself call this member swap function. + x = temp; + } + #endif + } + + + template + template + inline typename rbtree::insert_return_type // map/set::insert return a pair, multimap/multiset::iterator return an iterator. + rbtree::emplace(Args&&... args) + { + return DoInsertValue(has_unique_keys_type(), eastl::forward(args)...); + } + + template + template + typename rbtree::iterator + rbtree::emplace_hint(const_iterator position, Args&&... args) + { + return DoInsertValueHint(has_unique_keys_type(), position, eastl::forward(args)...); + } + + template + template + inline eastl::pair::iterator, bool> + rbtree::try_emplace(const key_type& key, Args&&... args) + { + return DoInsertValue(has_unique_keys_type(), piecewise_construct, forward_as_tuple(key), forward_as_tuple(forward(args)...)); + } + + template + template + inline eastl::pair::iterator, bool> + rbtree::try_emplace(key_type&& key, Args&&... args) + { + return DoInsertValue(has_unique_keys_type(), piecewise_construct, forward_as_tuple(eastl::move(key)), forward_as_tuple(forward(args)...)); + } + + template + template + inline typename rbtree::iterator + rbtree::try_emplace(const_iterator position, const key_type& key, Args&&... 
args) + { + return DoInsertValueHint( + has_unique_keys_type(), position, + value_type(piecewise_construct, forward_as_tuple(key), forward_as_tuple(forward(args)...))); + } + + template + template + inline typename rbtree::iterator + rbtree::try_emplace(const_iterator position, key_type&& key, Args&&... args) + { + return DoInsertValueHint( + has_unique_keys_type(), position, + value_type(piecewise_construct, forward_as_tuple(key), forward_as_tuple(forward(args)...))); + } + + + template + template + inline typename rbtree::insert_return_type // map/set::insert return a pair, multimap/multiset::iterator return an iterator. + rbtree::insert(P&& otherValue) + { + // Need to use forward instead of move because P&& is a "universal reference" instead of an rvalue reference. + return emplace(eastl::forward

(otherValue)); + } + + + template + inline typename rbtree::iterator + rbtree::insert(const_iterator position, value_type&& value) + { + return DoInsertValueHint(has_unique_keys_type(), position, value_type(eastl::move(value))); + } + + + template + inline typename rbtree::insert_return_type // map/set::insert return a pair, multimap/multiset::iterator return an iterator. + rbtree::insert(const value_type& value) + { + return DoInsertValue(has_unique_keys_type(), value); + } + + + template + typename rbtree::iterator + rbtree::insert(const_iterator position, const value_type& value) + { + return DoInsertValueHint(has_unique_keys_type(), position, value); + } + + + template + template + eastl::pair::iterator, bool> + rbtree::insert_or_assign(const key_type& k, M&& obj) + { + auto iter = find(k); + + if(iter == end()) + { + return insert(value_type(piecewise_construct, forward_as_tuple(k), forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return {iter, false}; + } + } + + template + template + eastl::pair::iterator, bool> + rbtree::insert_or_assign(key_type&& k, M&& obj) + { + auto iter = find(k); + + if(iter == end()) + { + return insert(value_type(piecewise_construct, forward_as_tuple(eastl::move(k)), forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return {iter, false}; + } + } + + template + template + typename rbtree::iterator + rbtree::insert_or_assign(const_iterator hint, const key_type& k, M&& obj) + { + auto iter = find(k); + + if(iter == end()) + { + return insert(hint, value_type(piecewise_construct, forward_as_tuple(k), forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return iter; + } + } + + template + template + typename rbtree::iterator + rbtree::insert_or_assign(const_iterator hint, key_type&& k, M&& obj) + { + auto iter = find(k); + + if(iter == end()) + { + return insert(hint, value_type(piecewise_construct, 
forward_as_tuple(eastl::move(k)), forward_as_tuple(eastl::forward(obj)))); + } + else + { + iter->second = eastl::forward(obj); + return iter; + } + } + + template + typename rbtree::node_type* + rbtree::DoGetKeyInsertionPositionUniqueKeys(bool& canInsert, const key_type& key) + { + // This code is essentially a slightly modified copy of the the rbtree::insert + // function whereby this version takes a key and not a full value_type. + extract_key extractKey; + + node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node. + node_type* pLowerBound = (node_type*)&mAnchor; // Set it to the container end for now. + node_type* pParent; // This will be where we insert the new node. + + bool bValueLessThanNode = true; // If the tree is empty, this will result in an insertion at the front. + + // Find insertion position of the value. This will either be a position which + // already contains the value, a position which is greater than the value or + // end(), which we treat like a position which is greater than the value. + while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree. + { + bValueLessThanNode = compare(key, extractKey(pCurrent->mValue)); + pLowerBound = pCurrent; + + if(bValueLessThanNode) + { + EASTL_VALIDATE_COMPARE(!compare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane. + pCurrent = (node_type*)pCurrent->mpNodeLeft; + } + else + pCurrent = (node_type*)pCurrent->mpNodeRight; + } + + pParent = pLowerBound; // pLowerBound is actually upper bound right now (i.e. it is > value instead of <=), but we will make it the lower bound below. + + if(bValueLessThanNode) // If we ended up on the left side of the last parent node... + { + if(EASTL_LIKELY(pLowerBound != (node_type*)mAnchor.mpNodeLeft)) // If the tree was empty or if we otherwise need to insert at the very front of the tree... + { + // At this point, pLowerBound points to a node which is > than value. 
+ // Move it back by one, so that it points to a node which is <= value. + pLowerBound = (node_type*)RBTreeDecrement(pLowerBound); + } + else + { + canInsert = true; + return pLowerBound; + } + } + + // Since here we require values to be unique, we will do nothing if the value already exists. + if(compare(extractKey(pLowerBound->mValue), key)) // If the node is < the value (i.e. if value is >= the node)... + { + EASTL_VALIDATE_COMPARE(!compare(key, extractKey(pLowerBound->mValue))); // Validate that the compare function is sane. + canInsert = true; + return pParent; + } + + // The item already exists (as found by the compare directly above), so return false. + canInsert = false; + return pLowerBound; + } + + + template + typename rbtree::node_type* + rbtree::DoGetKeyInsertionPositionNonuniqueKeys(const key_type& key) + { + // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set). + node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node. + node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now. + extract_key extractKey; + + while(pCurrent) + { + pRangeEnd = pCurrent; + + if(compare(key, extractKey(pCurrent->mValue))) + { + EASTL_VALIDATE_COMPARE(!compare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane. 
+ pCurrent = (node_type*)pCurrent->mpNodeLeft; + } + else + pCurrent = (node_type*)pCurrent->mpNodeRight; + } + + return pRangeEnd; + } + + + template + eastl::pair::iterator, bool> + rbtree::DoInsertValue(true_type, value_type&& value) + { + extract_key extractKey; + key_type key(extractKey(value)); + bool canInsert; + node_type* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key); + + if(canInsert) + { + const iterator itResult(DoInsertValueImpl(pPosition, false, key, eastl::move(value))); + return pair(itResult, true); + } + + return pair(iterator(pPosition), false); + } + + + template + typename rbtree::iterator + rbtree::DoInsertValue(false_type, value_type&& value) + { + extract_key extractKey; + key_type key(extractKey(value)); + node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key); + + return DoInsertValueImpl(pPosition, false, key, eastl::move(value)); + } + + + template + template + eastl::pair::iterator, bool> + rbtree::DoInsertValue(true_type, Args&&... args) // true_type means keys are unique. + { + // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset). + // Note that we return a pair and not an iterator. This is because the C++ standard for map + // and set is to return a pair and not just an iterator. + + node_type* pNodeNew = DoCreateNode(eastl::forward(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized. + const key_type& key = extract_key{}(pNodeNew->mValue); + + bool canInsert; + node_type* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key); + + if(canInsert) + { + iterator itResult(DoInsertValueImpl(pPosition, false, key, pNodeNew)); + return pair(itResult, true); + } + + DoFreeNode(pNodeNew); + return pair(iterator(pPosition), false); + } + + + template + template + typename rbtree::iterator + rbtree::DoInsertValue(false_type, Args&&... args) // false_type means keys are not unique. 
+ { + // We have a problem here if sizeof(value_type) is too big for the stack. We may want to consider having a specialization for large value_types. + // To do: Change this so that we call DoCreateNode(eastl::forward(args)...) here and use the value from the resulting pNode to get the + // key, and make DoInsertValueImpl take that node as an argument. That way there is no value created on the stack. + + node_type* const pNodeNew = DoCreateNode(eastl::forward(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized. + const key_type& key = extract_key{}(pNodeNew->mValue); + + node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key); + + return DoInsertValueImpl(pPosition, false, key, pNodeNew); + } + + + template + template + typename rbtree::iterator + rbtree::DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, Args&&... args) + { + node_type* const pNodeNew = DoCreateNode(eastl::forward(args)...); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized. + + return DoInsertValueImpl(pNodeParent, bForceToLeft, key, pNodeNew); + } + + + template + typename rbtree::iterator + rbtree::DoInsertValueImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key, node_type* pNodeNew) + { + EASTL_ASSERT_MSG(pNodeNew != nullptr, "node to insert to the rbtree must not be null"); + + RBTreeSide side; + extract_key extractKey; + + // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal. + // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report + // suggests that we should use the insert hint position to force an ordering. So that's what we do. 
+ if(bForceToLeft || (pNodeParent == &mAnchor) || compare(key, extractKey(pNodeParent->mValue))) + side = kRBTreeSideLeft; + else + side = kRBTreeSideRight; + + RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side); + mnSize++; + + return iterator(pNodeNew); + } + + + template + eastl::pair::iterator, bool> + rbtree::DoInsertKey(true_type, const key_type& key) // true_type means keys are unique. + { + // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset). + // Note that we return a pair and not an iterator. This is because the C++ standard for map + // and set is to return a pair and not just an iterator. + bool canInsert; + node_type* pPosition = DoGetKeyInsertionPositionUniqueKeys(canInsert, key); + + if(canInsert) + { + const iterator itResult(DoInsertKeyImpl(pPosition, false, key)); + return pair(itResult, true); + } + + return pair(iterator(pPosition), false); + } + + + template + typename rbtree::iterator + rbtree::DoInsertKey(false_type, const key_type& key) // false_type means keys are not unique. + { + node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeys(key); + + return DoInsertKeyImpl(pPosition, false, key); + } + + + + template + typename rbtree::node_type* + rbtree::DoGetKeyInsertionPositionUniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key) + { + extract_key extractKey; + + if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position... + { + iterator itNext(position.mpNode); + ++itNext; + + // To consider: Change this so that 'position' specifies the position after + // where the insertion goes and not the position before where the insertion goes. + // Doing so would make this more in line with user expectations and with LWG #233. + const bool bPositionLessThanValue = compare(extractKey(position.mpNode->mValue), key); + + if(bPositionLessThanValue) // If (value > *position)... 
+ { + EASTL_VALIDATE_COMPARE(!compare(key, extractKey(position.mpNode->mValue))); // Validate that the compare function is sane. + + const bool bValueLessThanNext = compare(key, extractKey(itNext.mpNode->mValue)); + + if(bValueLessThanNext) // If value < *itNext... + { + EASTL_VALIDATE_COMPARE(!compare(extractKey(itNext.mpNode->mValue), key)); // Validate that the compare function is sane. + + if(position.mpNode->mpNodeRight) + { + bForceToLeft = true; // Specifically insert in front of (to the left of) itNext (and thus after 'position'). + return itNext.mpNode; + } + + bForceToLeft = false; + return position.mpNode; + } + } + + bForceToLeft = false; + return NULL; // The above specified hint was not useful, then we do a regular insertion. + } + + if(mnSize && compare(extractKey(((node_type*)mAnchor.mpNodeRight)->mValue), key)) + { + EASTL_VALIDATE_COMPARE(!compare(key, extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))); // Validate that the compare function is sane. + bForceToLeft = false; + return (node_type*)mAnchor.mpNodeRight; + } + + bForceToLeft = false; + return NULL; // The caller can do a default insert. + } + + + template + typename rbtree::node_type* + rbtree::DoGetKeyInsertionPositionNonuniqueKeysHint(const_iterator position, bool& bForceToLeft, const key_type& key) + { + extract_key extractKey; + + if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position... + { + iterator itNext(position.mpNode); + ++itNext; + + // To consider: Change this so that 'position' specifies the position after + // where the insertion goes and not the position before where the insertion goes. + // Doing so would make this more in line with user expectations and with LWG #233. + if(!compare(key, extractKey(position.mpNode->mValue)) && // If value >= *position && + !compare(extractKey(itNext.mpNode->mValue), key)) // if value <= *itNext... 
+ { + if(position.mpNode->mpNodeRight) // If there are any nodes to the right... [this expression will always be true as long as we aren't at the end()] + { + bForceToLeft = true; // Specifically insert in front of (to the left of) itNext (and thus after 'position'). + return itNext.mpNode; + } + + bForceToLeft = false; + return position.mpNode; + } + + bForceToLeft = false; + return NULL; // The above specified hint was not useful, then we do a regular insertion. + } + + // This pathway shouldn't be commonly executed, as the user shouldn't be calling + // this hinted version of insert if the user isn't providing a useful hint. + if(mnSize && !compare(key, extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))) // If we are non-empty and the value is >= the last node... + { + bForceToLeft =false; + return (node_type*)mAnchor.mpNodeRight; + } + + bForceToLeft = false; + return NULL; + } + + + template + typename rbtree::iterator + rbtree::DoInsertValueHint(true_type, const_iterator position, const value_type& value) // true_type means keys are unique. + { + // This is the pathway for insertion of unique keys (map and set, but not multimap and multiset). + // + // We follow the same approach as SGI STL/STLPort and use the position as + // a forced insertion position for the value when possible. + + extract_key extractKey; + key_type key(extractKey(value)); + bool bForceToLeft; + node_type* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key); + + if(pPosition) + return DoInsertValueImpl(pPosition, bForceToLeft, key, value); + else + return DoInsertValue(has_unique_keys_type(), value).first; + } + + + template + typename rbtree::iterator + rbtree::DoInsertValueHint(false_type, const_iterator position, const value_type& value) // false_type means keys are not unique. + { + // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set). 
+ // + // We follow the same approach as SGI STL/STLPort and use the position as + // a forced insertion position for the value when possible. + extract_key extractKey; + key_type key(extractKey(value)); + bool bForceToLeft; + node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeysHint(position, bForceToLeft, key); + + if(pPosition) + return DoInsertValueImpl(pPosition, bForceToLeft, key, value); + else + return DoInsertValue(has_unique_keys_type(), value); + } + + + template + typename rbtree::iterator + rbtree::DoInsertKey(true_type, const_iterator position, const key_type& key) // true_type means keys are unique. + { + bool bForceToLeft; + node_type* pPosition = DoGetKeyInsertionPositionUniqueKeysHint(position, bForceToLeft, key); + + if(pPosition) + return DoInsertKeyImpl(pPosition, bForceToLeft, key); + else + return DoInsertKey(has_unique_keys_type(), key).first; + } + + + template + typename rbtree::iterator + rbtree::DoInsertKey(false_type, const_iterator position, const key_type& key) // false_type means keys are not unique. + { + // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set). + // + // We follow the same approach as SGI STL/STLPort and use the position as + // a forced insertion position for the value when possible. + bool bForceToLeft; + node_type* pPosition = DoGetKeyInsertionPositionNonuniqueKeysHint(position, bForceToLeft, key); + + if(pPosition) + return DoInsertKeyImpl(pPosition, bForceToLeft, key); + else + return DoInsertKey(has_unique_keys_type(), key); // We are empty or we are inserting at the end. + } + + + template + typename rbtree::iterator + rbtree::DoInsertKeyImpl(node_type* pNodeParent, bool bForceToLeft, const key_type& key) + { + RBTreeSide side; + extract_key extractKey; + + // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal. 
+ // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report + // suggests that we should use the insert hint position to force an ordering. So that's what we do. + if(bForceToLeft || (pNodeParent == &mAnchor) || compare(key, extractKey(pNodeParent->mValue))) + side = kRBTreeSideLeft; + else + side = kRBTreeSideRight; + + node_type* const pNodeNew = DoCreateNodeFromKey(key); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized. + RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side); + mnSize++; + + return iterator(pNodeNew); + } + + + template + void rbtree::insert(std::initializer_list ilist) + { + for(typename std::initializer_list::iterator it = ilist.begin(), itEnd = ilist.end(); it != itEnd; ++it) + DoInsertValue(has_unique_keys_type(), eastl::move(*it)); + } + + + template + template + void rbtree::insert(InputIterator first, InputIterator last) + { + for( ; first != last; ++first) + DoInsertValue(has_unique_keys_type(), *first); // Or maybe we should call 'insert(end(), *first)' instead. If the first-last range was sorted then this might make some sense. + } + + + template + inline void rbtree::clear() + { + // Erase the entire tree. DoNukeSubtree is not a + // conventional erase function, as it does no rebalancing. + DoNukeSubtree((node_type*)mAnchor.mpNodeParent); + reset_lose_memory(); + } + + + template + inline void rbtree::reset_lose_memory() + { + // The reset_lose_memory function is a special extension function which unilaterally + // resets the container to an empty state without freeing the memory of + // the contained objects. This is useful for very quickly tearing down a + // container built into scratch memory. 
+ mAnchor.mpNodeRight = &mAnchor; + mAnchor.mpNodeLeft = &mAnchor; + mAnchor.mpNodeParent = NULL; + mAnchor.mColor = kRBTreeColorRed; + mnSize = 0; + } + + + template + inline typename rbtree::iterator + rbtree::erase(const_iterator position) + { + const iterator iErase(position.mpNode); + --mnSize; // Interleave this between the two references to itNext. We expect no exceptions to occur during the code below. + ++position; + RBTreeErase(iErase.mpNode, &mAnchor); + DoFreeNode(iErase.mpNode); + return iterator(position.mpNode); + } + + + template + typename rbtree::iterator + rbtree::erase(const_iterator first, const_iterator last) + { + // We expect that if the user means to clear the container, they will call clear. + if(EASTL_LIKELY((first.mpNode != mAnchor.mpNodeLeft) || (last.mpNode != &mAnchor))) // If (first != begin or last != end) ... + { + // Basic implementation: + while(first != last) + first = erase(first); + return iterator(first.mpNode); + + // Inlined implementation: + //size_type n = 0; + //while(first != last) + //{ + // const iterator itErase(first); + // ++n; + // ++first; + // RBTreeErase(itErase.mpNode, &mAnchor); + // DoFreeNode(itErase.mpNode); + //} + //mnSize -= n; + //return first; + } + + clear(); + return iterator((node_type*)&mAnchor); // Same as: return end(); + } + + + template + inline typename rbtree::reverse_iterator + rbtree::erase(const_reverse_iterator position) + { + return reverse_iterator(erase((++position).base())); + } + + + template + typename rbtree::reverse_iterator + rbtree::erase(const_reverse_iterator first, const_reverse_iterator last) + { + // Version which erases in order from first to last. 
+ // difference_type i(first.base() - last.base()); + // while(i--) + // first = erase(first); + // return first; + + // Version which erases in order from last to first, but is slightly more efficient: + return reverse_iterator(erase((++last).base(), (++first).base())); + } + + + template + inline void rbtree::erase(const key_type* first, const key_type* last) + { + // We have no choice but to run a loop like this, as the first/last range could + // have values that are discontiguously located in the tree. And some may not + // even be in the tree. + while(first != last) + erase(*first++); + } + + + template + typename rbtree::iterator + rbtree::find(const key_type& key) + { + // To consider: Implement this instead via calling lower_bound and + // inspecting the result. The following is an implementation of this: + // const iterator it(lower_bound(key)); + // return ((it.mpNode == &mAnchor) || compare(key, extractKey(it.mpNode->mValue))) ? iterator(&mAnchor) : it; + // We don't currently implement the above because in practice people tend to call + // find a lot with trees, but very uncommonly call lower_bound. + extract_key extractKey; + + node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node. + node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now. + + while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree. + { + if(EASTL_LIKELY(!compare(extractKey(pCurrent->mValue), key))) // If pCurrent is >= key... + { + pRangeEnd = pCurrent; + pCurrent = (node_type*)pCurrent->mpNodeLeft; + } + else + { + EASTL_VALIDATE_COMPARE(!compare(key, extractKey(pCurrent->mValue))); // Validate that the compare function is sane. 
+ pCurrent = (node_type*)pCurrent->mpNodeRight; + } + } + + if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !compare(key, extractKey(pRangeEnd->mValue)))) + return iterator(pRangeEnd); + return iterator((node_type*)&mAnchor); + } + + + template + inline typename rbtree::const_iterator + rbtree::find(const key_type& key) const + { + typedef rbtree rbtree_type; + return const_iterator(const_cast(this)->find(key)); + } + + + template + template + typename rbtree::iterator + rbtree::find_as(const U& u, Compare2 compare2) + { + extract_key extractKey; + + node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node. + node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now. + + while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree. + { + if(EASTL_LIKELY(!compare2(extractKey(pCurrent->mValue), u))) // If pCurrent is >= u... + { + pRangeEnd = pCurrent; + pCurrent = (node_type*)pCurrent->mpNodeLeft; + } + else + { + EASTL_VALIDATE_COMPARE(!compare2(u, extractKey(pCurrent->mValue))); // Validate that the compare function is sane. + pCurrent = (node_type*)pCurrent->mpNodeRight; + } + } + + if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !compare2(u, extractKey(pRangeEnd->mValue)))) + return iterator(pRangeEnd); + return iterator((node_type*)&mAnchor); + } + + + template + template + inline typename rbtree::const_iterator + rbtree::find_as(const U& u, Compare2 compare2) const + { + typedef rbtree rbtree_type; + return const_iterator(const_cast(this)->find_as(u, compare2)); + } + + + template + typename rbtree::iterator + rbtree::lower_bound(const key_type& key) + { + extract_key extractKey; + + node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node. + node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now. + + while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree. + { + if(EASTL_LIKELY(!compare(extractKey(pCurrent->mValue), key))) // If pCurrent is >= key... 
+ {
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ {
+ EASTL_VALIDATE_COMPARE(!compare(key, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+ }
+
+ return iterator(pRangeEnd);
+ }
+
+
+ template
+ inline typename rbtree::const_iterator
+ rbtree::lower_bound(const key_type& key) const
+ {
+ typedef rbtree rbtree_type;
+ return const_iterator(const_cast(this)->lower_bound(key));
+ }
+
+
+ template
+ typename rbtree::iterator
+ rbtree::upper_bound(const key_type& key)
+ {
+ extract_key extractKey;
+
+ node_type* pCurrent = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+ node_type* pRangeEnd = (node_type*)&mAnchor; // Set it to the container end for now.
+
+ while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+ {
+ if(EASTL_LIKELY(compare(key, extractKey(pCurrent->mValue)))) // If key is < pCurrent...
+ {
+ EASTL_VALIDATE_COMPARE(!compare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+ pRangeEnd = pCurrent;
+ pCurrent = (node_type*)pCurrent->mpNodeLeft;
+ }
+ else
+ pCurrent = (node_type*)pCurrent->mpNodeRight;
+ }
+
+ return iterator(pRangeEnd);
+ }
+
+
+ template
+ inline typename rbtree::const_iterator
+ rbtree::upper_bound(const key_type& key) const
+ {
+ typedef rbtree rbtree_type;
+ return const_iterator(const_cast(this)->upper_bound(key));
+ }
+
+
+ // To do: Move this validate function entirely to a template-less implementation.
+ template
+ bool rbtree::validate() const
+ {
+ // Red-black trees have the following canonical properties which we validate here:
+ // 1 Every node is either red or black.
+ // 2 Every leaf (NULL) is black by definition. Any number of black nodes may appear in a sequence.
+ // 3 If a node is red, then both its children are black. Thus, on any path from
+ // the root to a leaf, red nodes must not be adjacent. 
+ // 4 Every simple path from a node to a descendant leaf contains the same number of black nodes. + // 5 The mnSize member of the tree must equal the number of nodes in the tree. + // 6 The tree is sorted as per a conventional binary tree. + // 7 The comparison function is sane; it obeys strict weak ordering. If compare(a,b) is true, then compare(b,a) must be false. Both cannot be true. + + extract_key extractKey; + + if(mnSize) + { + // Verify basic integrity. + //if(!mAnchor.mpNodeParent || (mAnchor.mpNodeLeft == mAnchor.mpNodeRight)) + // return false; // Fix this for case of empty tree. + + if(mAnchor.mpNodeLeft != RBTreeGetMinChild(mAnchor.mpNodeParent)) + return false; + + if(mAnchor.mpNodeRight != RBTreeGetMaxChild(mAnchor.mpNodeParent)) + return false; + + const size_t nBlackCount = RBTreeGetBlackCount(mAnchor.mpNodeParent, mAnchor.mpNodeLeft); + size_type nIteratedSize = 0; + + for(const_iterator it = begin(); it != end(); ++it, ++nIteratedSize) + { + const node_type* const pNode = (const node_type*)it.mpNode; + const node_type* const pNodeRight = (const node_type*)pNode->mpNodeRight; + const node_type* const pNodeLeft = (const node_type*)pNode->mpNodeLeft; + + // Verify #7 above. + if(pNodeRight && compare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue)) && compare(extractKey(pNode->mValue), extractKey(pNodeRight->mValue))) // Validate that the compare function is sane. + return false; + + // Verify #7 above. + if(pNodeLeft && compare(extractKey(pNodeLeft->mValue), extractKey(pNode->mValue)) && compare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue))) // Validate that the compare function is sane. + return false; + + // Verify item #1 above. + if((pNode->mColor != kRBTreeColorRed) && (pNode->mColor != kRBTreeColorBlack)) + return false; + + // Verify item #3 above. 
+ if(pNode->mColor == kRBTreeColorRed) + { + if((pNodeRight && (pNodeRight->mColor == kRBTreeColorRed)) || + (pNodeLeft && (pNodeLeft->mColor == kRBTreeColorRed))) + return false; + } + + // Verify item #6 above. + if(pNodeRight && compare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue))) + return false; + + if(pNodeLeft && compare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue))) + return false; + + if(!pNodeRight && !pNodeLeft) // If we are at a bottom node of the tree... + { + // Verify item #4 above. + if(RBTreeGetBlackCount(mAnchor.mpNodeParent, pNode) != nBlackCount) + return false; + } + } + + // Verify item #5 above. + if(nIteratedSize != mnSize) + return false; + + return true; + } + else + { + if((mAnchor.mpNodeLeft != &mAnchor) || (mAnchor.mpNodeRight != &mAnchor)) + return false; + } + + return true; + } + + + template + inline int rbtree::validate_iterator(const_iterator i) const + { + // To do: Come up with a more efficient mechanism of doing this. + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if(temp == i) + return (isf_valid | isf_current | isf_can_dereference); + } + + if(i == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + + template + inline typename rbtree::node_type* + rbtree::DoAllocateNode() + { + auto* pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), EASTL_ALIGN_OF(value_type), 0); + EASTL_ASSERT_MSG(pNode != nullptr, "the behaviour of eastl::allocators that return nullptr is not defined."); + + return pNode; + } + + + template + inline void rbtree::DoFreeNode(node_type* pNode) + { + pNode->~node_type(); + EASTLFree(mAllocator, pNode, sizeof(node_type)); + } + + + template + typename rbtree::node_type* + rbtree::DoCreateNodeFromKey(const key_type& key) + { + // Note that this function intentionally leaves the node pointers uninitialized. 
+ // The caller would otherwise just turn right around and modify them, so there's + // no point in us initializing them to anything (except in a debug build). + node_type* const pNode = DoAllocateNode(); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new (eastl::addressof(pNode->mValue)) value_type(pair_first_construct, key); + + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeNode(pNode); + throw; + } + #endif + + #if EASTL_DEBUG + pNode->mpNodeRight = NULL; + pNode->mpNodeLeft = NULL; + pNode->mpNodeParent = NULL; + pNode->mColor = kRBTreeColorBlack; + #endif + + return pNode; + } + + + template + typename rbtree::node_type* + rbtree::DoCreateNode(const value_type& value) + { + // Note that this function intentionally leaves the node pointers uninitialized. + // The caller would otherwise just turn right around and modify them, so there's + // no point in us initializing them to anything (except in a debug build). + node_type* const pNode = DoAllocateNode(); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(value); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeNode(pNode); + throw; + } + #endif + + #if EASTL_DEBUG + pNode->mpNodeRight = NULL; + pNode->mpNodeLeft = NULL; + pNode->mpNodeParent = NULL; + pNode->mColor = kRBTreeColorBlack; + #endif + + return pNode; + } + + + template + typename rbtree::node_type* + rbtree::DoCreateNode(value_type&& value) + { + // Note that this function intentionally leaves the node pointers uninitialized. + // The caller would otherwise just turn right around and modify them, so there's + // no point in us initializing them to anything (except in a debug build). + node_type* const pNode = DoAllocateNode(); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(eastl::move(value)); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) 
+ { + DoFreeNode(pNode); + throw; + } + #endif + + #if EASTL_DEBUG + pNode->mpNodeRight = NULL; + pNode->mpNodeLeft = NULL; + pNode->mpNodeParent = NULL; + pNode->mColor = kRBTreeColorBlack; + #endif + + return pNode; + } + + + template + template + typename rbtree::node_type* + rbtree::DoCreateNode(Args&&... args) + { + // Note that this function intentionally leaves the node pointers uninitialized. + // The caller would otherwise just turn right around and modify them, so there's + // no point in us initializing them to anything (except in a debug build). + node_type* const pNode = DoAllocateNode(); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + ::new(eastl::addressof(pNode->mValue)) value_type(eastl::forward(args)...); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + DoFreeNode(pNode); + throw; + } + #endif + + #if EASTL_DEBUG + pNode->mpNodeRight = NULL; + pNode->mpNodeLeft = NULL; + pNode->mpNodeParent = NULL; + pNode->mColor = kRBTreeColorBlack; + #endif + + return pNode; + } + + + template + typename rbtree::node_type* + rbtree::DoCreateNode(const node_type* pNodeSource, node_type* pNodeParent) + { + node_type* const pNode = DoCreateNode(pNodeSource->mValue); + + pNode->mpNodeRight = NULL; + pNode->mpNodeLeft = NULL; + pNode->mpNodeParent = pNodeParent; + pNode->mColor = pNodeSource->mColor; + + return pNode; + } + + + template + typename rbtree::node_type* + rbtree::DoCopySubtree(const node_type* pNodeSource, node_type* pNodeDest) + { + node_type* const pNewNodeRoot = DoCreateNode(pNodeSource, pNodeDest); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + // Copy the right side of the tree recursively. 
+ if(pNodeSource->mpNodeRight)
+ pNewNodeRoot->mpNodeRight = DoCopySubtree((const node_type*)pNodeSource->mpNodeRight, pNewNodeRoot);
+
+ node_type* pNewNodeLeft;
+
+ for(pNodeSource = (node_type*)pNodeSource->mpNodeLeft, pNodeDest = pNewNodeRoot;
+ pNodeSource;
+ pNodeSource = (node_type*)pNodeSource->mpNodeLeft, pNodeDest = pNewNodeLeft)
+ {
+ pNewNodeLeft = DoCreateNode(pNodeSource, pNodeDest);
+
+ pNodeDest->mpNodeLeft = pNewNodeLeft;
+
+ // Copy the right side of the tree recursively.
+ if(pNodeSource->mpNodeRight)
+ pNewNodeLeft->mpNodeRight = DoCopySubtree((const node_type*)pNodeSource->mpNodeRight, pNewNodeLeft);
+ }
+ #if EASTL_EXCEPTIONS_ENABLED
+ }
+ catch(...)
+ {
+ DoNukeSubtree(pNewNodeRoot);
+ throw;
+ }
+ #endif
+
+ return pNewNodeRoot;
+ }
+
+
+ template
+ void rbtree::DoNukeSubtree(node_type* pNode)
+ {
+ while(pNode) // Recursively traverse the tree and destroy items as we go.
+ {
+ DoNukeSubtree((node_type*)pNode->mpNodeRight);
+
+ node_type* const pNodeLeft = (node_type*)pNode->mpNodeLeft;
+ DoFreeNode(pNode);
+ pNode = pNodeLeft;
+ }
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template
+ inline bool operator==(const rbtree& a, const rbtree& b)
+ {
+ return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin());
+ }
+
+
+ // Note that in operator< we do comparisons based on the tree value_type with operator<() of the
+ // value_type instead of the tree's Compare function. For set/multiset, the value_type is T, while
+ // for map/multimap the value_type is a pair. operator< for pair can be seen by looking at
+ // utility.h, but it basically uses the operator< for pair.first and pair.second. The C++ standard
+ // appears to require this behaviour, whether intentionally or not. If anything, a good reason to do
+ // this is for consistency. 
A map and a vector that contain the same items should compare the same. + template + inline bool operator<(const rbtree& a, const rbtree& b) + { + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + + template + inline bool operator!=(const rbtree& a, const rbtree& b) + { + return !(a == b); + } + + + template + inline bool operator>(const rbtree& a, const rbtree& b) + { + return b < a; + } + + + template + inline bool operator<=(const rbtree& a, const rbtree& b) + { + return !(b < a); + } + + + template + inline bool operator>=(const rbtree& a, const rbtree& b) + { + return !(a < b); + } + + + template + inline void swap(rbtree& a, rbtree& b) + { + a.swap(b); + } + + +} // namespace eastl + + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +#endif // Header include guard + + + + + + + + + + + + + diff --git a/include/EASTL/internal/smart_ptr.h b/include/EASTL/internal/smart_ptr.h new file mode 100644 index 0000000..6eab3f8 --- /dev/null +++ b/include/EASTL/internal/smart_ptr.h @@ -0,0 +1,264 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_SMART_PTR_H +#define EASTL_INTERNAL_SMART_PTR_H + + +#include +#include +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +namespace eastl +{ + + namespace Internal + { + // Tells if the Deleter type has a typedef for pointer to T. If so then return it, + // else return T*. The large majority of the time the pointer type will be T*. + // The C++11 Standard requires that scoped_ptr let the deleter define the pointer type. 
+ // + // Example usage: + // typedef typename unique_pointer_type::type pointer + // + template + class unique_pointer_type + { + template + static typename U::pointer test(typename U::pointer*); + + template + static T* test(...); + + public: + typedef decltype(test::type>(0)) type; + }; + + + /////////////////////////////////////////////////////////////////////// + // is_array_cv_convertible + // + // Tells if the array pointer P1 is cv-convertible to array pointer P2. + // The two types have two be equivalent pointer types and be convertible + // when you consider const/volatile properties of them. + // + // Example usage: + // is_array_cv_convertible::value => false + // is_array_cv_convertible::value => false + // is_array_cv_convertible::value => false + // is_array_cv_convertible::value => false + // is_array_cv_convertible::value => false + // is_array_cv_convertible::value => true + // is_array_cv_convertible::value => true + // is_array_cv_convertible::value => true + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_array_cv_convertible_CONFORMANCE 1 + + template ::element_type>::type, + typename eastl::remove_cv::element_type>::type>::value> + struct is_array_cv_convertible_impl + : public eastl::is_convertible {}; // Return true if P1 is convertible to P2. + + template + struct is_array_cv_convertible_impl + : public eastl::false_type {}; // P1's underlying type is not the same as P2's, so it can't be converted, even if P2 refers to a subclass of P1. Parent == Child, but Parent[] != Child[] + + template ::value && !eastl::is_pointer::value> + struct is_array_cv_convertible + : public is_array_cv_convertible_impl {}; + + template + struct is_array_cv_convertible + : public eastl::false_type {}; // P1 is scalar not a pointer, so it can't be converted to a pointer. 
+ + + /////////////////////////////////////////////////////////////////////// + // is_derived + // + // Given two (possibly identical) types Base and Derived, is_base_of::value == true + // if and only if Base is a direct or indirect base class of Derived. This is like is_base_of + // but returns false if Derived is the same as Base. So is_derived is true only if Derived is actually a subclass + // of Base and not Base itself. + // + // is_derived may only be applied to complete types. + // + // Example usage: + // is_derived::value => false + // is_derived::value => false + // is_derived::value => true + // is_derived::value => false + /////////////////////////////////////////////////////////////////////// + + #if EASTL_TYPE_TRAIT_is_base_of_CONFORMANCE + #define EASTL_TYPE_TRAIT_is_derived_CONFORMANCE 1 + + template + struct is_derived : public eastl::integral_constant::value && !eastl::is_same::type, typename eastl::remove_cv::type>::value> {}; + #else + #define EASTL_TYPE_TRAIT_is_derived_CONFORMANCE 0 + + template // This returns true if Derived is unrelated to Base. That's a wrong answer, but is better for us than returning false for compilers that don't support is_base_of. + struct is_derived : public eastl::integral_constant::type, typename eastl::remove_cv::type>::value> {}; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_safe_array_conversion + // + // Say you have two array types: T* t and U* u. You want to assign the u to t but only if + // that's a safe thing to do. As shown in the logic below, the array conversion + // is safe if U* and T* are convertible, if U is an array, and if either U or T is not + // a pointer or U is not derived from T. + // + // Note: Usage of this class could be replaced with is_array_cv_convertible usage. + // To do: Do this replacement and test it. 
+ // + /////////////////////////////////////////////////////////////////////// + + template + struct is_safe_array_conversion : public eastl::integral_constant::value && + eastl::is_array::value && + (!eastl::is_pointer::value || !is_pointer::value || !Internal::is_derived::type>::value)> {}; + + } // namespace Internal + + + + + + + + /// default_delete + /// + /// C++11 smart pointer default delete function class. + /// + /// Provides a default way to delete an object. This default is simply to call delete on the + /// object pointer. You can provide an alternative to this class or you can override this on + /// a class-by-class basis like the following: + /// template <> + /// struct smart_ptr_deleter + /// { + /// void operator()(MyClass* p) const + /// { SomeCustomFunction(p); } + /// }; + /// + template + struct default_delete + { + #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION <= 4006) // GCC prior to 4.7 has a bug with noexcept here. + EA_CONSTEXPR default_delete() = default; + #else + EA_CONSTEXPR default_delete() EA_NOEXCEPT = default; + #endif + + template // Enable if T* can be constructed with U* (i.e. U* is convertible to T*). + default_delete(const default_delete&, typename eastl::enable_if::value>::type* = 0) EA_NOEXCEPT {} + + void operator()(T* p) const EA_NOEXCEPT + { delete p; } + }; + + + template + struct default_delete // Specialization for arrays. + { + #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION <= 4006) // GCC prior to 4.7 has a bug with noexcept here. + EA_CONSTEXPR default_delete() = default; + #else + EA_CONSTEXPR default_delete() EA_NOEXCEPT = default; + #endif + + template // This ctor is enabled if T is equal to or a base of U, and if U is less or equal const/volatile-qualified than T. 
+ default_delete(const default_delete&, typename eastl::enable_if::value>::type* = 0) EA_NOEXCEPT {} + + void operator()(T* p) const EA_NOEXCEPT + { delete[] p; } + }; + + + + + /// smart_ptr_deleter + /// + /// Deprecated in favor of the C++11 name: default_delete + /// + template + struct smart_ptr_deleter + { + typedef T value_type; + + void operator()(const value_type* p) const // We use a const argument type in order to be most flexible with what types we accept. + { delete const_cast(p); } + }; + + template <> + struct smart_ptr_deleter + { + typedef void value_type; + + void operator()(const void* p) const + { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type. + }; + + template <> + struct smart_ptr_deleter + { + typedef void value_type; + + void operator()(const void* p) const + { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type. + }; + + + + /// smart_array_deleter + /// + /// Deprecated in favor of the C++11 name: default_delete + /// + template + struct smart_array_deleter + { + typedef T value_type; + + void operator()(const value_type* p) const // We use a const argument type in order to be most flexible with what types we accept. + { delete[] const_cast(p); } + }; + + template <> + struct smart_array_deleter + { + typedef void value_type; + + void operator()(const void* p) const + { delete[] (char*)p; } // We don't seem to have much choice but to cast to a scalar type. + }; + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + diff --git a/include/EASTL/internal/thread_support.h b/include/EASTL/internal/thread_support.h new file mode 100644 index 0000000..747d994 --- /dev/null +++ b/include/EASTL/internal/thread_support.h @@ -0,0 +1,257 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_THREAD_SUPPORT_H +#define EASTL_INTERNAL_THREAD_SUPPORT_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif +#include + +///////////////////////////////////////////////////////////////////////////////////////////////////// +// NOTE(rparolin): We need a fallback mutex implementation because the Microsoft implementation +// of std::mutex can not be included in managed-cpp code. +// +// fatal error C1189: is not supported when compiling with /clr or /clr:pure +///////////////////////////////////////////////////////////////////////////////////////////////////// +#if defined(EA_HAVE_CPP11_MUTEX) && !defined(EA_COMPILER_MANAGED_CPP) + #define EASTL_CPP11_MUTEX_ENABLED 1 +#else + #define EASTL_CPP11_MUTEX_ENABLED 0 +#endif + +#if EASTL_CPP11_MUTEX_ENABLED + EA_DISABLE_ALL_VC_WARNINGS() + #include + EA_RESTORE_ALL_VC_WARNINGS() +#endif + +#if defined(EA_PLATFORM_MICROSOFT) + // Cannot include Windows headers in our headers, as they kill builds with their #defines. +#elif defined(EA_PLATFORM_POSIX) + #include +#endif + + +#if defined(_MSC_VER) + #pragma warning(push) + #pragma warning(disable: 4625) // copy constructor could not be generated because a base class copy constructor is inaccessible or deleted. + #pragma warning(disable: 4626) // assignment operator could not be generated because a base class assignment operator is inaccessible or deleted. + #pragma warning(disable: 4275) // non dll-interface class used as base for DLL-interface classkey 'identifier'. 
+#endif + + +#if defined(EA_PLATFORM_MICROSOFT) + #if defined(EA_PROCESSOR_POWERPC) + extern "C" long __stdcall _InterlockedIncrement(long volatile* Addend); + #pragma intrinsic (_InterlockedIncrement) + + extern "C" long __stdcall _InterlockedDecrement(long volatile* Addend); + #pragma intrinsic (_InterlockedDecrement) + + extern "C" long __stdcall _InterlockedCompareExchange(long volatile* Dest, long Exchange, long Comp); + #pragma intrinsic (_InterlockedCompareExchange) + #else + extern "C" long _InterlockedIncrement(long volatile* Addend); + #pragma intrinsic (_InterlockedIncrement) + + extern "C" long _InterlockedDecrement(long volatile* Addend); + #pragma intrinsic (_InterlockedDecrement) + + extern "C" long _InterlockedCompareExchange(long volatile* Dest, long Exchange, long Comp); + #pragma intrinsic (_InterlockedCompareExchange) + #endif +#endif + + + +/////////////////////////////////////////////////////////////////////////////// +// EASTL_THREAD_SUPPORT_AVAILABLE +// +// Defined as 0 or 1, based on existing support. +// Identifies if thread support (e.g. atomics, mutexes) is available for use. +// The large majority of EASTL doesn't use thread support, but a few parts +// of it (e.g. shared_ptr) do. +/////////////////////////////////////////////////////////////////////////////// + +#if !defined(EASTL_THREAD_SUPPORT_AVAILABLE) + #if defined(EA_COMPILER_CLANG) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003)) + #define EASTL_THREAD_SUPPORT_AVAILABLE 1 + #elif defined(EA_COMPILER_MSVC) + #define EASTL_THREAD_SUPPORT_AVAILABLE 1 + #else + #define EASTL_THREAD_SUPPORT_AVAILABLE 0 + #endif +#endif + + +namespace eastl +{ + namespace Internal + { + /// atomic_increment + /// Returns the new value. 
+ inline int32_t atomic_increment(int32_t* p32) EA_NOEXCEPT + { + #if defined(EA_COMPILER_CLANG) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003)) + return __sync_add_and_fetch(p32, 1); + #elif defined(EA_COMPILER_MSVC) + static_assert(sizeof(long) == sizeof(int32_t), "unexpected size"); + return _InterlockedIncrement((volatile long*)p32); + #elif defined(EA_COMPILER_GNUC) + int32_t result; + __asm__ __volatile__ ("lock; xaddl %0, %1" + : "=r" (result), "=m" (*p32) + : "0" (1), "m" (*p32) + : "memory" + ); + return result + 1; + #else + EASTL_FAIL_MSG("EASTL thread safety is not implemented yet. See EAThread for how to do this for the given platform."); + return ++*p32; + #endif + } + + /// atomic_decrement + /// Returns the new value. + inline int32_t atomic_decrement(int32_t* p32) EA_NOEXCEPT + { + #if defined(EA_COMPILER_CLANG) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003)) + return __sync_add_and_fetch(p32, -1); + #elif defined(EA_COMPILER_MSVC) + return _InterlockedDecrement((volatile long*)p32); // volatile long cast is OK because int32_t == long on Microsoft platforms. + #elif defined(EA_COMPILER_GNUC) + int32_t result; + __asm__ __volatile__ ("lock; xaddl %0, %1" + : "=r" (result), "=m" (*p32) + : "0" (-1), "m" (*p32) + : "memory" + ); + return result - 1; + #else + EASTL_FAIL_MSG("EASTL thread safety is not implemented yet. See EAThread for how to do this for the given platform."); + return --*p32; + #endif + } + + + /// atomic_compare_and_swap + /// Safely sets the value to a new value if the original value is equal to + /// a condition value. Returns true if the condition was met and the + /// assignment occurred. The comparison and value setting are done as + /// an atomic operation and thus another thread cannot intervene between + /// the two as would be the case with simple C code. 
+ inline bool atomic_compare_and_swap(int32_t* p32, int32_t newValue, int32_t condition) + { + #if defined(EA_COMPILER_CLANG) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4003)) + return __sync_bool_compare_and_swap(p32, condition, newValue); + #elif defined(EA_COMPILER_MSVC) + return ((int32_t)_InterlockedCompareExchange((volatile long*)p32, (long)newValue, (long)condition) == condition); + #elif defined(EA_COMPILER_GNUC) + // GCC Inline ASM Constraints + // r <--> Any general purpose register + // a <--> The a register. + // 1 <--> The constraint '1' for operand 2 says that it must occupy the same location as operand 1. + // =a <--> output registers + // =r <--> output registers + + int32_t result; + __asm__ __volatile__( + "lock; cmpxchgl %3, (%1) \n" // Test *p32 against EAX, if same, then *p32 = newValue + : "=a" (result), "=r" (p32) // outputs + : "a" (condition), "r" (newValue), "1" (p32) // inputs + : "memory" // clobbered + ); + return result == condition; + #else + EASTL_FAIL_MSG("EASTL thread safety is not implemented yet. See EAThread for how to do this for the given platform."); + if(*p32 == condition) + { + *p32 = newValue; + return true; + } + return false; + #endif + } + + + // mutex + #if EASTL_CPP11_MUTEX_ENABLED + using std::mutex; + #else + class EASTL_API mutex + { + public: + mutex(); + ~mutex(); + + void lock(); + void unlock(); + + protected: + #if defined(EA_PLATFORM_MICROSOFT) + #if defined(_WIN64) + uint64_t mMutexBuffer[40 / sizeof(uint64_t)]; // CRITICAL_SECTION is 40 bytes on Win64. + #elif defined(_WIN32) + uint32_t mMutexBuffer[24 / sizeof(uint32_t)]; // CRITICAL_SECTION is 24 bytes on Win32. 
+ #endif + #elif defined(EA_PLATFORM_POSIX) + pthread_mutex_t mMutex; + #endif + }; + #endif + + + // auto_mutex + class EASTL_API auto_mutex + { + public: + EA_FORCE_INLINE auto_mutex(mutex& mutex) : pMutex(&mutex) + { pMutex->lock(); } + + EA_FORCE_INLINE ~auto_mutex() + { pMutex->unlock(); } + + protected: + mutex* pMutex; + + auto_mutex(const auto_mutex&) = delete; + void operator=(const auto_mutex&) = delete; + }; + + + // shared_ptr_auto_mutex + class EASTL_API shared_ptr_auto_mutex : public auto_mutex + { + public: + shared_ptr_auto_mutex(const void* pSharedPtr); + + shared_ptr_auto_mutex(const shared_ptr_auto_mutex&) = delete; + void operator=(shared_ptr_auto_mutex&&) = delete; + }; + + + } // namespace Internal + +} // namespace eastl + + +#if defined(_MSC_VER) + #pragma warning(pop) +#endif + + +#endif // Header include guard + + + + + + + + diff --git a/include/EASTL/internal/tuple_fwd_decls.h b/include/EASTL/internal/tuple_fwd_decls.h new file mode 100644 index 0000000..a2c773c --- /dev/null +++ b/include/EASTL/internal/tuple_fwd_decls.h @@ -0,0 +1,56 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+///////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_TUPLE_FWD_DECLS_H +#define EASTL_TUPLE_FWD_DECLS_H + +#include + +#if EASTL_TUPLE_ENABLED + +namespace eastl +{ + template + class tuple; + + template + class tuple_size; + + template + class tuple_element; + + template + using tuple_element_t = typename tuple_element::type; + + // const typename for tuple_element_t, for when tuple or TupleImpl cannot itself be const + template + using const_tuple_element_t = typename conditional< + is_lvalue_reference>::value, + add_lvalue_reference_t>>, + const tuple_element_t + >::type; + + // get + template + tuple_element_t>& get(tuple& t); + + template + const_tuple_element_t>& get(const tuple& t); + + template + tuple_element_t>&& get(tuple&& t); + + template + T& get(tuple& t); + + template + const T& get(const tuple& t); + + template + T&& get(tuple&& t); +} + +#endif // EASTL_VARIADIC_TEMPLATES_ENABLED + +#endif // EASTL_TUPLE_FWD_DECLS_H diff --git a/include/EASTL/internal/type_compound.h b/include/EASTL/internal/type_compound.h new file mode 100644 index 0000000..178a734 --- /dev/null +++ b/include/EASTL/internal/type_compound.h @@ -0,0 +1,800 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_TYPE_COMPOUND_H +#define EASTL_INTERNAL_TYPE_COMPOUND_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + + +// Until we revise the code below to handle EDG warnings, we don't have much choice but to disable them. 
+#if defined(__EDG_VERSION__) + #pragma diag_suppress=1931 // operand of sizeof is not a type, variable, or dereferenced pointer expression +#endif + + +namespace eastl +{ + + /////////////////////////////////////////////////////////////////////// + // extent + // + // extent::value is an integral type representing the number of + // elements in the Ith dimension of array type T. + // + // For a given array type T[N], extent::value == N. + // For a given multi-dimensional array type T[M][N], extent::value == N. + // For a given multi-dimensional array type T[M][N], extent::value == M. + // For a given array type T and a given dimension I where I >= rank::value, extent::value == 0. + // For a given array type of unknown extent T[], extent::value == 0. + // For a given non-array type T and an arbitrary dimension I, extent::value == 0. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_extent_CONFORMANCE 1 // extent is conforming. + + template + struct extent_help : public eastl::integral_constant {}; + + template + struct extent_help : public eastl::integral_constant {}; + + template + struct extent_help : public eastl::extent_help { }; + + template + struct extent_help : public eastl::extent_help {}; + + template // extent uses unsigned instead of size_t. + struct extent : public eastl::extent_help { }; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR auto extent_v = extent::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_array + // + // is_array::value == true if and only if T is an array type, + // including unbounded array types. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_array_CONFORMANCE 1 // is_array is conforming; doesn't make mistakes. 
+ + template + struct is_array : public eastl::false_type {}; + + template + struct is_array : public eastl::true_type {}; + + template + struct is_array : public eastl::true_type {}; + + #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + EA_CONSTEXPR bool is_array_v = is_array::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_array_of_known_bounds + // + // Not part of the C++11 Standard. + // is_array_of_known_bounds::value is true if T is an array and is + // of known bounds. is_array_of_unknown_bounds::value == true, + // while is_array_of_unknown_bounds::value = false. + // + /////////////////////////////////////////////////////////////////////// + + template + struct is_array_of_known_bounds + : public eastl::integral_constant::value != 0> {}; + + + /////////////////////////////////////////////////////////////////////// + // is_array_of_unknown_bounds + // + // Not part of the C++11 Standard. + // is_array_of_unknown_bounds::value is true if T is an array but is + // of unknown bounds. is_array_of_unknown_bounds::value == false, + // while is_array_of_unknown_bounds::value = true. + // + /////////////////////////////////////////////////////////////////////// + + template + struct is_array_of_unknown_bounds + : public eastl::integral_constant::value && (eastl::extent::value == 0)> {}; + + + /////////////////////////////////////////////////////////////////////// + // is_member_function_pointer + // + // is_member_function_pointer::value == true if and only if T is a + // pointer to member function type. + // + /////////////////////////////////////////////////////////////////////// + // We detect member functions with 0 to N arguments. We can extend this + // for additional arguments if necessary. 
+ /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_member_function_pointer_CONFORMANCE 1 // is_member_function_pointer is conforming; doesn't make mistakes. + + // To do: Revise this to support C++11 variadic templates when possible. + // To do: We can probably also use remove_cv to simply the multitude of types below. + + template struct is_mem_fun_pointer_value : public false_type{}; + + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : 
public true_type{}; + + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + template struct is_mem_fun_pointer_value : public true_type{}; + + template + struct is_member_function_pointer : public integral_constant::value>{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_member_function_pointer_v = is_member_function_pointer::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_member_pointer + // + // is_member_pointer::value == true if and only if: + // is_member_object_pointer::value == true, or + // is_member_function_pointer::value == true + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_member_pointer_CONFORMANCE 1 // is_member_pointer is conforming; doesn't make mistakes. + + template + struct is_member_pointer + : public eastl::integral_constant::value>{}; + + template + struct is_member_pointer + : public eastl::true_type{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_member_pointer_v = is_member_pointer::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_member_object_pointer + // + // is_member_object_pointer::value == true if and only if T is a + // pointer to data member type. 
+ // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_member_object_pointer_CONFORMANCE 1 // is_member_object_pointer is conforming; doesn't make mistakes. + + template + struct is_member_object_pointer : public eastl::integral_constant::value && + !eastl::is_member_function_pointer::value + > {}; + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_member_object_pointer_v = is_member_object_pointer::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_pointer + // + // is_pointer::value == true if and only if T is a pointer type. + // This category includes function pointer types, but not pointer to + // member types. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_pointer_CONFORMANCE 1 // is_pointer is conforming; doesn't make mistakes. + + template struct is_pointer_helper : public false_type{}; + + template struct is_pointer_helper : public true_type{}; + template struct is_pointer_helper : public true_type{}; + template struct is_pointer_helper : public true_type{}; + template struct is_pointer_helper : public true_type{}; + + template + struct is_pointer_value : public type_and::value, type_not::value>::value> {}; + + template + struct is_pointer : public integral_constant::value>{}; + + #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + EA_CONSTEXPR bool is_pointer_v = is_pointer::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_convertible + // + // Given two (possible identical) types From and To, is_convertible::value == true + // if and only if an lvalue of type From can be implicitly converted to type To, + // or is_void::value == true + // + // An instance of the type predicate holds true if the expression To to = from;, where from is an object of type From, is well-formed. 
+ // + // is_convertible may only be applied to complete types. + // Type To may not be an abstract type. + // If the conversion is ambiguous, the program is ill-formed. + // If either or both of From and To are class types, and the conversion would invoke + // non-public member functions of either From or To (such as a private constructor of To, + // or a private conversion operator of From), the program is ill-formed. + // + // Note that without compiler help, both is_convertible and is_base + // can produce compiler errors if the conversion is ambiguous. + // Example: + // struct A {}; + // struct B : A {}; + // struct C : A {}; + // struct D : B, C {}; + // is_convertible::value; // Generates compiler error. + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_convertible_to))) + #define EASTL_TYPE_TRAIT_is_convertible_CONFORMANCE 1 // is_convertible is conforming. + + // Problem: VC++ reports that int is convertible to short, yet if you construct a short from an int then VC++ generates a warning: + // warning C4242: 'initializing' : conversion from 'int' to 'short', possible loss of data. We can deal with this by making + // is_convertible be false for conversions that could result in loss of data. Or we could make another trait called is_lossless_convertible + // and use that appropriately in our code. Or we could put the onus on the user to work around such warnings. + template + struct is_convertible : public integral_constant{}; + + #else + #define EASTL_TYPE_TRAIT_is_convertible_CONFORMANCE 1 + + template::value || eastl::is_function::value || eastl::is_array::value > + struct is_convertible_helper // Anything is convertible to void. Nothing is convertible to a function or an array. 
+ { static const bool value = eastl::is_void::value; }; + + template + class is_convertible_helper + { + template + static void ToFunction(To1); // We try to call this function with an instance of From. It is valid if From can be converted to To. + + template + static eastl::no_type is(...); + + template + static decltype(ToFunction(eastl::declval()), eastl::yes_type()) is(int); + + public: + static const bool value = sizeof(is(0)) == 1; + }; + + template + struct is_convertible + : public integral_constant::value> {}; + + #endif + + #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + EA_CONSTEXPR bool is_convertible_v = is_convertible::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_nothrow_convertible + // + // https://en.cppreference.com/w/cpp/types/is_convertible + // + // template + // struct is_explicitly_convertible + // : public is_constructible {}; + /////////////////////////////////////////////////////////////////////// + // TODO(rparolin): implement type-trait + + + + /////////////////////////////////////////////////////////////////////// + // is_explicitly_convertible + // + // This sometime-seen extension trait is the same as is_constructible + // and so we don't define it. + // + // template + // struct is_explicitly_convertible + // : public is_constructible {}; + /////////////////////////////////////////////////////////////////////// + + + + /////////////////////////////////////////////////////////////////////// + // is_union + // + // is_union::value == true if and only if T is a union type. + // + // There is no way to tell if a type is a union without compiler help. + // As of this writing, only Metrowerks v8+ supports such functionality + // via 'msl::is_union::value'. The user can force something to be + // evaluated as a union via EASTL_DECLARE_UNION. 
+ /////////////////////////////////////////////////////////////////////// + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_union))) + #define EASTL_TYPE_TRAIT_is_union_CONFORMANCE 1 // is_union is conforming. + + template + struct is_union : public integral_constant{}; + #else + #define EASTL_TYPE_TRAIT_is_union_CONFORMANCE 0 // is_union is not fully conforming. + + template struct is_union : public false_type{}; + #endif + + #define EASTL_DECLARE_UNION(T) namespace eastl{ template <> struct is_union : public true_type{}; template <> struct is_union : public true_type{}; } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_union_v = is_union::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_class + // + // is_class::value == true if and only if T is a class or struct + // type (and not a union type). + // + // Without specific compiler help, it is not possible to + // distinguish between unions and classes. As a result, is_class + // will erroneously evaluate to true for union types. + /////////////////////////////////////////////////////////////////////// + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_class))) + #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE 1 // is_class is conforming. 
+ + template + struct is_class : public integral_constant{}; + #elif defined(__EDG__) + #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE EASTL_TYPE_TRAIT_is_union_CONFORMANCE + + typedef char yes_array_type[1]; + typedef char no_array_type[2]; + template static yes_array_type& is_class_helper(void (U::*)()); + template static no_array_type& is_class_helper(...); + + template + struct is_class : public integral_constant(0)) == sizeof(yes_array_type) && !is_union::value + >{}; + #elif !defined(__GNUC__) || (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) // Not GCC or GCC 3.4+ + #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE EASTL_TYPE_TRAIT_is_union_CONFORMANCE + + template static yes_type is_class_helper(void (U::*)()); + template static no_type is_class_helper(...); + + template + struct is_class : public integral_constant(0)) == sizeof(yes_type) && !is_union::value + >{}; + #else + #define EASTL_TYPE_TRAIT_is_class_CONFORMANCE 0 // is_class is not fully conforming. + + // GCC 2.x version, due to GCC being broken. + template + struct is_class : public false_type{}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_class_v = is_class::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_enum + // + // is_enum::value == true if and only if T is an enumeration type. + // + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_enum))) + #define EASTL_TYPE_TRAIT_is_enum_CONFORMANCE 1 // is_enum is conforming. + + template + struct is_enum : public integral_constant{}; + #else + #define EASTL_TYPE_TRAIT_is_enum_CONFORMANCE 1 // is_enum is conforming. 
+ + struct int_convertible{ int_convertible(int); }; + + template + struct is_enum_helper { template struct nest : public is_convertible{}; }; + + template <> + struct is_enum_helper { template struct nest : public false_type {}; }; + + template + struct is_enum_helper2 + { + typedef type_or::value, is_reference::value, is_class::value> selector; + typedef is_enum_helper helper_t; + typedef typename add_reference::type ref_t; + typedef typename helper_t::template nest result; + }; + + template + struct is_enum : public integral_constant::result::value>{}; + + template <> struct is_enum : public false_type {}; + template <> struct is_enum : public false_type {}; + template <> struct is_enum : public false_type {}; + template <> struct is_enum : public false_type {}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_enum_v = is_enum::value; + #endif + + #define EASTL_DECLARE_ENUM(T) namespace eastl{ template <> struct is_enum : public true_type{}; template <> struct is_enum : public true_type{}; } + + + + + + /////////////////////////////////////////////////////////////////////// + // is_polymorphic + // + // is_polymorphic::value == true if and only if T is a class or struct + // that declares or inherits a virtual function. is_polymorphic may only + // be applied to complete types. + // + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_polymorphic))) + #define EASTL_TYPE_TRAIT_is_polymorphic_CONFORMANCE 1 // is_polymorphic is conforming. + + template + struct is_polymorphic : public integral_constant{}; + #else + #define EASTL_TYPE_TRAIT_is_polymorphic_CONFORMANCE 1 // is_polymorphic is conforming. 
+ + template + struct is_polymorphic_imp1 + { + typedef typename remove_cv::type t; + + struct helper_1 : public t + { + helper_1(); + ~helper_1() throw(); + char pad[64]; + }; + + struct helper_2 : public t + { + helper_2(); + virtual ~helper_2() throw(); + #ifndef _MSC_VER + virtual void foo(); + #endif + char pad[64]; + }; + + static const bool value = (sizeof(helper_1) == sizeof(helper_2)); + }; + + template + struct is_polymorphic_imp2{ static const bool value = false; }; + + template + struct is_polymorphic_selector{ template struct rebind{ typedef is_polymorphic_imp2 type; }; }; + + template <> + struct is_polymorphic_selector{ template struct rebind{ typedef is_polymorphic_imp1 type; }; }; + + template + struct is_polymorphic_value{ + typedef is_polymorphic_selector::value> selector; + typedef typename selector::template rebind binder; + typedef typename binder::type imp_type; + static const bool value = imp_type::value; + }; + + template + struct is_polymorphic : public integral_constant::value>{}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_polymorphic_v = is_polymorphic::value; + #endif + + + + + /////////////////////////////////////////////////////////////////////// + // is_object + // + // is_object::value == true if and only if: + // is_reference::value == false, and + // is_function::value == false, and + // is_void::value == false + // + // The C++ standard, section 3.9p9, states: "An object type is a + // (possibly cv-qualified) type that is not a function type, not a + // reference type, and not incomplete (except for an incompletely + // defined object type). 
+ /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_object_CONFORMANCE (EASTL_TYPE_TRAIT_is_reference_CONFORMANCE && EASTL_TYPE_TRAIT_is_void_CONFORMANCE && EASTL_TYPE_TRAIT_is_function_CONFORMANCE) + + template + struct is_object : public integral_constant::value && !is_void::value && !is_function::value + >{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_object_v = is_object::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_scalar + // + // is_scalar::value == true if and only if: + // is_arithmetic::value == true, or + // is_enum::value == true, or + // is_pointer::value == true, or + // is_member_pointer::value == true, or + // is_null_pointer::value == true + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_scalar_CONFORMANCE 1 // is_scalar is conforming. + + template + struct is_scalar : public integral_constant::value || is_enum::value || is_pointer::value || + is_member_pointer::value || + is_null_pointer::value> {}; + + template struct is_scalar : public true_type {}; + template struct is_scalar : public true_type {}; + template struct is_scalar : public true_type {}; + template struct is_scalar : public true_type {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_scalar_v = is_scalar::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_compound + // + // Compound means anything but fundamental. See C++ standard, section 3.9.2. 
+ // + // is_compound::value == true if and only if: + // is_fundamental::value == false + // + // Thus, is_compound::value == true if and only if: + // is_floating_point::value == false, and + // is_integral::value == false, and + // is_void::value == false + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_compound_CONFORMANCE EASTL_TYPE_TRAIT_is_fundamental_CONFORMANCE + + template + struct is_compound : public integral_constant::value>{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_compound_v = is_compound::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // decay + // + // Converts the type T to its decayed equivalent. That means doing + // lvalue to rvalue, array to pointer, function to pointer conversions, + // and removal of const and volatile. + // This is the type conversion silently applied by the compiler to + // all function arguments when passed by value. + + #define EASTL_TYPE_TRAIT_decay_CONFORMANCE 1 // decay is conforming. + + template + struct decay + { + typedef typename eastl::remove_reference::type U; + + typedef typename eastl::conditional< + eastl::is_array::value, + typename eastl::remove_extent::type*, + typename eastl::conditional< + eastl::is_function::value, + typename eastl::add_pointer::type, + typename eastl::remove_cv::type + >::type + >::type type; + }; + + + // decay_t is the C++14 using typedef for typename decay::type, though + // it requires only C++11 compiler functionality to implement. + // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers. 
+ #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + #define EASTL_DECAY_T(T) typename decay::type + #else + template + using decay_t = typename decay::type; + #define EASTL_DECAY_T(T) decay_t + #endif + + + /////////////////////////////////////////////////////////////////////// + // common_type + // + // Determines the common type among all types T..., that is the type all T... + // can be implicitly converted to. + // + // It is intended that this be specialized by the user for cases where it + // is useful to do so. Example specialization: + // template + // struct common_type{ typedef MyBaseClassB type; }; + // + // The member typedef type shall be defined as set out in 20.9.7.6,p3. All types in + // the parameter pack T shall be complete or (possibly cv) void. A program may + // specialize this trait if at least one template parameter in the specialization + // is a user-defined type. Note: Such specializations are needed when only + // explicit conversions are desired among the template arguments. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_common_type_CONFORMANCE 1 // common_type is conforming. + + template + struct common_type; + + template + struct common_type + { typedef decay_t type; }; // Question: Should we use T or decay_t here? The C++11 Standard specifically (20.9.7.6,p3) specifies that it be without decay, but libc++ uses decay. + + template + struct common_type + { + typedef decay_t() : declval())> type; // The type of a tertiary expression is set by the compiler to be the common type of the two result types. + }; + + template + struct common_type + { typedef typename common_type::type, V...>::type type; }; + + + // common_type_t is the C++14 using typedef for typename common_type::type. + // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers. + #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + #define EASTL_COMMON_TYPE_T(...) 
typename common_type<__VA_ARGS__>::type + #else + template + using common_type_t = typename common_type::type; + #define EASTL_COMMON_TYPE_T(...) common_type_t<__VA_ARGS__> + #endif + + /////////////////////////////////////////////////////////////////////// + // is_final + /////////////////////////////////////////////////////////////////////// + #if EA_COMPILER_HAS_FEATURE(is_final) + template + struct is_final : public integral_constant {}; + #else + // no compiler support so we always return false + template + struct is_final : public false_type {}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_final_v = is_final::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_aggregate + // + // https://en.cppreference.com/w/cpp/language/aggregate_initialization + // + // An aggregate is one of the following types: + // * array type + // * class type (typically, struct or union), that has + // * no private or protected non-static data members + // * no user-provided constructors (explicitly defaulted or deleted constructors are allowed) + // * no user-provided, inherited, or explicit constructors + // * (explicitly defaulted or deleted constructors are allowed) + // * no virtual, private, or protected (since C++17) base classes + // * no virtual member functions + // * no default member initializers + // + /////////////////////////////////////////////////////////////////////// + #if EA_COMPILER_HAS_FEATURE(is_aggregate) || defined(_MSC_VER) && (_MSC_VER >= 1916) // VS2017 15.9+ + #define EASTL_TYPE_TRAIT_is_aggregate_CONFORMANCE 1 + + template + struct is_aggregate : public integral_constant {}; + #else + #define EASTL_TYPE_TRAIT_is_aggregate_CONFORMANCE 0 + + // no compiler support so we always return false + template + struct is_aggregate : public false_type {}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_aggregate_v = is_aggregate::value; + 
#endif +} // namespace eastl + + +#endif // Header include guard + + + + diff --git a/include/EASTL/internal/type_fundamental.h b/include/EASTL/internal/type_fundamental.h new file mode 100644 index 0000000..a90cb44 --- /dev/null +++ b/include/EASTL/internal/type_fundamental.h @@ -0,0 +1,285 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_TYPE_FUNDAMENTAL_H +#define EASTL_INTERNAL_TYPE_FUNDAMENTAL_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +namespace eastl +{ + + + /////////////////////////////////////////////////////////////////////// + // is_void + // + // is_void::value == true if and only if T is one of the following types: + // [const][volatile] void + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_void_CONFORMANCE 1 // is_void is conforming. + + template struct is_void : public false_type{}; + + template <> struct is_void : public true_type{}; + template <> struct is_void : public true_type{}; + template <> struct is_void : public true_type{}; + template <> struct is_void : public true_type{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_void_v = is_void::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // has_void_arg + // + // utility which identifies if any of the given template arguments is void. + // + // TODO(rparolin): refactor with fold expressions when C++17 compilers are widely available. 
+ /////////////////////////////////////////////////////////////////////// + + template + struct has_void_arg; + + template <> + struct has_void_arg<> + : public eastl::false_type {}; + + template + struct has_void_arg + { static const bool value = (eastl::is_void::value || eastl::has_void_arg::value); }; + + + /////////////////////////////////////////////////////////////////////// + // is_null_pointer + // + // C++14 type trait. Refers only to nullptr_t and not NULL (0). + // eastl::is_null_pointer::value == true + // eastl::is_null_pointer::value == true + // eastl::is_null_pointer::value == false + // eastl::is_null_pointer::value == [cannot compile] + // + /////////////////////////////////////////////////////////////////////// + + #if defined(EA_COMPILER_CPP11_ENABLED) && !defined(EA_COMPILER_NO_DECLTYPE) && !defined(_MSC_VER) // VC++'s handling of decltype(nullptr) is broken. + #define EASTL_TYPE_TRAIT_is_null_pointer_CONFORMANCE 1 + + template + struct is_null_pointer : public eastl::is_same::type, decltype(nullptr)> {}; // A C++11 compiler defines nullptr, but you need a C++11 standard library to declare std::nullptr_t. So it's safer to compare against decltype(nullptr) than to use std::nullptr_t, because we may have a C++11 compiler but C++98 library (happens with Apple frequently). 
+ #else + #define EASTL_TYPE_TRAIT_is_null_pointer_CONFORMANCE 1 + + template + struct is_null_pointer : public eastl::is_same::type, std::nullptr_t> {}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_null_pointer_v = is_null_pointer::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_integral + // + // is_integral::value == true if and only if T is one of the following types: + // [const] [volatile] bool + // [const] [volatile] char + // [const] [volatile] signed char + // [const] [volatile] unsigned char + // [const] [volatile] wchar_t + // [const] [volatile] short + // [const] [volatile] int + // [const] [volatile] long + // [const] [volatile] long long + // [const] [volatile] unsigned short + // [const] [volatile] unsigned int + // [const] [volatile] unsigned long + // [const] [volatile] unsigned long long + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_integral_CONFORMANCE 1 // is_integral is conforming. 
+ + template struct is_integral_helper : public false_type{}; + + template <> struct is_integral_helper : public true_type{}; + template <> struct is_integral_helper : public true_type{}; + template <> struct is_integral_helper : public true_type{}; + template <> struct is_integral_helper : public true_type{}; + template <> struct is_integral_helper : public true_type{}; + + template <> struct is_integral_helper : public true_type{}; + template <> struct is_integral_helper : public true_type{}; + template <> struct is_integral_helper : public true_type{}; + template <> struct is_integral_helper : public true_type{}; + template <> struct is_integral_helper : public true_type{}; + + template <> struct is_integral_helper : public true_type{}; + template <> struct is_integral_helper : public true_type{}; + #if defined(EA_CHAR16_NATIVE) && EA_CHAR16_NATIVE + template <> struct is_integral_helper : public true_type{}; + #endif + #if defined(EA_CHAR32_NATIVE) && EA_CHAR32_NATIVE + template <> struct is_integral_helper : public true_type{}; + #endif + #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type which is already handled above... 
+ template <> struct is_integral_helper : public true_type{}; + #endif + + template + struct is_integral : public eastl::is_integral_helper::type>{}; + + #define EASTL_DECLARE_INTEGRAL(T) \ + namespace eastl{ \ + template <> struct is_integral : public true_type{}; \ + template <> struct is_integral : public true_type{}; \ + template <> struct is_integral : public true_type{}; \ + template <> struct is_integral : public true_type{}; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_integral_v = is_integral::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_floating_point + // + // is_floating_point::value == true if and only if T is one of the following types: + // [const] [volatile] float + // [const] [volatile] double + // [const] [volatile] long double + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_floating_point_CONFORMANCE 1 // is_floating_point is conforming. 
+ + template struct is_floating_point_helper : public false_type{}; + + template <> struct is_floating_point_helper : public true_type{}; + template <> struct is_floating_point_helper : public true_type{}; + template <> struct is_floating_point_helper : public true_type{}; + + template + struct is_floating_point : public eastl::is_floating_point_helper::type>{}; + + #define EASTL_DECLARE_FLOATING_POINT(T) \ + namespace eastl{ \ + template <> struct is_floating_point : public true_type{}; \ + template <> struct is_floating_point : public true_type{}; \ + template <> struct is_floating_point : public true_type{}; \ + template <> struct is_floating_point : public true_type{}; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_floating_point_v = is_floating_point::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_arithmetic + // + // is_arithmetic::value == true if and only if: + // is_floating_point::value == true, or + // is_integral::value == true + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_arithmetic_CONFORMANCE 1 // is_arithmetic is conforming. + + template + struct is_arithmetic + : public integral_constant::value || is_floating_point::value> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_arithmetic_v = is_arithmetic::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_fundamental + // + // is_fundamental::value == true if and only if: + // is_floating_point::value == true, or + // is_integral::value == true, or + // is_void::value == true + // is_null_pointer::value == true + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_fundamental_CONFORMANCE 1 // is_fundamental is conforming. 
+ + template + struct is_fundamental + : public bool_constant || is_integral_v || is_floating_point_v || is_null_pointer_v> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_fundamental_v = is_fundamental::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_hat_type + // + // is_hat_type::value == true if and only if: + // underlying type is a C++/CX '^' type such as: Foo^ + // meaning the type is heap allocated and ref-counted + /////////////////////////////////////////////////////////////////////// + + template struct is_hat_type_helper : public false_type {}; + + #if (EABASE_VERSION_N > 20607 && defined(EA_COMPILER_WINRTCX_ENABLED)) || defined(__cplusplus_winrt) + template struct is_hat_type_helper : public true_type{}; + #endif + + template + struct is_hat_type : public eastl::is_hat_type_helper {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_hat_type_v = is_hat_type::value; + #endif + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + + + diff --git a/include/EASTL/internal/type_pod.h b/include/EASTL/internal/type_pod.h new file mode 100644 index 0000000..8726a7e --- /dev/null +++ b/include/EASTL/internal/type_pod.h @@ -0,0 +1,1945 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_TYPE_POD_H +#define EASTL_INTERNAL_TYPE_POD_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include + +namespace eastl +{ + /////////////////////////////////////////////////////////////////////// + // is_empty + // + // is_empty::value == true if and only if T is an empty class or struct. + // is_empty may only be applied to complete types. 
+ // + // is_empty cannot be used with union types until is_union can be made to work. + /////////////////////////////////////////////////////////////////////// + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_empty))) + #define EASTL_TYPE_TRAIT_is_empty_CONFORMANCE 1 // is_empty is conforming. + + template + struct is_empty : public integral_constant{}; + #else + #define EASTL_TYPE_TRAIT_is_empty_CONFORMANCE 1 // is_empty is fully conforming. + + template + struct is_empty_helper_t1 : public T { char m[64]; }; + struct is_empty_helper_t2 { char m[64]; }; + + // The inheritance in empty_helper_t1 will not work with non-class types + template + struct is_empty_helper : public eastl::false_type{}; + + template + struct is_empty_helper : public eastl::integral_constant) == sizeof(is_empty_helper_t2) + >{}; + + template + struct is_empty_helper2 + { + typedef typename eastl::remove_cv::type _T; + typedef eastl::is_empty_helper<_T, eastl::is_class<_T>::value> type; + }; + + template + struct is_empty : public eastl::is_empty_helper2::type {}; + #endif + + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_empty_v = is_empty::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_pod + // + // is_pod::value == true if and only if, for a given type T: + // - is_scalar::value == true, or + // - T is a class or struct that has no user-defined copy assignment + // operator or destructor, and T has no non-static data members M for + // which is_pod::value == false, and no members of reference type, or + // - T is the type of an array of objects E for which is_pod::value == true + // + // is_pod may only be applied to complete types. 
+ // + // Without some help from the compiler or user, is_pod will not report + // that a struct or class is a POD, but will correctly report that + // built-in types such as int are PODs. The user can help the compiler + // by using the EASTL_DECLARE_POD macro on a class. + /////////////////////////////////////////////////////////////////////// + + #if defined(EA_COMPILER_MSVC) + #define EASTL_TYPE_TRAIT_is_pod_CONFORMANCE 1 // is_pod is conforming. Actually as of VS2008 it is apparently not fully conforming, as it flags the following as a non-pod: struct Pod{ Pod(){} }; + + EA_DISABLE_VC_WARNING(4647) + template // We check for has_trivial_constructor only because the VC++ is_pod does. Is it due to some compiler bug? + struct is_pod : public eastl::integral_constant::value) || eastl::is_void::value || eastl::is_scalar::value>{}; + EA_RESTORE_VC_WARNING() + + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_pod))) + #define EASTL_TYPE_TRAIT_is_pod_CONFORMANCE 1 // is_pod is conforming. + + template + struct is_pod : public eastl::integral_constant::value || eastl::is_scalar::value>{}; + #else + #define EASTL_TYPE_TRAIT_is_pod_CONFORMANCE 0 // is_pod is not conforming. Can return false negatives. + + template // There's not much we can do here without some compiler extension. + struct is_pod : public eastl::integral_constant::value || eastl::is_scalar::type>::value>{}; + #endif + + template + struct is_pod : public is_pod{}; + + template + struct is_POD : public is_pod{}; // Backwards compatibility. 
+ + #define EASTL_DECLARE_IS_POD(T, isPod) \ + namespace eastl { \ + template <> struct is_pod : public eastl::integral_constant { }; \ + template <> struct is_pod : public eastl::integral_constant { }; \ + template <> struct is_pod : public eastl::integral_constant { }; \ + template <> struct is_pod : public eastl::integral_constant { }; \ + } + + // Old style macro, for bacwards compatibility: + #define EASTL_DECLARE_POD(T) namespace eastl{ template <> struct is_pod : public true_type{}; template <> struct is_pod : public true_type{}; } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_pod_v = is_pod::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_standard_layout + // + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(EA_COMPILER_MSVC) && (_MSC_VER >= 1700)) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_standard_layout))) + #define EASTL_TYPE_TRAIT_is_standard_layout_CONFORMANCE 1 // is_standard_layout is conforming. + + template + struct is_standard_layout : public eastl::integral_constant::value || eastl::is_scalar::value>{}; + #else + #define EASTL_TYPE_TRAIT_is_standard_layout_CONFORMANCE 0 // is_standard_layout is not conforming. Can return false negatives. + + template // There's not much we can do here without some compiler extension. 
+ struct is_standard_layout : public eastl::integral_constant::value || is_scalar::value>{}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_standard_layout_v = is_standard_layout::value; + #endif + + #define EASTL_DECLARE_IS_STANDARD_LAYOUT(T, isStandardLayout) \ + namespace eastl { \ + template <> struct is_standard_layout : public eastl::integral_constant { }; \ + template <> struct is_standard_layout : public eastl::integral_constant { }; \ + template <> struct is_standard_layout : public eastl::integral_constant { }; \ + template <> struct is_standard_layout : public eastl::integral_constant { }; \ + } + + // Old style macro, for bacwards compatibility: + #define EASTL_DECLARE_STANDARD_LAYOUT(T) namespace eastl{ template <> struct is_standard_layout : public true_type{}; template <> struct is_standard_layout : public true_type{}; } + + + + /////////////////////////////////////////////////////////////////////// + // has_trivial_constructor + // + // has_trivial_constructor::value == true if and only if T is a class + // or struct that has a trivial constructor. A constructor is trivial if + // - it is implicitly defined by the compiler, and + // - is_polymorphic::value == false, and + // - T has no virtual base classes, and + // - for every direct base class of T, has_trivial_constructor::value == true, + // where B is the type of the base class, and + // - for every nonstatic data member of T that has class type or array + // of class type, has_trivial_constructor::value == true, + // where M is the type of the data member + // + // has_trivial_constructor may only be applied to complete types. + // + // Without from the compiler or user, has_trivial_constructor will not + // report that a class or struct has a trivial constructor. + // The user can use EASTL_DECLARE_TRIVIAL_CONSTRUCTOR to help the compiler. + // + // A default constructor for a class X is a constructor of class X that + // can be called without an argument. 
+ /////////////////////////////////////////////////////////////////////// + + #if defined(_MSC_VER) && (_MSC_VER >= 1600) // VS2010+ + #define EASTL_TYPE_TRAIT_has_trivial_constructor_CONFORMANCE 1 // has_trivial_constructor is conforming. + + template + struct has_trivial_constructor : public eastl::integral_constant::value) && !eastl::is_hat_type::value>{}; + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) + #define EASTL_TYPE_TRAIT_has_trivial_constructor_CONFORMANCE 1 // has_trivial_constructor is conforming. + + template + struct has_trivial_constructor : public eastl::integral_constant::value>{}; + #else + #define EASTL_TYPE_TRAIT_has_trivial_constructor_CONFORMANCE 0 // has_trivial_constructor is not fully conforming. Can return false negatives. + + // With current compilers, this is all we can do. + template + struct has_trivial_constructor : public eastl::is_pod {}; + #endif + + #define EASTL_DECLARE_HAS_TRIVIAL_CONSTRUCTOR(T, hasTrivialConstructor) \ + namespace eastl { \ + template <> struct has_trivial_constructor : public eastl::integral_constant { }; \ + } + + // Old style macro, for bacwards compatibility: + #define EASTL_DECLARE_TRIVIAL_CONSTRUCTOR(T) namespace eastl{ template <> struct has_trivial_constructor : public true_type{}; template <> struct has_trivial_constructor : public true_type{}; } + + + + + /////////////////////////////////////////////////////////////////////// + // has_trivial_copy + // + // has_trivial_copy::value == true if and only if T is a class or + // struct that has a trivial copy constructor. 
A copy constructor is + // trivial if + // - it is implicitly defined by the compiler, and + // - is_polymorphic::value == false, and + // - T has no virtual base classes, and + // - for every direct base class of T, has_trivial_copy::value == true, + // where B is the type of the base class, and + // - for every nonstatic data member of T that has class type or array + // of class type, has_trivial_copy::value == true, where M is the + // type of the data member + // + // has_trivial_copy may only be applied to complete types. + // + // Another way of looking at this is: + // A copy constructor for class X is trivial if it is implicitly + // declared and if all the following are true: + // - Class X has no virtual functions (10.3) and no virtual base classes (10.1). + // - Each direct base class of X has a trivial copy constructor. + // - For all the nonstatic data members of X that are of class type + // (or array thereof), each such class type has a trivial copy constructor; + // otherwise the copy constructor is nontrivial. + // + // Without help from the compiler or user, has_trivial_copy will not report + // that a class or struct has a trivial copy constructor. The user can + // use EASTL_DECLARE_TRIVIAL_COPY to help the compiler. + /////////////////////////////////////////////////////////////////////// + + #if defined(_MSC_VER) + #define EASTL_TYPE_TRAIT_has_trivial_copy_CONFORMANCE 1 // has_trivial_copy is conforming. + + template + struct has_trivial_copy : public eastl::integral_constant::value) && !eastl::is_volatile::value && !eastl::is_hat_type::value>{}; + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) + #define EASTL_TYPE_TRAIT_has_trivial_copy_CONFORMANCE 1 // has_trivial_copy is conforming. 
+ + template + struct has_trivial_copy : public eastl::integral_constant::value) && (!eastl::is_volatile::value && !eastl::is_reference::value)>{}; + #else + #define EASTL_TYPE_TRAIT_has_trivial_copy_CONFORMANCE 0 // has_trivial_copy is not fully conforming. Can return false negatives. + + template + struct has_trivial_copy : public eastl::integral_constant::value && !eastl::is_volatile::value>{}; + #endif + + #define EASTL_DECLARE_HAS_TRIVIAL_COPY(T, hasTrivialCopy) \ + namespace eastl { \ + template <> struct has_trivial_copy : public eastl::integral_constant { }; \ + } + + // Old style macro, for bacwards compatibility: + #define EASTL_DECLARE_TRIVIAL_COPY(T) namespace eastl{ template <> struct has_trivial_copy : public true_type{}; template <> struct has_trivial_copy : public true_type{}; } + + + + + /////////////////////////////////////////////////////////////////////// + // has_trivial_assign + // + // has_trivial_assign::value == true if and only if T is a class or + // struct that has a trivial copy assignment operator. A copy assignment + // operator is trivial if: + // - it is implicitly defined by the compiler, and + // - is_polymorphic::value == false, and + // - T has no virtual base classes, and + // - for every direct base class of T, has_trivial_assign::value == true, + // where B is the type of the base class, and + // - for every nonstatic data member of T that has class type or array + // of class type, has_trivial_assign::value == true, where M is + // the type of the data member. + // + // has_trivial_assign may only be applied to complete types. + // + // Without from the compiler or user, has_trivial_assign will not + // report that a class or struct has trivial assignment. The user + // can use EASTL_DECLARE_TRIVIAL_ASSIGN to help the compiler. 
+ /////////////////////////////////////////////////////////////////////// + + #if defined(_MSC_VER) && (_MSC_VER >= 1600) + #define EASTL_TYPE_TRAIT_has_trivial_assign_CONFORMANCE 1 // has_trivial_assign is conforming. + + template + struct has_trivial_assign : public integral_constant::value) && !eastl::is_const::value && !eastl::is_volatile::value && !eastl::is_hat_type::value>{}; + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) + #define EASTL_TYPE_TRAIT_has_trivial_assign_CONFORMANCE 1 // has_trivial_assign is conforming. + + template + struct has_trivial_assign : public integral_constant::value) && !eastl::is_const::value && !eastl::is_volatile::value>{}; + #else + #define EASTL_TYPE_TRAIT_has_trivial_assign_CONFORMANCE 0 // is_pod is not fully conforming. Can return false negatives. + + template + struct has_trivial_assign : public integral_constant::value && !is_const::value && !is_volatile::value + >{}; + #endif + + #define EASTL_DECLARE_HAS_TRIVIAL_ASSIGN(T, hasTrivialAssign) \ + namespace eastl { \ + template <> struct has_trivial_assign : public eastl::integral_constant { }; \ + } + + // Old style macro, for bacwards compatibility: + #define EASTL_DECLARE_TRIVIAL_ASSIGN(T) namespace eastl{ template <> struct has_trivial_assign : public true_type{}; template <> struct has_trivial_assign : public true_type{}; } + + + + + /////////////////////////////////////////////////////////////////////// + // has_trivial_destructor + // + // has_trivial_destructor::value == true if and only if T is a class + // or struct that has a trivial destructor. 
A destructor is trivial if + // - it is implicitly defined by the compiler, and + // - for every direct base class of T, has_trivial_destructor::value == true, + // where B is the type of the base class, and + // - for every nonstatic data member of T that has class type or + // array of class type, has_trivial_destructor::value == true, + // where M is the type of the data member + // + // has_trivial_destructor may only be applied to complete types. + // + // Without from the compiler or user, has_trivial_destructor will not + // report that a class or struct has a trivial destructor. + // The user can use EASTL_DECLARE_TRIVIAL_DESTRUCTOR to help the compiler. + /////////////////////////////////////////////////////////////////////// + + #if defined(_MSC_VER) && (_MSC_VER >= 1600) + #define EASTL_TYPE_TRAIT_has_trivial_destructor_CONFORMANCE 1 // has_trivial_destructor is conforming. + + template + struct has_trivial_destructor : public eastl::integral_constant::value) && !eastl::is_hat_type::value>{}; + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) + #define EASTL_TYPE_TRAIT_has_trivial_destructor_CONFORMANCE 1 // has_trivial_destructor is conforming. + + template + struct has_trivial_destructor : public eastl::integral_constant::value>{}; + #else + #define EASTL_TYPE_TRAIT_has_trivial_destructor_CONFORMANCE 0 // is_pod is not fully conforming. Can return false negatives. + + // With current compilers, this is all we can do. 
+ template + struct has_trivial_destructor : public eastl::is_pod{}; + #endif + + #define EASTL_DECLARE_HAS_TRIVIAL_DESTRUCTOR(T, hasTrivialDestructor) \ + namespace eastl { \ + template <> struct has_trivial_destructor : public eastl::integral_constant { }; \ + } + + // Old style macro, for bacwards compatibility: + #define EASTL_DECLARE_TRIVIAL_DESTRUCTOR(T) namespace eastl{ template <> struct has_trivial_destructor : public true_type{}; template <> struct has_trivial_destructor : public true_type{}; } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool has_trivial_destructor_v = has_trivial_destructor::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // has_trivial_relocate + // + // This is an EA extension to the type traits standard. + // This trait is deprecated under conforming C++11 compilers, as C++11 + // move functionality supercedes this functionality and we want to + // migrate away from it in the future. + // + // A trivially relocatable object is one that can be safely memmove'd + // to uninitialized memory. construction, assignment, and destruction + // properties are not addressed by this trait. A type that has the + // is_fundamental trait would always have the has_trivial_relocate trait. + // A type that has the has_trivial_constructor, has_trivial_copy or + // has_trivial_assign traits would usally have the has_trivial_relocate + // trait, but this is not strictly guaranteed. + // + // The user can use EASTL_DECLARE_TRIVIAL_RELOCATE to help the compiler. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_has_trivial_relocate_CONFORMANCE 0 // is_pod is not fully conforming. Can return false negatives. 
+ + template + struct has_trivial_relocate : public eastl::bool_constant && !eastl::is_volatile_v> {}; + + #define EASTL_DECLARE_TRIVIAL_RELOCATE(T) namespace eastl{ template <> struct has_trivial_relocate : public true_type{}; template <> struct has_trivial_relocate : public true_type{}; } + + + + + /////////////////////////////////////////////////////////////////////// + // has_nothrow_constructor + // + // has_nothrow_constructor::value == true if and only if T is a + // class or struct whose default constructor has an empty throw specification. + // + // has_nothrow_constructor may only be applied to complete types. + // + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) + #define EASTL_TYPE_TRAIT_has_nothrow_constructor_CONFORMANCE 1 + + template + struct has_nothrow_constructor + : public eastl::integral_constant{}; + + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && defined(_MSC_VER) + // Microsoft's implementation of __has_nothrow_constructor is crippled and returns true only if T is a class that has an explicit constructor. + // "Returns true if the default constructor has an empty exception specification." + #define EASTL_TYPE_TRAIT_has_nothrow_constructor_CONFORMANCE 0 + + template // This is mistakenly returning true for an unbounded array of scalar type. + struct has_nothrow_constructor : public eastl::integral_constant::type>::value || eastl::is_reference::value>{}; + + #else + #define EASTL_TYPE_TRAIT_has_nothrow_constructor_CONFORMANCE 0 // has_nothrow_constructor is not fully conforming. Can return false negatives. + + template + struct has_nothrow_constructor // To do: Improve this to include other types that can work. 
+ { static const bool value = eastl::is_scalar::type>::value || eastl::is_reference::value; }; + #endif + + #define EASTL_DECLARE_HAS_NOTHROW_CONSTRUCTOR(T, hasNothrowConstructor) \ + namespace eastl { \ + template <> struct has_nothrow_constructor : public eastl::integral_constant { }; \ + } + + + + /////////////////////////////////////////////////////////////////////// + // has_nothrow_copy + // + // has_nothrow_copy::value == true if and only if T is a class or + // struct whose copy constructor has an empty throw specification. + // + // has_nothrow_copy may only be applied to complete types. + // + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) + #define EASTL_TYPE_TRAIT_has_nothrow_copy_CONFORMANCE 1 + + template + struct has_nothrow_copy : public eastl::integral_constant{}; + + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && defined(_MSC_VER) + // Microsoft's implementation of __has_nothrow_copy is crippled and returns true only if T is a class that has a copy constructor. + // "Returns true if the copy constructor has an empty exception specification." + #define EASTL_TYPE_TRAIT_has_nothrow_copy_CONFORMANCE 0 + + template + struct has_nothrow_copy : public eastl::integral_constant::type>::value || eastl::is_reference::value>{}; + + #else + #define EASTL_TYPE_TRAIT_has_nothrow_copy_CONFORMANCE 0 // has_nothrow_copy is not fully conforming. Can return false negatives. + + template + struct has_nothrow_copy // To do: Improve this to include other types that can work. 
+ { static const bool value = eastl::is_scalar::type>::value || eastl::is_reference::value; }; + #endif + + #define EASTL_DECLARE_HAS_NOTHROW_COPY(T, hasNothrowCopy) \ + namespace eastl { \ + template <> struct has_nothrow_copy : public eastl::integral_constant { }; \ + } + + + + /////////////////////////////////////////////////////////////////////// + // has_nothrow_assign + // + // has_nothrow_assign::value == true if and only if T is a class or + // struct whose copy assignment operator has an empty throw specification. + // + // has_nothrow_assign may only be applied to complete types. + // + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) + #define EASTL_TYPE_TRAIT_has_nothrow_assign_CONFORMANCE 1 + + template + struct has_nothrow_assign : public eastl::integral_constant{}; + + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && defined(_MSC_VER) + // Microsoft's implementation of __has_nothrow_assign is crippled and returns true only if T is a class that has an assignment operator. + // "Returns true if a copy assignment operator has an empty exception specification." + #define EASTL_TYPE_TRAIT_has_nothrow_assign_CONFORMANCE 0 + + template // This is mistakenly returning true for an unbounded array of scalar type. + struct has_nothrow_assign : public eastl::integral_constant::type>::value || eastl::is_reference::value>{}; + #else + #define EASTL_TYPE_TRAIT_has_nothrow_assign_CONFORMANCE 0 // has_nothrow_assign is not fully conforming. Can return false negatives. + + template + struct has_nothrow_assign // To do: Improve this to include other types that can work. 
+ { static const bool value = eastl::is_scalar::type>::value || eastl::is_reference::value; } ; + #endif + + #define EASTL_DECLARE_HAS_NOTHROW_ASSIGN(T, hasNothrowAssign) \ + namespace eastl { \ + template <> struct has_nothrow_assign : public eastl::integral_constant { }; \ + } + + + + /////////////////////////////////////////////////////////////////////// + // has_virtual_destructor + // + // has_virtual_destructor::value == true if and only if T is a class + // or struct with a virtual destructor. + // + // has_virtual_destructor may only be applied to complete types. + // + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) + #define EASTL_TYPE_TRAIT_has_virtual_destructor_CONFORMANCE 1 + + template + struct has_virtual_destructor : public eastl::integral_constant{}; + #else + #define EASTL_TYPE_TRAIT_has_virtual_destructor_CONFORMANCE 0 // has_virtual_destructor is not fully conforming. Can return false negatives. + + template + struct has_virtual_destructor : public eastl::false_type{}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool has_virtual_destructor_v = has_virtual_destructor::value; + #endif + + #define EASTL_DECLARE_HAS_VIRTUAL_DESTRUCTOR(T, hasVirtualDestructor) \ + namespace eastl { \ + template <> struct has_virtual_destructor : public eastl::integral_constant { }; \ + template <> struct has_virtual_destructor : public eastl::integral_constant { }; \ + template <> struct has_virtual_destructor : public eastl::integral_constant { }; \ + template <> struct has_virtual_destructor : public eastl::integral_constant { }; \ + } + + + /////////////////////////////////////////////////////////////////////// + // is_literal_type + // + // See the C++11 Standard, section 2.9,p10. 
+ // A type is a literal type if it is: + // - a scalar type; or + // - a reference type referring to a literal type; or + // - an array of literal type; or + // - a class type (Clause 9) that has all of the following properties: + // - it has a trivial destructor, + // - every constructor call and full-expression in the brace-or-equal-initializer s for non-static data members (if any) is a constant expression (5.19), + // - it is an aggregate type (8.5.1) or has at least one constexpr constructor or constructor template that is not a copy or move constructor, and + // - all of its non-static data members and base classes are of literal types. + // + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_literal)) + #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 1 + + template + struct is_literal_type : public eastl::integral_constant{}; + + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4006)) || (defined(_MSC_VER) && (_MSC_VER >= 1700))) // VS2012+ + #if defined(EA_COMPILER_GNUC) && (!defined(EA_COMPILER_CPP11_ENABLED) || (EA_COMPILER_VERSION < 4007)) + #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 0 // It seems that in this case GCC supports the compiler intrinsic but reports it as false when it's true. + #else + #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 1 + #endif + + template + struct is_literal_type : public eastl::integral_constant{}; + + #else + #define EASTL_TYPE_TRAIT_is_literal_type_CONFORMANCE 0 + + // It's not clear if this trait can be fully implemented without explicit compiler support. + // For now we assume that it can't be but implement something that gets it right at least + // some of the time. Recall that partial positives and false negatives are OK (though not ideal), + // while false positives are not OK for us to generate. 
+ + template // This is not a complete implementation and will be true for only some literal types (the basic ones). + struct is_literal_type : public eastl::integral_constant::type>::type>::value>{}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_literal_type_v = is_literal_type::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_abstract + // + // is_abstract::value == true if and only if T is a class or struct + // that has at least one pure virtual function. is_abstract may only + // be applied to complete types. + // + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_abstract))) + #define EASTL_TYPE_TRAIT_is_abstract_CONFORMANCE 1 // is_abstract is conforming. + + template + struct is_abstract : public integral_constant{}; + #else + #define EASTL_TYPE_TRAIT_is_abstract_CONFORMANCE 0 + + template::value> + class is_abstract_helper + { + template + static eastl::yes_type test(...); + + template + static eastl::no_type test(T1(*)[1]); // The following: 'typedef SomeAbstractClass (*SomeFunctionType)[1];' is invalid (can't have an array of abstract types) and thus doesn't choose this path. 
+ + public: + static const bool value = (sizeof(test(NULL)) == sizeof(eastl::yes_type)); + }; + + template + struct is_abstract_helper + { static const bool value = false; }; + + template + struct is_abstract + : public integral_constant::value> { }; + + #endif + + #define EASTL_DECLARE_IS_ABSTRACT(T, isAbstract) \ + namespace eastl { \ + template <> struct is_abstract : public eastl::integral_constant { }; \ + template <> struct is_abstract : public eastl::integral_constant { }; \ + template <> struct is_abstract : public eastl::integral_constant { }; \ + template <> struct is_abstract : public eastl::integral_constant { }; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_abstract_v = is_abstract::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_trivially_copyable + // + // T is a trivially copyable type (3.9) T shall be a complete type, + // (possibly cv-qualified) void, or an array of unknown bound. + // + // 3.9,p3: For any trivially copyable type T, if two pointers to T + // point to distinct T objects obj1 and obj2, where neither obj1 nor + // obj2 is a base-class subobject, if the underlying bytes making + // up obj1 are copied into obj2, obj2 shall subsequently hold the + // same value as obj1. In other words, you can memcpy/memmove it. + /////////////////////////////////////////////////////////////////////// + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(_MSC_VER) && (_MSC_VER >= 1700)) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 5003)) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_trivially_copyable))) + #define EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE 1 + + // https://connect.microsoft.com/VisualStudio/feedback/details/808827/c-std-is-trivially-copyable-produces-wrong-result-for-arrays + // + // From Microsoft: + // We're working on fixing this. 
When overhauling in VC 2013, I incorrectly believed that is_trivially_copyable was a synonym + // for is_trivially_copy_constructible. I've asked the compiler team to provide a compiler hook with 100% accurate answers. (Currently, the + // compiler hook has incorrect answers for volatile scalars, volatile data members, and various scenarios for defaulted/deleted/private + // special member functions - I wrote an exhaustive test case to exercise the complicated Standardese.) When the compiler hook is fixed, + // I'll change to invoke it. + // + // Microsoft broken VS2013 STL implementation: + // template + // struct is_trivially_copyable + // : is_trivially_copy_constructible<_Ty>::type + // { // determine whether _Ty has a trivial copy constructor + // }; + // + + template + struct is_trivially_copyable { static const bool value = __is_trivially_copyable(T); }; + + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_GNUC)) + #define EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE 1 + + // Micrsoft (prior to VS2012) and GCC have __has_trivial_copy, but it may not be identical with the goals of this type trait. + template + struct is_trivially_copyable : public integral_constant::type>::value) && (!eastl::is_void::value && !eastl::is_volatile::value && !eastl::is_reference::value)>{}; + #else + #define EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE 0 // Generates false negatives. 
+ + template + struct is_trivially_copyable { static const bool value = eastl::is_scalar::type>::value; }; + #endif + + #define EASTL_DECLARE_IS_TRIVIALLY_COPYABLE(T, isTriviallyCopyable) \ + namespace eastl { \ + template <> struct is_trivially_copyable : public eastl::integral_constant { }; \ + template <> struct is_trivially_copyable : public eastl::integral_constant { }; \ + template <> struct is_trivially_copyable : public eastl::integral_constant { }; \ + template <> struct is_trivially_copyable : public eastl::integral_constant { }; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_trivially_copyable_v = is_trivially_copyable::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_constructible + // + // See the C++11 Standard, section 20.9.4.3,p6. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE 1 + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_constructible))) + template + struct is_constructible : public bool_constant<__is_constructible(T, Args...) > {}; + #else + // We implement a copy of move here has move_internal. We are currently stuck doing this because our move + // implementation is in and currently #includes us, and so we have a header + // chicken-and-egg problem. To do: Resolve this, probably by putting eastl::move somewhere else. 
+ template + inline typename eastl::remove_reference::type&& move_internal(T&& x) EA_NOEXCEPT + { return ((typename eastl::remove_reference::type&&)x); } + + template + typename first_type_select()...)))>::type is(T&&, Args&& ...); + + template + struct can_construct_scalar_helper + { + static eastl::true_type can(T); + static eastl::false_type can(...); + }; + + template + eastl::false_type is(argument_sink, Args&& ...); + + // Except for scalars and references (handled below), check for constructibility via decltype. + template + struct is_constructible_helper_2 // argument_sink will catch all T that is not constructible from the Args and denote false_type + : public eastl::identity(), eastl::declval()...))>::type {}; + + template + struct is_constructible_helper_2 + : public eastl::is_scalar {}; + + template // We handle the case of multiple arguments below (by disallowing them). + struct is_constructible_helper_2 + : public eastl::identity::can(eastl::declval()))>::type {}; + + // Scalars and references can be constructed only with 0 or 1 argument. e.g the following is an invalid expression: int(17, 23) + template + struct is_constructible_helper_2 + : public eastl::false_type {}; + + template + struct is_constructible_helper_1 + : public is_constructible_helper_2::value || eastl::is_reference::value, T, Args...> {}; + + // Unilaterally dismiss void, abstract, unknown bound arrays, and function types as not constructible. 
+ template + struct is_constructible_helper_1 + : public false_type {}; + + // is_constructible + template + struct is_constructible + : public is_constructible_helper_1<(eastl::is_abstract::type>::value || + eastl::is_array_of_unknown_bounds::value || + eastl::is_function::type>::value || + eastl::has_void_arg::value), + T, Args...> {}; + + // Array types are constructible if constructed with no arguments and if their element type is default-constructible + template + struct is_constructible_helper_2 + : public eastl::is_constructible::type> {}; + + // Arrays with arguments are not constructible. e.g. the following is an invalid expression: int[3](37, 34, 12) + template + struct is_constructible_helper_2 + : public eastl::false_type {}; + + #endif + + + // You need to manually declare const/volatile variants individually if you want them. + #define EASTL_DECLARE_IS_CONSTRUCTIBLE(T, U, isConstructible) \ + namespace eastl { \ + template <> struct is_constructible : public eastl::integral_constant { }; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_constructible_v = is_constructible::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_trivially_constructible + // + // is_constructible::value is true and the variable definition + // for is_constructible, as defined below, is known to call no operation + // that is not trivial (3.9, 12). T and all types in the parameter pack + // Args shall be complete types, (possibly cv-qualified) void, or arrays + // of unknown bound. + // + // Note: + // C++11's is_trivially_constructible sounds the same as the pre-standard + // has_trivial_constructor type trait (which we also support here). However, + // the definition of has_trivial_constructor has never been formally standardized + // and so we can't just blindly equate the two to each other. 
Since we are + // moving forward with C++11 and deprecating the old type traits, we leave + // the old ones as-is, though we defer to them in cases where we don't seem + // to have a good alternative. + // + /////////////////////////////////////////////////////////////////////// + + #if defined(EA_COMPILER_NO_VARIADIC_TEMPLATES) + + #define EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE 0 + + // In this version we allow only zero or one argument (Arg). We can add more arguments + // by creating a number of extra specializations. It's probably not possible to + // simplify the implementation with recursive templates because ctor argument + // presence is specific. + // + // To consider: we can fold the two implementations below by making a macro that's defined + // has __is_trivially_constructible(T) or eastl::has_trivial_copy::value, depending on + // whether the __is_trivially_constructible compiler intrinsic is available. + + // If the compiler has this trait built-in (which ideally all compilers would have since it's necessary for full conformance) use it. 
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_trivially_constructible)) + + template + struct is_trivially_constructible + : public eastl::false_type {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant {}; + + #else + + template + struct is_trivially_constructible + : public eastl::false_type {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_constructor::type>::value> {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_copy::value> {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_copy::value> {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_copy::value> {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_copy::value> {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_copy::value> {}; + + #endif + + #else + + // If the compiler has this trait built-in (which ideally all compilers would have since it's necessary for full conformance) use it. 
+ #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_trivially_constructible)) + #define EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE 1 + + // We have a problem with clang here as of clang 3.4: __is_trivially_constructible(int[]) is false, yet I believe it should be true. + // Until it gets resolved, what we do is check for is_constructible along with __is_trivially_constructible(). + template + struct is_trivially_constructible + : public eastl::integral_constant::value && __is_trivially_constructible(T, Args...)> {}; + + #else + + #define EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE 0 // This is 0 but in fact it will work for most real-world cases due to the has_trivial_constructor specialization below. + + template + struct is_trivially_constructible + : public eastl::false_type {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_constructor::type>::value> {}; + + // It's questionable whether we can use has_trivial_copy here, as it could theoretically create a false-positive. 
+ template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_copy::value> {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_copy::value> {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_copy::value> {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_copy::value> {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_copy::value> {}; + + template + struct is_trivially_constructible + : public eastl::integral_constant::value && eastl::has_trivial_copy::value> {}; + + #endif + + #endif + + + #define EASTL_DECLARE_IS_TRIVIALLY_CONSTRUCTIBLE(T, isTriviallyConstructible) \ + namespace eastl { \ + template <> struct is_trivially_constructible : public eastl::integral_constant { }; \ + template <> struct is_trivially_constructible : public eastl::integral_constant { }; \ + template <> struct is_trivially_constructible : public eastl::integral_constant { }; \ + template <> struct is_trivially_constructible : public eastl::integral_constant { }; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_trivially_constructible_v = is_trivially_constructible::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_trivially_default_constructible + // + // is_trivially_constructible::value is true. + // This is thus identical to is_trivially_constructible. 
+ /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_trivially_default_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE + + template + struct is_trivially_default_constructible + : public eastl::is_trivially_constructible {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_trivially_default_constructible_v = is_trivially_default_constructible::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_trivial + // + // is_trivial::value == true if T is a scalar type, a trivially copyable + // class with a trivial default constructor, or array of such type/class, + // possibly cv-qualified), provides the member constant value equal true. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_trivial_CONFORMANCE ((EASTL_TYPE_TRAIT_is_trivially_default_constructible_CONFORMANCE && EASTL_TYPE_TRAIT_is_trivially_copyable_CONFORMANCE) ? 
1 : 0) + + #if defined(_MSC_VER) && _MSC_VER == 1800 + template + struct is_trivial_helper + : public eastl::integral_constant::value && eastl::is_trivially_default_constructible::value>{}; + + template + struct is_trivial_helper + : public false_type{}; + + template + struct is_trivial + : public is_trivial_helper<(EA_ALIGN_OF(T) > EA_PLATFORM_MIN_MALLOC_ALIGNMENT), T>::type{}; + #else + // All other compilers seem to be able to handle aligned types passed as value + template + struct is_trivial + : public eastl::integral_constant::value && eastl::is_trivially_default_constructible::value> {}; + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_trivial_v = is_trivial::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_nothrow_constructible + // + // is_constructible::value is true and the variable definition + // for is_constructible, as defined below, is known not to throw any + // exceptions (5.3.7). T and all types in the parameter pack Args shall + // be complete types, (possibly cv-qualified) void, or arrays of unknown bound. 
+ // + /////////////////////////////////////////////////////////////////////// + #if defined(EA_COMPILER_NO_NOEXCEPT) + + #define EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE 0 + + template + struct is_nothrow_constructible + : public eastl::false_type {}; + + template + struct is_nothrow_constructible + : public eastl::integral_constant::value> {}; + + template + struct is_nothrow_constructible + : public eastl::integral_constant::value> {}; + + template + struct is_nothrow_constructible + : public eastl::integral_constant::value> {}; + + template + struct is_nothrow_constructible + : public eastl::integral_constant::value> {}; + + template + struct is_nothrow_constructible + : public eastl::integral_constant::value> {}; + + #else + #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION < 4008) + #define EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE 0 // GCC up to v4.7's noexcept is broken and fails to generate true for the case of compiler-generated constructors. + #else + #define EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE + #endif + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // *_noexcept_wrapper implements a workaround for VS2015 preview. A standards conforming noexcept operator allows variadic template expansion. + // There appears to be an issue with VS2015 preview that prevents variadic template expansion into a noexcept operator that is passed directly + // to a template parameter. + // + // The fix hoists the noexcept expression into a separate struct and caches the result of the expression. This result is then passed to integral_constant. 
+ // + // Example code from Clang libc++ + // template + // struct __libcpp_is_nothrow_constructible<[>is constructible*/true, /*is reference<]false, _Tp, _Args...> + // : public integral_constant()...))> { }; + // + + template + struct is_nothrow_constructible_helper_noexcept_wrapper + { static const bool value = noexcept(T(eastl::declval()...)); }; + + template + struct is_nothrow_constructible_helper; + + template + struct is_nothrow_constructible_helper + : public eastl::integral_constant::value> {}; + + template + struct is_nothrow_constructible_helper + : public eastl::integral_constant()))> {}; + + template + struct is_nothrow_constructible_helper + : public eastl::integral_constant {}; + + template + struct is_nothrow_constructible_helper + : public eastl::false_type {}; + + template + struct is_nothrow_constructible + : public eastl::is_nothrow_constructible_helper::value, T, Args...> {}; + + template + struct is_nothrow_constructible + : public eastl::is_nothrow_constructible_helper::value, T> {}; + #endif + + #define EASTL_DECLARE_IS_NOTHROW_CONSTRUCTIBLE(T, isNothrowConstructible) \ + namespace eastl{ \ + template <> struct is_nothrow_constructible : public eastl::integral_constant { }; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_nothrow_constructible_v = is_nothrow_constructible::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_default_constructible + // + // is_constructible::value is true. 
+ /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_default_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE + + template + struct is_default_constructible + : public eastl::is_constructible {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_default_constructible_v = is_default_constructible::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_nothrow_default_constructible + /////////////////////////////////////////////////////////////////////// + // TODO(rparolin): implement type-trait + + + + /////////////////////////////////////////////////////////////////////// + // is_copy_constructible + // + // is_constructible::value is true. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_copy_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE + + template + struct is_copy_constructible + : public eastl::is_constructible::type>::type> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_copy_constructible_v = is_copy_constructible::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_trivially_copy_constructible + // + // is_trivially_constructible::value is true. 
+ /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_trivially_copy_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE + + template + struct is_trivially_copy_constructible + : public eastl::is_trivially_constructible::type>::type> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_trivially_copy_constructible_v = is_trivially_copy_constructible::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_nothrow_copy_constructible + // + // is_nothrow_-constructible::value is true. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_nothrow_copy_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE + + template + struct is_nothrow_copy_constructible + : public is_nothrow_constructible::type>::type> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_nothrow_copy_constructible_v = is_nothrow_copy_constructible::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_move_constructible + // + // is_constructible::value is true. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_move_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_constructible_CONFORMANCE + + template + struct is_move_constructible + : public eastl::is_constructible::type> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_move_constructible_v = is_move_constructible::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_trivially_move_constructible + // + // is_trivially_constructible::value is true. + // T shall be a complete type, (possibly cv-qualified) void, or an + // array of unknown bound. 
+ /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_trivially_move_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_constructible_CONFORMANCE + + template + struct is_trivially_move_constructible + : public eastl::is_trivially_constructible::type> {}; + + #define EASTL_DECLARE_IS_TRIVIALLY_MOVE_CONSTRUCTIBLE(T, isTrivallyMoveConstructible) \ + namespace eastl{ \ + template <> struct is_trivially_move_constructible : public eastl::integral_constant { }; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_trivially_move_constructible_v = is_trivially_move_constructible::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_assignable + // + // The expression declval() = declval() is well-formed when treated as an unevaluated operand. + // Access checking is performed as if in a context unrelated to T and U. Only the validity of + // the immediate context of the assignment expression is considered. The compilation of the expression + // can result in side effects such as the instantiation of class template specializations and function + // template specializations, the generation of implicitly-defined functions, and so on. Such side + // effects are not in the "immediate context" and can result in the program being ill-formed. + // + // Note: + // This type trait has a misleading and counter-intuitive name. It does not indicate whether an instance + // of U can be assigned to an instance of T (e.g. t = u). Instead it indicates whether the assignment can be + // done after adding rvalue references to both, as in add_rvalue_reference::type = add_rvalue_reference::type. + // A counterintuitive result of this is that is_assignable::value == false. The is_copy_assignable + // trait indicates if a type can be assigned to its own type, though there isn't a standard C++ way to tell + // if an arbitrary type is assignable to another type. 
+ // http://stackoverflow.com/questions/19920213/why-is-stdis-assignable-counter-intuitive + // + // Note: + // A true is_assignable value doesn't guarantee that the expression is compile-able, the compiler checks + // only that the assignment matches before compilation. In particular, if you have templated operator= + // for a class, the compiler will always say is_assignable is true, regardless of what's being tested + // on the right hand side of the expression. It may actually turn out during compilation that the + // templated operator= fails to compile because in practice it doesn't accept every possible type for + // the right hand side of the expression. + // + // Expected results: + // is_assignable::value == false + // is_assignable::value == true + // is_assignable::value == false + // is_assignable::value == false + // is_assignable::value == false + // is_assignable::value == false + // is_assignable::value == false + // is_assignable::value == false + // is_assignable::value == false + // is_assignable::value == false + // is_assignable::value == false + // is_assignable::value == true + // is_assignable::value == false + // + // Note: + // Our implementation here yields different results than does the std::is_assignable from Dinkumware-based Standard + // Libraries, but yields similar results to the std::is_assignable from GCC's libstdc++ and clang's libc++. It may + // possibly be that the Dinkumware results are intentionally different for some practical purpose or because they + // represent the spirit or the Standard but not the letter of the Standard. 
+ // + /////////////////////////////////////////////////////////////////////// + #define EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE 1 + + template + struct is_assignable_helper + { + template + static eastl::no_type is(...); + + template + static decltype(eastl::declval() = eastl::declval(), eastl::yes_type()) is(int); + + static const bool value = (sizeof(is(0)) == sizeof(eastl::yes_type)); + }; + + template + struct is_assignable : + public eastl::integral_constant::value> {}; + + // The main purpose of this function is to help the non-conforming case above. + // Note: We don't handle const/volatile variations here, as we expect the user to + // manually specify any such variations via this macro. + // Example usage: + // EASTL_DECLARE_IS_ASSIGNABLE(int, int, false) + // + #define EASTL_DECLARE_IS_ASSIGNABLE(T, U, isAssignable) \ + namespace eastl { \ + template <> struct is_assignable : public eastl::integral_constant { }; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_assignable_v = is_assignable::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_lvalue_assignable + // + // This is an EASTL extension function which is like is_assignable but + // works for arbitrary assignments and not just rvalue assignments. + // This function provides an intuitive assignability test, as opposed + // to is_assignable. 
+ // + // Note: is_lvalue_assignable === is_copy_assignable + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_lvalue_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE + + template + struct is_lvalue_assignable + : public eastl::is_assignable::type, + typename eastl::add_lvalue_reference::type>::type> {}; + + #define EASTL_DECLARE_IS_LVALUE_ASSIGNABLE(T, U, isLvalueAssignable) \ + namespace eastl { \ + template <> struct is_lvalue_assignable : public eastl::integral_constant { }; \ + } + + + + /////////////////////////////////////////////////////////////////////// + // is_trivially_assignable + // + // is_assignable::value is true and the assignment, as defined by + // is_assignable, is known to call no operation that is not trivial (3.9, 12). + // T and U shall be complete types, (possibly cv-qualified) void, or + // arrays of unknown bound + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_trivially_assignable)) + #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE 1 + + template + struct is_trivially_assignable + : eastl::integral_constant {}; + + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) && (_MSC_VER >= 1800)) + #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE + + // This code path is attempting to work around the issue with VS2013 __is_trivially_assignable compiler intrinsic documented in the link + // below. todo: Re-evaluate in VS2014. 
+ // + // https://connect.microsoft.com/VisualStudio/feedback/details/806233/std-is-trivially-copyable-const-int-n-and-std-is-trivially-copyable-int-n-incorrect + + template + struct is_trivially_assignable_helper; + + template + struct is_trivially_assignable_helper : eastl::integral_constant{}; + + template + struct is_trivially_assignable_helper : false_type{}; + + template + struct is_trivially_assignable + : eastl::integral_constant::value, T, U >::value> {}; + + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_GNUC)) + #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE + + // Micrsoft (up till at least VS2012) and GCC have __has_trivial_assign, but it may not be identical with the goals of this type trait. + // The Microsoft type trait headers suggest that a future version of VS will have a __is_trivially_assignable intrinsic, but we + // need to come up with something in the meantime. To do: Re-evalulate this for VS2013+ when it becomes available. + template + struct is_trivially_assignable + : eastl::integral_constant::value && + (eastl::is_pod::type>::value || __has_trivial_assign(typename eastl::remove_reference::type))> {}; + #else + + #define EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE 0 // Generates false negatives. 
+ + template + struct is_trivially_assignable + : public eastl::false_type {}; + + template + struct is_trivially_assignable + : public eastl::integral_constant::value> {}; + + template + struct is_trivially_assignable + : public eastl::integral_constant::value> {}; + + template + struct is_trivially_assignable + : public eastl::integral_constant::value> {}; + + template + struct is_trivially_assignable + : public eastl::integral_constant::value> {}; + + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_trivially_assignable_v = is_trivially_assignable::value; + #endif + + // The main purpose of this function is to help the non-conforming case above. + // Note: We don't handle const/volatile variations here, as we expect the user to + // manually specify any such variations via this macro. + // Example usage: + // EASTL_DECLARE_IS_TRIVIALLY_ASSIGNABLE(int, int, false) + // + #define EASTL_DECLARE_IS_TRIVIALLY_ASSIGNABLE(T, U, isTriviallyAssignable) \ + namespace eastl { \ + template <> struct is_trivially_assignable : public eastl::integral_constant { }; \ + } + + + + /////////////////////////////////////////////////////////////////////// + // is_nothrow_assignable + // + // is_assignable::value is true and the assignment is known + // not to throw any exceptions (5.3.7). T and U shall be complete + // types, (possibly cv-qualified) void, or arrays of unknown bound. + // + /////////////////////////////////////////////////////////////////////// + + #if defined(_MSC_VER) && (_MSC_VER >= 1800) // VS2013+ + #define EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE 1 + + template + struct is_nothrow_assignable + : eastl::integral_constant {}; + + #elif defined(EA_COMPILER_NO_NOEXCEPT) || defined(__EDG_VERSION__) // EDG mis-compiles the conforming code below and so must be placed here. 
+ #define EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE 0 + + template + struct is_nothrow_assignable + : public false_type {}; + + // Note that the following are crippled in that they support only assignment of T types to other T types. + template + struct is_nothrow_assignable + : public eastl::integral_constant::value> {}; + + template + struct is_nothrow_assignable + : public eastl::integral_constant::value> {}; + + template + struct is_nothrow_assignable + : public eastl::integral_constant::value> {}; + + #else + #define EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE 1 + + template + struct is_nothrow_assignable_helper; + + template + struct is_nothrow_assignable_helper + : public false_type {}; + + template + struct is_nothrow_assignable_helper // Set to true if the assignment (same as is_assignable) cannot generate an exception. + : public eastl::integral_constant() = eastl::declval()) > + { + }; + + template + struct is_nothrow_assignable + : public eastl::is_nothrow_assignable_helper::value, T, U> + { + }; + #endif + + #define EASTL_DECLARE_IS_NOTHROW_ASSIGNABLE(T, isNothrowAssignable) \ + namespace eastl{ \ + template <> struct is_nothrow_assignable : public eastl::integral_constant { }; \ + template <> struct is_nothrow_assignable : public eastl::integral_constant { }; \ + template <> struct is_nothrow_assignable : public eastl::integral_constant { }; \ + template <> struct is_nothrow_assignable : public eastl::integral_constant { }; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_nothrow_assignable_v = is_nothrow_assignable::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_copy_assignable + // + // is_assignable::value is true. T shall be a complete type, + // (possibly cv -qualified) void, or an array of unknown bound. + // + // This (and not is_assignable) is the type trait you use to tell if you + // can do an arbitrary assignment. 
is_assignable tells if you can do an + // assignment specifically to an rvalue and not in general. + // http://stackoverflow.com/a/19921030/725009 + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_copy_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE + + template + struct is_copy_assignable + : public eastl::is_assignable::type, + typename eastl::add_lvalue_reference::type>::type> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_copy_assignable_v = is_copy_assignable::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_trivially_copy_assignable + // + // is_trivially_assignable::value is true. T shall be a + // complete type, (possibly cv-qualified) void, or an array of unknown bound. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_trivially_copy_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE + +#if EASTL_TYPE_TRAIT_is_trivially_copy_assignable_CONFORMANCE + template + struct is_trivially_copy_assignable + : public eastl::is_trivially_assignable::type, + typename eastl::add_lvalue_reference::type>::type> {}; +#else + template + struct is_trivially_copy_assignable + : public integral_constant::value || eastl::is_pod::value || eastl::is_trivially_assignable::type, typename eastl::add_lvalue_reference::type>::type>::value + > {}; +#endif + + #define EASTL_DECLARE_IS_TRIVIALLY_COPY_ASSIGNABLE(T, isTriviallyCopyAssignable) \ + namespace eastl { \ + template <> struct is_trivially_copy_assignable : public eastl::integral_constant { }; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_trivially_copy_assignable_v = is_trivially_copy_assignable::value; + #endif + + /////////////////////////////////////////////////////////////////////// + // is_nothrow_copy_assignable + // + 
/////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_nothrow_copy_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE + + template + struct is_nothrow_copy_assignable + : public eastl::is_nothrow_assignable::type, + typename eastl::add_lvalue_reference::type>::type> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_nothrow_copy_assignable_v = is_nothrow_copy_assignable::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_move_assignable + // + // is_assignable::value is true. T shall be a complete type, + // (possibly cv -qualified) void, or an array of unknown bound. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_move_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_assignable_CONFORMANCE + + template + struct is_move_assignable + : public eastl::is_assignable::type, + typename eastl::add_rvalue_reference::type> {}; + + #define EASTL_DECLARE_IS_MOVE_ASSIGNABLE(T, isMoveAssignable) \ + namespace eastl{ \ + template <> struct is_move_assignable : public eastl::integral_constant { }; \ + template <> struct is_move_assignable : public eastl::integral_constant { }; \ + template <> struct is_move_assignable : public eastl::integral_constant { }; \ + template <> struct is_move_assignable : public eastl::integral_constant { }; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_move_assignable_v = is_move_assignable::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_trivially_move_assignable + // + // is_trivially_-assignable::value is true. T shall be a complete type, + // (possibly cv-qualified) void, or an array of unknown bound. 
+ // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_trivially_move_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_trivially_assignable_CONFORMANCE + + template + struct is_trivially_move_assignable + : public eastl::is_trivially_assignable::type, + typename eastl::add_rvalue_reference::type> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_trivially_move_assignable_v = is_trivially_move_assignable::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_nothrow_move_assignable + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_nothrow_move_assignable_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_assignable_CONFORMANCE + + template + struct is_nothrow_move_assignable + : public eastl::is_nothrow_assignable::type, + typename eastl::add_rvalue_reference::type> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_nothrow_move_assignable_v = is_nothrow_move_assignable::value; + #endif + + /////////////////////////////////////////////////////////////////////// + // is_destructible + // + // For a complete type T and given + // template + // struct test { U u; }; + // test::~test() is not deleted (C++11 "= delete"). + // T shall be a complete type, (possibly cv-qualified) void, or an array of unknown bound. + // + /////////////////////////////////////////////////////////////////////// + + #if 0 // defined(_MSC_VER) && (_MSC_VER >= 1800) // VS2013+ -- Disabled due to __is_destructible being broken in VC++ versions up to at least VS2013. 
A ticket will be submitted for this + #define EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE 1 + + template + struct is_destructible + : integral_constant {}; + + #elif defined(EA_COMPILER_NO_DECLTYPE) || defined(EA_COMPILER_NO_FUNCTION_TEMPLATE_DEFAULT_ARGS) || defined(_MSC_VER) || defined(__EDG_VERSION__) // VS2012 and EDG mis-compile the conforming code below and so must be placed here. + #define EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE 0 + + // This implementation works for almost all cases, with the primary exception being the + // case that the user declared the destructor as deleted. To deal with that case the + // user needs to use EASTL_DECLARE_IS_NOT_DESTRUCTIBLE to cause is_destructible::value + // to be false. + + template + struct is_destructible + : public eastl::integral_constant::value && + !eastl::is_void::value && + !eastl::is_function::value && + !eastl::is_abstract::value> {}; + #else + #define EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE 1 + + template + struct destructible_test_helper{ U u; }; + + template + eastl::false_type destructible_test_function(...); + + template >().~destructible_test_helper())> + eastl::true_type destructible_test_function(int); + + template ::value || // Exclude these types from being considered destructible. + eastl::is_void::value || + eastl::is_function::value || + eastl::is_abstract::value> + struct is_destructible_helper + : public eastl::identity(0))>::type {}; // Need to wrap decltype with identity because some compilers otherwise don't like the bare decltype usage. 
+ + template + struct is_destructible_helper + : public eastl::false_type {}; + + template + struct is_destructible + : public is_destructible_helper {}; + + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_destructible_v = is_destructible::value; + #endif + + #define EASTL_DECLARE_IS_DESTRUCTIBLE(T, isDestructible) \ + namespace eastl{ \ + template <> struct is_destructible : public eastl::integral_constant{}; \ + template <> struct is_destructible : public eastl::integral_constant{}; \ + template <> struct is_destructible : public eastl::integral_constant{}; \ + template <> struct is_destructible : public eastl::integral_constant{}; \ + } + + + + /////////////////////////////////////////////////////////////////////// + // is_trivially_destructible + // + // is_destructible::value is true and the indicated destructor is + // known to be trivial. T shall be a complete type, (possibly cv-qualified) + // void, or an array of unknown bound. + // + // A destructor is trivial if it is not user-provided and if: + // - the destructor is not virtual, + // - all of the direct base classes of its class have trivial destructors, and + // - for all of the non-static data members of its class that are of + // class type (or array thereof), each such class has a trivial destructor. + // + /////////////////////////////////////////////////////////////////////// + + #if 0 // defined(_MSC_VER) && (_MSC_VER >= 1800) // VS2013+ -- Disabled due to __is_trivially_destructible being broken in VC++ versions up to at least VS2013. 
A ticket will be submitted for this + #define EASTL_TYPE_TRAIT_is_trivially_destructible_CONFORMANCE 1 + + template + struct is_trivially_destructible + : integral_constant {}; + + #elif EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)) + #define EASTL_TYPE_TRAIT_is_trivially_destructible_CONFORMANCE EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE + + template + struct is_trivially_destructible // Can't use just __has_trivial_destructor(T) because some compilers give it slightly different meaning, and are just plain broken, such as VC++'s __has_trivial_destructor, which says false for fundamental types. + : public integral_constant::value && ((__has_trivial_destructor(T) && !eastl::is_hat_type::value)|| eastl::is_scalar::type>::value)> {}; + + #else + #define EASTL_TYPE_TRAIT_is_trivially_destructible_CONFORMANCE 0 + + template + struct is_trivially_destructible_helper + : public integral_constant::value || eastl::is_scalar::value || eastl::is_reference::value) && !eastl::is_void::value> {}; + + template + struct is_trivially_destructible + : public eastl::is_trivially_destructible_helper::type> {}; + #endif + + #define EASTL_DECLARE_IS_TRIVIALLY_DESTRUCTIBLE(T, isTriviallyDestructible) \ + namespace eastl{ \ + template <> struct is_trivially_destructible : public eastl::integral_constant{}; \ + template <> struct is_trivially_destructible : public eastl::integral_constant{}; \ + template <> struct is_trivially_destructible : public eastl::integral_constant{}; \ + template <> struct is_trivially_destructible : public eastl::integral_constant{}; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_trivially_destructible_v = is_trivially_destructible::value; + #endif + + + + + /////////////////////////////////////////////////////////////////////// + // is_nothrow_destructible + // + // is_destructible::value is true and the indicated destructor is + // known not to 
throw any exceptions (5.3.7). T shall be a complete type, + // (possibly cv-qualified) void, or an array of unknown bound. + // + /////////////////////////////////////////////////////////////////////// + + #if 0 // defined(_MSC_VER) && (_MSC_VER >= 1800) // VS2013+ -- Disabled due to __is_nothrow_destructible being broken in VC++ versions up to at least VS2013. A ticket will be submitted for this + #define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE ((_MSC_VER >= 1900) ? 1 : 0) // VS2013 (1800) doesn't support noexcept and so can't support all usage of this properly (in particular default exception specifications defined in [C++11 Standard, 15.4 paragraph 14]. + + template + struct is_nothrow_destructible + : integral_constant {}; + + #elif defined(EA_COMPILER_NO_NOEXCEPT) + #define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE 0 + + template + struct is_nothrow_destructible_helper + : public eastl::integral_constant::value || eastl::is_reference::value> {}; + + template + struct is_nothrow_destructible + : public eastl::is_nothrow_destructible_helper::type> {}; + + #else + #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION < 4008) + #define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE 0 // GCC up to v4.7's noexcept is broken and fails to generate true for the case of compiler-generated destructors. + #else + #define EASTL_TYPE_TRAIT_is_nothrow_destructible_CONFORMANCE EASTL_TYPE_TRAIT_is_destructible_CONFORMANCE + #endif + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // *_noexcept_wrapper implements a workaround for VS2015. A standards conforming noexcept operator allows variadic template expansion. + // There appears to be an issue with VS2015 that prevents variadic template expansion into a noexcept operator that is passed directly + // to a template parameter. 
+ // + // The fix hoists the noexcept expression into a separate struct and caches the result of the expression. This result is then passed to integral_constant. + // + // Example code from Clang libc++ + // template + // struct __libcpp_is_nothrow_constructible<[>is constructible*/true, /*is reference<]false, _Tp, _Args...> + // : public integral_constant()...))> { }; + // + + template + struct is_nothrow_destructible_helper_noexcept_wrapper + { static const bool value = noexcept(eastl::declval().~T()); }; + + template + struct is_nothrow_destructible_helper; + + template + struct is_nothrow_destructible_helper + : public eastl::false_type {}; + + template + struct is_nothrow_destructible_helper // If the expression T::~T is a noexcept expression then it's nothrow. + : public eastl::integral_constant::value > {}; + + template + struct is_nothrow_destructible // A type needs to at least be destructible before it could be nothrow destructible. + : public eastl::is_nothrow_destructible_helper::value> {}; + + template // An array is nothrow destructible if its element type is nothrow destructible. + struct is_nothrow_destructible // To consider: Replace this with a remove_all_extents pathway. + : public eastl::is_nothrow_destructible {}; + + template + struct is_nothrow_destructible // A reference type cannot throw while being destructed. It's just a reference. + : public eastl::true_type {}; + + template + struct is_nothrow_destructible // An rvalue reference type cannot throw while being destructed. 
+ : public eastl::true_type {}; + + #endif + + #define EASTL_DECLARE_IS_NOTHROW_DESTRUCTIBLE(T, isNoThrowDestructible) \ + namespace eastl{ \ + template <> struct is_nothrow_destructible { static const bool value = isNoThrowDestructible; }; \ + template <> struct is_nothrow_destructible { static const bool value = isNoThrowDestructible; }; \ + template <> struct is_nothrow_destructible { static const bool value = isNoThrowDestructible; }; \ + template <> struct is_nothrow_destructible { static const bool value = isNoThrowDestructible; }; \ + } + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_nothrow_destructible_v = is_nothrow_destructible::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_nothrow_default_constructible + // + /////////////////////////////////////////////////////////////////////// + #define EASTL_TYPE_TRAIT_is_nothrow_default_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE + + template + struct is_nothrow_default_constructible + : public eastl::is_nothrow_constructible {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_nothrow_default_constructible_v = is_nothrow_default_constructible::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_nothrow_move_constructible + // + /////////////////////////////////////////////////////////////////////// + #define EASTL_TYPE_TRAIT_is_nothrow_move_constructible_CONFORMANCE EASTL_TYPE_TRAIT_is_nothrow_constructible_CONFORMANCE + + template + struct is_nothrow_move_constructible + : public eastl::is_nothrow_constructible::type> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_nothrow_move_constructible_v = is_nothrow_move_constructible::value; + #endif + + +} // namespace eastl + + +#endif // Header include guard diff --git a/include/EASTL/internal/type_properties.h 
b/include/EASTL/internal/type_properties.h new file mode 100644 index 0000000..5276f87 --- /dev/null +++ b/include/EASTL/internal/type_properties.h @@ -0,0 +1,380 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_TYPE_PROPERTIES_H +#define EASTL_INTERNAL_TYPE_PROPERTIES_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include +#include + + +namespace eastl +{ + + + /////////////////////////////////////////////////////////////////////// + // underlying_type + // + // Defines a member typedef type of type that is the underlying type for the enumeration T. + // Requires explicit compiler support to implement. + // + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && ((defined(_MSC_VER) && (_MSC_VER >= 1700)) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4007)) || defined(EA_COMPILER_CLANG)) // VS2012+ + #define EASTL_TYPE_TRAIT_underlying_type_CONFORMANCE 1 // underlying_type is conforming. + + template + struct underlying_type{ typedef __underlying_type(T) type; }; + + #else + #define EASTL_TYPE_TRAIT_underlying_type_CONFORMANCE 0 + + template + struct underlying_type{ typedef int type; }; // This is of course wrong, but we emulate libstdc++ and typedef it as int. + #endif + + #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + using underlying_type_t = typename underlying_type::type; + #endif + + + /////////////////////////////////////////////////////////////////////// + // has_unique_object_representations + // + // If T is TriviallyCopyable and if any two objects of type T with the same + // value have the same object representation, value is true. For any other + // type, value is false. 
+ // + // http://en.cppreference.com/w/cpp/types/has_unique_object_representations + /////////////////////////////////////////////////////////////////////// + #if EASTL_HAS_UNIQUE_OBJECT_REPRESENTATIONS_AVAILABLE + #define EASTL_TYPE_TRAIT_has_unique_object_representations_CONFORMANCE 1 + + template + struct has_unique_object_representations + : public integral_constant>)> + { + }; + + #else + #define EASTL_TYPE_TRAIT_has_unique_object_representations_CONFORMANCE 0 + + template + struct has_unique_object_representations + : public integral_constant>>> // only integral types (floating point types excluded). + { + }; + + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR auto has_unique_object_representations_v = has_unique_object_representations::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_signed + // + // is_signed::value == true if and only if T is one of the following types: + // [const] [volatile] char (maybe) + // [const] [volatile] signed char + // [const] [volatile] short + // [const] [volatile] int + // [const] [volatile] long + // [const] [volatile] long long + // [const] [volatile] float + // [const] [volatile] double + // [const] [volatile] long double + // + // Used to determine if a integral type is signed or unsigned. + // Given that there are some user-made classes which emulate integral + // types, we provide the EASTL_DECLARE_SIGNED macro to allow you to + // set a given class to be identified as a signed type. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_signed_CONFORMANCE 1 // is_signed is conforming. 
+ + template struct is_signed_helper : public false_type{}; + + template <> struct is_signed_helper : public true_type{}; + template <> struct is_signed_helper : public true_type{}; + template <> struct is_signed_helper : public true_type{}; + template <> struct is_signed_helper : public true_type{}; + template <> struct is_signed_helper : public true_type{}; + template <> struct is_signed_helper : public true_type{}; + template <> struct is_signed_helper : public true_type{}; + template <> struct is_signed_helper : public true_type{}; + + #if (CHAR_MAX == SCHAR_MAX) + template <> struct is_signed_helper : public true_type{}; + #endif + #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type... + #if defined(__WCHAR_MAX__) && ((__WCHAR_MAX__ == 2147483647) || (__WCHAR_MAX__ == 32767)) // GCC defines __WCHAR_MAX__ for most platforms. + template <> struct is_signed_helper : public true_type{}; + #endif + #endif + + template + struct is_signed : public eastl::is_signed_helper::type>{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_signed_v = is_signed::value; + #endif + + #define EASTL_DECLARE_SIGNED(T) \ + namespace eastl{ \ + template <> struct is_signed : public true_type{}; \ + template <> struct is_signed : public true_type{}; \ + template <> struct is_signed : public true_type{}; \ + template <> struct is_signed : public true_type{}; \ + } + + + + /////////////////////////////////////////////////////////////////////// + // is_unsigned + // + // is_unsigned::value == true if and only if T is one of the following types: + // [const] [volatile] char (maybe) + // [const] [volatile] unsigned char + // [const] [volatile] unsigned short + // [const] [volatile] unsigned int + // [const] [volatile] unsigned long + // [const] [volatile] unsigned long long + // + // Used to determine if a integral type is signed or unsigned. 
+ // Given that there are some user-made classes which emulate integral + // types, we provide the EASTL_DECLARE_UNSIGNED macro to allow you to + // set a given class to be identified as an unsigned type. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_unsigned_CONFORMANCE 1 // is_unsigned is conforming. + + template struct is_unsigned_helper : public false_type{}; + + template <> struct is_unsigned_helper : public true_type{}; + template <> struct is_unsigned_helper : public true_type{}; + template <> struct is_unsigned_helper : public true_type{}; + template <> struct is_unsigned_helper : public true_type{}; + template <> struct is_unsigned_helper : public true_type{}; + + #if (CHAR_MAX == UCHAR_MAX) + template <> struct is_unsigned_helper : public true_type{}; + #endif + #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type... + #if defined(_MSC_VER) || (defined(__WCHAR_MAX__) && ((__WCHAR_MAX__ == 4294967295U) || (__WCHAR_MAX__ == 65535))) // GCC defines __WCHAR_MAX__ for most platforms. + template <> struct is_unsigned_helper : public true_type{}; + #endif + #endif + + template + struct is_unsigned : public eastl::is_unsigned_helper::type>{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_unsigned_v = is_unsigned::value; + #endif + + #define EASTL_DECLARE_UNSIGNED(T) \ + namespace eastl{ \ + template <> struct is_unsigned : public true_type{}; \ + template <> struct is_unsigned : public true_type{}; \ + template <> struct is_unsigned : public true_type{}; \ + template <> struct is_unsigned : public true_type{}; \ + } + + + + /////////////////////////////////////////////////////////////////////// + // alignment_of + // + // alignment_of::value is an integral value representing, in bytes, + // the memory alignment of objects of type T. + // + // alignment_of may only be applied to complete types. 
+ // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_alignment_of_CONFORMANCE 1 // alignment_of is conforming. + + template + struct alignment_of_value{ static const size_t value = EASTL_ALIGN_OF(T); }; + + template + struct alignment_of : public integral_constant::value>{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR size_t alignment_of_v = alignment_of::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_aligned + // + // Defined as true if the type has alignment requirements greater + // than default alignment, which is taken to be 8. This allows for + // doing specialized object allocation and placement for such types. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_aligned_CONFORMANCE 1 // is_aligned is conforming. + + template + struct is_aligned_value{ static const bool value = (EASTL_ALIGN_OF(T) > 8); }; + + template + struct is_aligned : public integral_constant::value>{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR size_t is_aligned_v = is_aligned::value; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // rank + // + // rank::value is an integral value representing the number of + // dimensions possessed by an array type. For example, given a + // multi-dimensional array type T[M][N], std::tr1::rank::value == 2. + // For a given non-array type T, std::tr1::rank::value == 0. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_rank_CONFORMANCE 1 // rank is conforming. 
+ + template + struct rank : public eastl::integral_constant {}; + + template + struct rank : public eastl::integral_constant::value + 1> {}; + + template + struct rank : public eastl::integral_constant::value + 1> {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR auto rank_v = rank::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_base_of + // + // Given two (possibly identical) types Base and Derived, is_base_of::value == true + // if and only if Base is a direct or indirect base class of Derived, + // or Base and Derived are the same type. + // + // is_base_of may only be applied to complete types. + // + /////////////////////////////////////////////////////////////////////// + + #if EASTL_COMPILER_INTRINSIC_TYPE_TRAITS_AVAILABLE && (defined(_MSC_VER) || defined(EA_COMPILER_GNUC) || (defined(EA_COMPILER_CLANG) && EA_COMPILER_HAS_FEATURE(is_base_of))) + #define EASTL_TYPE_TRAIT_is_base_of_CONFORMANCE 1 // is_base_of is conforming. + + template + struct is_base_of : public eastl::integral_constant::value>{}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EASTL_CPP17_INLINE_VARIABLE EA_CONSTEXPR bool is_base_of_v = is_base_of::value; + #endif + #else + // Not implemented yet. + // This appears to be implementable. + #define EASTL_TYPE_TRAIT_is_base_of_CONFORMANCE 0 + #endif + + + + /////////////////////////////////////////////////////////////////////// + // is_lvalue_reference + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_lvalue_reference_CONFORMANCE 1 // is_lvalue_reference is conforming. 
+ + template struct is_lvalue_reference : public eastl::false_type {}; + template struct is_lvalue_reference : public eastl::true_type {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_lvalue_reference_v = is_lvalue_reference::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // is_rvalue_reference + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_is_rvalue_reference_CONFORMANCE 1 // is_rvalue_reference is conforming. + + template struct is_rvalue_reference : public eastl::false_type {}; + template struct is_rvalue_reference : public eastl::true_type {}; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR bool is_rvalue_reference_v = is_rvalue_reference::value; + #endif + + + /////////////////////////////////////////////////////////////////////// + // result_of + // + /////////////////////////////////////////////////////////////////////// + #define EASTL_TYPE_TRAIT_result_of_CONFORMANCE 1 // result_of is conforming. + + template struct result_of; + + template + struct result_of + { typedef decltype(eastl::declval()(eastl::declval()...)) type; }; + + + // result_of_t is the C++14 using typedef for typename result_of::type. + // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers. + #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + #define EASTL_RESULT_OF_T(T) typename result_of::type + #else + template + using result_of_t = typename result_of::type; + #define EASTL_RESULT_OF_T(T) result_of_t + #endif + + + /////////////////////////////////////////////////////////////////////// + // has_equality + // + // Determines if the specified type can be tested for equality. 
+ // + /////////////////////////////////////////////////////////////////////// + template > + struct has_equality : eastl::false_type {}; + + template + struct has_equality() == eastl::declval())>> : eastl::true_type + { + }; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + EA_CONSTEXPR auto has_equality_v = has_equality::value; + #endif + +} // namespace eastl + + +#endif // Header include guard diff --git a/include/EASTL/internal/type_transformations.h b/include/EASTL/internal/type_transformations.h new file mode 100644 index 0000000..cffa65e --- /dev/null +++ b/include/EASTL/internal/type_transformations.h @@ -0,0 +1,606 @@ +///////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +///////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTERNAL_TYPE_TRANFORMATIONS_H +#define EASTL_INTERNAL_TYPE_TRANFORMATIONS_H + + +#include +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once +#endif + +#include + + +namespace eastl +{ + + /////////////////////////////////////////////////////////////////////// + // add_const + // + // Add const to a type. + // + // Tor a given type T, add_const::type is equivalent to T + // const if is_const::value == false, and + // - is_void::value == true, or + // - is_object::value == true. + // + // Otherwise, add_const::type is equivalent to T. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_add_const_CONFORMANCE 1 // add_const is conforming. + + template ::value || eastl::is_reference::value || eastl::is_function::value> + struct add_const_helper + { typedef T type; }; + + template + struct add_const_helper + { typedef const T type; }; + + template + struct add_const + { typedef typename eastl::add_const_helper::type type; }; + + // add_const_t is the C++17 using typedef for typename add_const::type. 
+ // We provide a backwards-compatible means to access it through a macro for pre-C++11 compilers. + #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + #define EASTL_ADD_CONST_T(T) typename add_const::type + #else + template + using add_const_t = typename add_const::type; + #define EASTL_ADD_CONST_T(T) add_const_t + #endif + + + /////////////////////////////////////////////////////////////////////// + // add_volatile + // + // Add volatile to a type. + // + // For a given type T, add_volatile::type is equivalent to T volatile + // if is_volatile::value == false, and + // - is_void::value == true, or + // - is_object::value == true. + // + // Otherwise, add_volatile::type is equivalent to T. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_add_volatile_CONFORMANCE 1 // add_volatile is conforming. + + template ::value || eastl::is_reference::value || eastl::is_function::value> + struct add_volatile_helper + { typedef T type; }; + + template + struct add_volatile_helper + { typedef volatile T type; }; + + template struct add_volatile + { typedef typename eastl::add_volatile_helper::type type; }; + + template using add_volatile_t = typename add_volatile::type; + + + /////////////////////////////////////////////////////////////////////// + // add_cv + // + // The add_cv transformation trait adds const and volatile qualification + // to the type to which it is applied. For a given type T, + // add_volatile::type is equivalent to add_const::type>::type. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_add_cv_CONFORMANCE 1 // add_cv is conforming. + + template + struct add_cv + { + typedef typename add_const::type>::type type; + }; + + template using add_cv_t = typename add_cv::type; + + + /////////////////////////////////////////////////////////////////////// + // make_signed + // + // Used to convert an integral type to its signed equivalent, if not already. 
+ // T shall be a (possibly const and/or volatile-qualified) integral type + // or enumeration but not a bool type.; + // + // The user can define their own make_signed overrides for their own + // types by making a template specialization like done below and adding + // it to the user's code. + /////////////////////////////////////////////////////////////////////// + + // To do: This implementation needs to be updated to support C++11 conformance (recognition of enums) and + // to support volatile-qualified types. It will probably be useful to have it fail for unsupported types. + #define EASTL_TYPE_TRAIT_make_signed_CONFORMANCE 0 // make_signed is only partially conforming. + + template struct make_signed { typedef T type; }; + + template <> struct make_signed { typedef signed char type; }; + template <> struct make_signed { typedef const signed char type; }; + template <> struct make_signed { typedef signed short type; }; + template <> struct make_signed { typedef const signed short type; }; + template <> struct make_signed { typedef signed int type; }; + template <> struct make_signed { typedef const signed int type; }; + template <> struct make_signed { typedef signed long type; }; + template <> struct make_signed { typedef const signed long type; }; + template <> struct make_signed { typedef signed long long type; }; + template <> struct make_signed { typedef const signed long long type; }; + + #if (defined(CHAR_MAX) && defined(UCHAR_MAX) && (CHAR_MAX == UCHAR_MAX)) // If char is unsigned, we convert char to signed char. However, if char is signed then make_signed returns char itself and not signed char. + template <> struct make_signed { typedef signed char type; }; + template <> struct make_signed { typedef signed char type; }; + #endif + + #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type... + #if (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 4294967295U)) // If wchar_t is a 32 bit unsigned value... 
+ template<> + struct make_signed + { typedef int32_t type; }; + #elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 65535)) // If wchar_t is a 16 bit unsigned value... + template<> + struct make_signed + { typedef int16_t type; }; + #elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 255)) // If wchar_t is an 8 bit unsigned value... + template<> + struct make_signed + { typedef int8_t type; }; + #endif + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + using make_signed_t = typename make_signed::type; + #endif + + + /////////////////////////////////////////////////////////////////////// + // add_signed + // + // This is not a C++11 type trait, and is here for backwards compatibility + // only. Use the C++11 make_unsigned type trait instead. + /////////////////////////////////////////////////////////////////////// + + template + struct add_signed : public make_signed + { typedef typename eastl::make_signed::type type; }; + + + + + /////////////////////////////////////////////////////////////////////// + // make_unsigned + // + // Used to convert an integral type to its signed equivalent, if not already. + // T shall be a (possibly const and/or volatile-qualified) integral type + // or enumeration but not a bool type.; + // + // The user can define their own make_signed overrides for their own + // types by making a template specialization like done below and adding + // it to the user's code. + /////////////////////////////////////////////////////////////////////// + + // To do: This implementation needs to be updated to support C++11 conformance (recognition of enums) and + // to support volatile-qualified types. It will probably be useful to have it fail for unsupported types. + #define EASTL_TYPE_TRAIT_make_unsigned_CONFORMANCE 0 // make_unsigned is only partially conforming. 
+ + template struct make_unsigned { typedef T type; }; + + template <> struct make_unsigned { typedef unsigned char type; }; + template <> struct make_unsigned { typedef const unsigned char type; }; + template <> struct make_unsigned { typedef unsigned short type; }; + template <> struct make_unsigned { typedef const unsigned short type; }; + template <> struct make_unsigned { typedef unsigned int type; }; + template <> struct make_unsigned { typedef const unsigned int type; }; + template <> struct make_unsigned { typedef unsigned long type; }; + template <> struct make_unsigned { typedef const unsigned long type; }; + template <> struct make_unsigned { typedef unsigned long long type; }; + template <> struct make_unsigned { typedef const unsigned long long type; }; + + #if (CHAR_MIN < 0) // If char is signed, we convert char to unsigned char. However, if char is unsigned then make_unsigned returns char itself and not unsigned char. + template <> struct make_unsigned { typedef unsigned char type; }; + template <> struct make_unsigned { typedef unsigned char type; }; + #endif + + #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type... + #if (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ != 4294967295U)) // If wchar_t is a 32 bit signed value... + template<> + struct make_unsigned + { typedef uint32_t type; }; + #elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ != 65535)) // If wchar_t is a 16 bit signed value... + template<> + struct make_unsigned + { typedef uint16_t type; }; + #elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ != 255)) // If wchar_t is an 8 bit signed value... 
+ template<> + struct make_unsigned + { typedef uint8_t type; }; + #endif + #endif + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + using make_unsigned_t = typename make_unsigned::type; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // add_unsigned + // + // This is not a C++11 type trait, and is here for backwards compatibility + // only. Use the C++11 make_unsigned type trait instead. + // + // Adds unsigned-ness to the given type. + // Modifies only integral values; has no effect on others. + // add_unsigned::type is unsigned int + // add_unsigned::type is unsigned int + // + /////////////////////////////////////////////////////////////////////// + + template + struct add_unsigned : public make_unsigned + { typedef typename eastl::make_signed::type type; }; + + + + /////////////////////////////////////////////////////////////////////// + // remove_pointer + // + // Remove pointer from a type. + // + // The remove_pointer transformation trait removes top-level indirection + // by pointer (if any) from the type to which it is applied. Pointers to + // members are not affected. For a given type T, remove_pointer::type + // is equivalent to T. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_remove_pointer_CONFORMANCE 1 + + template struct remove_pointer { typedef T type; }; + template struct remove_pointer { typedef T type; }; + template struct remove_pointer { typedef T type; }; + template struct remove_pointer { typedef T type; }; + template struct remove_pointer { typedef T type; }; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + using remove_pointer_t = typename remove_pointer::type; + #endif + + + /////////////////////////////////////////////////////////////////////// + // add_pointer + // + // Add pointer to a type. + // Provides the member typedef type which is the type T*. 
If T is a + // reference type, then type is a pointer to the referred type. + // + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_add_pointer_CONFORMANCE 1 + + template + struct add_pointer { typedef typename eastl::remove_reference::type* type; }; + + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + using add_pointer_t = typename add_pointer::type; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // remove_extent + // + // The remove_extent transformation trait removes a dimension from an array. + // For a given non-array type T, remove_extent::type is equivalent to T. + // For a given array type T[N], remove_extent::type is equivalent to T. + // For a given array type const T[N], remove_extent::type is equivalent to const T. + // For example, given a multi-dimensional array type T[M][N], remove_extent::type is equivalent to T[N]. + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_remove_extent_CONFORMANCE 1 // remove_extent is conforming. + + template struct remove_extent { typedef T type; }; + template struct remove_extent { typedef T type; }; + template struct remove_extent { typedef T type; }; + + #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + using remove_extent_t = typename remove_extent::type; + #endif + + + /////////////////////////////////////////////////////////////////////// + // remove_all_extents + // + // The remove_all_extents transformation trait removes all dimensions from an array. + // For a given non-array type T, remove_all_extents::type is equivalent to T. + // For a given array type T[N], remove_all_extents::type is equivalent to T. + // For a given array type const T[N], remove_all_extents::type is equivalent to const T. + // For example, given a multi-dimensional array type T[M][N], remove_all_extents::type is equivalent to T. 
+ /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_remove_all_extents_CONFORMANCE 1 // remove_all_extents is conforming. + + template struct remove_all_extents { typedef T type; }; + template struct remove_all_extents { typedef typename eastl::remove_all_extents::type type; }; + template struct remove_all_extents { typedef typename eastl::remove_all_extents::type type; }; + + #if !defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + template + using remove_all_extents_t = typename remove_all_extents::type; + #endif + + + + /////////////////////////////////////////////////////////////////////// + // aligned_storage + // + // The aligned_storage transformation trait provides a type that is + // suitably aligned to store an object whose size is does not exceed length + // and whose alignment is a divisor of alignment. When using aligned_storage, + // length must be non-zero, and alignment must >= alignment_of::value + // for some type T. We require the alignment value to be a power-of-two. + // + // GCC versions prior to 4.4 don't properly support this with stack-based + // variables. The EABase EA_ALIGN_MAX_AUTOMATIC define identifies the + // extent to which stack (automatic) variables can be aligned for the + // given compiler/platform combination. + // + // Example usage: + // aligned_storage::type widget; + // Widget* pWidget = new(&widget) Widget; + // + // aligned_storage::type widgetAlignedTo64; + // Widget* pWidget = new(&widgetAlignedTo64) Widget; + // + // aligned_storage::type widgetArray[37]; + // Widget* pWidgetArray = new(widgetArray) Widget[37]; + /////////////////////////////////////////////////////////////////////// + + #define EASTL_TYPE_TRAIT_aligned_storage_CONFORMANCE 1 // aligned_storage is conforming. + + #if defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION >= 4008) + // New versions of GCC do not support using 'alignas' with a value greater than 128. 
+ // However, this code using the GNU standard alignment attribute works properly. + template + struct aligned_storage + { + struct type { unsigned char mCharData[N]; } EA_ALIGN(Align); + }; + #elif (EABASE_VERSION_N >= 20040) && !defined(EA_COMPILER_NO_ALIGNAS) // If C++11 alignas is supported... + template + struct aligned_storage + { + typedef struct { + alignas(Align) unsigned char mCharData[N]; + } type; + }; + + #elif defined(EA_COMPILER_MSVC) || (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION < 4007)) || defined(EA_COMPILER_EDG) // At some point GCC fixed their attribute(align) to support non-literals, though it's not clear what version aside from being no later than 4.7 and no earlier than 4.2. + // Some compilers don't allow you to to use EA_ALIGNED with anything by a numeric literal, + // so we can't use the simpler code like we do further below for other compilers. We support + // only up to so much of an alignment value here. + template + struct aligned_storage_helper { struct type{ unsigned char mCharData[N]; }; }; + + template struct aligned_storage_helper { struct EA_ALIGN( 2) type{ unsigned char mCharData[N]; }; }; + template struct aligned_storage_helper { struct EA_ALIGN( 4) type{ unsigned char mCharData[N]; }; }; + template struct aligned_storage_helper { struct EA_ALIGN( 8) type{ unsigned char mCharData[N]; }; }; + template struct aligned_storage_helper { struct EA_ALIGN( 16) type{ unsigned char mCharData[N]; }; }; + template struct aligned_storage_helper { struct EA_ALIGN( 32) type{ unsigned char mCharData[N]; }; }; + template struct aligned_storage_helper { struct EA_ALIGN( 64) type{ unsigned char mCharData[N]; }; }; + template struct aligned_storage_helper { struct EA_ALIGN( 128) type{ unsigned char mCharData[N]; }; }; + template struct aligned_storage_helper { struct EA_ALIGN( 256) type{ unsigned char mCharData[N]; }; }; + template struct aligned_storage_helper { struct EA_ALIGN( 512) type{ unsigned char mCharData[N]; }; }; + template 
struct aligned_storage_helper { struct EA_ALIGN(1024) type{ unsigned char mCharData[N]; }; }; + template struct aligned_storage_helper { struct EA_ALIGN(2048) type{ unsigned char mCharData[N]; }; }; + template struct aligned_storage_helper { struct EA_ALIGN(4096) type{ unsigned char mCharData[N]; }; }; + + template + struct aligned_storage + { + typedef typename aligned_storage_helper::type type; + }; + + #else + template + struct aligned_storage + { + union type + { + unsigned char mCharData[N]; + struct EA_ALIGN(Align) mStruct{ }; + }; + }; + #endif + + #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + #define EASTL_ALIGNED_STORAGE_T(N, Align) typename eastl::aligned_storage_t::type + #else + template + using aligned_storage_t = typename aligned_storage::type; + #define EASTL_ALIGNED_STORAGE_T(N, Align) eastl::aligned_storage_t + #endif + + + + /////////////////////////////////////////////////////////////////////// + // aligned_union + // + // The member typedef type shall be a POD type suitable for use as + // uninitialized storage for any object whose type is listed in Types; + // its size shall be at least Len. The static member alignment_value + // shall be an integral constant of type std::size_t whose value is + // the strictest alignment of all types listed in Types. + // Note that the resulting type is not a C/C++ union, but simply memory + // block (of pod type) that can be used to placement-new an actual + // C/C++ union of the types. The actual union you declare can be a non-POD union. 
+ // + // Example usage: + // union MyUnion { + // char c; + // int i; + // float f; + // + // MyUnion(float fValue) : f(fValue) {} + // }; + // + // aligned_union::type myUnionStorage; + // MyUnion* pMyUnion = new(&myUnionStorage) MyUnion(21.4f); + // pMyUnion->i = 37; + // + /////////////////////////////////////////////////////////////////////// + + #if defined(EA_COMPILER_NO_VARIADIC_TEMPLATES) || !EASTL_TYPE_TRAIT_static_max_CONFORMANCE + #define EASTL_TYPE_TRAIT_aligned_union_CONFORMANCE 0 // aligned_union is not conforming, as it supports only a two-member unions. + + // To consider: Expand this to include more possible types. We may want to convert this to be a recursive + // template instead of like below. + template + struct aligned_union + { + static const size_t size0 = eastl::static_max::value; + static const size_t size1 = eastl::static_max::value; + static const size_t size2 = eastl::static_max::value; + static const size_t size = eastl::static_max::value; + + static const size_t alignment0 = eastl::static_max::value; + static const size_t alignment1 = eastl::static_max::value; + static const size_t alignment_value = eastl::static_max::value; + + typedef typename eastl::aligned_storage::type type; + }; + + #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + // To do: define macro. + #else + template + using aligned_union_t = typename aligned_union::type; + #endif + #else + #define EASTL_TYPE_TRAIT_aligned_union_CONFORMANCE 1 // aligned_union is conforming. + + template + struct aligned_union + { + static const size_t size = eastl::static_max::value; + static const size_t alignment_value = eastl::static_max::value; + + typedef typename eastl::aligned_storage::type type; + }; + + #if defined(EA_COMPILER_NO_TEMPLATE_ALIASES) + // To do: define macro. 
+ #else + template + using aligned_union_t = typename aligned_union::type; + #endif + + #endif + + + /////////////////////////////////////////////////////////////////////// + // union_cast + // + // Safely converts between unrelated types that have a binary equivalency. + // This appoach is required by strictly conforming C++ compilers because + // directly using a C or C++ cast between unrelated types is fraught with + // the possibility of undefined runtime behavior due to type aliasing. + // The Source and Dest types must be POD types due to the use of a union + // in C++ versions prior to C++11. C++11 relaxes the definition of a POD + // such that it allows a classes with trivial default constructors whereas + // previous versions did not, so beware of this when writing portable code. + // + // Example usage: + // float f32 = 1.234f; + // uint32_t n32 = union_cast(f32); + // + // Example possible mis-usage: + // The following is valid only if you are aliasing the pointer value and + // not what it points to. Most of the time the user intends the latter, + // which isn't strictly possible. + // Widget* pWidget = CreateWidget(); + // Foo* pFoo = union_cast(pWidget); + /////////////////////////////////////////////////////////////////////// + + template + DestType union_cast(SourceType sourceValue) + { + EASTL_CT_ASSERT((sizeof(DestType) == sizeof(SourceType)) && + (EA_ALIGN_OF(DestType) == EA_ALIGN_OF(SourceType))); // To support differening alignments, we would need to use a memcpy-based solution or find a way to make the two union members align with each other. + //EASTL_CT_ASSERT(is_pod::value && is_pod::value); // Disabled because we don't want to restrict what the user can do, as some compiler's definitions of is_pod aren't up to C++11 Standards. + //EASTL_CT_ASSERT(!is_pointer::value && !is_pointer::value); // Disabled because it's valid to alias pointers as long as you are aliasong the pointer value and not what it points to. 
+ + union { + SourceType sourceValue; + DestType destValue; + } u; + u.sourceValue = sourceValue; + + return u.destValue; + } + + + + /////////////////////////////////////////////////////////////////////// + // void_t + // + // Maps a sequence of any types to void. This utility class is used in + // template meta programming to simplify compile time reflection mechanisms + // required by the standard library. + // + // http://en.cppreference.com/w/cpp/types/void_t + // + // Example: + // template + // struct is_iterable : false_type {}; + // + // template + // struct is_iterable().begin()), + // decltype(declval().end())>> : true_type {}; + // + /////////////////////////////////////////////////////////////////////// + #if EASTL_VARIABLE_TEMPLATES_ENABLED + template + using void_t = void; + #endif + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + + + diff --git a/include/EASTL/intrusive_hash_map.h b/include/EASTL/intrusive_hash_map.h new file mode 100644 index 0000000..37f1618 --- /dev/null +++ b/include/EASTL/intrusive_hash_map.h @@ -0,0 +1,98 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_INTRUSIVE_HASH_MAP_H +#define EASTL_INTRUSIVE_HASH_MAP_H + + +#include +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// intrusive_hash_map + /// + /// Template parameters: + /// Key The key object (key in the key/value pair). T must contain a member of type Key named mKey. + /// T The type of object the map holds (a.k.a. value). + /// bucketCount The number of buckets to use. Best if it's a prime number. + /// Hash Hash function. 
See functional.h for examples of hash functions. + /// Equal Equality testing predicate; tells if two elements are equal. + /// + template , typename Equal = eastl::equal_to > + class intrusive_hash_map : public intrusive_hashtable + { + public: + typedef intrusive_hashtable base_type; + typedef intrusive_hash_map this_type; + + public: + explicit intrusive_hash_map(const Hash& h = Hash(), const Equal& eq = Equal()) + : base_type(h, eq) + { + // Empty + } + + // To consider: Is this feasible, given how initializer_list works by creating a temporary array? Even if it is feasible, is it a good idea? + //intrusive_hash_map(std::initializer_list ilist); + + }; // intrusive_hash_map + + + + + /// intrusive_hash_multimap + /// + /// Implements a intrusive_hash_multimap, which is the same thing as a intrusive_hash_map + /// except that contained elements need not be unique. See the documentation + /// for intrusive_hash_map for details. + /// + /// Template parameters: + /// Key The key object (key in the key/value pair). T must contain a member of type Key named mKey. + /// T The type of object the map holds (a.k.a. value). + /// bucketCount The number of buckets to use. Best if it's a prime number. + /// Hash Hash function. See functional.h for examples of hash functions. + /// Equal Equality testing predicate; tells if two elements are equal. + /// + template , typename Equal = eastl::equal_to > + class intrusive_hash_multimap : public intrusive_hashtable + { + public: + typedef intrusive_hashtable base_type; + typedef intrusive_hash_multimap this_type; + + public: + explicit intrusive_hash_multimap(const Hash& h = Hash(), const Equal& eq = Equal()) + : base_type(h, eq) + { + // Empty + } + + // To consider: Is this feasible, given how initializer_list works by creating a temporary array? Even if it is feasible, is it a good idea? 
+ //intrusive_hash_multimap(std::initializer_list ilist); + + }; // intrusive_hash_multimap + + + + +} // namespace eastl + + +#endif // Header include guard + + + + + + diff --git a/include/EASTL/intrusive_hash_set.h b/include/EASTL/intrusive_hash_set.h new file mode 100644 index 0000000..a25d03a --- /dev/null +++ b/include/EASTL/intrusive_hash_set.h @@ -0,0 +1,100 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +#ifndef EASTL_INTRUSIVE_HASH_SET_H +#define EASTL_INTRUSIVE_HASH_SET_H + + +#include +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// intrusive_hash_set + /// + /// Template parameters: + /// T The type of object the set holds (a.k.a. value). + /// bucketCount The number of buckets to use. Best if it's a prime number. + /// Hash Hash function. See functional.h for examples of hash functions. + /// Equal Equality testing predicate; tells if two elements are equal. + /// + template , typename Equal = eastl::equal_to > + class intrusive_hash_set : public intrusive_hashtable + { + public: + typedef intrusive_hashtable base_type; + typedef intrusive_hash_set this_type; + + public: + explicit intrusive_hash_set(const Hash& h = Hash(), const Equal& eq = Equal()) + : base_type(h, eq) + { + // Empty + } + + // To consider: Is this feasible, given how initializer_list works by creating a temporary array? Even if it is feasible, is it a good idea? 
+ //intrusive_hash_set(std::initializer_list ilist); + + }; // intrusive_hash_set + + + + + /// intrusive_hash_multiset + /// + /// Implements a intrusive_hash_multiset, which is the same thing as a intrusive_hash_set + /// except that contained elements need not be unique. See the documentation + /// for intrusive_hash_set for details. + /// + /// Template parameters: + /// T The type of object the set holds (a.k.a. value). + /// bucketCount The number of buckets to use. Best if it's a prime number. + /// Hash Hash function. See functional.h for examples of hash functions. + /// Equal Equality testing predicate; tells if two elements are equal. + /// + template , typename Equal = eastl::equal_to > + class intrusive_hash_multiset : public intrusive_hashtable + { + public: + typedef intrusive_hashtable base_type; + typedef intrusive_hash_multiset this_type; + + public: + explicit intrusive_hash_multiset(const Hash& h = Hash(), const Equal& eq = Equal()) + : base_type(h, eq) + { + // Empty + } + + // To consider: Is this feasible, given how initializer_list works by creating a temporary array? Even if it is feasible, is it a good idea? + //intrusive_hash_multiset(std::initializer_list ilist); + + }; // intrusive_hash_multiset + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + diff --git a/include/EASTL/intrusive_list.h b/include/EASTL/intrusive_list.h new file mode 100644 index 0000000..18d7e93 --- /dev/null +++ b/include/EASTL/intrusive_list.h @@ -0,0 +1,1315 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + + +/////////////////////////////////////////////////////////////////////////////// +// The intrusive list container is similar to a list, with the primary +// different being that intrusive lists allow you to control memory +// allocation. 
+// +// * Intrusive lists store the nodes directly in the data items. This +// is done by deriving the object from intrusive_list_node. +// +// * The container does no memory allocation -- it works entirely with +// the submitted nodes. This does mean that it is the client's job to +// free the nodes in an intrusive list, though. +// +// * Valid node pointers can be converted back to iterators in O(1). +// This is because objects in the list are also nodes in the list. +// +// * intrusive_list does not support copy construction or assignment; +// the push, pop, and insert operations take ownership of the +// passed object. +// +// Usage notes: +// +// * You can use an intrusive_list directly with the standard nodes +// if you have some other way of converting the node pointer back +// to your data pointer. +// +// * Remember that the list destructor doesn't deallocate nodes -- it can't. +// +// * The size is not cached; this makes size() linear time but splice() is +// constant time. This does mean that you can remove() an element without +// having to figure out which list it is in, however. +// +// * You can insert a node into multiple intrusive_lists. One way to do so +// is to (ab)use inheritance: +// +// struct NodeA : public intrusive_list_node {}; +// struct NodeB : public intrusive_list_node {}; +// struct Object : public NodeA, nodeB {}; +// +// intrusive_list listA; +// intrusive_list listB; +// +// listA.push_back(obj); +// listB.push_back(obj); +// +// * find() vs. locate() +// The find(v) algorithm returns an iterator p such that *p == v; intrusive_list::locate(v) +// returns an iterator p such that &*p == &v. intrusive_list<> doesn't have find() mainly +// because list<> doesn't have it either, but there's no reason it couldn't. intrusive_list +// uses the name 'find' because: +// - So as not to confuse the member function with the well-defined free function from algorithm.h. +// - Because it is not API-compatible with eastl::find(). 
+// - Because it simply locates an object within the list based on its node entry and doesn't perform before any value-based searches or comparisons. +// +// Differences between intrusive_list and std::list: +// +// Issue std::list intrusive_list +// -------------------------------------------------------------- +// Automatic node ctor/dtor Yes No +// Can memmove() container Maybe* No +// Same item in list twice Yes(copy/byref) No +// Can store non-copyable items No Yes +// size() O(1) or O(n) O(n) +// clear() O(n) O(1) +// erase(range) O(n) O(1) +// splice(range) O(1) or O(n) O(1) +// Convert reference to iterator No O(1) +// Remove without container No O(1) +// Nodes in mixed allocators No Yes +// +// *) Not required by standard but can be done with some STL implementations. +// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTRUSIVE_LIST_H +#define EASTL_INTRUSIVE_LIST_H + + +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// intrusive_list_node + /// + /// By design this must be a POD, as user structs will be inheriting from + /// it and they may wish to remain POD themselves. However, if the + /// EASTL_VALIDATE_INTRUSIVE_LIST option is enabled + /// + struct intrusive_list_node + { + intrusive_list_node* mpNext; + intrusive_list_node* mpPrev; + + #if EASTL_VALIDATE_INTRUSIVE_LIST + intrusive_list_node() // Implemented inline because GCC can't deal with member functions + { // of may-alias classes being defined outside the declaration. + mpNext = mpPrev = NULL; + } + + ~intrusive_list_node() + { + #if EASTL_ASSERT_ENABLED + if(mpNext || mpPrev) + EASTL_FAIL_MSG("~intrusive_list_node(): List is non-empty."); + #endif + } + #endif + } EASTL_MAY_ALIAS; // It's not clear if this really should be needed. 
An old GCC compatible compiler is generating some crashing optimized code when strict aliasing is enabled, but analysis of it seems to blame the compiler. However, this topic can be tricky. + + + + /// intrusive_list_iterator + /// + template + class intrusive_list_iterator + { + public: + typedef intrusive_list_iterator this_type; + typedef intrusive_list_iterator iterator; + typedef intrusive_list_iterator const_iterator; + typedef T value_type; + typedef T node_type; + typedef ptrdiff_t difference_type; + typedef Pointer pointer; + typedef Reference reference; + typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category; + + public: + pointer mpNode; // Needs to be public for operator==() to work + + public: + intrusive_list_iterator(); + explicit intrusive_list_iterator(pointer pNode); // Note that you can also construct an iterator from T via this, since value_type == node_type. + intrusive_list_iterator(const iterator& x); + + reference operator*() const; + pointer operator->() const; + + intrusive_list_iterator& operator++(); + intrusive_list_iterator& operator--(); + + intrusive_list_iterator operator++(int); + intrusive_list_iterator operator--(int); + + }; // class intrusive_list_iterator + + + + /// intrusive_list_base + /// + class intrusive_list_base + { + public: + typedef eastl_size_t size_type; // See config.h for the definition of this, which defaults to size_t. + typedef ptrdiff_t difference_type; + + protected: + intrusive_list_node mAnchor; ///< Sentinel node (end). All data nodes are linked in a ring from this node. + + public: + intrusive_list_base(); + ~intrusive_list_base(); + + bool empty() const EA_NOEXCEPT; + eastl_size_t size() const EA_NOEXCEPT; ///< Returns the number of elements in the list; O(n). + void clear() EA_NOEXCEPT; ///< Clears the list; O(1). No deallocation occurs. + void pop_front(); ///< Removes an element from the front of the list; O(1). The element must exist, but is not deallocated. 
+ void pop_back(); ///< Removes an element from the back of the list; O(1). The element must exist, but is not deallocated. + EASTL_API void reverse() EA_NOEXCEPT; ///< Reverses a list so that front and back are swapped; O(n). + + EASTL_API bool validate() const; ///< Scans a list for linkage inconsistencies; O(n) time, O(1) space. Returns false if errors are detected, such as loops or branching. + + }; // class intrusive_list_base + + + + /// intrusive_list + /// + /// Example usage: + /// struct IntNode : public eastl::intrusive_list_node { + /// int mX; + /// IntNode(int x) : mX(x) { } + /// }; + /// + /// IntNode nodeA(0); + /// IntNode nodeB(1); + /// + /// intrusive_list intList; + /// intList.push_back(nodeA); + /// intList.push_back(nodeB); + /// intList.remove(nodeA); + /// + template + class intrusive_list : public intrusive_list_base + { + public: + typedef intrusive_list this_type; + typedef intrusive_list_base base_type; + typedef T node_type; + typedef T value_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::difference_type difference_type; + typedef T& reference; + typedef const T& const_reference; + typedef T* pointer; + typedef const T* const_pointer; + typedef intrusive_list_iterator iterator; + typedef intrusive_list_iterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + + public: + intrusive_list(); ///< Creates an empty list. + intrusive_list(const this_type& x); ///< Creates an empty list; ignores the argument. + //intrusive_list(std::initializer_list ilist); To consider: Is this feasible, given how initializer_list works by creating a temporary array? Even if it is feasible, is it a good idea? + + this_type& operator=(const this_type& x); ///< Clears the list; ignores the argument. + void swap(this_type&); ///< Swaps the contents of two intrusive lists; O(1). 
+ + iterator begin() EA_NOEXCEPT; ///< Returns an iterator pointing to the first element in the list. + const_iterator begin() const EA_NOEXCEPT; ///< Returns a const_iterator pointing to the first element in the list. + const_iterator cbegin() const EA_NOEXCEPT; ///< Returns a const_iterator pointing to the first element in the list. + + iterator end() EA_NOEXCEPT; ///< Returns an iterator pointing one-after the last element in the list. + const_iterator end() const EA_NOEXCEPT; ///< Returns a const_iterator pointing one-after the last element in the list. + const_iterator cend() const EA_NOEXCEPT; ///< Returns a const_iterator pointing one-after the last element in the list. + + reverse_iterator rbegin() EA_NOEXCEPT; ///< Returns a reverse_iterator pointing at the end of the list (start of the reverse sequence). + const_reverse_iterator rbegin() const EA_NOEXCEPT; ///< Returns a const_reverse_iterator pointing at the end of the list (start of the reverse sequence). + const_reverse_iterator crbegin() const EA_NOEXCEPT; ///< Returns a const_reverse_iterator pointing at the end of the list (start of the reverse sequence). + + reverse_iterator rend() EA_NOEXCEPT; ///< Returns a reverse_iterator pointing at the start of the list (end of the reverse sequence). + const_reverse_iterator rend() const EA_NOEXCEPT; ///< Returns a const_reverse_iterator pointing at the start of the list (end of the reverse sequence). + const_reverse_iterator crend() const EA_NOEXCEPT; ///< Returns a const_reverse_iterator pointing at the start of the list (end of the reverse sequence). + + reference front(); ///< Returns a reference to the first element. The list must be non-empty. + const_reference front() const; ///< Returns a const reference to the first element. The list must be non-empty. + reference back(); ///< Returns a reference to the last element. The list must be non-empty. + const_reference back() const; ///< Returns a const reference to the last element. 
The list must be non-empty. + + void push_front(value_type& x); ///< Adds an element to the front of the list; O(1). The element is not copied. The element must not be in any other list. + void push_back(value_type& x); ///< Adds an element to the back of the list; O(1). The element is not copied. The element must not be in any other list. + + bool contains(const value_type& x) const; ///< Returns true if the given element is in the list; O(n). Equivalent to (locate(x) != end()). + + iterator locate(value_type& x); ///< Converts a reference to an object in the list back to an iterator, or returns end() if it is not part of the list. O(n) + const_iterator locate(const value_type& x) const; ///< Converts a const reference to an object in the list back to a const iterator, or returns end() if it is not part of the list. O(n) + + iterator insert(const_iterator pos, value_type& x); ///< Inserts an element before the element pointed to by the iterator. O(1) + iterator erase(const_iterator pos); ///< Erases the element pointed to by the iterator. O(1) + iterator erase(const_iterator pos, const_iterator last); ///< Erases elements within the iterator range [pos, last). O(1) + + reverse_iterator erase(const_reverse_iterator pos); + reverse_iterator erase(const_reverse_iterator pos, const_reverse_iterator last); + + static void remove(value_type& value); ///< Erases an element from a list; O(1). Note that this is static so you don't need to know which list the element, although it must be in some list. + + void splice(const_iterator pos, value_type& x); + ///< Moves the given element into this list before the element pointed to by pos; O(1). + ///< Required: x must be in some list or have first/next pointers that point it itself. + + void splice(const_iterator pos, intrusive_list& x); + ///< Moves the contents of a list into this list before the element pointed to by pos; O(1). + ///< Required: &x != this (same as std::list). 
+ + void splice(const_iterator pos, intrusive_list& x, const_iterator i); + ///< Moves the given element pointed to i within the list x into the current list before + ///< the element pointed to by pos; O(1). + + void splice(const_iterator pos, intrusive_list& x, const_iterator first, const_iterator last); + ///< Moves the range of elements [first, last) from list x into the current list before + ///< the element pointed to by pos; O(1). + ///< Required: pos must not be in [first, last). (same as std::list). + + public: + // Sorting functionality + // This is independent of the global sort algorithms, as lists are + // linked nodes and can be sorted more efficiently by moving nodes + // around in ways that global sort algorithms aren't privy to. + + void merge(this_type& x); + + template + void merge(this_type& x, Compare compare); + + void unique(); + + template + void unique(BinaryPredicate); + + void sort(); + + template + void sort(Compare compare); + + public: + // bool validate() const; // Inherited from parent. + int validate_iterator(const_iterator i) const; + + }; // intrusive_list + + + + + /////////////////////////////////////////////////////////////////////// + // intrusive_list_node + /////////////////////////////////////////////////////////////////////// + + // Moved to be inline within the class because the may-alias attribute is + // triggering what appears to be a bug in GCC that effectively requires + // may-alias structs to implement inline member functions within the class + // declaration. 
We don't have a .cpp file for + // #if EASTL_VALIDATE_INTRUSIVE_LIST + // inline intrusive_list_node::intrusive_list_node() + // { + // mpNext = mpPrev = NULL; + // } + // + // inline intrusive_list_node::~intrusive_list_node() + // { + // #if EASTL_ASSERT_ENABLED + // if(mpNext || mpPrev) + // EASTL_FAIL_MSG("~intrusive_list_node(): List is non-empty."); + // #endif + // } + // #endif + + + /////////////////////////////////////////////////////////////////////// + // intrusive_list_iterator + /////////////////////////////////////////////////////////////////////// + + template + inline intrusive_list_iterator::intrusive_list_iterator() + { + #if EASTL_DEBUG + mpNode = NULL; + #endif + } + + + template + inline intrusive_list_iterator::intrusive_list_iterator(pointer pNode) + : mpNode(pNode) + { + // Empty + } + + + template + inline intrusive_list_iterator::intrusive_list_iterator(const iterator& x) + : mpNode(x.mpNode) + { + // Empty + } + + + template + inline typename intrusive_list_iterator::reference + intrusive_list_iterator::operator*() const + { + return *mpNode; + } + + + template + inline typename intrusive_list_iterator::pointer + intrusive_list_iterator::operator->() const + { + return mpNode; + } + + + template + inline typename intrusive_list_iterator::this_type& + intrusive_list_iterator::operator++() + { + mpNode = static_cast(mpNode->mpNext); + return *this; + } + + + template + inline typename intrusive_list_iterator::this_type + intrusive_list_iterator::operator++(int) + { + intrusive_list_iterator it(*this); + mpNode = static_cast(mpNode->mpNext); + return it; + } + + + template + inline typename intrusive_list_iterator::this_type& + intrusive_list_iterator::operator--() + { + mpNode = static_cast(mpNode->mpPrev); + return *this; + } + + + template + inline typename intrusive_list_iterator::this_type + intrusive_list_iterator::operator--(int) + { + intrusive_list_iterator it(*this); + mpNode = static_cast(mpNode->mpPrev); + return it; + } + + + 
// The C++ defect report #179 requires that we support comparisons between const and non-const iterators. + // Thus we provide additional template paremeters here to support this. The defect report does not + // require us to support comparisons between reverse_iterators and const_reverse_iterators. + template + inline bool operator==(const intrusive_list_iterator& a, + const intrusive_list_iterator& b) + { + return a.mpNode == b.mpNode; + } + + + template + inline bool operator!=(const intrusive_list_iterator& a, + const intrusive_list_iterator& b) + { + return a.mpNode != b.mpNode; + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const intrusive_list_iterator& a, + const intrusive_list_iterator& b) + { + return a.mpNode != b.mpNode; + } + + + + + /////////////////////////////////////////////////////////////////////// + // intrusive_list_base + /////////////////////////////////////////////////////////////////////// + + inline intrusive_list_base::intrusive_list_base() + { + mAnchor.mpNext = mAnchor.mpPrev = &mAnchor; + } + + inline intrusive_list_base::~intrusive_list_base() + { + #if EASTL_VALIDATE_INTRUSIVE_LIST + clear(); + mAnchor.mpNext = mAnchor.mpPrev = NULL; + #endif + } + + + inline bool intrusive_list_base::empty() const EA_NOEXCEPT + { + return mAnchor.mpPrev == &mAnchor; + } + + + inline intrusive_list_base::size_type intrusive_list_base::size() const EA_NOEXCEPT + { + const intrusive_list_node* p = &mAnchor; + size_type n = (size_type)-1; + + do { + ++n; + p = p->mpNext; + } while(p != &mAnchor); + + return n; + } + + + inline void intrusive_list_base::clear() EA_NOEXCEPT + { + #if EASTL_VALIDATE_INTRUSIVE_LIST + // Need to clear out all the next/prev pointers in the elements; + // this makes this operation O(n) instead of O(1). 
+ intrusive_list_node* pNode = mAnchor.mpNext; + + while(pNode != &mAnchor) + { + intrusive_list_node* const pNextNode = pNode->mpNext; + pNode->mpNext = pNode->mpPrev = NULL; + pNode = pNextNode; + } + #endif + + mAnchor.mpNext = mAnchor.mpPrev = &mAnchor; + } + + + inline void intrusive_list_base::pop_front() + { + #if EASTL_VALIDATE_INTRUSIVE_LIST + intrusive_list_node* const pNode = mAnchor.mpNext; + #endif + + mAnchor.mpNext->mpNext->mpPrev = &mAnchor; + mAnchor.mpNext = mAnchor.mpNext->mpNext; + + #if EASTL_VALIDATE_INTRUSIVE_LIST + if(pNode != &mAnchor) + pNode->mpNext = pNode->mpPrev = NULL; + #if EASTL_ASSERT_ENABLED + else + EASTL_FAIL_MSG("intrusive_list::pop_front(): empty list."); + #endif + #endif + } + + + inline void intrusive_list_base::pop_back() + { + #if EASTL_VALIDATE_INTRUSIVE_LIST + intrusive_list_node* const pNode = mAnchor.mpPrev; + #endif + + mAnchor.mpPrev->mpPrev->mpNext = &mAnchor; + mAnchor.mpPrev = mAnchor.mpPrev->mpPrev; + + #if EASTL_VALIDATE_INTRUSIVE_LIST + if(pNode != &mAnchor) + pNode->mpNext = pNode->mpPrev = NULL; + #if EASTL_ASSERT_ENABLED + else + EASTL_FAIL_MSG("intrusive_list::pop_back(): empty list."); + #endif + #endif + } + + + + + /////////////////////////////////////////////////////////////////////// + // intrusive_list + /////////////////////////////////////////////////////////////////////// + + template + inline intrusive_list::intrusive_list() + { + } + + + template + inline intrusive_list::intrusive_list(const this_type& /*x*/) + : intrusive_list_base() + { + // We intentionally ignore argument x. + // To consider: Shouldn't this function simply not exist? Is there a useful purpose for having this function? + // There should be a comment here about it, though my first guess is that this exists to quell VC++ level 4/-Wall compiler warnings. + } + + + template + inline typename intrusive_list::this_type& intrusive_list::operator=(const this_type& /*x*/) + { + // We intentionally ignore argument x. 
+ // See notes above in the copy constructor about questioning the existence of this function. + return *this; + } + + + template + inline typename intrusive_list::iterator intrusive_list::begin() EA_NOEXCEPT + { + return iterator(static_cast(mAnchor.mpNext)); + } + + + template + inline typename intrusive_list::const_iterator intrusive_list::begin() const EA_NOEXCEPT + { + return const_iterator(static_cast(mAnchor.mpNext)); + } + + + template + inline typename intrusive_list::const_iterator intrusive_list::cbegin() const EA_NOEXCEPT + { + return const_iterator(static_cast(mAnchor.mpNext)); + } + + + template + inline typename intrusive_list::iterator intrusive_list::end() EA_NOEXCEPT + { + return iterator(static_cast(&mAnchor)); + } + + + template + inline typename intrusive_list::const_iterator intrusive_list::end() const EA_NOEXCEPT + { + return const_iterator(static_cast(&mAnchor)); + } + + + template + inline typename intrusive_list::const_iterator intrusive_list::cend() const EA_NOEXCEPT + { + return const_iterator(static_cast(&mAnchor)); + } + + + template + inline typename intrusive_list::reverse_iterator intrusive_list::rbegin() EA_NOEXCEPT + { + return reverse_iterator(iterator(static_cast(&mAnchor))); + } + + + template + inline typename intrusive_list::const_reverse_iterator intrusive_list::rbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(static_cast(&mAnchor))); + } + + + template + inline typename intrusive_list::const_reverse_iterator intrusive_list::crbegin() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(static_cast(&mAnchor))); + } + + + template + inline typename intrusive_list::reverse_iterator intrusive_list::rend() EA_NOEXCEPT + { + return reverse_iterator(iterator(static_cast(mAnchor.mpNext))); + } + + + template + inline typename intrusive_list::const_reverse_iterator intrusive_list::rend() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(static_cast(mAnchor.mpNext))); + 
} + + + template + inline typename intrusive_list::const_reverse_iterator intrusive_list::crend() const EA_NOEXCEPT + { + return const_reverse_iterator(const_iterator(static_cast(mAnchor.mpNext))); + } + + + template + inline typename intrusive_list::reference intrusive_list::front() + { + #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED + if(mAnchor.mpNext == &mAnchor) + EASTL_FAIL_MSG("intrusive_list::front(): empty list."); + #endif + + return *static_cast(mAnchor.mpNext); + } + + + template + inline typename intrusive_list::const_reference intrusive_list::front() const + { + #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED + if(mAnchor.mpNext == &mAnchor) + EASTL_FAIL_MSG("intrusive_list::front(): empty list."); + #endif + + return *static_cast(mAnchor.mpNext); + } + + + template + inline typename intrusive_list::reference intrusive_list::back() + { + #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED + if(mAnchor.mpNext == &mAnchor) + EASTL_FAIL_MSG("intrusive_list::back(): empty list."); + #endif + + return *static_cast(mAnchor.mpPrev); + } + + + template + inline typename intrusive_list::const_reference intrusive_list::back() const + { + #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED + if(mAnchor.mpNext == &mAnchor) + EASTL_FAIL_MSG("intrusive_list::back(): empty list."); + #endif + + return *static_cast(mAnchor.mpPrev); + } + + + template + inline void intrusive_list::push_front(value_type& x) + { + #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED + if(x.mpNext || x.mpPrev) + EASTL_FAIL_MSG("intrusive_list::push_front(): element already on a list."); + #endif + + x.mpNext = mAnchor.mpNext; + x.mpPrev = &mAnchor; + mAnchor.mpNext = &x; + x.mpNext->mpPrev = &x; + } + + + template + inline void intrusive_list::push_back(value_type& x) + { + #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED + if(x.mpNext || x.mpPrev) + EASTL_FAIL_MSG("intrusive_list::push_back(): element already on a list."); + #endif + + 
x.mpPrev = mAnchor.mpPrev; + x.mpNext = &mAnchor; + mAnchor.mpPrev = &x; + x.mpPrev->mpNext = &x; + } + + + template + inline bool intrusive_list::contains(const value_type& x) const + { + for(const intrusive_list_node* p = mAnchor.mpNext; p != &mAnchor; p = p->mpNext) + { + if(p == &x) + return true; + } + + return false; + } + + + template + inline typename intrusive_list::iterator intrusive_list::locate(value_type& x) + { + for(intrusive_list_node* p = (T*)mAnchor.mpNext; p != &mAnchor; p = p->mpNext) + { + if(p == &x) + return iterator(static_cast(p)); + } + + return iterator((T*)&mAnchor); + } + + + template + inline typename intrusive_list::const_iterator intrusive_list::locate(const value_type& x) const + { + for(const intrusive_list_node* p = mAnchor.mpNext; p != &mAnchor; p = p->mpNext) + { + if(p == &x) + return const_iterator(static_cast(p)); + } + + return const_iterator((T*)&mAnchor); + } + + + template + inline typename intrusive_list::iterator intrusive_list::insert(const_iterator pos, value_type& x) + { + #if EASTL_VALIDATE_INTRUSIVE_LIST && EASTL_ASSERT_ENABLED + if(x.mpNext || x.mpPrev) + EASTL_FAIL_MSG("intrusive_list::insert(): element already on a list."); + #endif + + intrusive_list_node& next = *const_cast(pos.mpNode); + intrusive_list_node& prev = *static_cast(next.mpPrev); + prev.mpNext = next.mpPrev = &x; + x.mpPrev = &prev; + x.mpNext = &next; + + return iterator(&x); + } + + + template + inline typename intrusive_list::iterator + intrusive_list::erase(const_iterator pos) + { + intrusive_list_node& prev = *static_cast(pos.mpNode->mpPrev); + intrusive_list_node& next = *static_cast(pos.mpNode->mpNext); + prev.mpNext = &next; + next.mpPrev = &prev; + + #if EASTL_VALIDATE_INTRUSIVE_LIST + iterator ii(const_cast(pos.mpNode)); + ii.mpNode->mpPrev = ii.mpNode->mpNext = NULL; + #endif + + return iterator(static_cast(&next)); + } + + + template + inline typename intrusive_list::iterator + intrusive_list::erase(const_iterator first, const_iterator 
last) + { + intrusive_list_node& prev = *static_cast(first.mpNode->mpPrev); + intrusive_list_node& next = *const_cast(last.mpNode); + + #if EASTL_VALIDATE_INTRUSIVE_LIST + // need to clear out all the next/prev pointers in the elements; + // this makes this operation O(n) instead of O(1), sadly, although + // it's technically amortized O(1) since you could count yourself + // as paying this cost with each insert. + intrusive_list_node* pCur = const_cast(first.mpNode); + + while(pCur != &next) + { + intrusive_list_node* const pCurNext = pCur->mpNext; + pCur->mpPrev = pCur->mpNext = NULL; + pCur = pCurNext; + } + #endif + + prev.mpNext = &next; + next.mpPrev = &prev; + + return iterator(const_cast(last.mpNode)); + } + + + template + inline typename intrusive_list::reverse_iterator + intrusive_list::erase(const_reverse_iterator position) + { + return reverse_iterator(erase((++position).base())); + } + + + template + inline typename intrusive_list::reverse_iterator + intrusive_list::erase(const_reverse_iterator first, const_reverse_iterator last) + { + // Version which erases in order from first to last. + // difference_type i(first.base() - last.base()); + // while(i--) + // first = erase(first); + // return first; + + // Version which erases in order from last to first, but is slightly more efficient: + return reverse_iterator(erase((++last).base(), (++first).base())); + } + + + template + void intrusive_list::swap(intrusive_list& x) + { + // swap anchors + intrusive_list_node temp(mAnchor); + mAnchor = x.mAnchor; + x.mAnchor = temp; + + // Fixup node pointers into the anchor, since the addresses of + // the anchors must stay the same with each list. 
+ if(mAnchor.mpNext == &x.mAnchor) + mAnchor.mpNext = mAnchor.mpPrev = &mAnchor; + else + mAnchor.mpNext->mpPrev = mAnchor.mpPrev->mpNext = &mAnchor; + + if(x.mAnchor.mpNext == &mAnchor) + x.mAnchor.mpNext = x.mAnchor.mpPrev = &x.mAnchor; + else + x.mAnchor.mpNext->mpPrev = x.mAnchor.mpPrev->mpNext = &x.mAnchor; + + #if EASTL_VALIDATE_INTRUSIVE_LIST + temp.mpPrev = temp.mpNext = NULL; + #endif + } + + + template + void intrusive_list::splice(const_iterator pos, value_type& value) + { + // Note that splice(pos, x, pos) and splice(pos+1, x, pos) + // are valid and need to be handled correctly. + + if(pos.mpNode != &value) + { + // Unlink item from old list. + intrusive_list_node& oldNext = *value.mpNext; + intrusive_list_node& oldPrev = *value.mpPrev; + oldNext.mpPrev = &oldPrev; + oldPrev.mpNext = &oldNext; + + // Relink item into new list. + intrusive_list_node& newNext = *const_cast(pos.mpNode); + intrusive_list_node& newPrev = *newNext.mpPrev; + + newPrev.mpNext = &value; + newNext.mpPrev = &value; + value.mpPrev = &newPrev; + value.mpNext = &newNext; + } + } + + + template + void intrusive_list::splice(const_iterator pos, intrusive_list& x) + { + // Note: &x == this is prohibited, so self-insertion is not a problem. + if(x.mAnchor.mpNext != &x.mAnchor) // If the list 'x' isn't empty... + { + intrusive_list_node& next = *const_cast(pos.mpNode); + intrusive_list_node& prev = *static_cast(next.mpPrev); + intrusive_list_node& insertPrev = *static_cast(x.mAnchor.mpNext); + intrusive_list_node& insertNext = *static_cast(x.mAnchor.mpPrev); + + prev.mpNext = &insertPrev; + insertPrev.mpPrev = &prev; + insertNext.mpNext = &next; + next.mpPrev = &insertNext; + x.mAnchor.mpPrev = x.mAnchor.mpNext = &x.mAnchor; + } + } + + + template + void intrusive_list::splice(const_iterator pos, intrusive_list& /*x*/, const_iterator i) + { + // Note: &x == this is prohibited, so self-insertion is not a problem. 
+ + // Note that splice(pos, x, pos) and splice(pos + 1, x, pos) + // are valid and need to be handled correctly. + + // We don't need to check if the source list is empty, because + // this function expects a valid iterator from the source list, + // and thus the list cannot be empty in such a situation. + + iterator ii(const_cast(i.mpNode)); // Make a temporary non-const version. + + if(pos != ii) + { + // Unlink item from old list. + intrusive_list_node& oldNext = *ii.mpNode->mpNext; + intrusive_list_node& oldPrev = *ii.mpNode->mpPrev; + oldNext.mpPrev = &oldPrev; + oldPrev.mpNext = &oldNext; + + // Relink item into new list. + intrusive_list_node& newNext = *const_cast(pos.mpNode); + intrusive_list_node& newPrev = *newNext.mpPrev; + + newPrev.mpNext = ii.mpNode; + newNext.mpPrev = ii.mpNode; + ii.mpNode->mpPrev = &newPrev; + ii.mpNode->mpNext = &newNext; + } + } + + + template + void intrusive_list::splice(const_iterator pos, intrusive_list& /*x*/, const_iterator first, const_iterator last) + { + // Note: &x == this is prohibited, so self-insertion is not a problem. 
+ if(first != last) + { + intrusive_list_node& insertPrev = *const_cast(first.mpNode); + intrusive_list_node& insertNext = *static_cast(last.mpNode->mpPrev); + + // remove from old list + insertNext.mpNext->mpPrev = insertPrev.mpPrev; + insertPrev.mpPrev->mpNext = insertNext.mpNext; + + // insert into this list + intrusive_list_node& next = *const_cast(pos.mpNode); + intrusive_list_node& prev = *static_cast(next.mpPrev); + + prev.mpNext = &insertPrev; + insertPrev.mpPrev = &prev; + insertNext.mpNext = &next; + next.mpPrev = &insertNext; + } + } + + + template + inline void intrusive_list::remove(value_type& value) + { + intrusive_list_node& prev = *value.mpPrev; + intrusive_list_node& next = *value.mpNext; + prev.mpNext = &next; + next.mpPrev = &prev; + + #if EASTL_VALIDATE_INTRUSIVE_LIST + value.mpPrev = value.mpNext = NULL; + #endif + } + + + template + void intrusive_list::merge(this_type& x) + { + if(this != &x) + { + iterator first(begin()); + iterator firstX(x.begin()); + const iterator last(end()); + const iterator lastX(x.end()); + + while((first != last) && (firstX != lastX)) + { + if(*firstX < *first) + { + iterator next(firstX); + + splice(first, x, firstX, ++next); + firstX = next; + } + else + ++first; + } + + if(firstX != lastX) + splice(last, x, firstX, lastX); + } + } + + + template + template + void intrusive_list::merge(this_type& x, Compare compare) + { + if(this != &x) + { + iterator first(begin()); + iterator firstX(x.begin()); + const iterator last(end()); + const iterator lastX(x.end()); + + while((first != last) && (firstX != lastX)) + { + if(compare(*firstX, *first)) + { + iterator next(firstX); + + splice(first, x, firstX, ++next); + firstX = next; + } + else + ++first; + } + + if(firstX != lastX) + splice(last, x, firstX, lastX); + } + } + + + template + void intrusive_list::unique() + { + iterator first(begin()); + const iterator last(end()); + + if(first != last) + { + iterator next(first); + + while(++next != last) + { + if(*first == 
*next) + erase(next); + else + first = next; + next = first; + } + } + } + + + template + template + void intrusive_list::unique(BinaryPredicate predicate) + { + iterator first(begin()); + const iterator last(end()); + + if(first != last) + { + iterator next(first); + + while(++next != last) + { + if(predicate(*first, *next)) + erase(next); + else + first = next; + next = first; + } + } + } + + + template + void intrusive_list::sort() + { + // We implement the algorithm employed by Chris Caulfield whereby we use recursive + // function calls to sort the list. The sorting of a very large list may fail due to stack overflow + // if the stack is exhausted. The limit depends on the platform and the avaialble stack space. + + // Easier-to-understand version of the 'if' statement: + // iterator i(begin()); + // if((i != end()) && (++i != end())) // If the size is >= 2 (without calling the more expensive size() function)... + + // Faster, more inlinable version of the 'if' statement: + if((static_cast(mAnchor.mpNext) != &mAnchor) && + (static_cast(mAnchor.mpNext) != static_cast(mAnchor.mpPrev))) + { + // Split the array into 2 roughly equal halves. + this_type leftList; // This should cause no memory allocation. + this_type rightList; + + // We find an iterator which is in the middle of the list. The fastest way to do + // this is to iterate from the base node both forwards and backwards with two + // iterators and stop when they meet each other. Recall that our size() function + // is not O(1) but is instead O(n), at least when EASTL_LIST_SIZE_CACHE is disabled. + #if EASTL_LIST_SIZE_CACHE + iterator mid(begin()); + eastl::advance(mid, size() / 2); + #else + iterator mid(begin()), tail(end()); + + while((mid != tail) && (++mid != tail)) + --tail; + #endif + + // Move the left half of this into leftList and the right half into rightList. + leftList.splice(leftList.begin(), *this, begin(), mid); + rightList.splice(rightList.begin(), *this); + + // Sort the sub-lists. 
+ leftList.sort(); + rightList.sort(); + + // Merge the two halves into this list. + splice(begin(), leftList); + merge(rightList); + } + } + + + template + template + void intrusive_list::sort(Compare compare) + { + // We implement the algorithm employed by Chris Caulfield whereby we use recursive + // function calls to sort the list. The sorting of a very large list may fail due to stack overflow + // if the stack is exhausted. The limit depends on the platform and the avaialble stack space. + + // Easier-to-understand version of the 'if' statement: + // iterator i(begin()); + // if((i != end()) && (++i != end())) // If the size is >= 2 (without calling the more expensive size() function)... + + // Faster, more inlinable version of the 'if' statement: + if((static_cast(mAnchor.mpNext) != &mAnchor) && + (static_cast(mAnchor.mpNext) != static_cast(mAnchor.mpPrev))) + { + // Split the array into 2 roughly equal halves. + this_type leftList; // This should cause no memory allocation. + this_type rightList; + + // We find an iterator which is in the middle of the list. The fastest way to do + // this is to iterate from the base node both forwards and backwards with two + // iterators and stop when they meet each other. Recall that our size() function + // is not O(1) but is instead O(n), at least when EASTL_LIST_SIZE_CACHE is disabled. + #if EASTL_LIST_SIZE_CACHE + iterator mid(begin()); + eastl::advance(mid, size() / 2); + #else + iterator mid(begin()), tail(end()); + + while((mid != tail) && (++mid != tail)) + --tail; + #endif + + // Move the left half of this into leftList and the right half into rightList. + leftList.splice(leftList.begin(), *this, begin(), mid); + rightList.splice(rightList.begin(), *this); + + // Sort the sub-lists. + leftList.sort(compare); + rightList.sort(compare); + + // Merge the two halves into this list. 
+ splice(begin(), leftList); + merge(rightList, compare); + } + } + + + template + inline int intrusive_list::validate_iterator(const_iterator i) const + { + // To do: Come up with a more efficient mechanism of doing this. + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if(temp == i) + return (isf_valid | isf_current | isf_can_dereference); + } + + if(i == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + bool operator==(const intrusive_list& a, const intrusive_list& b) + { + // If we store an mSize member for intrusive_list, we want to take advantage of it here. + typename intrusive_list::const_iterator ia = a.begin(); + typename intrusive_list::const_iterator ib = b.begin(); + typename intrusive_list::const_iterator enda = a.end(); + typename intrusive_list::const_iterator endb = b.end(); + + while((ia != enda) && (ib != endb) && (*ia == *ib)) + { + ++ia; + ++ib; + } + return (ia == enda) && (ib == endb); + } + + template + bool operator!=(const intrusive_list& a, const intrusive_list& b) + { + return !(a == b); + } + + template + bool operator<(const intrusive_list& a, const intrusive_list& b) + { + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + template + bool operator>(const intrusive_list& a, const intrusive_list& b) + { + return b < a; + } + + template + bool operator<=(const intrusive_list& a, const intrusive_list& b) + { + return !(b < a); + } + + template + bool operator>=(const intrusive_list& a, const intrusive_list& b) + { + return !(a < b); + } + + template + void swap(intrusive_list& a, intrusive_list& b) + { + a.swap(b); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + diff --git a/include/EASTL/intrusive_ptr.h 
b/include/EASTL/intrusive_ptr.h new file mode 100644 index 0000000..af4e686 --- /dev/null +++ b/include/EASTL/intrusive_ptr.h @@ -0,0 +1,426 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_INTRUSIVE_PTR_H +#define EASTL_INTRUSIVE_PTR_H + + +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + // We provide default implementations of AddRef and Release in the eastl namespace. + // The user can override these on a per-class basis by defining their own specialized + // intrusive_ptr_add_ref and intrusive_ptr_release functions. User-defined specializations + // do not need to exist in the eastl namespace, but should preferably be in the namespace + // of the templated class T. + template + void intrusive_ptr_add_ref(T* p) + { + p->AddRef(); + } + + template + void intrusive_ptr_release(T* p) + { + p->Release(); + } + + + ////////////////////////////////////////////////////////////////////////////// + /// intrusive_ptr + /// + /// This is a class that acts like the C++ auto_ptr class except that instead + /// of deleting its member data when it goes out of scope, it releases its + /// member data when it goes out of scope. This class thus requires that the + /// templated data type have an AddRef and Release function (or whatever is + /// configured to be the two refcount functions). + /// + /// This class is useful for automatically releasing an object when this + /// class goes out of scope. See below for some usage. + /// You should be careful about putting instances of this class as members of + /// another class. 
If you do so, then the intrusive_ptr destructor will only + /// be called if the object that owns it is destructed. This creates a potential + /// chicken-and-egg situation. What if the intrusive_ptr member contains a + /// pointer to an object that has a reference on the object that owns the + /// intrusive_ptr member? The answer is that the neither object can ever be + /// destructed. The solution is to: + /// 1) Be very careful about what objects you put into member intrusive_ptr objects. + /// 2) Clear out your intrusive_ptr members in your shutdown function. + /// 3) Simply don't use intrusive_ptr objects as class members. + /// + /// Example usage: + /// intrusive_ptr pWidget = new Widget; + /// pWidget = new Widget; + /// pWidget->Reset(); + /// + template + class intrusive_ptr + { + protected: + // Friend declarations. + template friend class intrusive_ptr; + typedef intrusive_ptr this_type; + + T* mpObject; + + public: + /// element_type + /// This typedef is present for consistency with the C++ standard library + /// auto_ptr template. It allows users to refer to the templated type via + /// a typedef. This is sometimes useful to be able to do. + /// + /// Example usage: + /// intrusive_ptr ip; + /// void DoSomething(intrusive_ptr::element_type someType); + /// + typedef T element_type; + + /// intrusive_ptr + /// Default constructor. The member object is set to NULL. + intrusive_ptr() + : mpObject(NULL) + { + // Empty + } + + /// intrusive_ptr + /// Provides a constructor which takes ownership of a pointer. + /// The incoming pointer is AddRefd. + /// + /// Example usage: + /// intrusive_ptr pWidget(new Widget); + intrusive_ptr(T* p, bool bAddRef = true) + : mpObject(p) + { + if(mpObject && bAddRef) + intrusive_ptr_add_ref(mpObject); // Intentionally do not prefix the call with eastl:: but instead allow namespace lookup to resolve the namespace. + } + + /// intrusive_ptr + /// Construction from self type. 
+ intrusive_ptr(const intrusive_ptr& ip) + : mpObject(ip.mpObject) + { + if(mpObject) + intrusive_ptr_add_ref(mpObject); + } + + + /// intrusive_ptr + /// move constructor + intrusive_ptr(intrusive_ptr&& ip) + : mpObject(nullptr) + { + swap(ip); + } + + /// intrusive_ptr + /// Provides a constructor which copies a pointer from another intrusive_ptr. + /// The incoming pointer is AddRefd. The source intrusive_ptr object maintains + /// its AddRef on the pointer. + /// + /// Example usage: + /// intrusive_ptr pWidget1; + /// intrusive_ptr pWidget2(pWidget1); + template + intrusive_ptr(const intrusive_ptr& ip) + : mpObject(ip.mpObject) + { + if(mpObject) + intrusive_ptr_add_ref(mpObject); + } + + /// intrusive_ptr + /// Releases the owned pointer. + ~intrusive_ptr() + { + if(mpObject) + intrusive_ptr_release(mpObject); + } + + + /// operator= + /// Assignment to self type. + intrusive_ptr& operator=(const intrusive_ptr& ip) + { + return operator=(ip.mpObject); + } + + + /// operator= + /// Move assignment operator + intrusive_ptr& operator=(intrusive_ptr&& ip) + { + swap(ip); + return *this; + } + + + /// operator = + /// Assigns an intrusive_ptr object to this intrusive_ptr object. + /// The incoming pointer is AddRefd. The source intrusive_ptr object + /// maintains its AddRef on the pointer. If there is an existing member + /// pointer, it is Released before the incoming pointer is assigned. + /// If the incoming pointer is equal to the existing pointer, no + /// action is taken. The incoming pointer is AddRefd before any + /// member pointer is Released. + template + intrusive_ptr& operator=(const intrusive_ptr& ip) + { + return operator=(ip.mpObject); + } + + /// operator= + /// Assigns an intrusive_ptr object to this intrusive_ptr object. + /// The incoming pointer is AddRefd. If there is an existing member + /// pointer, it is Released before the incoming pointer is assigned. 
+ /// If the incoming pointer is equal to the existing pointer, no + /// action is taken. The incoming pointer is AddRefd before any + /// member pointer is Released. + intrusive_ptr& operator=(T* pObject) + { + if(pObject != mpObject) + { + T* const pTemp = mpObject; // Create temporary to prevent possible problems with re-entrancy. + if(pObject) + intrusive_ptr_add_ref(pObject); + mpObject = pObject; + if(pTemp) + intrusive_ptr_release(pTemp); + } + return *this; + } + + /// operator * + /// Returns a reference to the contained object. + T& operator *() const + { + return *mpObject; + } + + /// operator * + /// Returns a pointer to the contained object, allowing the + /// user to use this container as if it were contained pointer itself. + T* operator ->() const + { + return mpObject; + } + + /// get() + /// Returns a pointer to the contained object. + T* get() const + { + return mpObject; + } + + /// reset + /// Releases the owned object and clears our reference to it. + void reset() + { + T* const pTemp = mpObject; + mpObject = NULL; + if(pTemp) + intrusive_ptr_release(pTemp); + } + + /// swap + /// Exchanges the owned pointer beween two intrusive_ptr objects. + void swap(this_type& ip) + { + T* const pTemp = mpObject; + mpObject = ip.mpObject; + ip.mpObject = pTemp; + } + + /// attach + /// Sets an intrusive_ptr pointer without calling AddRef() on + /// the pointed object. The intrusive_ptr thus eventually only does a + /// Release() on the object. This is useful for assuming a reference + /// that someone else has handed you and making sure it is always + /// released, even if you return in the middle of a function or an + /// exception is thrown. + /// + void attach(T* pObject) + { + T* const pTemp = mpObject; + mpObject = pObject; + if(pTemp) + intrusive_ptr_release(pTemp); + } + + /// detach + /// Surrenders the reference held by an intrusive_ptr pointer -- + /// it returns the current reference and nulls the pointer. 
If the returned + /// pointer is non-null it must be released. This is useful in functions + /// that must return a reference while possibly being aborted by a return + /// or thrown exception: + /// + /// bool GetFoo(T** pp){ + /// intrusive_ptr p(PrivateGetFoo()); + /// if(p->Method()) + /// return false; + /// *pp = p.detach(); + /// return true; + /// } + T* detach() + { + T* const pTemp = mpObject; + mpObject = NULL; + return pTemp; + } + + /// Implicit operator bool + /// Allows for using a intrusive_ptr as a boolean. + /// Example usage: + /// intrusive_ptr ptr = new Widget; + /// if(ptr) + /// ++*ptr; + /// + /// Note that below we do not use operator bool(). The reason for this + /// is that booleans automatically convert up to short, int, float, etc. + /// The result is that this: if(intrusivePtr == 1) would yield true (bad). + typedef T* (this_type::*bool_)() const; + operator bool_() const + { + if(mpObject) + return &this_type::get; + return NULL; + } + + /// operator! + /// This returns the opposite of operator bool; it returns true if + /// the owned pointer is null. Some compilers require this and some don't. + /// intrusive_ptr ptr = new Widget; + /// if(!ptr) + /// assert(false); + bool operator!() const + { + return (mpObject == NULL); + } + + }; // class intrusive_ptr + + + /// get_pointer + /// returns intrusive_ptr::get() via the input intrusive_ptr. + template + inline T* get_pointer(const intrusive_ptr& intrusivePtr) + { + return intrusivePtr.get(); + } + + /// swap + /// Exchanges the owned pointer beween two intrusive_ptr objects. + /// This non-member version is useful for compatibility of intrusive_ptr + /// objects with the C++ Standard Library and other libraries. 
+ template + inline void swap(intrusive_ptr& intrusivePtr1, intrusive_ptr& intrusivePtr2) + { + intrusivePtr1.swap(intrusivePtr2); + } + + + template + bool operator==(intrusive_ptr const& iPtr1, intrusive_ptr const& iPtr2) + { + return (iPtr1.get() == iPtr2.get()); + } + + template + bool operator!=(intrusive_ptr const& iPtr1, intrusive_ptr const& iPtr2) + { + return (iPtr1.get() != iPtr2.get()); + } + + template + bool operator==(intrusive_ptr const& iPtr1, T* p) + { + return (iPtr1.get() == p); + } + + template + bool operator!=(intrusive_ptr const& iPtr1, T* p) + { + return (iPtr1.get() != p); + } + + template + bool operator==(T* p, intrusive_ptr const& iPtr2) + { + return (p == iPtr2.get()); + } + + template + bool operator!=(T* p, intrusive_ptr const& iPtr2) + { + return (p != iPtr2.get()); + } + + template + bool operator<(intrusive_ptr const& iPtr1, intrusive_ptr const& iPtr2) + { + return ((uintptr_t)iPtr1.get() < (uintptr_t)iPtr2.get()); + } + + + /// static_pointer_cast + /// Returns an intrusive_ptr static-casted from a intrusive_ptr. + template + intrusive_ptr static_pointer_cast(const intrusive_ptr& intrusivePtr) + { + return static_cast(intrusivePtr.get()); + } + + + #if EASTL_RTTI_ENABLED + + /// dynamic_pointer_cast + /// Returns an intrusive_ptr dynamic-casted from a intrusive_ptr. + template + intrusive_ptr dynamic_pointer_cast(const intrusive_ptr& intrusivePtr) + { + return dynamic_cast(intrusivePtr.get()); + } + + #endif + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/include/EASTL/iterator.h b/include/EASTL/iterator.h new file mode 100644 index 0000000..e12cd01 --- /dev/null +++ b/include/EASTL/iterator.h @@ -0,0 +1,1205 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_ITERATOR_H +#define EASTL_ITERATOR_H + + +#include +#include +#include + +#ifdef _MSC_VER + #pragma warning(push, 0) +#endif + +#include + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +// If the user has specified that we use std iterator +// categories instead of EASTL iterator categories, +// then #include . +#if EASTL_STD_ITERATOR_CATEGORY_ENABLED + #ifdef _MSC_VER + #pragma warning(push, 0) + #endif + #include + #ifdef _MSC_VER + #pragma warning(pop) + #endif +#endif + + +#ifdef _MSC_VER + #pragma warning(push) // VC++ generates a bogus warning that you cannot code away. + #pragma warning(disable: 4619) // There is no warning number 'number'. + #pragma warning(disable: 4217) // Member template functions cannot be used for copy-assignment or copy-construction. +#endif + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + /// iterator_status_flag + /// + /// Defines the validity status of an iterator. This is primarily used for + /// iterator validation in debug builds. These are implemented as OR-able + /// flags (as opposed to mutually exclusive values) in order to deal with + /// the nature of iterator status. In particular, an iterator may be valid + /// but not dereferencable, as in the case with an iterator to container end(). + /// An iterator may be valid but also dereferencable, as in the case with an + /// iterator to container begin(). + /// + enum iterator_status_flag + { + isf_none = 0x00, /// This is called none and not called invalid because it is not strictly the opposite of invalid. + isf_valid = 0x01, /// The iterator is valid, which means it is in the range of [begin, end]. + isf_current = 0x02, /// The iterator is valid and points to the same element it did when created. 
For example, if an iterator points to vector::begin() but an element is inserted at the front, the iterator is valid but not current. Modification of elements in place do not make iterators non-current. + isf_can_dereference = 0x04 /// The iterator is dereferencable, which means it is in the range of [begin, end). It may or may not be current. + }; + + + + // The following declarations are taken directly from the C++ standard document. + // input_iterator_tag, etc. + // iterator + // iterator_traits + // reverse_iterator + + // Iterator categories + // Every iterator is defined as belonging to one of the iterator categories that + // we define here. These categories come directly from the C++ standard. + #if !EASTL_STD_ITERATOR_CATEGORY_ENABLED // If we are to use our own iterator category definitions... + struct input_iterator_tag { }; + struct output_iterator_tag { }; + struct forward_iterator_tag : public input_iterator_tag { }; + struct bidirectional_iterator_tag : public forward_iterator_tag { }; + struct random_access_iterator_tag : public bidirectional_iterator_tag { }; + struct contiguous_iterator_tag : public random_access_iterator_tag { }; // Extension to the C++ standard. Contiguous ranges are more than random access, they are physically contiguous. 
+ #endif + + + // struct iterator + template + struct iterator + { + typedef Category iterator_category; + typedef T value_type; + typedef Distance difference_type; + typedef Pointer pointer; + typedef Reference reference; + }; + + + // struct iterator_traits + template + struct iterator_traits + { + typedef typename Iterator::iterator_category iterator_category; + typedef typename Iterator::value_type value_type; + typedef typename Iterator::difference_type difference_type; + typedef typename Iterator::pointer pointer; + typedef typename Iterator::reference reference; + }; + + template + struct iterator_traits + { + typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category; // To consider: Change this to contiguous_iterator_tag for the case that + typedef T value_type; // EASTL_ITC_NS is "eastl" instead of "std". + typedef ptrdiff_t difference_type; + typedef T* pointer; + typedef T& reference; + }; + + template + struct iterator_traits + { + typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category; + typedef T value_type; + typedef ptrdiff_t difference_type; + typedef const T* pointer; + typedef const T& reference; + }; + + + + + /// is_iterator_wrapper + /// + /// Tells if an Iterator type is a wrapper type as opposed to a regular type. + /// Relies on the class declaring a typedef called wrapped_iterator_type. 
+ /// + /// Examples of wrapping iterators: + /// reverse_iterator + /// generic_iterator + /// move_iterator + /// Examples of non-wrapping iterators: + /// iterator + /// list::iterator + /// char* + /// + /// Example behavior: + /// is_iterator_wrapper(int*)::value => false + /// is_iterator_wrapper(eastl::array*)::value => false + /// is_iterator_wrapper(eastl::vector::iterator)::value => false + /// is_iterator_wrapper(eastl::generic_iterator)::value => true + /// is_iterator_wrapper(eastl::move_iterator::iterator>)::value => true + /// + template + class is_iterator_wrapper + { + template + static eastl::no_type test(...); + + template + static eastl::yes_type test(typename U::wrapped_iterator_type*, typename eastl::enable_if::value>::type* = 0); + + public: + EA_DISABLE_VC_WARNING(6334) + static const bool value = (sizeof(test(NULL)) == sizeof(eastl::yes_type)); + EA_RESTORE_VC_WARNING() + }; + + + /// unwrap_iterator + /// + /// Takes a wrapper Iterator (e.g. move_iterator, reverse_iterator, generic_iterator) instance + /// and returns the wrapped iterator type. If Iterator is not a wrapper (including being a pointer), + /// or is not an iterator, then this function returns it as-is. + /// unwrap_iterator unwraps only a single layer of iterator at a time. You need to call it twice, + /// for example, to unwrap two layers of iterators. 
+ /// + /// Example usage: + /// int* pInt = unwrap_iterator(&pIntArray[15]); + /// int* pInt = unwrap_iterator(generic_iterator(&pIntArray[15])); + /// MyVector::iterator it = unwrap_iterator(myVector.begin()); + /// MyVector::iterator it = unwrap_iterator(move_iterator(myVector.begin())); + /// + template + struct is_iterator_wrapper_helper + { + typedef Iterator iterator_type; + + static iterator_type get_base(Iterator it) + { return it; } + }; + + + template + struct is_iterator_wrapper_helper + { + typedef typename Iterator::iterator_type iterator_type; + + static iterator_type get_base(Iterator it) + { return it.base(); } + }; + + template + inline typename is_iterator_wrapper_helper::value>::iterator_type unwrap_iterator(Iterator it) + { return eastl::is_iterator_wrapper_helper::value>::get_base(it); } + + + + /// reverse_iterator + /// + /// From the C++ standard: + /// Bidirectional and random access iterators have corresponding reverse + /// iterator adaptors that iterate through the data structure in the + /// opposite direction. They have the same signatures as the corresponding + /// iterators. The fundamental relation between a reverse iterator and its + /// corresponding iterator i is established by the identity: + /// &*(reverse_iterator(i)) == &*(i - 1). + /// This mapping is dictated by the fact that while there is always a pointer + /// past the end of an array, there might not be a valid pointer before the + /// beginning of an array. + /// + template + class reverse_iterator : public iterator::iterator_category, + typename eastl::iterator_traits::value_type, + typename eastl::iterator_traits::difference_type, + typename eastl::iterator_traits::pointer, + typename eastl::iterator_traits::reference> + { + public: + typedef Iterator iterator_type; + typedef iterator_type wrapped_iterator_type; // This is not in the C++ Standard; it's used by use to identify it as a wrapping iterator type. 
+ typedef typename eastl::iterator_traits::pointer pointer; + typedef typename eastl::iterator_traits::reference reference; + typedef typename eastl::iterator_traits::difference_type difference_type; + + protected: + Iterator mIterator; + + public: + EA_CPP14_CONSTEXPR reverse_iterator() // It's important that we construct mIterator, because if Iterator + : mIterator() { } // is a pointer, there's a difference between doing it and not. + + EA_CPP14_CONSTEXPR explicit reverse_iterator(iterator_type i) + : mIterator(i) { } + + EA_CPP14_CONSTEXPR reverse_iterator(const reverse_iterator& ri) + : mIterator(ri.mIterator) { } + + template + EA_CPP14_CONSTEXPR reverse_iterator(const reverse_iterator& ri) + : mIterator(ri.base()) { } + + // This operator= isn't in the standard, but the the C++ + // library working group has tentatively approved it, as it + // allows const and non-const reverse_iterators to interoperate. + template + EA_CPP14_CONSTEXPR reverse_iterator& operator=(const reverse_iterator& ri) + { mIterator = ri.base(); return *this; } + + EA_CPP14_CONSTEXPR iterator_type base() const + { return mIterator; } + + EA_CPP14_CONSTEXPR reference operator*() const + { + iterator_type i(mIterator); + return *--i; + } + + EA_CPP14_CONSTEXPR pointer operator->() const + { return &(operator*()); } + + EA_CPP14_CONSTEXPR reverse_iterator& operator++() + { --mIterator; return *this; } + + EA_CPP14_CONSTEXPR reverse_iterator operator++(int) + { + reverse_iterator ri(*this); + --mIterator; + return ri; + } + + EA_CPP14_CONSTEXPR reverse_iterator& operator--() + { ++mIterator; return *this; } + + EA_CPP14_CONSTEXPR reverse_iterator operator--(int) + { + reverse_iterator ri(*this); + ++mIterator; + return ri; + } + + EA_CPP14_CONSTEXPR reverse_iterator operator+(difference_type n) const + { return reverse_iterator(mIterator - n); } + + EA_CPP14_CONSTEXPR reverse_iterator& operator+=(difference_type n) + { mIterator -= n; return *this; } + + EA_CPP14_CONSTEXPR reverse_iterator 
operator-(difference_type n) const + { return reverse_iterator(mIterator + n); } + + EA_CPP14_CONSTEXPR reverse_iterator& operator-=(difference_type n) + { mIterator += n; return *this; } + + // http://cplusplus.github.io/LWG/lwg-defects.html#386, + // http://llvm.org/bugs/show_bug.cgi?id=17883 + // random_access_iterator operator[] is merely required to return something convertible to reference. + // reverse_iterator operator[] can't necessarily know what to return as the underlying iterator + // operator[] may return something other than reference. + EA_CPP14_CONSTEXPR reference operator[](difference_type n) const + { return mIterator[-n - 1]; } + }; + + + // The C++ library working group has tentatively approved the usage of two + // template parameters (Iterator1 and Iterator2) in order to allow reverse_iterators + // and const_reverse iterators to be comparable. This is a similar issue to the + // C++ defect report #179 regarding comparison of container iterators and const_iterators. + // + // libstdc++ reports that std::relops breaks the usage of two iterator types and if we + // want to support relops then we need to also make versions of each of below with + // a single template parameter to placate std::relops. But relops is hardly used due to + // the troubles it causes and so we are avoiding support here until somebody complains about it. 
+ template + EA_CPP14_CONSTEXPR inline bool + operator==(const reverse_iterator& a, const reverse_iterator& b) + { return a.base() == b.base(); } + + + template + EA_CPP14_CONSTEXPR inline bool + operator<(const reverse_iterator& a, const reverse_iterator& b) + { return a.base() > b.base(); } + + + template + EA_CPP14_CONSTEXPR inline bool + operator!=(const reverse_iterator& a, const reverse_iterator& b) + { return a.base() != b.base(); } + + + template + EA_CPP14_CONSTEXPR inline bool + operator>(const reverse_iterator& a, const reverse_iterator& b) + { return a.base() < b.base(); } + + + template + EA_CPP14_CONSTEXPR inline bool + operator<=(const reverse_iterator& a, const reverse_iterator& b) + { return a.base() >= b.base(); } + + + template + EA_CPP14_CONSTEXPR inline bool + operator>=(const reverse_iterator& a, const reverse_iterator& b) + { return a.base() <= b.base(); } + + + template + EA_CPP14_CONSTEXPR inline typename reverse_iterator::difference_type + operator-(const reverse_iterator& a, const reverse_iterator& b) + { return b.base() - a.base(); } + + + template + EA_CPP14_CONSTEXPR inline reverse_iterator + operator+(typename reverse_iterator::difference_type n, const reverse_iterator& a) + { return reverse_iterator(a.base() - n); } + + + /// is_reverse_iterator + /// + /// This is a type traits extension utility. + /// Given an iterator, tells if it's a reverse_iterator vs anything else. + /// If it's a reverse iterator wrapped by another iterator then value is false. + /// To consider: Detect that if it's a move_iterator and unwrap + /// move_iterator so we can detect that underneath it's reverse_iterator. + /// + template + struct is_reverse_iterator + : public eastl::false_type {}; + + template + struct is_reverse_iterator< eastl::reverse_iterator > + : public eastl::true_type {}; + + + + /// unwrap_reverse_iterator + /// + /// Returns Iterator::get_base() if it's a reverse_iterator, else returns Iterator as-is. 
+ /// + /// Example usage: + /// vector intVector; + /// eastl::reverse_iterator::iterator> reverseIterator(intVector.begin()); + /// vector::iterator it = unwrap_reverse_iterator(reverseIterator); + /// + /// Disabled until there is considered a good use for it. + /// template + /// inline typename eastl::is_iterator_wrapper_helper::value>::iterator_type unwrap_reverse_iterator(Iterator it) + /// { return eastl::is_iterator_wrapper_helper::value>::get_base(it); } + + + + /// move_iterator + /// + /// From the C++11 Standard, section 24.5.3.1: + /// Class template move_iterator is an iterator adaptor with the same behavior as the underlying iterator + /// except that its dereference operator implicitly converts the value returned by the underlying iterator's + /// dereference operator to an rvalue reference. Some generic algorithms can be called with move iterators to + /// replace copying with moving. + + template + class move_iterator // Don't inherit from iterator. + { + public: + typedef Iterator iterator_type; + typedef iterator_type wrapped_iterator_type; // This is not in the C++ Standard; it's used by use to identify it as a wrapping iterator type. 
+ typedef iterator_traits traits_type; + typedef typename traits_type::iterator_category iterator_category; + typedef typename traits_type::value_type value_type; + typedef typename traits_type::difference_type difference_type; + typedef Iterator pointer; + typedef value_type&& reference; + + protected: + iterator_type mIterator; + + public: + move_iterator() + : mIterator() + { + } + + explicit move_iterator(iterator_type mi) + : mIterator(mi) { } + + template + move_iterator(const move_iterator& mi) + : mIterator(mi.base()) + { + } + + iterator_type base() const + { return mIterator; } + + reference operator*() const + { return eastl::move(*mIterator); } + + pointer operator->() const + { return mIterator; } + + move_iterator& operator++() + { + ++mIterator; + return *this; + } + + move_iterator operator++(int) + { + move_iterator tempMoveIterator = *this; + ++mIterator; + return tempMoveIterator; + } + + move_iterator& operator--() + { + --mIterator; + return *this; + } + + move_iterator operator--(int) + { + move_iterator tempMoveIterator = *this; + --mIterator; + return tempMoveIterator; + } + + move_iterator operator+(difference_type n) const + { return move_iterator(mIterator + n); } + + move_iterator& operator+=(difference_type n) + { + mIterator += n; + return *this; + } + + move_iterator operator-(difference_type n) const + { return move_iterator(mIterator - n); } + + move_iterator& operator-=(difference_type n) + { + mIterator -= n; + return *this; + } + + reference operator[](difference_type n) const + { return eastl::move(mIterator[n]); } + }; + + template + inline bool + operator==(const move_iterator& a, const move_iterator& b) + { return a.base() == b.base(); } + + + template + inline bool + operator!=(const move_iterator& a, const move_iterator& b) + { return !(a == b); } + + + template + inline bool + operator<(const move_iterator& a, const move_iterator& b) + { return a.base() < b.base(); } + + + template + inline bool + operator<=(const 
move_iterator& a, const move_iterator& b) + { return !(b < a); } + + + template + inline bool + operator>(const move_iterator& a, const move_iterator& b) + { return b < a; } + + + template + inline bool + operator>=(const move_iterator& a, const move_iterator& b) + { return !(a < b); } + + + template + inline auto + operator-(const move_iterator& a, const move_iterator& b) -> decltype(a.base() - b.base()) + { return a.base() - b.base(); } + + + template + inline move_iterator + operator+(typename move_iterator::difference_type n, const move_iterator& a) + { return a + n; } + + + template + inline move_iterator make_move_iterator(Iterator i) + { return move_iterator(i); } + + + // make_move_if_noexcept_iterator returns move_iterator if the Iterator is of a noexcept type; + // otherwise returns Iterator as-is. The point of this is to be able to avoid moves that can generate exceptions and instead + // fall back to copies or whatever the default IteratorType::operator* returns for use by copy/move algorithms. + // To consider: merge the conditional expression usage here with the one used by move_if_noexcept, as they are the same condition. + #if EASTL_EXCEPTIONS_ENABLED + template ::value_type>::value || + !eastl::is_copy_constructible::value_type>::value, + eastl::move_iterator, Iterator>::type> + inline IteratorType make_move_if_noexcept_iterator(Iterator i) + { return IteratorType(i); } + #else + // Else there are no exceptions and thus we always return a move_iterator. + template + inline eastl::move_iterator make_move_if_noexcept_iterator(Iterator i) + { return eastl::move_iterator(i); } + #endif + + + + /// is_move_iterator + /// + /// This is a type traits extension utility. + /// Given an iterator, tells if it's a move iterator vs anything else. 
+ /// Example usage (though somewhat useless): + /// template + /// bool IsMoveIterator() { return typename eastl::is_move_iterator::value; } + /// + template + struct is_move_iterator + : public eastl::false_type {}; + + template + struct is_move_iterator< eastl::move_iterator > + : public eastl::true_type {}; + + + /// unwrap_move_iterator + /// + /// Returns Iterator::get_base() if it's a move_iterator, else returns Iterator as-is. + /// + /// Example usage: + /// vector intVector; + /// eastl::move_iterator::iterator> moveIterator(intVector.begin()); + /// vector::iterator it = unwrap_move_iterator(moveIterator); + /// + template + inline typename eastl::is_iterator_wrapper_helper::value>::iterator_type unwrap_move_iterator(Iterator it) + { return eastl::is_iterator_wrapper_helper::value>::get_base(it); } + + + + + /// back_insert_iterator + /// + /// A back_insert_iterator is simply a class that acts like an iterator but when you + /// assign a value to it, it calls push_back on the container with the value. + /// + template + class back_insert_iterator : public iterator + { + public: + typedef back_insert_iterator this_type; + typedef Container container_type; + typedef typename Container::const_reference const_reference; + + protected: + Container& container; + + public: + //back_insert_iterator(); // Not valid. Must construct with a Container. + + //back_insert_iterator(const this_type& x) // Compiler-implemented + // : container(x.container) { } + + explicit back_insert_iterator(Container& x) + : container(x) { } + + back_insert_iterator& operator=(const_reference value) + { container.push_back(value); return *this; } + + back_insert_iterator& operator=(typename Container::value_type&& value) + { container.push_back(eastl::move(value)); return *this; } + + back_insert_iterator& operator*() + { return *this; } + + back_insert_iterator& operator++() + { return *this; } // This is by design. 
+ + back_insert_iterator operator++(int) + { return *this; } // This is by design. + + protected: + void operator=(const this_type&){} // Declared to avoid compiler warnings about inability to generate this function. + }; + + + /// back_inserter + /// + /// Creates an instance of a back_insert_iterator. + /// + template + inline back_insert_iterator + back_inserter(Container& x) + { return back_insert_iterator(x); } + + + + + /// front_insert_iterator + /// + /// A front_insert_iterator is simply a class that acts like an iterator but when you + /// assign a value to it, it calls push_front on the container with the value. + /// + template + class front_insert_iterator : public iterator + { + public: + typedef front_insert_iterator this_type; + typedef Container container_type; + typedef typename Container::const_reference const_reference; + + protected: + Container& container; + + public: + //front_insert_iterator(); // Not valid. Must construct with a Container. + + //front_insert_iterator(const this_type& x) // Compiler-implemented + // : container(x.container) { } + + explicit front_insert_iterator(Container& x) + : container(x) { } + + front_insert_iterator& operator=(const_reference value) + { container.push_front(value); return *this; } + + front_insert_iterator& operator*() + { return *this; } + + front_insert_iterator& operator++() + { return *this; } // This is by design. + + front_insert_iterator operator++(int) + { return *this; } // This is by design. + + protected: + void operator=(const this_type&){} // Declared to avoid compiler warnings about inability to generate this function. + }; + + + /// front_inserter + /// + /// Creates an instance of a front_insert_iterator. 
+ /// + template + inline front_insert_iterator + front_inserter(Container& x) + { return front_insert_iterator(x); } + + + + + /// insert_iterator + /// + /// An insert_iterator is like an iterator except that when you assign a value to it, + /// the insert_iterator inserts the value into the container and increments the iterator. + /// + /// insert_iterator is an iterator adaptor that functions as an OutputIterator: + /// assignment through an insert_iterator inserts an object into a container. + /// Specifically, if ii is an insert_iterator, then ii keeps track of a container c and + /// an insertion point p; the expression *ii = x performs the insertion container.insert(p, x). + /// + /// If you assign through an insert_iterator several times, then you will be inserting + /// several elements into the underlying container. In the case of a sequence, they will + /// appear at a particular location in the underlying sequence, in the order in which + /// they were inserted: one of the arguments to insert_iterator's constructor is an + /// iterator p, and the new range will be inserted immediately before p. + /// + template + class insert_iterator : public iterator + { + public: + typedef Container container_type; + typedef typename Container::iterator iterator_type; + typedef typename Container::const_reference const_reference; + + protected: + Container& container; + iterator_type it; + + public: + // This assignment operator is defined more to stop compiler warnings (e.g. VC++ C4512) + // than to be useful. However, it does allow an insert_iterator to be assigned to another + // insert iterator provided that they point to the same container. 
+ insert_iterator& operator=(const insert_iterator& x) + { + EASTL_ASSERT(&x.container == &container); + it = x.it; + return *this; + } + + insert_iterator(Container& x, iterator_type itNew) + : container(x), it(itNew) {} + + insert_iterator& operator=(const_reference value) + { + it = container.insert(it, value); + ++it; + return *this; + } + + insert_iterator& operator*() + { return *this; } + + insert_iterator& operator++() + { return *this; } // This is by design. + + insert_iterator& operator++(int) + { return *this; } // This is by design. + + }; // insert_iterator + + + /// inserter + /// + /// Creates an instance of an insert_iterator. + /// + template + inline eastl::insert_iterator + inserter(Container& x, Iterator i) + { + typedef typename Container::iterator iterator; + return eastl::insert_iterator(x, iterator(i)); + } + + + /// is_insert_iterator + /// + /// This is a type traits extension utility. + /// Given an iterator, tells if it's an insert_iterator vs anything else. + /// If it's a insert_iterator wrapped by another iterator then value is false. + /// + template + struct is_insert_iterator + : public eastl::false_type {}; + + template + struct is_insert_iterator< eastl::insert_iterator > + : public eastl::true_type {}; + + + + + ////////////////////////////////////////////////////////////////////////////////// + /// distance + /// + /// Implements the distance() function. There are two versions, one for + /// random access iterators (e.g. with vector) and one for regular input + /// iterators (e.g. with list). The former is more efficient. 
+ /// + template + inline typename eastl::iterator_traits::difference_type + distance_impl(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag) + { + typename eastl::iterator_traits::difference_type n = 0; + + while(first != last) + { + ++first; + ++n; + } + return n; + } + + template + inline typename eastl::iterator_traits::difference_type + distance_impl(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag) + { + return last - first; + } + + // Special version defined so that std C++ iterators can be recognized by + // this function. Unfortunately, this function treats all foreign iterators + // as InputIterators and thus can seriously hamper performance in the case + // of large ranges of bidirectional_iterator_tag iterators. + //template + //inline typename eastl::iterator_traits::difference_type + //distance_impl(InputIterator first, InputIterator last, ...) + //{ + // typename eastl::iterator_traits::difference_type n = 0; + // + // while(first != last) + // { + // ++first; + // ++n; + // } + // return n; + //} + + template + inline typename eastl::iterator_traits::difference_type + distance(InputIterator first, InputIterator last) + { + typedef typename eastl::iterator_traits::iterator_category IC; + + return eastl::distance_impl(first, last, IC()); + } + + + + + ////////////////////////////////////////////////////////////////////////////////// + /// advance + /// + /// Implements the advance() function. There are three versions, one for + /// random access iterators (e.g. with vector), one for bidirectional + /// iterators (list) and one for regular input iterators (e.g. with slist). + /// + template + inline void + advance_impl(InputIterator& i, Distance n, EASTL_ITC_NS::input_iterator_tag) + { + while(n--) + ++i; + } + + template + struct advance_bi_impl + { + template + static void advance_impl(BidirectionalIterator& i, Distance n) // Specialization for unsigned distance type. 
+ { + while(n--) + ++i; + } + }; + + template <> + struct advance_bi_impl + { + template + static void advance_impl(BidirectionalIterator& i, Distance n) // Specialization for signed distance type. + { + if(n > 0) + { + while(n--) + ++i; + } + else + { + while(n++) + --i; + } + } + }; + + template + inline void + advance_impl(BidirectionalIterator& i, Distance n, EASTL_ITC_NS::bidirectional_iterator_tag) + { + advance_bi_impl::value>::advance_impl(i, n); + } + + template + inline void + advance_impl(RandomAccessIterator& i, Distance n, EASTL_ITC_NS::random_access_iterator_tag) + { + i += n; + } + + // Special version defined so that std C++ iterators can be recognized by + // this function. Unfortunately, this function treats all foreign iterators + // as InputIterators and thus can seriously hamper performance in the case + // of large ranges of bidirectional_iterator_tag iterators. + //template + //inline void + //advance_impl(InputIterator& i, Distance n, ...) + //{ + // while(n--) + // ++i; + //} + + template + inline void + advance(InputIterator& i, Distance n) + { + typedef typename eastl::iterator_traits::iterator_category IC; + + eastl::advance_impl(i, n, IC()); + } + + + // eastl::next / eastl::prev + // Return the nth/-nth successor of iterator it. 
+ // + // http://en.cppreference.com/w/cpp/iterator/next + // + template + inline InputIterator + next(InputIterator it, typename eastl::iterator_traits::difference_type n = 1) + { + eastl::advance(it, n); + return it; + } + + template + inline InputIterator + prev(InputIterator it, typename eastl::iterator_traits::difference_type n = 1) + { + eastl::advance(it, -n); + return it; + } + + +#if defined(EA_COMPILER_CPP11_ENABLED) && EA_COMPILER_CPP11_ENABLED + + // eastl::data + // + // http://en.cppreference.com/w/cpp/iterator/data + // + template + EA_CPP14_CONSTEXPR auto data(Container& c) -> decltype(c.data()) + { return c.data(); } + + template + EA_CPP14_CONSTEXPR auto data(const Container& c) -> decltype(c.data()) + { return c.data(); } + + template + EA_CPP14_CONSTEXPR T* data(T(&array)[N]) EA_NOEXCEPT + { return array; } + + template + EA_CPP14_CONSTEXPR const E* data(std::initializer_list il) EA_NOEXCEPT + { return il.begin(); } + + + // eastl::size + // + // http://en.cppreference.com/w/cpp/iterator/size + // + template + EA_CPP14_CONSTEXPR auto size(const C& c) -> decltype(c.size()) + { return c.size(); } + + template + EA_CPP14_CONSTEXPR size_t size(const T (&)[N]) EA_NOEXCEPT + { return N; } + + + // eastl::ssize + // + // https://en.cppreference.com/w/cpp/iterator/size + // + template + EA_CPP14_CONSTEXPR ptrdiff_t ssize(const T(&)[N]) EA_NOEXCEPT + { return N; } + + template + EA_CPP14_CONSTEXPR auto ssize(const C& c) + -> eastl::common_type_t> + { + using R = eastl::common_type_t>; + return static_cast(c.size()); + } + + + // eastl::empty + // + // http://en.cppreference.com/w/cpp/iterator/empty + // + template + EA_CPP14_CONSTEXPR auto empty(const Container& c) -> decltype(c.empty()) + { return c.empty(); } + + template + EA_CPP14_CONSTEXPR bool empty(const T (&)[N]) EA_NOEXCEPT + { return false; } + + template + EA_CPP14_CONSTEXPR bool empty(std::initializer_list il) EA_NOEXCEPT + { return il.size() == 0; } + +#endif // 
defined(EA_COMPILER_CPP11_ENABLED) && EA_COMPILER_CPP11_ENABLED + + + // eastl::begin / eastl::end + // http://en.cppreference.com/w/cpp/iterator/begin + // + // In order to enable eastl::begin and eastl::end, the compiler needs to have conforming support + // for argument-dependent lookup if it supports C++11 range-based for loops. The reason for this is + // that in C++11 range-based for loops result in usage of std::begin/std::end, but allow that to + // be overridden by argument-dependent lookup: + // C++11 Standard, section 6.5.4, paragraph 1. + // "otherwise, begin-expr and end-expr are begin(__range) and end(__range), respectively, + // where begin and end are looked up with argument-dependent lookup (3.4.2). For the + // purposes of this name lookup, namespace std is an associated namespace." + // It turns out that one compiler has a problem: GCC 4.6. That version added support for + // range-based for loops but has broken argument-dependent lookup which was fixed in GCC 4.7. + // + #if (defined(EA_COMPILER_GNUC) && (EA_COMPILER_VERSION == 4006)) + #define EASTL_BEGIN_END_ENABLED 0 + #else + #define EASTL_BEGIN_END_ENABLED 1 + #endif + + #if EASTL_BEGIN_END_ENABLED + template + EA_CPP14_CONSTEXPR inline auto begin(Container& container) -> decltype(container.begin()) + { + return container.begin(); + } + + template + EA_CPP14_CONSTEXPR inline auto begin(const Container& container) -> decltype(container.begin()) + { + return container.begin(); + } + + template + EA_CPP14_CONSTEXPR inline auto cbegin(const Container& container) -> decltype(container.begin()) + { + return container.begin(); + } + + template + EA_CPP14_CONSTEXPR inline auto end(Container& container) -> decltype(container.end()) + { + return container.end(); + } + + template + EA_CPP14_CONSTEXPR inline auto end(const Container& container) -> decltype(container.end()) + { + return container.end(); + } + + template + EA_CPP14_CONSTEXPR inline auto cend(const Container& container) -> 
decltype(container.end()) + { + return container.end(); + } + + template + EA_CPP14_CONSTEXPR inline auto rbegin(Container& container) -> decltype(container.rbegin()) + { + return container.rbegin(); + } + + template + EA_CPP14_CONSTEXPR inline auto rbegin(const Container& container) -> decltype(container.rbegin()) + { + return container.rbegin(); + } + + template + EA_CPP14_CONSTEXPR inline auto rend(Container& container) -> decltype(container.rend()) + { + return container.rend(); + } + + template + EA_CPP14_CONSTEXPR inline auto rend(const Container& container) -> decltype(container.rend()) + { + return container.rend(); + } + + template + EA_CPP14_CONSTEXPR inline auto crbegin(const Container& container) -> decltype(eastl::rbegin(container)) + { + return container.rbegin(); + } + + template + EA_CPP14_CONSTEXPR inline auto crend(const Container& container) -> decltype(eastl::rend(container)) + { + return container.rend(); + } + + template + EA_CPP14_CONSTEXPR inline T* begin(T (&arrayObject)[arraySize]) + { + return arrayObject; + } + + template + EA_CPP14_CONSTEXPR inline T* end(T (&arrayObject)[arraySize]) + { + return (arrayObject + arraySize); + } + + template + EA_CPP14_CONSTEXPR inline reverse_iterator rbegin(T (&arrayObject)[arraySize]) + { + return reverse_iterator(arrayObject + arraySize); + } + + template + EA_CPP14_CONSTEXPR inline reverse_iterator rend(T (&arrayObject)[arraySize]) + { + return reverse_iterator(arrayObject); + } + + template + EA_CPP14_CONSTEXPR inline reverse_iterator rbegin(std::initializer_list ilist) + { + return eastl::reverse_iterator(ilist.end()); + } + + template + EA_CPP14_CONSTEXPR inline reverse_iterator rend(std::initializer_list ilist) + { + return eastl::reverse_iterator(ilist.begin()); + } + + template + EA_CPP14_CONSTEXPR reverse_iterator make_reverse_iterator(Iterator i) + { return reverse_iterator(i); } + + #endif // EASTL_BEGIN_END_ENABLED + +} // namespace eastl + + + +// Some compilers (e.g. 
GCC 4.6) support range-based for loops, but have a bug with +// respect to argument-dependent lookup which results on them unilaterally using std::begin/end +// with range-based for loops. To work around this we #include for this case in +// order to make std::begin/end visible to users of , for portability. +#if !EASTL_BEGIN_END_ENABLED && !defined(EA_COMPILER_NO_RANGE_BASED_FOR_LOOP) + #include +#endif + + + +#if defined(_MSC_VER) + #pragma warning(pop) +#endif + + +#endif // Header include guard + + + + + diff --git a/include/EASTL/linked_array.h b/include/EASTL/linked_array.h new file mode 100644 index 0000000..88d9914 --- /dev/null +++ b/include/EASTL/linked_array.h @@ -0,0 +1,336 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This class implements a linked_array template, which is an array version +// of linked_ptr. See linked_ptr for detailed documentation. +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_LINKED_ARRAY_H +#define EASTL_LINKED_ARRAY_H + + +#include +#include // Defines smart_array_deleter +#include // Defines linked_ptr_base +#include // Definition of ptrdiff_t + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// class linked_array + /// + /// This class implements a linked_array template, which is an array version + /// of linked_ptr. See linked_ptr for detailed documentation. + /// + template > + class linked_array + { + + protected: + + /// this_type + /// This is an alias for linked_array, this class. 
+ typedef linked_array this_type; + + /// deleter_type + typedef Deleter deleter_type; + + T* mpArray; + mutable const this_type* mpPrev; + mutable const this_type* mpNext; + + void link(const linked_array& linkedArray) + { // This code can only be called when we are in a reset state. + // assert(!mpArray && (mpNext == mpPrev)); + mpNext = linkedArray.mpNext; + mpNext->mpPrev = this; + mpPrev = &linkedArray; + linkedArray.mpNext = this; + } + + public: + /// element_type + /// Synonym for type T, useful for external code to reference the + /// type in a generic way. + typedef T element_type; + + + /// linked_array + /// Takes ownership of the pointer. It is OK if the input pointer is null. + explicit linked_array(T* pArray = NULL) + : mpArray(pArray) + { + mpPrev = mpNext = this; + } + + + /// linked_array + /// Shares ownership of a pointer with another instance of linked_array. + linked_array(const linked_array& linkedArray) + : mpArray(linkedArray.mpArray) + { + if(mpArray) + link(linkedArray); + else + mpPrev = mpNext = this; + } + + + /// ~linked_array + /// Removes this object from the of objects using the shared pointer. + /// If this object is the last owner of the shared pointer, the shared + /// pointer is deleted. + ~linked_array() + { + reset(); + } + + + /// operator= + /// Copies another linked_array to this object. Note that this object + /// may already own a shared pointer with another different pointer + /// (but still of the same type) before this call. In that case, + /// this function removes ownership of the old pointer and takes shared + /// ownership of the new pointer and increments its reference count. + linked_array& operator=(const linked_array& linkedArray) + { + if(linkedArray.mpArray != mpArray) + { + reset(linkedArray.mpArray); + if(linkedArray.mpArray) + link(linkedArray); + } + return *this; + } + + + /// operator= + /// Assigns a new pointer. If the new pointer is equivalent + /// to the current pointer, nothing is done. 
Otherwise the + /// current pointer is unlinked and possibly destroyed. + /// The new pointer can be NULL. + linked_array& operator=(T* pArray) + { + reset(pArray); + return *this; + } + + + /// reset + /// Releases the owned pointer and takes ownership of the + /// passed in pointer. If the passed in pointer is the same + /// as the owned pointer, nothing is done. The passed in pointer + /// can be null, in which case the use count is set to 1. + void reset(T* pArray = NULL) + { + if(pArray != mpArray) + { + if(unique()) + { + deleter_type del; + del(mpArray); + } + else + { + mpPrev->mpNext = mpNext; + mpNext->mpPrev = mpPrev; + mpPrev = mpNext = this; + } + mpArray = pArray; + } + } + + + /// swap + /// Exchanges the owned pointer beween two linkedArray objects. + /// + /// This function is disabled as it is currently deemed unsafe. + /// The problem is that the only way to implement this function + /// is to transfer pointers between the objects; you cannot + /// transfer the linked list membership between the objects. + /// Thus unless both linked_array objects were 'unique()', the + /// shared pointers would be duplicated amongst containers, + /// resulting in a crash. + //void swap(linked_array& linkedArray) + //{ + // if(linkedArray.mpArray != mpArray) + // { // This is only safe if both linked_arrays are unique(). + // linkedArray::element_type* const pArrayTemp = linkedArray.mpArray; + // linkedArray.reset(mpArray); + // reset(pArrayTemp); + // } + //} + + + /// operator[] + /// Returns a reference to the specified item in the owned pointer array. + T& operator[](ptrdiff_t i) const + { + // assert(mpArray && (i >= 0)); + return mpArray[i]; + } + + + /// operator* + /// Returns the owner pointer dereferenced. + T& operator*() const + { + return *mpArray; + } + + + /// operator-> + /// Allows access to the owned pointer via operator->() + T* operator->() const + { + return mpArray; + } + + + /// get + /// Returns the owned pointer. 
Note that this class does + /// not provide an operator T() function. This is because such + /// a thing (automatic conversion) is deemed unsafe. + T* get() const + { + return mpArray; + } + + + /// use_count + /// Returns the use count of the shared pointer. + /// The return value is one if the owned pointer is null. + /// This function is provided for compatibility with the + /// proposed C++ standard and for debugging purposes. It is not + /// intended for runtime use given that its execution time is + /// not constant. + int use_count() const + { + int useCount(1); + + for(const linked_ptr_base* pCurrent = this; pCurrent->mpNext != this; pCurrent = pCurrent->mpNext) + ++useCount; + + return useCount; + } + + + /// unique + /// Returns true if the use count of the owned pointer is one. + /// The return value is true if the owned pointer is null. + bool unique() const + { + return (mpNext == this); + } + + + /// Implicit operator bool + /// Allows for using a linked_array as a boolean. + /// Note that below we do not use operator bool(). The reason for this + /// is that booleans automatically convert up to short, int, float, etc. + /// The result is that this: if(linkedArray == 1) would yield true (bad). + typedef T* (this_type::*bool_)() const; + operator bool_() const + { + if(mpArray) + return &this_type::get; + return NULL; + } + + + /// operator! + /// This returns the opposite of operator bool; it returns true if + /// the owned pointer is null. Some compilers require this and some don't. + bool operator!() + { + return (mpArray == NULL); + } + + + /// force_delete + /// Forces deletion of the shared pointer. Fixes all references to the + /// pointer by any other owners to be NULL. 
+ void force_delete() + { + T* const pArray = mpArray; + + this_type* p = this; + do + { + this_type* const pNext = const_cast(p->mpNext); + p->mpArray = NULL; + p->mpNext = p->mpPrev = p; + p = pNext; + } + while(p != this); + + deleter_type del; + del(pArray); + } + + }; // class linked_array + + + + /// get_pointer + /// Returns linked_array::get() via the input linked_array. Provided for compatibility + /// with certain well-known libraries that use this functionality. + template + inline T* get_pointer(const linked_array& linkedArray) + { + return linkedArray.get(); + } + + + /// operator== + /// Compares two linked_array objects for equality. Equality is defined as + /// being true when the pointer shared between two linked_array objects is equal. + template + inline bool operator==(const linked_array& linkedArray1, const linked_array& linkedArray2) + { + return (linkedArray1.get() == linkedArray2.get()); + } + + + /// operator!= + /// Compares two linked_array objects for inequality. Equality is defined as + /// being true when the pointer shared between two linked_array objects is equal. + template + inline bool operator!=(const linked_array& linkedArray1, const linked_array& linkedArray2) + { + return (linkedArray1.get() != linkedArray2.get()); + } + + + /// operator< + /// Returns which linked_array is 'less' than the other. Useful when storing + /// sorted containers of linked_array objects. + template + inline bool operator<(const linked_array& linkedArray1, const linked_array& linkedArray2) + { + return (linkedArray1.get() < linkedArray2.get()); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + diff --git a/include/EASTL/linked_ptr.h b/include/EASTL/linked_ptr.h new file mode 100644 index 0000000..f57681a --- /dev/null +++ b/include/EASTL/linked_ptr.h @@ -0,0 +1,426 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_LINKED_PTR_H +#define EASTL_LINKED_PTR_H + + + +#include +#include // Defines smart_ptr_deleter +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// linked_ptr_base + /// + /// This class allows linked_ptr and linked_ptr to share the same + /// base nodes and thus be in the same linked list. + /// + struct linked_ptr_base + { + mutable linked_ptr_base* mpPrev; + mutable linked_ptr_base* mpNext; + }; + + + /// linked_ptr + /// + /// This class implements a linked_ptr template. A linked_ptr is like the C++ + /// Standard Library auto_ptr except that it allows sharing of pointers between + /// instances of auto_ptr via reference counting. linked_ptr objects can safely + /// be copied and can safely be used in C++ Standard Library containers such + /// as std::vector or std::list. This implementation, however, is not thread-safe. + /// you would need to use a separate linked_ptr_mt (multi-threaded) to get + /// thread safety. + /// + /// linked_ptr is a variation of shared_ptr (a.k.a. counted_ptr) which differs + /// in that instead of being implemented by a shared integer stored on the heap, + /// it is implemented by linked list stored within the linked_ptr object itself. + /// The result is that no memory is explicitly allocated from the heap, though + /// the cost of each linked_ptr object is 12 bytes of memory (32 bit machine) + /// instead of 4 bytes for the case of shared_ptr (depending on the heap). + /// + template > + class linked_ptr : public linked_ptr_base + { + protected: + template friend class linked_ptr; + + /// this_type + /// This is an alias for linked_ptr, this class. 
+ typedef linked_ptr this_type; + + /// deleter_type + typedef Deleter deleter_type; + + T* mpValue; /// The owned pointer. + + template + void link(const linked_ptr& linkedPtr) + { // This code can only be called when we are in a reset state. + // assert(!mpValue && (mpNext == mpPrev)); + mpNext = linkedPtr.mpNext; + mpNext->mpPrev = this; + mpPrev = const_cast*>(&linkedPtr); + linkedPtr.mpNext = this; + } + + public: + /// element_type + /// Synonym for type T, useful for external code to reference the + /// type in a generic way. + typedef T element_type; + + + /// linked_ptr + /// Default constructor. + linked_ptr() + : mpValue(NULL) + { + mpPrev = mpNext = this; + } + + + /// linked_ptr + /// Takes ownership of the pointer. It is OK if the input pointer is null. + template + explicit linked_ptr(U* pValue) + : mpValue(pValue) + { + mpPrev = mpNext = this; + } + + + /// linked_ptr + /// Construction with self type. + /// If we want a shared_ptr constructor that is templated on linked_ptr, + /// then we need to make it in addition to this function, as otherwise + /// the compiler will generate this function and things will go wrong. + linked_ptr(const linked_ptr& linkedPtr) + : mpValue(linkedPtr.mpValue) + { + if(mpValue) + link(linkedPtr); + else + mpPrev = mpNext = this; + } + + + /// linked_ptr + /// Shares ownership of a pointer with another instance of linked_ptr. + template + linked_ptr(const linked_ptr& linkedPtr) + : mpValue(linkedPtr.mpValue) + { + if(mpValue) + link(linkedPtr); + else + mpPrev = mpNext = this; + } + + + /// ~linked_ptr + /// Removes this object from the of objects using the shared pointer. + /// If this object is the last owner of the shared pointer, the shared + /// pointer is deleted. 
+ ~linked_ptr() + { + reset(); + } + + + /// operator= + /// If we want a shared_ptr operator= that is templated on linked_ptr, + /// then we need to make it in addition to this function, as otherwise + /// the compiler will generate this function and things will go wrong. + linked_ptr& operator=(const linked_ptr& linkedPtr) + { + if(linkedPtr.mpValue != mpValue) + { + reset(linkedPtr.mpValue); + if(linkedPtr.mpValue) + link(linkedPtr); + } + return *this; + } + + + /// operator= + /// Copies another linked_ptr to this object. Note that this object + /// may already own a shared pointer with another different pointer + /// (but still of the same type) before this call. In that case, + /// this function removes ownership of the old pointer and takes shared + /// ownership of the new pointer and increments its reference count. + template + linked_ptr& operator=(const linked_ptr& linkedPtr) + { + if(linkedPtr.mpValue != mpValue) + { + reset(linkedPtr.mpValue); + if(linkedPtr.mpValue) + link(linkedPtr); + } + return *this; + } + + + /// operator= + /// Assigns a new pointer. If the new pointer is equivalent + /// to the current pointer, nothing is done. Otherwise the + /// current pointer is unlinked and possibly destroyed. + /// The new pointer can be NULL. + template + linked_ptr& operator=(U* pValue) + { + reset(pValue); + return *this; + } + + + /// reset + /// Releases the owned pointer and takes ownership of the + /// passed in pointer. If the passed in pointer is the same + /// as the owned pointer, nothing is done. The passed in pointer + /// can be NULL, in which case the use count is set to 1. + template + void reset(U* pValue) + { + if(pValue != mpValue) + { + if(unique()) + { + deleter_type del; + del(mpValue); + } + else + { + mpPrev->mpNext = mpNext; + mpNext->mpPrev = mpPrev; + mpPrev = mpNext = this; + } + mpValue = pValue; + } + } + + + /// reset + /// Resets the container with NULL. 
If the current pointer + /// is non-NULL, it is unlinked and possibly destroyed. + void reset() + { + reset((T*)NULL); + } + + + /// swap + /// Exchanges the owned pointer beween two linkedPtr objects. + /// + /// This function is disabled as it is currently deemed unsafe. + /// The problem is that the only way to implement this function + /// is to transfer pointers between the objects; you cannot + /// transfer the linked list membership between the objects. + /// Thus unless both linked_ptr objects were 'unique()', the + /// shared pointers would be duplicated amongst containers, + /// resulting in a crash. + //template + //void swap(linked_ptr& linkedPtr) + //{ + // if(linkedPtr.mpValue != mpValue) + // { // This is only safe if both linked_ptrs are unique(). + // linkedPtr::element_type* const pValueTemp = linkedPtr.mpValue; + // linkedPtr.reset(mpValue); + // reset(pValueTemp); + // } + //} + + + /// operator* + /// Returns the owner pointer dereferenced. + T& operator*() const + { + return *mpValue; + } + + + /// operator-> + /// Allows access to the owned pointer via operator->() + T* operator->() const + { + return mpValue; + } + + + /// get + /// Returns the owned pointer. Note that this class does + /// not provide an operator T() function. This is because such + /// a thing (automatic conversion) is deemed unsafe. + T* get() const + { + return mpValue; + } + + + /// use_count + /// Returns the use count of the shared pointer. + /// The return value is one if the owned pointer is null. + /// This function is provided for compatibility with the + /// proposed C++ standard and for debugging purposes. It is not + /// intended for runtime use given that its execution time is + /// not constant. 
+ int use_count() const + { + int useCount(1); + + for(const linked_ptr_base* pCurrent = static_cast(this); + pCurrent->mpNext != static_cast(this); pCurrent = pCurrent->mpNext) + ++useCount; + + return useCount; + } + + + /// unique + /// Returns true if the use count of the owned pointer is one. + /// The return value is true if the owned pointer is null. + bool unique() const + { + return (mpNext == static_cast(this)); + } + + + /// Implicit operator bool + /// Allows for using a linked_ptr as a boolean. + /// Note that below we do not use operator bool(). The reason for this + /// is that booleans automatically convert up to short, int, float, etc. + /// The result is that this: if(linkedPtr == 1) would yield true (bad). + typedef T* (this_type::*bool_)() const; + operator bool_() const + { + if(mpValue) + return &this_type::get; + return NULL; + } + + + /// operator! + /// This returns the opposite of operator bool; it returns true if + /// the owned pointer is null. Some compilers require this and some don't. + bool operator!() + { + return (mpValue == NULL); + } + + + /// detach + /// Returns ownership of the pointer to the caller. Fixes all + /// references to the pointer by any other owners to be NULL. + /// This function can work properly only if all entries in the list + /// refer to type T and none refer to any other type (e.g. U). + T* detach() + { + T* const pValue = mpValue; + + linked_ptr_base* p = this; + do + { + linked_ptr_base* const pNext = p->mpNext; + static_cast(p)->mpValue = NULL; + p->mpNext = p->mpPrev = p; + p = pNext; + } + while(p != this); + + return pValue; + } + + /// force_delete + /// Forces deletion of the shared pointer. Fixes all references to the + /// pointer by any other owners to be NULL. + /// This function can work properly only if all entries in the list + /// refer to type T and none refer to any other type (e.g. U). 
+ void force_delete() + { + T* const pValue = detach(); + Deleter del; + del(pValue); + } + + }; // class linked_ptr + + + + /// get_pointer + /// Returns linked_ptr::get() via the input linked_ptr. Provided for compatibility + /// with certain well-known libraries that use this functionality. + template + inline T* get_pointer(const linked_ptr& linkedPtr) + { + return linkedPtr.get(); + } + + + /// operator== + /// Compares two linked_ptr objects for equality. Equality is defined as + /// being true when the pointer shared between two linked_ptr objects is equal. + template + inline bool operator==(const linked_ptr& linkedPtr1, const linked_ptr& linkedPtr2) + { + return (linkedPtr1.get() == linkedPtr2.get()); + } + + + /// operator!= + /// Compares two linked_ptr objects for inequality. Equality is defined as + /// being true when the pointer shared between two linked_ptr objects is equal. + template + inline bool operator!=(const linked_ptr& linkedPtr1, const linked_ptr& linkedPtr2) + { + return (linkedPtr1.get() != linkedPtr2.get()); + } + + + /// operator< + /// Returns which linked_ptr is 'less' than the other. Useful when storing + /// sorted containers of linked_ptr objects. + template + inline bool operator<(const linked_ptr& linkedPtr1, const linked_ptr& linkedPtr2) + { + return (linkedPtr1.get() < linkedPtr2.get()); + } + + +} // namespace eastl + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + + diff --git a/include/EASTL/list.h b/include/EASTL/list.h new file mode 100644 index 0000000..023bcce --- /dev/null +++ b/include/EASTL/list.h @@ -0,0 +1,2196 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements a doubly-linked list, much like the C++ std::list class. +// The primary distinctions between this list and std::list are: +// - list doesn't implement some of the less-frequently used functions +// of std::list. Any required functions can be added at a later time. +// - list has a couple extension functions that increase performance. +// - list can contain objects with alignment requirements. std::list cannot +// do so without a bit of tedious non-portable effort. +// - list has optimizations that don't exist in the STL implementations +// supplied by library vendors for our targeted platforms. +// - list supports debug memory naming natively. +// - list::size() by default is not a constant time function, like the list::size +// in some std implementations such as STLPort and SGI STL but unlike the +// list in Dinkumware and Metrowerks. The EASTL_LIST_SIZE_CACHE option can change this. +// - list provides a guaranteed portable node definition that allows users +// to write custom fixed size node allocators that are portable. +// - list is easier to read, debug, and visualize. +// - list is savvy to an environment that doesn't have exception handling, +// as is sometimes the case with console or embedded environments. +// - list has less deeply nested function calls and allows the user to +// enable forced inlining in debug builds in order to reduce bloat. +// - list doesn't keep a member size variable. This means that list is +// smaller than std::list (depends on std::list) and that for most operations +// it is faster than std::list. However, the list::size function is slower. +// - list::size_type is defined as eastl_size_t instead of size_t in order to +// save memory and run faster on 64 bit systems. 
+/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_LIST_H +#define EASTL_LIST_H + + +#include +#include +#include +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS() +#include +#include +EA_RESTORE_ALL_VC_WARNINGS() + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc + #pragma warning(disable: 4345) // Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized + #pragma warning(disable: 4571) // catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. + #pragma warning(disable: 4623) // default constructor was implicitly defined as deleted +#endif + + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// EASTL_LIST_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_LIST_DEFAULT_NAME + #define EASTL_LIST_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " list" // Unless the user overrides something, this is "EASTL list". + #endif + + + /// EASTL_LIST_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_LIST_DEFAULT_ALLOCATOR + #define EASTL_LIST_DEFAULT_ALLOCATOR allocator_type(EASTL_LIST_DEFAULT_NAME) + #endif + + + + /// ListNodeBase + /// + /// We define a ListNodeBase separately from ListNode (below), because it allows + /// us to have non-templated operations such as insert, remove (below), and it + /// makes it so that the list anchor node doesn't carry a T with it, which would + /// waste space and possibly lead to surprising the user due to extra Ts existing + /// that the user didn't explicitly create. 
The downside to all of this is that + /// it makes debug viewing of a list harder, given that the node pointers are of + /// type ListNodeBase and not ListNode. However, see ListNodeBaseProxy below. + /// + struct ListNodeBase + { + ListNodeBase* mpNext; + ListNodeBase* mpPrev; + + void insert(ListNodeBase* pNext) EA_NOEXCEPT; // Inserts this standalone node before the node pNext in pNext's list. + void remove() EA_NOEXCEPT; // Removes this node from the list it's in. Leaves this node's mpNext/mpPrev invalid. + void splice(ListNodeBase* pFirst, ListNodeBase* pLast) EA_NOEXCEPT; // Removes [pFirst,pLast) from the list it's in and inserts it before this in this node's list. + void reverse() EA_NOEXCEPT; // Reverses the order of nodes in the circular list this node is a part of. + static void swap(ListNodeBase& a, ListNodeBase& b) EA_NOEXCEPT; // Swaps the nodes a and b in the lists to which they belong. + + void insert_range(ListNodeBase* pFirst, ListNodeBase* pFinal) EA_NOEXCEPT; // Differs from splice in that first/final aren't in another list. + static void remove_range(ListNodeBase* pFirst, ListNodeBase* pFinal) EA_NOEXCEPT; // + } EASTL_LIST_PROXY_MAY_ALIAS; + + + #if EASTL_LIST_PROXY_ENABLED + + /// ListNodeBaseProxy + /// + /// In debug builds, we define ListNodeBaseProxy to be the same thing as + /// ListNodeBase, except it is templated on the parent ListNode class. + /// We do this because we want users in debug builds to be able to easily + /// view the list's contents in a debugger GUI. We do this only in a debug + /// build for the reasons described above: that ListNodeBase needs to be + /// as efficient as possible and not cause code bloat or extra function + /// calls (inlined or not). + /// + /// ListNodeBaseProxy *must* be separate from its parent class ListNode + /// because the list class must have a member node which contains no T value. + /// It is thus incorrect for us to have one single ListNode class which + /// has mpNext, mpPrev, and mValue. 
So we do a recursive template trick in + /// the definition and use of SListNodeBaseProxy. + /// + template + struct ListNodeBaseProxy + { + LN* mpNext; + LN* mpPrev; + }; + + template + struct ListNode : public ListNodeBaseProxy< ListNode > + { + T mValue; + }; + + #else + + EA_DISABLE_VC_WARNING(4625 4626) + template + struct ListNode : public ListNodeBase + { + T mValue; + }; + EA_RESTORE_VC_WARNING() + + #endif + + + + + /// ListIterator + /// + template + struct ListIterator + { + typedef ListIterator this_type; + typedef ListIterator iterator; + typedef ListIterator const_iterator; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. + typedef ptrdiff_t difference_type; + typedef T value_type; + typedef ListNode node_type; + typedef Pointer pointer; + typedef Reference reference; + typedef EASTL_ITC_NS::bidirectional_iterator_tag iterator_category; + + public: + node_type* mpNode; + + public: + ListIterator() EA_NOEXCEPT; + ListIterator(const ListNodeBase* pNode) EA_NOEXCEPT; + ListIterator(const iterator& x) EA_NOEXCEPT; + + this_type next() const EA_NOEXCEPT; + this_type prev() const EA_NOEXCEPT; + + reference operator*() const EA_NOEXCEPT; + pointer operator->() const EA_NOEXCEPT; + + this_type& operator++() EA_NOEXCEPT; + this_type operator++(int) EA_NOEXCEPT; + + this_type& operator--() EA_NOEXCEPT; + this_type operator--(int) EA_NOEXCEPT; + + }; // ListIterator + + + + + /// ListBase + /// + /// See VectorBase (class vector) for an explanation of why we + /// create this separate base class. + /// + template + class ListBase + { + public: + typedef T value_type; + typedef Allocator allocator_type; + typedef ListNode node_type; + typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to size_t. 
+ typedef ptrdiff_t difference_type; + #if EASTL_LIST_PROXY_ENABLED + typedef ListNodeBaseProxy< ListNode > base_node_type; + #else + typedef ListNodeBase base_node_type; // We use ListNodeBase instead of ListNode because we don't want to create a T. + #endif + + protected: + eastl::compressed_pair mNodeAllocator; + #if EASTL_LIST_SIZE_CACHE + size_type mSize; + #endif + + base_node_type& internalNode() EA_NOEXCEPT { return mNodeAllocator.first(); } + base_node_type const& internalNode() const EA_NOEXCEPT { return mNodeAllocator.first(); } + allocator_type& internalAllocator() EA_NOEXCEPT { return mNodeAllocator.second(); } + const allocator_type& internalAllocator() const EA_NOEXCEPT { return mNodeAllocator.second(); } + + public: + const allocator_type& get_allocator() const EA_NOEXCEPT; + allocator_type& get_allocator() EA_NOEXCEPT; + void set_allocator(const allocator_type& allocator); + + protected: + ListBase(); + ListBase(const allocator_type& a); + ~ListBase(); + + node_type* DoAllocateNode(); + void DoFreeNode(node_type* pNode); + + void DoInit() EA_NOEXCEPT; + void DoClear(); + + }; // ListBase + + + + + /// list + /// + /// -- size() is O(n) -- + /// Note that as of this writing, list::size() is an O(n) operation when EASTL_LIST_SIZE_CACHE is disabled. + /// That is, getting the size of the list is not a fast operation, as it requires traversing the list and + /// counting the nodes. We could make list::size() be fast by having a member mSize variable. There are reasons + /// for having such functionality and reasons for not having such functionality. We currently choose + /// to not have a member mSize variable as it would add four bytes to the class, add a tiny amount + /// of processing to functions such as insert and erase, and would only serve to improve the size + /// function, but no others. The alternative argument is that the C++ standard states that std::list + /// should be an O(1) operation (i.e. 
have a member size variable), most C++ standard library list + /// implementations do so, the size is but an integer which is quick to update, and many users + /// expect to have a fast size function. The EASTL_LIST_SIZE_CACHE option changes this. + /// To consider: Make size caching an optional template parameter. + /// + /// Pool allocation + /// If you want to make a custom memory pool for a list container, your pool + /// needs to contain items of type list::node_type. So if you have a memory + /// pool that has a constructor that takes the size of pool items and the + /// count of pool items, you would do this (assuming that MemoryPool implements + /// the Allocator interface): + /// typedef list WidgetList; // Delare your WidgetList type. + /// MemoryPool myPool(sizeof(WidgetList::node_type), 100); // Make a pool of 100 Widget nodes. + /// WidgetList myList(&myPool); // Create a list that uses the pool. + /// + template + class list : public ListBase + { + typedef ListBase base_type; + typedef list this_type; + + public: + typedef T value_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef ListIterator iterator; + typedef ListIterator const_iterator; + typedef eastl::reverse_iterator reverse_iterator; + typedef eastl::reverse_iterator const_reverse_iterator; + typedef typename base_type::size_type size_type; + typedef typename base_type::difference_type difference_type; + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::base_node_type base_node_type; + + using base_type::mNodeAllocator; + using base_type::DoAllocateNode; + using base_type::DoFreeNode; + using base_type::DoClear; + using base_type::DoInit; + using base_type::get_allocator; + #if EASTL_LIST_SIZE_CACHE + using base_type::mSize; + #endif + using base_type::internalNode; + using base_type::internalAllocator; + + public: + list(); 
+ list(const allocator_type& allocator); + explicit list(size_type n, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR); + list(size_type n, const value_type& value, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR); + list(const this_type& x); + list(const this_type& x, const allocator_type& allocator); + list(this_type&& x); + list(this_type&&, const allocator_type&); + list(std::initializer_list ilist, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR); + + template + list(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg. + + this_type& operator=(const this_type& x); + this_type& operator=(std::initializer_list ilist); + this_type& operator=(this_type&& x); + + // In the case that the two containers' allocators are unequal, swap copies elements instead + // of replacing them in place. In this case swap is an O(n) operation instead of O(1). + void swap(this_type& x); + + void assign(size_type n, const value_type& value); + + template // It turns out that the C++ std::list specifies a two argument + void assign(InputIterator first, InputIterator last); // version of assign that takes (int size, int value). These are not + // iterators, so we need to do a template compiler trick to do the right thing. 
+ void assign(std::initializer_list ilist); + + iterator begin() EA_NOEXCEPT; + const_iterator begin() const EA_NOEXCEPT; + const_iterator cbegin() const EA_NOEXCEPT; + + iterator end() EA_NOEXCEPT; + const_iterator end() const EA_NOEXCEPT; + const_iterator cend() const EA_NOEXCEPT; + + reverse_iterator rbegin() EA_NOEXCEPT; + const_reverse_iterator rbegin() const EA_NOEXCEPT; + const_reverse_iterator crbegin() const EA_NOEXCEPT; + + reverse_iterator rend() EA_NOEXCEPT; + const_reverse_iterator rend() const EA_NOEXCEPT; + const_reverse_iterator crend() const EA_NOEXCEPT; + + bool empty() const EA_NOEXCEPT; + size_type size() const EA_NOEXCEPT; + + void resize(size_type n, const value_type& value); + void resize(size_type n); + + reference front(); + const_reference front() const; + + reference back(); + const_reference back() const; + + template + void emplace_front(Args&&... args); + + template + void emplace_back(Args&&... args); + + void push_front(const value_type& value); + void push_front(value_type&& x); + reference push_front(); + void* push_front_uninitialized(); + + void push_back(const value_type& value); + void push_back(value_type&& x); + reference push_back(); + void* push_back_uninitialized(); + + void pop_front(); + void pop_back(); + + template + iterator emplace(const_iterator position, Args&&... 
args); + + iterator insert(const_iterator position); + iterator insert(const_iterator position, const value_type& value); + iterator insert(const_iterator position, value_type&& x); + iterator insert(const_iterator position, std::initializer_list ilist); + iterator insert(const_iterator position, size_type n, const value_type& value); + + template + iterator insert(const_iterator position, InputIterator first, InputIterator last); + + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + + reverse_iterator erase(const_reverse_iterator position); + reverse_iterator erase(const_reverse_iterator first, const_reverse_iterator last); + + void clear() EA_NOEXCEPT; + void reset_lose_memory() EA_NOEXCEPT; // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs. + + void remove(const T& x); + + template + void remove_if(Predicate); + + void reverse() EA_NOEXCEPT; + + // splice inserts elements in the range [first,last) before position and removes the elements from x. + // In the case that the two containers' allocators are unequal, splice copies elements + // instead of splicing them. In this case elements are not removed from x, and iterators + // into the spliced elements from x continue to point to the original values in x. + void splice(const_iterator position, this_type& x); + void splice(const_iterator position, this_type& x, const_iterator i); + void splice(const_iterator position, this_type& x, const_iterator first, const_iterator last); + void splice(const_iterator position, this_type&& x); + void splice(const_iterator position, this_type&& x, const_iterator i); + void splice(const_iterator position, this_type&& x, const_iterator first, const_iterator last); + + public: + // For merge, see notes for splice regarding the handling of unequal allocators. 
+ void merge(this_type& x); + void merge(this_type&& x); + + template + void merge(this_type& x, Compare compare); + + template + void merge(this_type&& x, Compare compare); + + void unique(); + + template + void unique(BinaryPredicate); + + // Sorting functionality + // This is independent of the global sort algorithms, as lists are + // linked nodes and can be sorted more efficiently by moving nodes + // around in ways that global sort algorithms aren't privy to. + void sort(); + + template + void sort(Compare compare); + + public: + bool validate() const; + int validate_iterator(const_iterator i) const; + + protected: + node_type* DoCreateNode(); + + template + node_type* DoCreateNode(Args&&... args); + + template + void DoAssign(Integer n, Integer value, true_type); + + template + void DoAssign(InputIterator first, InputIterator last, false_type); + + void DoAssignValues(size_type n, const value_type& value); + + template + void DoInsert(ListNodeBase* pNode, Integer n, Integer value, true_type); + + template + void DoInsert(ListNodeBase* pNode, InputIterator first, InputIterator last, false_type); + + void DoInsertValues(ListNodeBase* pNode, size_type n, const value_type& value); + + template + void DoInsertValue(ListNodeBase* pNode, Args&&... args); + + void DoErase(ListNodeBase* pNode); + + void DoSwap(this_type& x); + + template + iterator DoSort(iterator i1, iterator end2, size_type n, Compare& compare); + + }; // class list + + + + + + /////////////////////////////////////////////////////////////////////// + // ListNodeBase + /////////////////////////////////////////////////////////////////////// + + // Swaps the nodes a and b in the lists to which they belong. This is similar to + // splicing a into b's list and b into a's list at the same time. + // Works by swapping the members of a and b, and fixes up the lists that a and b + // were part of to point to the new members. 
+ inline void ListNodeBase::swap(ListNodeBase& a, ListNodeBase& b) EA_NOEXCEPT + { + const ListNodeBase temp(a); + a = b; + b = temp; + + if(a.mpNext == &b) + a.mpNext = a.mpPrev = &a; + else + a.mpNext->mpPrev = a.mpPrev->mpNext = &a; + + if(b.mpNext == &a) + b.mpNext = b.mpPrev = &b; + else + b.mpNext->mpPrev = b.mpPrev->mpNext = &b; + } + + + // splices the [first,last) range from its current list into our list before this node. + inline void ListNodeBase::splice(ListNodeBase* first, ListNodeBase* last) EA_NOEXCEPT + { + // We assume that [first, last] are not within our list. + last->mpPrev->mpNext = this; + first->mpPrev->mpNext = last; + this->mpPrev->mpNext = first; + + ListNodeBase* const pTemp = this->mpPrev; + this->mpPrev = last->mpPrev; + last->mpPrev = first->mpPrev; + first->mpPrev = pTemp; + } + + + inline void ListNodeBase::reverse() EA_NOEXCEPT + { + ListNodeBase* pNode = this; + do + { + EA_ANALYSIS_ASSUME(pNode != NULL); + ListNodeBase* const pTemp = pNode->mpNext; + pNode->mpNext = pNode->mpPrev; + pNode->mpPrev = pTemp; + pNode = pNode->mpPrev; + } + while(pNode != this); + } + + + inline void ListNodeBase::insert(ListNodeBase* pNext) EA_NOEXCEPT + { + mpNext = pNext; + mpPrev = pNext->mpPrev; + pNext->mpPrev->mpNext = this; + pNext->mpPrev = this; + } + + + // Removes this node from the list that it's in. Assumes that the + // node is within a list and thus that its prev/next pointers are valid. + inline void ListNodeBase::remove() EA_NOEXCEPT + { + mpNext->mpPrev = mpPrev; + mpPrev->mpNext = mpNext; + } + + + // Inserts the standalone range [pFirst, pFinal] before pPosition. Assumes that the + // range is not within a list and thus that it's prev/next pointers are not valid. + // Assumes that this node is within a list and thus that its prev/next pointers are valid. 
+ inline void ListNodeBase::insert_range(ListNodeBase* pFirst, ListNodeBase* pFinal) EA_NOEXCEPT + { + mpPrev->mpNext = pFirst; + pFirst->mpPrev = mpPrev; + mpPrev = pFinal; + pFinal->mpNext = this; + } + + + // Removes the range [pFirst, pFinal] from the list that it's in. Assumes that the + // range is within a list and thus that its prev/next pointers are valid. + inline void ListNodeBase::remove_range(ListNodeBase* pFirst, ListNodeBase* pFinal) EA_NOEXCEPT + { + pFinal->mpNext->mpPrev = pFirst->mpPrev; + pFirst->mpPrev->mpNext = pFinal->mpNext; + } + + + /////////////////////////////////////////////////////////////////////// + // ListIterator + /////////////////////////////////////////////////////////////////////// + + template + inline ListIterator::ListIterator() EA_NOEXCEPT + : mpNode() // To consider: Do we really need to intialize mpNode? + { + // Empty + } + + + template + inline ListIterator::ListIterator(const ListNodeBase* pNode) EA_NOEXCEPT + : mpNode(static_cast((ListNode*)const_cast(pNode))) // All this casting is in the name of making runtime debugging much easier on the user. 
+ { + // Empty + } + + + template + inline ListIterator::ListIterator(const iterator& x) EA_NOEXCEPT + : mpNode(const_cast(x.mpNode)) + { + // Empty + } + + + template + inline typename ListIterator::this_type + ListIterator::next() const EA_NOEXCEPT + { + return ListIterator(mpNode->mpNext); + } + + + template + inline typename ListIterator::this_type + ListIterator::prev() const EA_NOEXCEPT + { + return ListIterator(mpNode->mpPrev); + } + + + template + inline typename ListIterator::reference + ListIterator::operator*() const EA_NOEXCEPT + { + return mpNode->mValue; + } + + + template + inline typename ListIterator::pointer + ListIterator::operator->() const EA_NOEXCEPT + { + return &mpNode->mValue; + } + + + template + inline typename ListIterator::this_type& + ListIterator::operator++() EA_NOEXCEPT + { + mpNode = static_cast(mpNode->mpNext); + return *this; + } + + + template + inline typename ListIterator::this_type + ListIterator::operator++(int) EA_NOEXCEPT + { + this_type temp(*this); + mpNode = static_cast(mpNode->mpNext); + return temp; + } + + + template + inline typename ListIterator::this_type& + ListIterator::operator--() EA_NOEXCEPT + { + mpNode = static_cast(mpNode->mpPrev); + return *this; + } + + + template + inline typename ListIterator::this_type + ListIterator::operator--(int) EA_NOEXCEPT + { + this_type temp(*this); + mpNode = static_cast(mpNode->mpPrev); + return temp; + } + + + // The C++ defect report #179 requires that we support comparisons between const and non-const iterators. + // Thus we provide additional template paremeters here to support this. The defect report does not + // require us to support comparisons between reverse_iterators and const_reverse_iterators. 
+ template + inline bool operator==(const ListIterator& a, + const ListIterator& b) EA_NOEXCEPT + { + return a.mpNode == b.mpNode; + } + + + template + inline bool operator!=(const ListIterator& a, + const ListIterator& b) EA_NOEXCEPT + { + return a.mpNode != b.mpNode; + } + + + // We provide a version of operator!= for the case where the iterators are of the + // same type. This helps prevent ambiguity errors in the presence of rel_ops. + template + inline bool operator!=(const ListIterator& a, + const ListIterator& b) EA_NOEXCEPT + { + return a.mpNode != b.mpNode; + } + + + + /////////////////////////////////////////////////////////////////////// + // ListBase + /////////////////////////////////////////////////////////////////////// + + template + inline ListBase::ListBase() + : mNodeAllocator(base_node_type(), allocator_type(EASTL_LIST_DEFAULT_NAME)) + #if EASTL_LIST_SIZE_CACHE + , mSize(0) + #endif + { + DoInit(); + } + + template + inline ListBase::ListBase(const allocator_type& allocator) + : mNodeAllocator(base_node_type(), allocator) + #if EASTL_LIST_SIZE_CACHE + , mSize(0) + #endif + { + DoInit(); + } + + + template + inline ListBase::~ListBase() + { + DoClear(); + } + + + template + const typename ListBase::allocator_type& + ListBase::get_allocator() const EA_NOEXCEPT + { + return internalAllocator(); + } + + + template + typename ListBase::allocator_type& + ListBase::get_allocator() EA_NOEXCEPT + { + return internalAllocator(); + } + + + template + inline void ListBase::set_allocator(const allocator_type& allocator) + { + EASTL_ASSERT((internalAllocator() == allocator) || (static_cast(internalNode().mpNext) == &internalNode())); // We can only assign a different allocator if we are empty of elements. 
+ internalAllocator() = allocator; + } + + + template + inline typename ListBase::node_type* + ListBase::DoAllocateNode() + { + node_type* pNode = (node_type*)allocate_memory(internalAllocator(), sizeof(node_type), EASTL_ALIGN_OF(T), 0); + EASTL_ASSERT(pNode != nullptr); + return pNode; + } + + + template + inline void ListBase::DoFreeNode(node_type* p) + { + EASTLFree(internalAllocator(), p, sizeof(node_type)); + } + + + template + inline void ListBase::DoInit() EA_NOEXCEPT + { + internalNode().mpNext = (ListNode*)&internalNode(); + internalNode().mpPrev = (ListNode*)&internalNode(); + } + + + template + inline void ListBase::DoClear() + { + node_type* p = static_cast(internalNode().mpNext); + + while(p != &internalNode()) + { + node_type* const pTemp = p; + p = static_cast(p->mpNext); + pTemp->~node_type(); + EASTLFree(internalAllocator(), pTemp, sizeof(node_type)); + } + } + + + + /////////////////////////////////////////////////////////////////////// + // list + /////////////////////////////////////////////////////////////////////// + + template + inline list::list() + : base_type() + { + // Empty + } + + + template + inline list::list(const allocator_type& allocator) + : base_type(allocator) + { + // Empty + } + + + template + inline list::list(size_type n, const allocator_type& allocator) + : base_type(allocator) + { + DoInsertValues((ListNodeBase*)&internalNode(), n, value_type()); + } + + + template + inline list::list(size_type n, const value_type& value, const allocator_type& allocator) + : base_type(allocator) + { + DoInsertValues((ListNodeBase*)&internalNode(), n, value); + } + + + template + inline list::list(const this_type& x) + : base_type(x.internalAllocator()) + { + DoInsert((ListNodeBase*)&internalNode(), const_iterator((ListNodeBase*)x.internalNode().mpNext), const_iterator((ListNodeBase*)&x.internalNode()), false_type()); + } + + + template + inline list::list(const this_type& x, const allocator_type& allocator) + : base_type(allocator) + { + 
DoInsert((ListNodeBase*)&internalNode(), const_iterator((ListNodeBase*)x.internalNode().mpNext), const_iterator((ListNodeBase*)&x.internalNode()), false_type()); + } + + + template + inline list::list(this_type&& x) + : base_type(eastl::move(x.internalAllocator())) + { + swap(x); + } + + + template + inline list::list(this_type&& x, const allocator_type& allocator) + : base_type(allocator) + { + swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy. + } + + + template + inline list::list(std::initializer_list ilist, const allocator_type& allocator) + : base_type(allocator) + { + DoInsert((ListNodeBase*)&internalNode(), ilist.begin(), ilist.end(), false_type()); + } + + + template + template + list::list(InputIterator first, InputIterator last) + : base_type(EASTL_LIST_DEFAULT_ALLOCATOR) + { + //insert(const_iterator((ListNodeBase*)&internalNode()), first, last); + DoInsert((ListNodeBase*)&internalNode(), first, last, is_integral()); + } + + + template + typename list::iterator + inline list::begin() EA_NOEXCEPT + { + return iterator((ListNodeBase*)internalNode().mpNext); + } + + + template + inline typename list::const_iterator + list::begin() const EA_NOEXCEPT + { + return const_iterator((ListNodeBase*)internalNode().mpNext); + } + + + template + inline typename list::const_iterator + list::cbegin() const EA_NOEXCEPT + { + return const_iterator((ListNodeBase*)internalNode().mpNext); + } + + + template + inline typename list::iterator + list::end() EA_NOEXCEPT + { + return iterator((ListNodeBase*)&internalNode()); + } + + + template + inline typename list::const_iterator + list::end() const EA_NOEXCEPT + { + return const_iterator((ListNodeBase*)&internalNode()); + } + + + template + inline typename list::const_iterator + list::cend() const EA_NOEXCEPT + { + return const_iterator((ListNodeBase*)&internalNode()); + } + + + template + inline typename list::reverse_iterator + list::rbegin() EA_NOEXCEPT + { + return 
reverse_iterator((ListNodeBase*)&internalNode()); + } + + + template + inline typename list::const_reverse_iterator + list::rbegin() const EA_NOEXCEPT + { + return const_reverse_iterator((ListNodeBase*)&internalNode()); + } + + + template + inline typename list::const_reverse_iterator + list::crbegin() const EA_NOEXCEPT + { + return const_reverse_iterator((ListNodeBase*)&internalNode()); + } + + + template + inline typename list::reverse_iterator + list::rend() EA_NOEXCEPT + { + return reverse_iterator((ListNodeBase*)internalNode().mpNext); + } + + + template + inline typename list::const_reverse_iterator + list::rend() const EA_NOEXCEPT + { + return const_reverse_iterator((ListNodeBase*)internalNode().mpNext); + } + + + template + inline typename list::const_reverse_iterator + list::crend() const EA_NOEXCEPT + { + return const_reverse_iterator((ListNodeBase*)internalNode().mpNext); + } + + + template + inline typename list::reference + list::front() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(internalNode().mpNext) == &internalNode())) + EASTL_FAIL_MSG("list::front -- empty container"); + #else + // We allow the user to reference an empty container. + #endif + + return static_cast(internalNode().mpNext)->mValue; + } + + + template + inline typename list::const_reference + list::front() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(internalNode().mpNext) == &internalNode())) + EASTL_FAIL_MSG("list::front -- empty container"); + #else + // We allow the user to reference an empty container. 
+ #endif + + return static_cast(internalNode().mpNext)->mValue; + } + + + template + inline typename list::reference + list::back() + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(internalNode().mpNext) == &internalNode())) + EASTL_FAIL_MSG("list::back -- empty container"); + #else + // We allow the user to reference an empty container. + #endif + + return static_cast(internalNode().mpPrev)->mValue; + } + + + template + inline typename list::const_reference + list::back() const + { + #if EASTL_ASSERT_ENABLED && EASTL_EMPTY_REFERENCE_ASSERT_ENABLED + if (EASTL_UNLIKELY(static_cast(internalNode().mpNext) == &internalNode())) + EASTL_FAIL_MSG("list::back -- empty container"); + #else + // We allow the user to reference an empty container. + #endif + + return static_cast(internalNode().mpPrev)->mValue; + } + + + template + inline bool list::empty() const EA_NOEXCEPT + { + #if EASTL_LIST_SIZE_CACHE + return (mSize == 0); + #else + return static_cast(internalNode().mpNext) == &internalNode(); + #endif + } + + + template + inline typename list::size_type + list::size() const EA_NOEXCEPT + { + #if EASTL_LIST_SIZE_CACHE + return mSize; + #else + #if EASTL_DEBUG + const ListNodeBase* p = (ListNodeBase*)internalNode().mpNext; + size_type n = 0; + while(p != (ListNodeBase*)&internalNode()) + { + ++n; + p = (ListNodeBase*)p->mpNext; + } + return n; + #else + // The following optimizes to slightly better code than the code above. + return (size_type)eastl::distance(const_iterator((ListNodeBase*)internalNode().mpNext), const_iterator((ListNodeBase*)&internalNode())); + #endif + #endif + } + + + template + typename list::this_type& + list::operator=(const this_type& x) + { + if(this != &x) // If not assigning to self... 
+ { + // If (EASTL_ALLOCATOR_COPY_ENABLED == 1) and the current contents are allocated by an + // allocator that's unequal to x's allocator, we need to reallocate our elements with + // our current allocator and reallocate it with x's allocator. If the allocators are + // equal then we can use a more optimal algorithm that doesn't reallocate our elements + // but instead can copy them in place. + + #if EASTL_ALLOCATOR_COPY_ENABLED + bool bSlowerPathwayRequired = (internalAllocator() != x.internalAllocator()); + #else + bool bSlowerPathwayRequired = false; + #endif + + if(bSlowerPathwayRequired) + { + clear(); + + #if EASTL_ALLOCATOR_COPY_ENABLED + internalAllocator() = x.internalAllocator(); + #endif + } + + DoAssign(x.begin(), x.end(), eastl::false_type()); + } + + return *this; + } + + + template + typename list::this_type& + list::operator=(this_type&& x) + { + if(this != &x) + { + clear(); // To consider: Are we really required to clear here? x is going away soon and will clear itself in its dtor. + swap(x); // member swap handles the case that x has a different allocator than our allocator by doing a copy. + } + return *this; + } + + + template + typename list::this_type& + list::operator=(std::initializer_list ilist) + { + DoAssign(ilist.begin(), ilist.end(), false_type()); + return *this; + } + + + template + inline void list::assign(size_type n, const value_type& value) + { + DoAssignValues(n, value); + } + + + // It turns out that the C++ std::list specifies a two argument + // version of assign that takes (int size, int value). These are not + // iterators, so we need to do a template compiler trick to do the right thing. 
+ template + template + inline void list::assign(InputIterator first, InputIterator last) + { + DoAssign(first, last, is_integral()); + } + + + template + inline void list::assign(std::initializer_list ilist) + { + DoAssign(ilist.begin(), ilist.end(), false_type()); + } + + + template + inline void list::clear() EA_NOEXCEPT + { + DoClear(); + DoInit(); + #if EASTL_LIST_SIZE_CACHE + mSize = 0; + #endif + } + + + template + inline void list::reset_lose_memory() EA_NOEXCEPT + { + // The reset_lose_memory function is a special extension function which unilaterally + // resets the container to an empty state without freeing the memory of + // the contained objects. This is useful for very quickly tearing down a + // container built into scratch memory. + DoInit(); + #if EASTL_LIST_SIZE_CACHE + mSize = 0; + #endif + } + + + template + void list::resize(size_type n, const value_type& value) + { + iterator current((ListNodeBase*)internalNode().mpNext); + size_type i = 0; + + while((current.mpNode != &internalNode()) && (i < n)) + { + ++current; + ++i; + } + if(i == n) + erase(current, (ListNodeBase*)&internalNode()); + else + insert((ListNodeBase*)&internalNode(), n - i, value); + } + + + template + inline void list::resize(size_type n) + { + resize(n, value_type()); + } + + + template + template + void list::emplace_front(Args&&... args) + { + DoInsertValue((ListNodeBase*)internalNode().mpNext, eastl::forward(args)...); + } + + template + template + void list::emplace_back(Args&&... 
args) + { + DoInsertValue((ListNodeBase*)&internalNode(), eastl::forward(args)...); + } + + + template + inline void list::push_front(const value_type& value) + { + DoInsertValue((ListNodeBase*)internalNode().mpNext, value); + } + + + template + inline void list::push_front(value_type&& value) + { + emplace(begin(), eastl::move(value)); + } + + + template + inline typename list::reference + list::push_front() + { + node_type* const pNode = DoCreateNode(); + ((ListNodeBase*)pNode)->insert((ListNodeBase*)internalNode().mpNext); + #if EASTL_LIST_SIZE_CACHE + ++mSize; + #endif + return static_cast(internalNode().mpNext)->mValue; // Same as return front(); + } + + + template + inline void* list::push_front_uninitialized() + { + node_type* const pNode = DoAllocateNode(); + ((ListNodeBase*)pNode)->insert((ListNodeBase*)internalNode().mpNext); + #if EASTL_LIST_SIZE_CACHE + ++mSize; + #endif + return &pNode->mValue; + } + + + template + inline void list::pop_front() + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(static_cast(internalNode().mpNext) == &internalNode())) + EASTL_FAIL_MSG("list::pop_front -- empty container"); + #endif + + DoErase((ListNodeBase*)internalNode().mpNext); + } + + + template + inline void list::push_back(const value_type& value) + { + DoInsertValue((ListNodeBase*)&internalNode(), value); + } + + + template + inline void list::push_back(value_type&& value) + { + emplace(end(), eastl::move(value)); + } + + + template + inline typename list::reference + list::push_back() + { + node_type* const pNode = DoCreateNode(); + ((ListNodeBase*)pNode)->insert((ListNodeBase*)&internalNode()); + #if EASTL_LIST_SIZE_CACHE + ++mSize; + #endif + return static_cast(internalNode().mpPrev)->mValue; // Same as return back(); + } + + + template + inline void* list::push_back_uninitialized() + { + node_type* const pNode = DoAllocateNode(); + ((ListNodeBase*)pNode)->insert((ListNodeBase*)&internalNode()); + #if EASTL_LIST_SIZE_CACHE + ++mSize; + #endif + return 
&pNode->mValue; + } + + + template + inline void list::pop_back() + { + #if EASTL_ASSERT_ENABLED + if(EASTL_UNLIKELY(static_cast(internalNode().mpNext) == &internalNode())) + EASTL_FAIL_MSG("list::pop_back -- empty container"); + #endif + + DoErase((ListNodeBase*)internalNode().mpPrev); + } + + + template + template + inline typename list::iterator + list::emplace(const_iterator position, Args&&... args) + { + DoInsertValue(position.mpNode, eastl::forward(args)...); + return iterator(position.mpNode->mpPrev); + } + + + template + inline typename list::iterator + list::insert(const_iterator position) + { + node_type* const pNode = DoCreateNode(value_type()); + ((ListNodeBase*)pNode)->insert((ListNodeBase*)position.mpNode); + #if EASTL_LIST_SIZE_CACHE + ++mSize; + #endif + return (ListNodeBase*)pNode; + } + + + template + inline typename list::iterator + list::insert(const_iterator position, const value_type& value) + { + node_type* const pNode = DoCreateNode(value); + ((ListNodeBase*)pNode)->insert((ListNodeBase*)position.mpNode); + #if EASTL_LIST_SIZE_CACHE + ++mSize; + #endif + return (ListNodeBase*)pNode; + } + + + template + inline typename list::iterator + list::insert(const_iterator position, value_type&& value) + { + return emplace(position, eastl::move(value)); + } + + template + inline typename list::iterator + list::insert(const_iterator position, size_type n, const value_type& value) + { + iterator itPrev(position.mpNode); + --itPrev; + DoInsertValues((ListNodeBase*)position.mpNode, n, value); + return ++itPrev; // Inserts in front of position, returns iterator to new elements. + } + + + template + template + inline typename list::iterator + list::insert(const_iterator position, InputIterator first, InputIterator last) + { + iterator itPrev(position.mpNode); + --itPrev; + DoInsert((ListNodeBase*)position.mpNode, first, last, is_integral()); + return ++itPrev; // Inserts in front of position, returns iterator to new elements. 
+ } + + + template + inline typename list::iterator + list::insert(const_iterator position, std::initializer_list ilist) + { + iterator itPrev(position.mpNode); + --itPrev; + DoInsert((ListNodeBase*)position.mpNode, ilist.begin(), ilist.end(), false_type()); + return ++itPrev; // Inserts in front of position, returns iterator to new elements. + } + + + template + inline typename list::iterator + list::erase(const_iterator position) + { + ++position; + DoErase((ListNodeBase*)position.mpNode->mpPrev); + return iterator(position.mpNode); + } + + + template + typename list::iterator + list::erase(const_iterator first, const_iterator last) + { + while(first != last) + first = erase(first); + return iterator(last.mpNode); + } + + + template + inline typename list::reverse_iterator + list::erase(const_reverse_iterator position) + { + return reverse_iterator(erase((++position).base())); + } + + + template + typename list::reverse_iterator + list::erase(const_reverse_iterator first, const_reverse_iterator last) + { + // Version which erases in order from first to last. + // difference_type i(first.base() - last.base()); + // while(i--) + // first = erase(first); + // return first; + + // Version which erases in order from last to first, but is slightly more efficient: + const_iterator itLastBase((++last).base()); + const_iterator itFirstBase((++first).base()); + + return reverse_iterator(erase(itLastBase, itFirstBase)); + } + + + template + void list::remove(const value_type& value) + { + iterator current((ListNodeBase*)internalNode().mpNext); + + while(current.mpNode != &internalNode()) + { + if(EASTL_LIKELY(!(*current == value))) + ++current; // We have duplicate '++current' statements here and below, but the logic here forces this. 
+ else + { + ++current; + DoErase((ListNodeBase*)current.mpNode->mpPrev); + } + } + } + + + template + template + inline void list::remove_if(Predicate predicate) + { + for(iterator first((ListNodeBase*)internalNode().mpNext), last((ListNodeBase*)&internalNode()); first != last; ) + { + iterator temp(first); + ++temp; + if(predicate(first.mpNode->mValue)) + DoErase((ListNodeBase*)first.mpNode); + first = temp; + } + } + + + template + inline void list::reverse() EA_NOEXCEPT + { + ((ListNodeBase&)internalNode()).reverse(); + } + + + template + inline void list::splice(const_iterator position, this_type& x) + { + // Splicing operations cannot succeed if the two containers use unequal allocators. + // This issue is not addressed in the C++ 1998 standard but is discussed in the + // LWG defect reports, such as #431. There is no simple solution to this problem. + // One option is to throw an exception. Another option which probably captures the + // user intent most of the time is to copy the range from the source to the dest and + // remove it from the source. 
+ + if(internalAllocator() == x.internalAllocator()) + { + #if EASTL_LIST_SIZE_CACHE + if(x.mSize) + { + ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)x.internalNode().mpNext, (ListNodeBase*)&x.internalNode()); + mSize += x.mSize; + x.mSize = 0; + } + #else + if(!x.empty()) + ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)x.internalNode().mpNext, (ListNodeBase*)&x.internalNode()); + #endif + } + else + { + insert(position, x.begin(), x.end()); + x.clear(); + } + } + + template + inline void list::splice(const_iterator position, this_type&& x) + { + return splice(position, x); // This will call splice(const_iterator, const this_type&); + } + + + template + inline void list::splice(const_iterator position, list& x, const_iterator i) + { + if(internalAllocator() == x.internalAllocator()) + { + iterator i2(i.mpNode); + ++i2; + if((position != i) && (position != i2)) + { + ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)i.mpNode, (ListNodeBase*)i2.mpNode); + + #if EASTL_LIST_SIZE_CACHE + ++mSize; + --x.mSize; + #endif + } + } + else + { + insert(position, *i); + x.erase(i); + } + } + + + template + inline void list::splice(const_iterator position, list&& x, const_iterator i) + { + return splice(position, x, i); // This will call splice(const_iterator, const this_type&, const_iterator); + } + + + template + inline void list::splice(const_iterator position, this_type& x, const_iterator first, const_iterator last) + { + if(internalAllocator() == x.internalAllocator()) + { + #if EASTL_LIST_SIZE_CACHE + const size_type n = (size_type)eastl::distance(first, last); + + if(n) + { + ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)first.mpNode, (ListNodeBase*)last.mpNode); + mSize += n; + x.mSize -= n; + } + #else + if(first != last) + ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)first.mpNode, (ListNodeBase*)last.mpNode); + #endif + } + else + { + insert(position, first, last); + x.erase(first, last); + } + } + + + template + inline 
void list::splice(const_iterator position, list&& x, const_iterator first, const_iterator last) + { + return splice(position, x, first, last); // This will call splice(const_iterator, const this_type&, const_iterator, const_iterator); + } + + + template + inline void list::swap(this_type& x) + { + if(internalAllocator() == x.internalAllocator()) // If allocators are equivalent... + DoSwap(x); + else // else swap the contents. + { + const this_type temp(*this); // Can't call eastl::swap because that would + *this = x; // itself call this member swap function. + x = temp; + } + } + + + template + void list::merge(this_type& x) + { + if(this != &x) + { + iterator first(begin()); + iterator firstX(x.begin()); + const iterator last(end()); + const iterator lastX(x.end()); + + while((first != last) && (firstX != lastX)) + { + if(*firstX < *first) + { + iterator next(firstX); + + splice(first, x, firstX, ++next); + firstX = next; + } + else + ++first; + } + + if(firstX != lastX) + splice(last, x, firstX, lastX); + } + } + + + template + void list::merge(this_type&& x) + { + return merge(x); // This will call merge(this_type&) + } + + + template + template + void list::merge(this_type& x, Compare compare) + { + if(this != &x) + { + iterator first(begin()); + iterator firstX(x.begin()); + const iterator last(end()); + const iterator lastX(x.end()); + + while((first != last) && (firstX != lastX)) + { + if(compare(*firstX, *first)) + { + iterator next(firstX); + + splice(first, x, firstX, ++next); + firstX = next; + } + else + ++first; + } + + if(firstX != lastX) + splice(last, x, firstX, lastX); + } + } + + + template + template + void list::merge(this_type&& x, Compare compare) + { + return merge(x, compare); // This will call merge(this_type&, Compare) + } + + + template + void list::unique() + { + iterator first(begin()); + const iterator last(end()); + + if(first != last) + { + iterator next(first); + + while(++next != last) + { + if(*first == *next) + 
DoErase((ListNodeBase*)next.mpNode); + else + first = next; + next = first; + } + } + } + + + template + template + void list::unique(BinaryPredicate predicate) + { + iterator first(begin()); + const iterator last(end()); + + if(first != last) + { + iterator next(first); + + while(++next != last) + { + if(predicate(*first, *next)) + DoErase((ListNodeBase*)next.mpNode); + else + first = next; + next = first; + } + } + } + + + template + void list::sort() + { + eastl::less compare; + DoSort(begin(), end(), size(), compare); + } + + + template + template + void list::sort(Compare compare) + { + DoSort(begin(), end(), size(), compare); + } + + + template + template + typename list::iterator + list::DoSort(iterator i1, iterator end2, size_type n, Compare& compare) + { + // A previous version of this function did this by creating temporary lists, + // but that was incompatible with fixed_list because the sizes could be too big. + // We sort subsegments by recursive descent. Then merge as we ascend. + // Return an iterator to the beginning of the sorted subsegment. + // Start with a special case for small node counts. + switch (n) + { + case 0: + case 1: + return i1; + + case 2: + // Potentialy swap these two nodes and return the resulting first of them. + if(compare(*--end2, *i1)) + { + end2.mpNode->remove(); + end2.mpNode->insert(i1.mpNode); + return end2; + } + return i1; + + case 3: + { + // We do a list insertion sort. Measurements showed this improved performance 3-12%. + iterator lowest = i1; + + for(iterator current = i1.next(); current != end2; ++current) + { + if(compare(*current, *lowest)) + lowest = current; + } + + if(lowest == i1) + ++i1; + else + { + lowest.mpNode->remove(); + lowest.mpNode->insert(i1.mpNode); + } + + if(compare(*--end2, *i1)) // At this point, i1 refers to the second element in this three element segment. 
+ { + end2.mpNode->remove(); + end2.mpNode->insert(i1.mpNode); + } + + return lowest; + } + } + + // Divide the range into two parts are recursively sort each part. Upon return we will have + // two halves that are each sorted but we'll need to merge the two together before returning. + iterator result; + size_type nMid = (n / 2); + iterator end1 = eastl::next(i1, (difference_type)nMid); + i1 = DoSort(i1, end1, nMid, compare); // Return the new beginning of the first sorted sub-range. + iterator i2 = DoSort(end1, end2, n - nMid, compare); // Return the new beginning of the second sorted sub-range. + + // If the start of the second list is before the start of the first list, insert the first list + // into the second at an appropriate starting place. + if(compare(*i2, *i1)) + { + // Find the position to insert the first list into the second list. + iterator ix = i2.next(); + while((ix != end2) && compare(*ix, *i1)) + ++ix; + + // Cut out the initial segment of the second list and move it to be in front of the first list. + ListNodeBase* i2Cut = i2.mpNode; + ListNodeBase* i2CutLast = ix.mpNode->mpPrev; + result = i2; + end1 = i2 = ix; + ListNodeBase::remove_range(i2Cut, i2CutLast); + i1.mpNode->insert_range(i2Cut, i2CutLast); + } + else + { + result = i1; + end1 = i2; + } + + // Merge the two segments. We do this by merging the second sub-segment into the first, by walking forward in each of the two sub-segments. + for(++i1; (i1 != end1) && (i2 != end2); ++i1) // while still working on either segment... + { + if(compare(*i2, *i1)) // If i2 is less than i1 and it needs to be merged in front of i1... + { + // Find the position to insert the i2 list into the i1 list. + iterator ix = i2.next(); + while((ix != end2) && compare(*ix, *i1)) + ++ix; + + // Cut this section of the i2 sub-segment out and merge into the appropriate place in the i1 list. 
+ ListNodeBase* i2Cut = i2.mpNode; + ListNodeBase* i2CutLast = ix.mpNode->mpPrev; + if(end1 == i2) + end1 = ix; + i2 = ix; + ListNodeBase::remove_range(i2Cut, i2CutLast); + i1.mpNode->insert_range(i2Cut, i2CutLast); + } + } + + return result; + } + + + template + template + inline typename list::node_type* + list::DoCreateNode(Args&&... args) + { + node_type* const pNode = DoAllocateNode(); // pNode is of type node_type, but it's uninitialized memory. + + #if EASTL_EXCEPTIONS_ENABLED + try + { + ::new((void*)&pNode->mValue) value_type(eastl::forward(args)...); + } + catch(...) + { + DoFreeNode(pNode); + throw; + } + #else + ::new((void*)&pNode->mValue) value_type(eastl::forward(args)...); + #endif + + return pNode; + } + + + template + inline typename list::node_type* + list::DoCreateNode() + { + node_type* const pNode = DoAllocateNode(); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + ::new((void*)&pNode->mValue) value_type(); + } + catch(...) + { + DoFreeNode(pNode); + throw; + } + #else + ::new((void*)&pNode->mValue) value_type; + #endif + + return pNode; + } + + + template + template + inline void list::DoAssign(Integer n, Integer value, true_type) + { + DoAssignValues(static_cast(n), static_cast(value)); + } + + + template + template + void list::DoAssign(InputIterator first, InputIterator last, false_type) + { + node_type* pNode = static_cast(internalNode().mpNext); + + for(; (pNode != &internalNode()) && (first != last); ++first) + { + pNode->mValue = *first; + pNode = static_cast(pNode->mpNext); + } + + if(first == last) + erase(const_iterator((ListNodeBase*)pNode), (ListNodeBase*)&internalNode()); + else + DoInsert((ListNodeBase*)&internalNode(), first, last, false_type()); + } + + + template + void list::DoAssignValues(size_type n, const value_type& value) + { + node_type* pNode = static_cast(internalNode().mpNext); + + for(; (pNode != &internalNode()) && (n > 0); --n) + { + pNode->mValue = value; + pNode = static_cast(pNode->mpNext); + } + + if(n) + 
DoInsertValues((ListNodeBase*)&internalNode(), n, value); + else + erase(const_iterator((ListNodeBase*)pNode), (ListNodeBase*)&internalNode()); + } + + + template + template + inline void list::DoInsert(ListNodeBase* pNode, Integer n, Integer value, true_type) + { + DoInsertValues(pNode, static_cast(n), static_cast(value)); + } + + + template + template + inline void list::DoInsert(ListNodeBase* pNode, InputIterator first, InputIterator last, false_type) + { + for(; first != last; ++first) + DoInsertValue(pNode, *first); + } + + + template + inline void list::DoInsertValues(ListNodeBase* pNode, size_type n, const value_type& value) + { + for(; n > 0; --n) + DoInsertValue(pNode, value); + } + + + template + template + inline void list::DoInsertValue(ListNodeBase* pNode, Args&&... args) + { + node_type* const pNodeNew = DoCreateNode(eastl::forward(args)...); + ((ListNodeBase*)pNodeNew)->insert(pNode); + #if EASTL_LIST_SIZE_CACHE + ++mSize; + #endif + } + + + template + inline void list::DoErase(ListNodeBase* pNode) + { + pNode->remove(); + ((node_type*)pNode)->~node_type(); + DoFreeNode(((node_type*)pNode)); + #if EASTL_LIST_SIZE_CACHE + --mSize; + #endif + + /* Test version that uses union intermediates + union + { + ListNodeBase* mpBase; + node_type* mpNode; + } node = { pNode }; + + node.mpNode->~node_type(); + node.mpBase->remove(); + DoFreeNode(node.mpNode); + #if EASTL_LIST_SIZE_CACHE + --mSize; + #endif + */ + } + + + template + inline void list::DoSwap(this_type& x) + { + ListNodeBase::swap((ListNodeBase&)internalNode(), (ListNodeBase&)x.internalNode()); // We need to implement a special swap because we can't do a shallow swap. + eastl::swap(internalAllocator(), x.internalAllocator()); // We do this even if EASTL_ALLOCATOR_COPY_ENABLED is 0. 
+ #if EASTL_LIST_SIZE_CACHE + eastl::swap(mSize, x.mSize); + #endif + } + + + template + inline bool list::validate() const + { + #if EASTL_LIST_SIZE_CACHE + size_type n = 0; + + for(const_iterator i(begin()), iEnd(end()); i != iEnd; ++i) + ++n; + + if(n != mSize) + return false; + #endif + + // To do: More validation. + return true; + } + + + template + inline int list::validate_iterator(const_iterator i) const + { + // To do: Come up with a more efficient mechanism of doing this. + + for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp) + { + if(temp == i) + return (isf_valid | isf_current | isf_can_dereference); + } + + if(i == end()) + return (isf_valid | isf_current); + + return isf_none; + } + + + + /////////////////////////////////////////////////////////////////////// + // global operators + /////////////////////////////////////////////////////////////////////// + + template + bool operator==(const list& a, const list& b) + { + typename list::const_iterator ia = a.begin(); + typename list::const_iterator ib = b.begin(); + typename list::const_iterator enda = a.end(); + + #if EASTL_LIST_SIZE_CACHE + if(a.size() == b.size()) + { + while((ia != enda) && (*ia == *ib)) + { + ++ia; + ++ib; + } + return (ia == enda); + } + return false; + #else + typename list::const_iterator endb = b.end(); + + while((ia != enda) && (ib != endb) && (*ia == *ib)) + { + ++ia; + ++ib; + } + return (ia == enda) && (ib == endb); + #endif + } + + template + bool operator<(const list& a, const list& b) + { + return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end()); + } + + template + bool operator!=(const list& a, const list& b) + { + return !(a == b); + } + + template + bool operator>(const list& a, const list& b) + { + return b < a; + } + + template + bool operator<=(const list& a, const list& b) + { + return !(b < a); + } + + template + bool operator>=(const list& a, const list& b) + { + return !(a < b); + } + + template + void swap(list& 
a, list& b) + { + a.swap(b); + } + + + /////////////////////////////////////////////////////////////////////// + // erase / erase_if + // + // https://en.cppreference.com/w/cpp/container/list/erase2 + /////////////////////////////////////////////////////////////////////// + template + void erase(list& c, const U& value) + { + // Erases all elements that compare equal to value from the container. + c.remove_if([&](auto& elem) { return elem == value; }); + } + + template + void erase_if(list& c, Predicate predicate) + { + // Erases all elements that satisfy the predicate pred from the container. + c.remove_if(predicate); + } + + +} // namespace eastl + + +EA_RESTORE_SN_WARNING() + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +#endif // Header include guard + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/include/EASTL/map.h b/include/EASTL/map.h new file mode 100644 index 0000000..0e6c1d0 --- /dev/null +++ b/include/EASTL/map.h @@ -0,0 +1,684 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. +////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_MAP_H +#define EASTL_MAP_H + + +#include +#include +#include +#include + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + + +namespace eastl +{ + + /// EASTL_MAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_MAP_DEFAULT_NAME + #define EASTL_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " map" // Unless the user overrides something, this is "EASTL map". + #endif + + + /// EASTL_MULTIMAP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. 
+ /// + #ifndef EASTL_MULTIMAP_DEFAULT_NAME + #define EASTL_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " multimap" // Unless the user overrides something, this is "EASTL multimap". + #endif + + + /// EASTL_MAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_MAP_DEFAULT_ALLOCATOR + #define EASTL_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_MAP_DEFAULT_NAME) + #endif + + /// EASTL_MULTIMAP_DEFAULT_ALLOCATOR + /// + #ifndef EASTL_MULTIMAP_DEFAULT_ALLOCATOR + #define EASTL_MULTIMAP_DEFAULT_ALLOCATOR allocator_type(EASTL_MULTIMAP_DEFAULT_NAME) + #endif + + + + /// map + /// + /// Implements a canonical map. + /// + /// The large majority of the implementation of this class is found in the rbtree + /// base class. We control the behaviour of rbtree via template parameters. + /// + /// Pool allocation + /// If you want to make a custom memory pool for a map container, your pool + /// needs to contain items of type map::node_type. So if you have a memory + /// pool that has a constructor that takes the size of pool items and the + /// count of pool items, you would do this (assuming that MemoryPool implements + /// the Allocator interface): + /// typedef map, MemoryPool> WidgetMap; // Delare your WidgetMap type. + /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes. + /// WidgetMap myMap(&myPool); // Create a map that uses the pool. 
+ /// + template , typename Allocator = EASTLAllocatorType> + class map + : public rbtree, Compare, Allocator, eastl::use_first >, true, true> + { + public: + typedef rbtree, Compare, Allocator, + eastl::use_first >, true, true> base_type; + typedef map this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::key_type key_type; + typedef T mapped_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::insert_return_type insert_return_type; + typedef typename base_type::extract_key extract_key; + // Other types are inherited from the base class. + + using base_type::begin; + using base_type::end; + using base_type::find; + using base_type::lower_bound; + using base_type::upper_bound; + using base_type::insert; + using base_type::erase; + + protected: + using base_type::compare; + using base_type::get_compare; + + public: + class value_compare + { + protected: + friend class map; + Compare compare; + value_compare(Compare c) : compare(c) {} + + public: + typedef bool result_type; + typedef value_type first_argument_type; + typedef value_type second_argument_type; + + bool operator()(const value_type& x, const value_type& y) const + { return compare(x.first, y.first); } + }; + + public: + map(const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR); + map(const Compare& compare, const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR); + map(const this_type& x); + map(this_type&& x); + map(this_type&& x, const allocator_type& allocator); + map(std::initializer_list ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR); + + template + map(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on 
the default arg. To consider: Make a second version of this function without a default arg. + + this_type& operator=(const this_type& x) { return (this_type&)base_type::operator=(x); } + this_type& operator=(std::initializer_list ilist) { return (this_type&)base_type::operator=(ilist); } + this_type& operator=(this_type&& x) { return (this_type&)base_type::operator=(eastl::move(x)); } + + public: + /// This is an extension to the C++ standard. We insert a default-constructed + /// element with the given key. The reason for this is that we can avoid the + /// potentially expensive operation of creating and/or copying a mapped_type + /// object on the stack. Note that C++11 move insertions and variadic emplace + /// support make this extension mostly no longer necessary. + insert_return_type insert(const Key& key); + + value_compare value_comp() const; + + size_type erase(const Key& key); + size_type count(const Key& key) const; + + eastl::pair equal_range(const Key& key); + eastl::pair equal_range(const Key& key) const; + + T& operator[](const Key& key); // Of map, multimap, set, and multimap, only map has operator[]. + T& operator[](Key&& key); + + T& at(const Key& key); + const T& at(const Key& key) const; + + }; // map + + + + + + + /// multimap + /// + /// Implements a canonical multimap. + /// + /// The large majority of the implementation of this class is found in the rbtree + /// base class. We control the behaviour of rbtree via template parameters. + /// + /// Pool allocation + /// If you want to make a custom memory pool for a multimap container, your pool + /// needs to contain items of type multimap::node_type. So if you have a memory + /// pool that has a constructor that takes the size of pool items and the + /// count of pool items, you would do this (assuming that MemoryPool implements + /// the Allocator interface): + /// typedef multimap, MemoryPool> WidgetMap; // Delare your WidgetMap type. 
+ /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes. + /// WidgetMap myMap(&myPool); // Create a map that uses the pool. + /// + template , typename Allocator = EASTLAllocatorType> + class multimap + : public rbtree, Compare, Allocator, eastl::use_first >, true, false> + { + public: + typedef rbtree, Compare, Allocator, + eastl::use_first >, true, false> base_type; + typedef multimap this_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::key_type key_type; + typedef T mapped_type; + typedef typename base_type::value_type value_type; + typedef typename base_type::node_type node_type; + typedef typename base_type::iterator iterator; + typedef typename base_type::const_iterator const_iterator; + typedef typename base_type::allocator_type allocator_type; + typedef typename base_type::insert_return_type insert_return_type; + typedef typename base_type::extract_key extract_key; + // Other types are inherited from the base class. 
+ + using base_type::begin; + using base_type::end; + using base_type::find; + using base_type::lower_bound; + using base_type::upper_bound; + using base_type::insert; + using base_type::erase; + + protected: + using base_type::compare; + using base_type::get_compare; + + public: + class value_compare + { + protected: + friend class multimap; + Compare compare; + value_compare(Compare c) : compare(c) {} + + public: + typedef bool result_type; + typedef value_type first_argument_type; + typedef value_type second_argument_type; + + bool operator()(const value_type& x, const value_type& y) const + { return compare(x.first, y.first); } + }; + + public: + multimap(const allocator_type& allocator = EASTL_MULTIMAP_DEFAULT_ALLOCATOR); + multimap(const Compare& compare, const allocator_type& allocator = EASTL_MULTIMAP_DEFAULT_ALLOCATOR); + multimap(const this_type& x); + multimap(this_type&& x); + multimap(this_type&& x, const allocator_type& allocator); + multimap(std::initializer_list ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_MULTIMAP_DEFAULT_ALLOCATOR); + + template + multimap(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To consider: Make a second version of this function without a default arg. + + this_type& operator=(const this_type& x) { return (this_type&)base_type::operator=(x); } + this_type& operator=(std::initializer_list ilist) { return (this_type&)base_type::operator=(ilist); } + this_type& operator=(this_type&& x) { return (this_type&)base_type::operator=(eastl::move(x)); } + + public: + /// This is an extension to the C++ standard. We insert a default-constructed + /// element with the given key. The reason for this is that we can avoid the + /// potentially expensive operation of creating and/or copying a mapped_type + /// object on the stack. Note that C++11 move insertions and variadic emplace + /// support make this extension mostly no longer necessary. 
+ insert_return_type insert(const Key& key); + + value_compare value_comp() const; + + size_type erase(const Key& key); + size_type count(const Key& key) const; + + eastl::pair equal_range(const Key& key); + eastl::pair equal_range(const Key& key) const; + + /// equal_range_small + /// This is a special version of equal_range which is optimized for the + /// case of there being few or no duplicated keys in the tree. + eastl::pair equal_range_small(const Key& key); + eastl::pair equal_range_small(const Key& key) const; + + private: + // these base member functions are not included in multimaps + using base_type::try_emplace; + using base_type::insert_or_assign; + }; // multimap + + + + + + /////////////////////////////////////////////////////////////////////// + // map + /////////////////////////////////////////////////////////////////////// + + template + inline map::map(const allocator_type& allocator) + : base_type(allocator) + { + } + + + template + inline map::map(const Compare& compare, const allocator_type& allocator) + : base_type(compare, allocator) + { + } + + + template + inline map::map(const this_type& x) + : base_type(x) + { + } + + + template + inline map::map(this_type&& x) + : base_type(eastl::move(x)) + { + } + + template + inline map::map(this_type&& x, const allocator_type& allocator) + : base_type(eastl::move(x), allocator) + { + } + + + template + inline map::map(std::initializer_list ilist, const Compare& compare, const allocator_type& allocator) + : base_type(ilist.begin(), ilist.end(), compare, allocator) + { + } + + + template + template + inline map::map(Iterator itBegin, Iterator itEnd) + : base_type(itBegin, itEnd, Compare(), EASTL_MAP_DEFAULT_ALLOCATOR) + { + } + + + template + inline typename map::insert_return_type + map::insert(const Key& key) + { + return base_type::DoInsertKey(true_type(), key); + } + + + template + inline typename map::value_compare + map::value_comp() const + { + return value_compare(get_compare()); + } + + + 
template + inline typename map::size_type + map::erase(const Key& key) + { + const iterator it(find(key)); + + if(it != end()) // If it exists... + { + base_type::erase(it); + return 1; + } + return 0; + } + + + template + inline typename map::size_type + map::count(const Key& key) const + { + const const_iterator it(find(key)); + return (it != end()) ? 1 : 0; + } + + + template + inline eastl::pair::iterator, + typename map::iterator> + map::equal_range(const Key& key) + { + // The resulting range will either be empty or have one element, + // so instead of doing two tree searches (one for lower_bound and + // one for upper_bound), we do just lower_bound and see if the + // result is a range of size zero or one. + const iterator itLower(lower_bound(key)); + + if((itLower == end()) || compare(key, itLower.mpNode->mValue.first)) // If at the end or if (key is < itLower)... + return eastl::pair(itLower, itLower); + + iterator itUpper(itLower); + return eastl::pair(itLower, ++itUpper); + } + + + template + inline eastl::pair::const_iterator, + typename map::const_iterator> + map::equal_range(const Key& key) const + { + // See equal_range above for comments. + const const_iterator itLower(lower_bound(key)); + + if((itLower == end()) || compare(key, itLower.mpNode->mValue.first)) // If at the end or if (key is < itLower)... + return eastl::pair(itLower, itLower); + + const_iterator itUpper(itLower); + return eastl::pair(itLower, ++itUpper); + } + + + template + inline T& map::operator[](const Key& key) + { + iterator itLower(lower_bound(key)); // itLower->first is >= key. 
+ + if((itLower == end()) || compare(key, (*itLower).first)) + { + itLower = base_type::DoInsertKey(true_type(), itLower, key); + } + + return (*itLower).second; + + // Reference implementation of this function, which may not be as fast: + //iterator it(base_type::insert(eastl::pair(key, T())).first); + //return it->second; + } + + + template + inline T& map::operator[](Key&& key) + { + iterator itLower(lower_bound(key)); // itLower->first is >= key. + + if((itLower == end()) || compare(key, (*itLower).first)) + { + itLower = base_type::DoInsertKey(true_type(), itLower, eastl::move(key)); + } + + return (*itLower).second; + + // Reference implementation of this function, which may not be as fast: + //iterator it(base_type::insert(eastl::pair(key, T())).first); + //return it->second; + } + + + template + inline T& map::at(const Key& key) + { + iterator itLower(lower_bound(key)); // itLower->first is >= key. + + if(itLower == end()) + { + #if EASTL_EXCEPTIONS_ENABLED + throw std::out_of_range("map::at key does not exist"); + #else + EASTL_FAIL_MSG("map::at key does not exist"); + #endif + } + + return (*itLower).second; + } + + + template + inline const T& map::at(const Key& key) const + { + const_iterator itLower(lower_bound(key)); // itLower->first is >= key. 
+ + if(itLower == end()) + { + #if EASTL_EXCEPTIONS_ENABLED + throw std::out_of_range("map::at key does not exist"); + #else + EASTL_FAIL_MSG("map::at key does not exist"); + #endif + } + + return (*itLower).second; + } + + + /////////////////////////////////////////////////////////////////////// + // erase_if + // + // https://en.cppreference.com/w/cpp/container/map/erase_if + /////////////////////////////////////////////////////////////////////// + template + void erase_if(map& c, Predicate predicate) + { + for (auto i = c.begin(), last = c.end(); i != last;) + { + if (predicate(*i)) + { + i = c.erase(i); + } + else + { + ++i; + } + } + } + + + /////////////////////////////////////////////////////////////////////// + // multimap + /////////////////////////////////////////////////////////////////////// + + template + inline multimap::multimap(const allocator_type& allocator) + : base_type(allocator) + { + } + + + template + inline multimap::multimap(const Compare& compare, const allocator_type& allocator) + : base_type(compare, allocator) + { + } + + + template + inline multimap::multimap(const this_type& x) + : base_type(x) + { + } + + + template + inline multimap::multimap(this_type&& x) + : base_type(eastl::move(x)) + { + } + + template + inline multimap::multimap(this_type&& x, const allocator_type& allocator) + : base_type(eastl::move(x), allocator) + { + } + + + template + inline multimap::multimap(std::initializer_list ilist, const Compare& compare, const allocator_type& allocator) + : base_type(ilist.begin(), ilist.end(), compare, allocator) + { + } + + + template + template + inline multimap::multimap(Iterator itBegin, Iterator itEnd) + : base_type(itBegin, itEnd, Compare(), EASTL_MULTIMAP_DEFAULT_ALLOCATOR) + { + } + + + template + inline typename multimap::insert_return_type + multimap::insert(const Key& key) + { + return base_type::DoInsertKey(false_type(), key); + } + + + template + inline typename multimap::value_compare + multimap::value_comp() 
const + { + return value_compare(get_compare()); + } + + + template + inline typename multimap::size_type + multimap::erase(const Key& key) + { + const eastl::pair range(equal_range(key)); + const size_type n = (size_type)eastl::distance(range.first, range.second); + base_type::erase(range.first, range.second); + return n; + } + + + template + inline typename multimap::size_type + multimap::count(const Key& key) const + { + const eastl::pair range(equal_range(key)); + return (size_type)eastl::distance(range.first, range.second); + } + + + template + inline eastl::pair::iterator, + typename multimap::iterator> + multimap::equal_range(const Key& key) + { + // There are multiple ways to implement equal_range. The implementation mentioned + // in the C++ standard and which is used by most (all?) commercial STL implementations + // is this: + // return eastl::pair(lower_bound(key), upper_bound(key)); + // + // This does two tree searches -- one for the lower bound and one for the + // upper bound. This works well for the case whereby you have a large container + // and there are lots of duplicated values. We provide an alternative version + // of equal_range called equal_range_small for cases where the user is confident + // that the number of duplicated items is only a few. + + return eastl::pair(lower_bound(key), upper_bound(key)); + } + + + template + inline eastl::pair::const_iterator, + typename multimap::const_iterator> + multimap::equal_range(const Key& key) const + { + // See comments above in the non-const version of equal_range. + return eastl::pair(lower_bound(key), upper_bound(key)); + } + + + template + inline eastl::pair::iterator, + typename multimap::iterator> + multimap::equal_range_small(const Key& key) + { + // We provide alternative version of equal_range here which works faster + // for the case where there are at most small number of potential duplicated keys. 
+ const iterator itLower(lower_bound(key)); + iterator itUpper(itLower); + + while((itUpper != end()) && !compare(key, itUpper.mpNode->mValue.first)) + ++itUpper; + + return eastl::pair(itLower, itUpper); + } + + + template + inline eastl::pair::const_iterator, + typename multimap::const_iterator> + multimap::equal_range_small(const Key& key) const + { + // We provide alternative version of equal_range here which works faster + // for the case where there are at most small number of potential duplicated keys. + const const_iterator itLower(lower_bound(key)); + const_iterator itUpper(itLower); + + while((itUpper != end()) && !compare(key, itUpper.mpNode->mValue.first)) + ++itUpper; + + return eastl::pair(itLower, itUpper); + } + + + + /////////////////////////////////////////////////////////////////////// + // erase_if + // + // https://en.cppreference.com/w/cpp/container/multimap/erase_if + /////////////////////////////////////////////////////////////////////// + template + void erase_if(multimap& c, Predicate predicate) + { + // Erases all elements that satisfy the predicate pred from the container. + for (auto i = c.begin(), last = c.end(); i != last;) + { + if (predicate(*i)) + { + i = c.erase(i); + } + else + { + ++i; + } + } + } + +} // namespace eastl + + +#endif // Header include guard + + + + diff --git a/include/EASTL/memory.h b/include/EASTL/memory.h new file mode 100644 index 0000000..6d6b8a3 --- /dev/null +++ b/include/EASTL/memory.h @@ -0,0 +1,1702 @@ +/////////////////////////////////////////////////////////////////////////////// +// Copyright (c) Electronic Arts Inc. All rights reserved. 
+/////////////////////////////////////////////////////////////////////////////// + +/////////////////////////////////////////////////////////////////////////////// +// This file implements the following functions from the C++ standard that +// are found in the header: +// +// Temporary memory: +// get_temporary_buffer +// return_temporary_buffer +// +// Utility: +// late_constructed - Extention to standard functionality. +// +// Uninitialized operations: +// These are the same as the copy, fill, and fill_n algorithms, except that +// they *construct* the destination with the source values rather than assign +// the destination with the source values. +// +// uninitialized_copy +// uninitialized_copy_n +// uninitialized_default_construct +// uninitialized_default_construct_n +// uninitialized_move +// uninitialized_move_if_noexcept - Extention to standard functionality. +// uninitialized_move_n +// uninitialized_fill +// uninitialized_fill_n +// uninitialized_value_construct +// uninitialized_value_construct_n +// uninitialized_default_fill - Extention to standard functionality. +// uninitialized_default_fill_n - Extention to standard functionality. +// uninitialized_relocate - Extention to standard functionality. +// uninitialized_copy_ptr - Extention to standard functionality. +// uninitialized_move_ptr - Extention to standard functionality. +// uninitialized_move_ptr_if_noexcept- Extention to standard functionality. +// uninitialized_fill_ptr - Extention to standard functionality. +// uninitialized_fill_n_ptr - Extention to standard functionality. +// uninitialized_copy_fill - Extention to standard functionality. +// uninitialized_fill_copy - Extention to standard functionality. +// uninitialized_copy_copy - Extention to standard functionality. +// +// In-place destructor helpers: +// destruct(T*) - Non-standard extension. +// destruct(first, last) - Non-standard extension. 
+// destroy_at(T*) +// destroy(first, last) +// destroy_n(first, n) +// +// Alignment +// align +// align_advance - Extention to standard functionality. +// +// Allocator-related +// uses_allocator +// allocator_arg_t +// allocator_arg +// +// Pointers +// pointer_traits +// +/////////////////////////////////////////////////////////////////////////////// + + +#ifndef EASTL_MEMORY_H +#define EASTL_MEMORY_H + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +EA_DISABLE_ALL_VC_WARNINGS() +#include +#include +EA_RESTORE_ALL_VC_WARNINGS() + +#ifdef _MSC_VER + #pragma warning(push) + #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc + #pragma warning(disable: 4146) // unary minus operator applied to unsigned type, result still unsigned + #pragma warning(disable: 4571) // catch(...) semantics changed since Visual C++ 7.1; structured exceptions (SEH) are no longer caught. +#endif + +#if defined(EA_PRAGMA_ONCE_SUPPORTED) + #pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result. +#endif + + +namespace eastl +{ + + /// EASTL_TEMP_DEFAULT_NAME + /// + /// Defines a default container name in the absence of a user-provided name. + /// + #ifndef EASTL_TEMP_DEFAULT_NAME + #define EASTL_TEMP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " temp" // Unless the user overrides something, this is "EASTL temp". + #endif + + + /// get_temporary_buffer + /// + /// From the C++ standard, section 20.4.3: + /// 1 Effects: Obtains a pointer to storage sufficient to store up to n adjacent T objects. + /// 2 Returns: A pair containing the buffer's address and capacity (in the units of sizeof(T)), + /// or a pair of 0 values if no storage can be obtained. + /// + /// Note: The return value is space to hold T elements, but no T elements are constructed. 
+ /// + /// Our implementation here differs slightly in that we have alignment, alignmentOffset, and pName arguments. + /// Note that you can use the EASTL_NAME_VAL macro to make names go away in release builds. + /// + /// Example usage: + /// pair pr = get_temporary_buffer(100, 0, 0, EASTL_NAME_VAL("Temp int array")); + /// memset(pr.first, 0, 100 * sizeof(int)); + /// return_temporary_buffer(pr.first); + /// + template + eastl::pair get_temporary_buffer(ptrdiff_t n, size_t alignment = 1, size_t alignmentOffset = 0, const char* pName = EASTL_TEMP_DEFAULT_NAME) + { + EASTLAllocatorType allocator(*EASTLAllocatorDefault(), pName); + return eastl::pair(static_cast(EASTLAllocAligned(allocator, n * sizeof(T), alignment, alignmentOffset)), n); + } + + + /// return_temporary_buffer + /// + /// From the C++ standard, section 20.4.3: + /// 3 Effects: Deallocates the buffer to which p points. + /// 4 Requires: The buffer shall have been previously allocated by get_temporary_buffer. + /// + /// Note: This function merely frees space and does not destruct any T elements. + /// + /// Example usage: + /// pair pr = get_temporary_buffer(300); + /// memset(pr.first, 0, 300 * sizeof(int)); + /// return_temporary_buffer(pr.first, pr.second); + /// + template + void return_temporary_buffer(T* p, ptrdiff_t n = 0) + { + EASTLAllocatorType& allocator(*EASTLAllocatorDefault()); + EASTLFree(allocator, p, n * sizeof(T)); + } + + + + /// late_constructed + /// + /// Implements a smart pointer type which separates the memory allocation of an object from + /// the object's construction. The primary use case is to declare a global variable of the + /// late_construction type, which allows the memory to be global but the constructor executes + /// at some point after main() begins as opposed to before main, which is often dangerous + /// for non-trivial types. 
+ /// + /// The autoConstruct template parameter controls whether the object is automatically default + /// constructed upon first reference or must be manually constructed upon the first use of + /// operator * or ->. autoConstruct is convenient but it causes * and -> to be slightly slower + /// and may result in construction at an inconvenient time. + /// + /// The autoDestruct template parameter controls whether the object, if constructed, is automatically + /// destructed when ~late_constructed() is called or must be manually destructed via a call to + /// destruct(). + /// + /// While construction can be automatic or manual, automatic destruction support is always present. + /// Thus you aren't required in any case to manually call destruct. However, you may safely manually + /// destruct the object at any time before the late_constructed destructor is executed. + /// + /// You may still use late_constructed after calling destruct(), including calling construct() + /// again to reconstruct the instance. destruct returns the late_constructed instance to a + /// state equivalent to before construct was called. + /// + /// Caveat: While late_constructed instances can be declared in global scope and initialize + /// prior to main() executing, you cannot otherwise use such globally declared instances prior + /// to main with guaranteed behavior unless you can ensure that the late_constructed instance + /// is itself constructed prior to your use of it. + /// + /// Example usage (demonstrating manual-construction): + /// late_constructed gWidget; + /// + /// void main(){ + /// gWidget.construct(kScrollbarType, kVertical, "MyScrollbar"); + /// gWidget->SetValue(15); + /// gWidget.destruct(); + /// } + /// + /// Example usage (demonstrating auto-construction): + /// late_constructed gWidget; + /// + /// void main(){ + /// gWidget->SetValue(15); + /// // You may want to call destruct here, but aren't required to do so unless the Widget type requires it. 
+ /// } + /// + template + class late_constructed + { + public: + using this_type = late_constructed; + using value_type = T; + using storage_type = eastl::aligned_storage_t>; + + late_constructed() EA_NOEXCEPT // In the case of the late_constructed instance being at global scope, we rely on the + : mStorage(), mpValue(nullptr) {} // compiler executing this constructor or placing the instance in auto-zeroed-at-startup memory. + + ~late_constructed() + { + if (autoDestruct && mpValue) + (*mpValue).~value_type(); + } + + template + void construct(Args&&... args) + { + if(!mpValue) + mpValue = new (&mStorage) value_type(eastl::forward(args)...); + } + + bool is_constructed() const EA_NOEXCEPT + { return mpValue != nullptr; } + + void destruct() + { + if(mpValue) + { + (*mpValue).~value_type(); + mpValue = nullptr; + } + } + + value_type& operator*() EA_NOEXCEPT + { + if(!mpValue) + construct(); + + EA_ANALYSIS_ASSUME(mpValue); + return *mpValue; + } + + const value_type& operator*() const EA_NOEXCEPT + { + if(!mpValue) + construct(); + + EA_ANALYSIS_ASSUME(mpValue); + return *mpValue; + } + + value_type* operator->() EA_NOEXCEPT + { + if(!mpValue) + construct(); + return mpValue; + } + + const value_type* operator->() const EA_NOEXCEPT + { + if(!mpValue) + construct(); + return mpValue; + } + + value_type* get() EA_NOEXCEPT + { + if(!mpValue) + construct(); + return mpValue; + } + + const value_type* get() const EA_NOEXCEPT + { + if(!mpValue) + construct(); + return mpValue; + } + + protected: + storage_type mStorage; // Declared first because it may have aligment requirements, and it would be more space-efficient if it was first. + value_type* mpValue; + }; + + + // Specialization that doesn't auto-construct on demand. 
+ template + class late_constructed : public late_constructed + { + public: + typedef late_constructed base_type; + + typename base_type::value_type& operator*() EA_NOEXCEPT + { EASTL_ASSERT(base_type::mpValue); return *base_type::mpValue; } + + const typename base_type::value_type& operator*() const EA_NOEXCEPT + { EASTL_ASSERT(base_type::mpValue); return *base_type::mpValue; } + + typename base_type::value_type* operator->() EA_NOEXCEPT + { EASTL_ASSERT(base_type::mpValue); return base_type::mpValue; } + + const typename base_type::value_type* operator->() const EA_NOEXCEPT + { EASTL_ASSERT(base_type::mpValue); return base_type::mpValue; } + + typename base_type::value_type* get() EA_NOEXCEPT + { return base_type::mpValue; } + + const typename base_type::value_type* get() const EA_NOEXCEPT + { return base_type::mpValue; } + }; + + + + /// raw_storage_iterator + /// + /// From the C++11 Standard, section 20.6.10 p1 + /// raw_storage_iterator is provided to enable algorithms to store their results into uninitialized memory. + /// The formal template parameter OutputIterator is required to have its operator* return an object for + /// which operator& is defined and returns a pointer to T, and is also required to satisfy the requirements + /// of an output iterator (24.2.4). 
+ + template + class raw_storage_iterator : public iterator + { + protected: + OutputIterator mIterator; + + public: + explicit raw_storage_iterator(OutputIterator iterator) + : mIterator(iterator) + { + } + + raw_storage_iterator& operator*() + { + return *this; + } + + raw_storage_iterator& operator=(const T& value) + { + ::new(eastl::addressof(*mIterator)) T(value); + return *this; + } + + raw_storage_iterator& operator++() + { + ++mIterator; + return *this; + } + + raw_storage_iterator operator++(int) + { + raw_storage_iterator tempIterator = *this; + ++mIterator; + return tempIterator; + } + }; + + + /// uninitialized_relocate (formerly named uninitialized_move prior to C++11) + /// + /// This utility is deprecated in favor of C++11 rvalue move functionality. + /// + /// uninitialized_relocate takes a constructed sequence of objects and an + /// uninitialized destination buffer. In the case of any exception thrown + /// while moving the objects, any newly constructed objects are guaranteed + /// to be destructed and the input left fully constructed. + /// + /// In the case where you need to do multiple moves atomically, split the + /// calls into uninitialized_relocate_start/abort/commit. + /// + /// uninitialized_relocate_start can possibly throw an exception. If it does, + /// you don't need to do anything. However, if it returns without throwing + /// an exception you need to guarantee that either uninitialized_relocate_abort + /// or uninitialized_relocate_commit is called. + /// + /// Both uninitialized_relocate_abort and uninitialize_move_commit are + /// guaranteed to not throw C++ exceptions. 
+ namespace Internal + { + template + struct uninitialized_relocate_impl + { + template + static ForwardIteratorDest do_move_start(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest) + { + typedef typename eastl::iterator_traits::value_type value_type; + + #if EASTL_EXCEPTIONS_ENABLED + ForwardIteratorDest origDest(dest); + try + { + #endif + for(; first != last; ++first, ++dest) + ::new((void*)eastl::addressof(*dest)) value_type(*first); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(; origDest < dest; ++origDest) + (*origDest).~value_type(); + throw; + } + #endif + + return dest; + } + + template + static ForwardIteratorDest do_move_commit(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest) //throw() + { + typedef typename eastl::iterator_traits::value_type value_type; + for(; first != last; ++first, ++dest) + (*first).~value_type(); + + return dest; + } + + template + static ForwardIteratorDest do_move_abort(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest) //throw() + { + typedef typename eastl::iterator_traits::value_type value_type; + for(; first != last; ++first, ++dest) + (*dest).~value_type(); + return dest; + } + }; + + template <> + struct uninitialized_relocate_impl + { + template + static T* do_move_start(T* first, T* last, T* dest) + { + return (T*)memcpy(dest, first, (size_t)((uintptr_t)last - (uintptr_t)first)) + (last - first); + } + + template + static T* do_move_commit(T* first, T* last, T* dest) + { + return dest + (last - first); + } + + template + static T* do_move_abort(T* first, T* last, T* dest) + { + return dest + (last - first); + } + }; + } + + + /// uninitialized_relocate_start, uninitialized_relocate_commit, uninitialized_relocate_abort + /// + /// This utility is deprecated in favor of C++11 rvalue move functionality. 
+ /// + /// After calling uninitialized_relocate_start, if it doesn't throw an exception, + /// both the source and destination iterators point to undefined data. If it + /// does throw an exception, the destination remains uninitialized and the source + /// is as it was before. + /// + /// In order to make the iterators valid again you need to call either uninitialized_relocate_abort + /// or uninitialized_relocate_commit. The abort call makes the original source + /// iterator valid again, and commit makes the destination valid. Both abort + /// and commit are guaranteed to not throw C++ exceptions. + /// + /// Example usage: + /// iterator dest2 = uninitialized_relocate_start(first, last, dest); + /// try { + /// // some code here that might throw an exception + /// } + /// catch(...) + /// { + /// uninitialized_relocate_abort(first, last, dest); + /// throw; + /// } + /// uninitialized_relocate_commit(first, last, dest); + /// + template + inline ForwardIteratorDest uninitialized_relocate_start(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest) + { + typedef typename eastl::iterator_traits::iterator_category IC; + typedef typename eastl::iterator_traits::value_type value_type_input; + typedef typename eastl::iterator_traits::value_type value_type_output; + + const bool bHasTrivialMove = type_and::value, + is_pointer::value, + is_pointer::value, + is_same::value>::value; + + return Internal::uninitialized_relocate_impl::do_move_start(first, last, dest); + } + + template + inline ForwardIteratorDest uninitialized_relocate_commit(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest) + { + typedef typename eastl::iterator_traits::iterator_category IC; + typedef typename eastl::iterator_traits::value_type value_type_input; + typedef typename eastl::iterator_traits::value_type value_type_output; + + const bool bHasTrivialMove = type_and::value, + is_pointer::value, + is_pointer::value, + is_same::value>::value; + + return 
Internal::uninitialized_relocate_impl::do_move_commit(first, last, dest); + } + + template + inline ForwardIteratorDest uninitialized_relocate_abort(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest) + { + typedef typename eastl::iterator_traits::iterator_category IC; + typedef typename eastl::iterator_traits::value_type value_type_input; + typedef typename eastl::iterator_traits::value_type value_type_output; + + const bool bHasTrivialMove = type_and::value, + is_pointer::value, + is_pointer::value, + is_same::value>::value; + + return Internal::uninitialized_relocate_impl::do_move_abort(first, last, dest); + } + + /// uninitialized_relocate + /// + /// See above for documentation. + /// + template + inline ForwardIteratorDest uninitialized_relocate(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest) + { + ForwardIteratorDest result = uninitialized_relocate_start(first, last, dest); + eastl::uninitialized_relocate_commit(first, last, dest); + + return result; + } + + + + + + // uninitialized_copy + // + namespace Internal + { + template + inline ForwardIterator uninitialized_copy_impl(InputIterator first, InputIterator last, ForwardIterator dest, true_type) + { + return eastl::copy(first, last, dest); // The copy() in turn will use memcpy for POD types. + } + + template + inline ForwardIterator uninitialized_copy_impl(InputIterator first, InputIterator last, ForwardIterator dest, false_type) + { + typedef typename eastl::iterator_traits::value_type value_type; + ForwardIterator currentDest(dest); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(; first != last; ++first, ++currentDest) + ::new(static_cast(eastl::addressof(*currentDest))) value_type(*first); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) 
+ { + for(; dest < currentDest; ++dest) + (*dest).~value_type(); + throw; + } + #endif + + return currentDest; + } + } + + /// uninitialized_copy + /// + /// Copies a source range to a destination, copy-constructing the destination with + /// the source values (and not *assigning* the destination with the source values). + /// Returns the end of the destination range (i.e. dest + (last - first)). + /// + /// Declaration: + /// template + /// ForwardIterator uninitialized_copy(InputIterator sourceFirst, InputIterator sourceLast, ForwardIterator destination); + /// + /// Example usage: + /// SomeClass* pArray = malloc(10 * sizeof(SomeClass)); + /// uninitialized_copy(pSourceDataBegin, pSourceDataBegin + 10, pArray); + /// + template + inline ForwardIterator uninitialized_copy(InputIterator first, InputIterator last, ForwardIterator result) + { + typedef typename eastl::iterator_traits::value_type value_type; + + // We use is_trivial, which in the C++11 Standard means is_trivially_copyable and is_trivially_default_constructible. + return Internal::uninitialized_copy_impl(first, last, result, eastl::is_trivial()); + } + + + /// uninitialized_copy_n + /// + /// Copies count elements from a range beginning at first to an uninitialized memory area + /// beginning at dest. The elements in the uninitialized area are constructed using copy constructor. + /// If an exception is thrown during the initialization, the function has no final effects. + /// + /// first: Beginning of the range of the elements to copy. + /// dest: Beginning of the destination range. + /// return value: Iterator of dest type to the element past the last element copied. 
+ /// + namespace Internal + { + template + struct uninitialized_copy_n_impl + { + static ForwardIterator impl(InputIterator first, Count n, ForwardIterator dest) + { + typedef typename eastl::iterator_traits::value_type value_type; + ForwardIterator currentDest(dest); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(; n > 0; --n, ++first, ++currentDest) + ::new((void*)(eastl::addressof(*currentDest))) value_type(*first); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(; dest < currentDest; ++dest) + (*dest).~value_type(); + throw; + } + #endif + + return currentDest; + } + }; + + template + struct uninitialized_copy_n_impl + { + static inline ForwardIterator impl(InputIterator first, Count n, ForwardIterator dest) + { + return eastl::uninitialized_copy(first, first + n, dest); + } + }; + } + + template + inline ForwardIterator uninitialized_copy_n(InputIterator first, Count n, ForwardIterator dest) + { + typedef typename eastl::iterator_traits::iterator_category IC; + return Internal::uninitialized_copy_n_impl::impl(first, n, dest); + } + + + + /// uninitialized_copy_ptr + /// + /// This is a specialization of uninitialized_copy for iterators that are pointers. We use it because + /// internally it uses generic_iterator to make pointers act like regular eastl::iterator. + /// + template + inline Result uninitialized_copy_ptr(First first, Last last, Result result) + { + typedef typename eastl::iterator_traits >::value_type value_type; + const generic_iterator i(Internal::uninitialized_copy_impl(eastl::generic_iterator(first), // generic_iterator makes a pointer act like an iterator. + eastl::generic_iterator(last), + eastl::generic_iterator(result), + eastl::is_trivially_copy_assignable())); + return i.base(); + } + + + + /// uninitialized_move_ptr + /// + /// This is a specialization of uninitialized_move for iterators that are pointers. 
We use it because + /// internally it uses generic_iterator to make pointers act like regular eastl::iterator. + /// + namespace Internal + { + template + inline ForwardIterator uninitialized_move_impl(InputIterator first, InputIterator last, ForwardIterator dest, true_type) + { + return eastl::copy(first, last, dest); // The copy() in turn will use memcpy for is_trivially_copy_assignable (e.g. POD) types. + } + + template + inline ForwardIterator uninitialized_move_impl(InputIterator first, InputIterator last, ForwardIterator dest, false_type) + { + typedef typename eastl::iterator_traits::value_type value_type; + ForwardIterator currentDest(dest); + + // We must run a loop over every element and move-construct it at the new location. + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(; first != last; ++first, ++currentDest) + ::new((void*)eastl::addressof(*currentDest)) value_type(eastl::move(*first)); // If value_type has a move constructor then it will be used here. + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + // We have a problem here: If an exception occurs while doing the loop below then we will + // have values that were moved from the source to the dest that may need to be moved back + // in the catch. What does the C++11 Standard say about this? And what happens if there's an + // exception while moving them back? We may want to trace through a conforming C++11 Standard + // Library to see what it does and do something similar. Given that rvalue references are + // objects that are going away, we may not need to move the values back, though that has the + // side effect of a certain kind of lost elements problem. 
+ for(; dest < currentDest; ++dest) + (*dest).~value_type(); + throw; + } + #endif + + return currentDest; + } + } + + template + inline Result uninitialized_move_ptr(First first, Last last, Result dest) + { + typedef typename eastl::iterator_traits >::value_type value_type; + const generic_iterator i(Internal::uninitialized_move_impl(eastl::generic_iterator(first), // generic_iterator makes a pointer act like an iterator. + eastl::generic_iterator(last), + eastl::generic_iterator(dest), + eastl::is_trivially_copy_assignable())); // is_trivially_copy_assignable identifies if copy assignment would be as valid as move assignment, which means we have the opportunity to memcpy/memmove optimization. + return i.base(); + } + + + + + /// uninitialized_move + /// + /// Moves a source range to a destination, move-constructing the destination with + /// the source values (and not *assigning* the destination with the source values). + /// Returns the end of the destination range (i.e. dest + (last - first)). + /// + /// uninitialized_move is not part of any current C++ Standard, up to C++14. + /// + /// Declaration: + /// template + /// ForwardIterator uninitialized_move(InputIterator sourceFirst, InputIterator sourceLast, ForwardIterator destination); + /// + /// Example usage: + /// SomeClass* pArray = malloc(10 * sizeof(SomeClass)); + /// uninitialized_move(pSourceDataBegin, pSourceDataBegin + 10, pArray); + /// + template + inline ForwardIterator uninitialized_move(InputIterator first, InputIterator last, ForwardIterator dest) + { + return eastl::uninitialized_copy(eastl::make_move_iterator(first), eastl::make_move_iterator(last), dest); + } + + + /// uninitialized_move_if_noexcept + /// + /// If the iterated type can be moved without exceptions, move construct the dest with the input. Else copy-construct + /// the dest witih the input. If move isn't supported by the compiler, do regular copy. 
+ /// + template + inline ForwardIterator uninitialized_move_if_noexcept(InputIterator first, InputIterator last, ForwardIterator dest) + { + return eastl::uninitialized_copy(eastl::make_move_if_noexcept_iterator(first), eastl::make_move_if_noexcept_iterator(last), dest); + } + + + /// uninitialized_move_ptr_if_noexcept + /// + template + inline Result uninitialized_move_ptr_if_noexcept(First first, Last last, Result dest) + { + #if EASTL_EXCEPTIONS_ENABLED + return eastl::uninitialized_move_if_noexcept(first, last, dest); + #else + return eastl::uninitialized_move_ptr(first, last, dest); + #endif + } + + + /// uninitialized_move_n + /// + /// Moves count elements from a range beginning at first to an uninitialized memory area + /// beginning at dest. The elements in the uninitialized area are constructed using copy constructor. + /// If an exception is thrown during the initialization, the function has no final effects. + /// + /// first: Beginning of the range of the elements to move. + /// dest: Beginning of the destination range. + /// return value: Iterator of dest type to the element past the last element moved. + /// + template + inline ForwardIterator uninitialized_move_n(InputIterator first, Count n, ForwardIterator dest) + { + return eastl::uninitialized_copy_n(eastl::make_move_iterator(first), n, dest); + } + + // Disable warning C4345 - behavior change: an object of POD type constructed with an initializer of the form () + // will be default-initialized. + // This is the behavior we intend below. + EA_DISABLE_VC_WARNING(4345) + /// uninitialized_default_fill + /// + /// Default-constructs the elements in the destination range. + /// Returns void. It wouldn't be useful to return the end of the destination range, + /// as that is the same as the 'last' input parameter. 
+ /// + /// Declaration: + /// template + /// void uninitialized_default_fill(ForwardIterator destinationFirst, ForwardIterator destinationLast); + /// + template + inline void uninitialized_default_fill(ForwardIterator first, ForwardIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + ForwardIterator currentDest(first); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for (; currentDest != last; ++currentDest) + ::new (eastl::addressof(*currentDest)) value_type(); + #if EASTL_EXCEPTIONS_ENABLED + } + catch (...) + { + for (; first < currentDest; ++first) + (*first).~value_type(); + throw; + } + #endif + } + + /// uninitialized_default_fill_n + /// + /// Default-constructs the range of [first, first + n). + /// Returns void as per the C++ standard, though returning the end input iterator + /// value may be of use. + /// + /// Declaration: + /// template + /// void uninitialized_default_fill_n(ForwardIterator destination, Count n); + /// + namespace Internal + { + template + inline void uninitialized_default_fill_n_impl(ForwardIterator first, Count n, false_type) + { + typedef typename eastl::iterator_traits::value_type value_type; + ForwardIterator currentDest(first); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for (; n > 0; --n, ++currentDest) + ::new (eastl::addressof(*currentDest)) value_type(); + #if EASTL_EXCEPTIONS_ENABLED + } + catch (...) 
+ { + for (; first < currentDest; ++first) + (*first).~value_type(); + throw; + } + #endif + } + + template + inline void uninitialized_default_fill_n_impl(ForwardIterator first, Count n, true_type) + { + typedef typename eastl::iterator_traits::value_type value_type; + memset(first, 0, sizeof(value_type) * n); + } + } + + template + inline void uninitialized_default_fill_n(ForwardIterator first, Count n) + { + typedef typename eastl::iterator_traits::value_type value_type; + Internal::uninitialized_default_fill_n_impl(first, n, is_scalar()); + } + EA_RESTORE_VC_WARNING() + + /// uninitialized_default_construct + /// + /// Constructs objects in the uninitialized storage designated by the range [first, last) by default-initialization. + /// + /// Default-initialization: + /// If T is a class, the default constructor is called; otherwise, no initialization is done, resulting in + /// indeterminate values. + /// + /// http://en.cppreference.com/w/cpp/memory/uninitialized_default_construct + /// + template + inline void uninitialized_default_construct(ForwardIterator first, ForwardIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + ForwardIterator currentDest(first); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for (; currentDest != last; ++currentDest) + ::new (eastl::addressof(*currentDest)) value_type; + #if EASTL_EXCEPTIONS_ENABLED + } + catch (...) + { + for (; first < currentDest; ++first) + (*first).~value_type(); + throw; + } + #endif + } + + /// uninitialized_default_construct_n + /// + /// Constructs n objects in the uninitialized storage starting at first by default-initialization. 
+ /// + /// http://en.cppreference.com/w/cpp/memory/uninitialized_default_construct_n + /// + template + inline ForwardIterator uninitialized_default_construct_n(ForwardIterator first, Count n) + { + typedef typename eastl::iterator_traits::value_type value_type; + ForwardIterator currentDest(first); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for (; n > 0; --n, ++currentDest) + ::new (eastl::addressof(*currentDest)) value_type; + return currentDest; + #if EASTL_EXCEPTIONS_ENABLED + } + catch (...) + { + for (; first < currentDest; ++first) + (*first).~value_type(); + throw; + } + #endif + } + + /// uninitialized_fill + /// + /// Copy-constructs the elements in the destination range with the given input value. + /// Returns void. It wouldn't be useful to return the end of the destination range, + /// as that is the same as the 'last' input parameter. + /// + /// Declaration: + /// template + /// void uninitialized_fill(ForwardIterator destinationFirst, ForwardIterator destinationLast, const T& value); + /// + namespace Internal + { + template + inline void uninitialized_fill_impl(ForwardIterator first, ForwardIterator last, const T& value, true_type) + { + eastl::fill(first, last, value); + } + + template + void uninitialized_fill_impl(ForwardIterator first, ForwardIterator last, const T& value, false_type) + { + typedef typename eastl::iterator_traits::value_type value_type; + ForwardIterator currentDest(first); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(; currentDest != last; ++currentDest) + ::new((void*)eastl::addressof(*currentDest)) value_type(value); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) 
+ { + for(; first < currentDest; ++first) + (*first).~value_type(); + throw; + } + #endif + } + } + + template + inline void uninitialized_fill(ForwardIterator first, ForwardIterator last, const T& value) + { + typedef typename eastl::iterator_traits::value_type value_type; + Internal::uninitialized_fill_impl(first, last, value, eastl::is_trivially_copy_assignable()); + } + + /// uninitialized_value_construct + /// + /// Constructs objects in the uninitialized storage range [first, last) by value-initialization. + /// + /// Value-Initialization: + /// If T is a class, the object is default-initialized (after being zero-initialized if T's default + /// constructor is not user-provided/deleted); otherwise, the object is zero-initialized. + /// + /// http://en.cppreference.com/w/cpp/memory/uninitialized_value_construct + /// + template + void uninitialized_value_construct(ForwardIterator first, ForwardIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + ForwardIterator currentDest(first); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for (; currentDest != last; ++currentDest) + ::new (eastl::addressof(*currentDest)) value_type(); + #if EASTL_EXCEPTIONS_ENABLED + } + catch (...) + { + for (; first < currentDest; ++first) + (*first).~value_type(); + throw; + } + #endif + } + + /// uninitialized_value_construct_n + /// + /// Constructs n objects in the uninitialized storage starting at first by value-initialization. + /// + /// Value-Initialization: + /// If T is a class, the object is default-initialized (after being zero-initialized if T's default + /// constructor is not user-provided/deleted); otherwise, the object is zero-initialized. 
+ /// + /// http://en.cppreference.com/w/cpp/memory/uninitialized_value_construct_n + /// + template + ForwardIterator uninitialized_value_construct_n(ForwardIterator first, Count n) + { + typedef typename eastl::iterator_traits::value_type value_type; + ForwardIterator currentDest(first); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for (; n > 0; --n, ++currentDest) + ::new (eastl::addressof(*currentDest)) value_type(); + return currentDest; + #if EASTL_EXCEPTIONS_ENABLED + } + catch (...) + { + for (; first < currentDest; ++first) + (*first).~value_type(); + throw; + } + #endif + } + + /// uninitialized_fill_ptr + /// + /// This is a specialization of uninitialized_fill for iterators that are pointers. + /// It exists so that we can declare a value_type for the iterator, which you + /// can't do with a pointer by itself. + /// + template + inline void uninitialized_fill_ptr(T* first, T* last, const T& value) + { + typedef typename eastl::iterator_traits >::value_type value_type; + Internal::uninitialized_fill_impl(eastl::generic_iterator(first), + eastl::generic_iterator(last), value, + eastl::is_trivially_copy_assignable()); + } + + /// uninitialized_fill_n + /// + /// Copy-constructs the range of [first, first + n) with the given input value. + /// Returns void as per the C++ standard, though returning the end input iterator + /// value may be of use. 
+ /// + /// Declaration: + /// template + /// void uninitialized_fill_n(ForwardIterator destination, Count n, const T& value); + /// + namespace Internal + { + template + inline void uninitialized_fill_n_impl(ForwardIterator first, Count n, const T& value, true_type) + { + eastl::fill_n(first, n, value); + } + + template + void uninitialized_fill_n_impl(ForwardIterator first, Count n, const T& value, false_type) + { + typedef typename eastl::iterator_traits::value_type value_type; + ForwardIterator currentDest(first); + + #if EASTL_EXCEPTIONS_ENABLED + try + { + #endif + for(; n > 0; --n, ++currentDest) + ::new((void*)eastl::addressof(*currentDest)) value_type(value); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(; first < currentDest; ++first) + (*first).~value_type(); + throw; + } + #endif + } + } + + template + inline void uninitialized_fill_n(ForwardIterator first, Count n, const T& value) + { + typedef typename eastl::iterator_traits::value_type value_type; + Internal::uninitialized_fill_n_impl(first, n, value, eastl::is_trivially_copy_assignable()); + } + + + + /// uninitialized_fill_n_ptr + /// + /// This is a specialization of uninitialized_fill_n for iterators that are pointers. + /// It exists so that we can declare a value_type for the iterator, which you + /// can't do with a pointer by itself. + /// + template + inline void uninitialized_fill_n_ptr(T* first, Count n, const T& value) + { + typedef typename eastl::iterator_traits >::value_type value_type; + Internal::uninitialized_fill_n_impl(eastl::generic_iterator(first), n, value, eastl::is_trivially_copy_assignable()); + } + + + + + /// uninitialized_copy_fill + /// + /// Copies [first1, last1) into [first2, first2 + (last1 - first1)) then + /// fills [first2 + (last1 - first1), last2) with value. 
+ /// + template + inline void uninitialized_copy_fill(InputIterator first1, InputIterator last1, + ForwardIterator first2, ForwardIterator last2, const T& value) + { + const ForwardIterator mid(eastl::uninitialized_copy(first1, last1, first2)); + + #if EASTL_EXCEPTIONS_ENABLED + typedef typename eastl::iterator_traits::value_type value_type; + try + { + #endif + eastl::uninitialized_fill(mid, last2, value); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(; first2 < mid; ++first2) + (*first2).~value_type(); + throw; + } + #endif + } + + + /// uninitialized_move_fill + /// + /// Moves [first1, last1) into [first2, first2 + (last1 - first1)) then + /// fills [first2 + (last1 - first1), last2) with value. + /// + template + inline void uninitialized_move_fill(InputIterator first1, InputIterator last1, + ForwardIterator first2, ForwardIterator last2, const T& value) + { + const ForwardIterator mid(eastl::uninitialized_move(first1, last1, first2)); + + #if EASTL_EXCEPTIONS_ENABLED + typedef typename eastl::iterator_traits::value_type value_type; + try + { + #endif + eastl::uninitialized_fill(mid, last2, value); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(; first2 < mid; ++first2) + (*first2).~value_type(); + throw; + } + #endif + } + + + + + + /// uninitialized_fill_copy + /// + /// Fills [result, mid) with value then copies [first, last) into [mid, mid + (last - first)). + /// + template + inline ForwardIterator + uninitialized_fill_copy(ForwardIterator result, ForwardIterator mid, const T& value, InputIterator first, InputIterator last) + { + eastl::uninitialized_fill(result, mid, value); + + #if EASTL_EXCEPTIONS_ENABLED + typedef typename eastl::iterator_traits::value_type value_type; + try + { + #endif + return eastl::uninitialized_copy(first, last, mid); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) 
+ { + for(; result < mid; ++result) + (*result).~value_type(); + throw; + } + #endif + } + + + /// uninitialized_fill_move + /// + /// Fills [result, mid) with value then copies [first, last) into [mid, mid + (last - first)). + /// + template + inline ForwardIterator + uninitialized_fill_move(ForwardIterator result, ForwardIterator mid, const T& value, InputIterator first, InputIterator last) + { + eastl::uninitialized_fill(result, mid, value); + + #if EASTL_EXCEPTIONS_ENABLED + typedef typename eastl::iterator_traits::value_type value_type; + try + { + #endif + return eastl::uninitialized_move(first, last, mid); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(; result < mid; ++result) + (*result).~value_type(); + throw; + } + #endif + } + + + + /// uninitialized_copy_copy + /// + /// Copies [first1, last1) into [result, result + (last1 - first1)) then + /// copies [first2, last2) into [result, result + (last1 - first1) + (last2 - first2)). + /// + template + inline ForwardIterator + uninitialized_copy_copy(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, InputIterator2 last2, + ForwardIterator result) + { + const ForwardIterator mid(eastl::uninitialized_copy(first1, last1, result)); + + #if EASTL_EXCEPTIONS_ENABLED + typedef typename eastl::iterator_traits::value_type value_type; + try + { + #endif + return eastl::uninitialized_copy(first2, last2, mid); + #if EASTL_EXCEPTIONS_ENABLED + } + catch(...) + { + for(; result < mid; ++result) + (*result).~value_type(); + throw; + } + #endif + } + + + + /// destruct + /// + /// Calls the destructor of a given object. + /// + /// Note that we don't have a specialized version of this for objects + /// with trivial destructors, such as integers. This is because the + /// compiler can already see in our version here that the destructor + /// is a no-op. 
+ /// + template + inline void destruct(T* p) + { + // https://msdn.microsoft.com/query/dev14.query?appId=Dev14IDEF1&l=EN-US&k=k(C4100)&rd=true + // "C4100 can also be issued when code calls a destructor on a otherwise unreferenced parameter + // of primitive type. This is a limitation of the Visual C++ compiler." + EA_UNUSED(p); + p->~T(); + } + + + + // destruct(first, last) + // + template + inline void destruct_impl(ForwardIterator /*first*/, ForwardIterator /*last*/, true_type) // true means the type has a trivial destructor. + { + // Empty. The type has a trivial destructor. + } + + template + inline void destruct_impl(ForwardIterator first, ForwardIterator last, false_type) // false means the type has a significant destructor. + { + typedef typename eastl::iterator_traits::value_type value_type; + + for(; first != last; ++first) + (*first).~value_type(); + } + + /// destruct + /// + /// Calls the destructor on a range of objects. + /// + /// We have a specialization for objects with trivial destructors, such as + /// PODs. In this specialization the destruction of the range is a no-op. + /// + template + inline void destruct(ForwardIterator first, ForwardIterator last) + { + typedef typename eastl::iterator_traits::value_type value_type; + destruct_impl(first, last, eastl::has_trivial_destructor()); + } + + + /// destroy_at + /// + /// Calls the destructor of a given object. + /// + /// Note that we don't have a specialized version of this for objects + /// with trivial destructors, such as integers. This is because the + /// compiler can already see in our version here that the destructor + /// is a no-op. + /// + /// This is the same as eastl::destruct but we included for C++17 compliance. + /// + /// http://en.cppreference.com/w/cpp/memory/destroy_at + /// + template + inline void destroy_at(T* p) + { + EA_UNUSED(p); + p->~T(); + } + + + /// destroy + /// + /// Calls the destructor on a range of objects. 
+ /// + /// http://en.cppreference.com/w/cpp/memory/destroy + /// + template + inline void destroy(ForwardIterator first, ForwardIterator last) + { + for (; first != last; ++first) + destroy_at(addressof(*first)); + } + + + /// destroy_n + /// + /// Calls the destructor on the n objects in the range. + /// + /// http://en.cppreference.com/w/cpp/memory/destroy_n + /// + template + ForwardIterator destroy_n(ForwardIterator first, Size n) + { + for (; n > 0; ++first, --n) + destroy_at(addressof(*first)); + + return first; + } + + + /// align + /// + /// Same as C++11 std::align. http://en.cppreference.com/w/cpp/memory/align + /// If it is possible to fit size bytes of storage aligned by alignment into the buffer pointed to by + /// ptr with length space, the function updates ptr to point to the first possible address of such storage, + /// decreases space by the number of bytes used for alignment, and returns the new ptr value. Otherwise, + /// the function returns NULL and leaves ptr and space unmodified. + /// + /// Example usage: + /// char buffer[512]; + /// size_t space = sizeof(buffer); + /// void* p = buffer; + /// void* p1 = eastl::align(16, 3, p, space); p = (char*)p + 3; space -= 3; + /// void* p2 = eastl::align(32, 78, p, space); p = (char*)p + 78; space -= 78; + /// void* p3 = eastl::align(64, 9, p, space); p = (char*)p + 9; space -= 9; + + inline void* align(size_t alignment, size_t size, void*& ptr, size_t& space) + { + if(space >= size) + { + char* ptrAligned = (char*)(((size_t)ptr + (alignment - 1)) & -alignment); + size_t offset = (size_t)(ptrAligned - (char*)ptr); + + if((space - size) >= offset) // Have to implement this in terms of subtraction instead of addition in order to handle possible overflow. + { + ptr = ptrAligned; + space -= offset; + + return ptrAligned; + } + } + + return NULL; + } + + + /// align_advance + /// + /// Same as align except ptr and space can be adjusted to reflect remaining space. + /// Not present in the C++ Standard. 
+ /// Note that the example code here is similar to align but simpler. + /// + /// Example usage: + /// char buffer[512]; + /// size_t space = sizeof(buffer); + /// void* p = buffer; + /// void* p1 = eastl::align_advance(16, 3, p, space, &p, &space); // p is advanced and space reduced accordingly. + /// void* p2 = eastl::align_advance(32, 78, p, space, &p, &space); + /// void* p3 = eastl::align_advance(64, 9, p, space, &p, &space); + /// void* p4 = eastl::align_advance(16, 33, p, space); + + inline void* align_advance(size_t alignment, size_t size, void* ptr, size_t space, void** ptrAdvanced = NULL, size_t* spaceReduced = NULL) + { + if(space >= size) + { + char* ptrAligned = (char*)(((size_t)ptr + (alignment - 1)) & -alignment); + size_t offset = (size_t)(ptrAligned - (char*)ptr); + + if((space - size) >= offset) // Have to implement this in terms of subtraction instead of addition in order to handle possible overflow. + { + if(ptrAdvanced) + *ptrAdvanced = (ptrAligned + size); + if(spaceReduced) + *spaceReduced = (space - (offset + size)); + + return ptrAligned; + } + } + + return NULL; + } + + + /////////////////////////////////////////////////////////////////////// + // uses_allocator + // + // Determines if the class T has an allocator_type member typedef + // which Allocator is convertible to. + // + // http://en.cppreference.com/w/cpp/memory/uses_allocator + // + // A program may specialize this template to derive from true_type for a + // user-defined type T that does not have a nested allocator_type but + // nonetheless can be constructed with an allocator where either: + // - the first argument of a constructor has type allocator_arg_t and + // the second argument has type Allocator. + // or + // - the last argument of a constructor has type Allocator. 
+ // + // Example behavilor: + // uses_allocator::value => true + // uses_allocator::value => false + // + // This is useful for writing generic code for containers when you can't + // know ahead of time that the container has an allocator_type. + /////////////////////////////////////////////////////////////////////// + + template + struct has_allocator_type_helper + { + private: + template + static eastl::no_type test(...); + + template + static eastl::yes_type test(typename U::allocator_type* = NULL); + + public: + static const bool value = sizeof(test(NULL)) == sizeof(eastl::yes_type); + }; + + + template ::value> + struct uses_allocator_impl + : public integral_constant::value> + { + }; + + template + struct uses_allocator_impl + : public eastl::false_type + { + }; + + template + struct uses_allocator + : public uses_allocator_impl{ }; + + + + + + /////////////////////////////////////////////////////////////////////// + // pointer_traits + // + // C++11 Standard section 20.6.3 + // Provides information about a pointer type, mostly for the purpose + // of handling the case where the pointer type isn't a built-in T* but + // rather is a class that acts like a pointer. + // + // A user-defined Pointer has the following properties, by example: + // template + // struct Pointer + // { + // typedef Pointer pointer; // required for use by pointer_traits. + // typedef T1 element_type; // optional for use by pointer_traits. + // typedef T2 difference_type; // optional for use by pointer_traits. + // + // template + // using rebind = typename Ptr; // optional for use by pointer_traits. + // + // static pointer pointer_to(element_type& obj); // required for use by pointer_traits. 
+ // }; + // + // + // Example usage: + // template + // typename pointer_traits::element_type& GetElementPointedTo(Pointer p) + // { return *p; } + // + /////////////////////////////////////////////////////////////////////// + + namespace Internal + { + // pointer_element_type + template + struct has_element_type // has_element_type::value is true if T has an element_type member typedef. + { + private: + template static eastl::no_type test(...); + template static eastl::yes_type test(typename U::element_type* = 0); + public: + static const bool value = sizeof(test(0)) == sizeof(eastl::yes_type); + }; + + template ::value> + struct pointer_element_type; + + template + struct pointer_element_type + { typedef typename Pointer::element_type type; }; + + #if EASTL_VARIADIC_TEMPLATES_ENABLED // See 20.6.3.1 p3 for why we need to support this. Pointer may be a template with various arguments as opposed to a non-templated class. + template