diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml new file mode 100644 index 0000000000..43ca35a2cc --- /dev/null +++ b/.github/workflows/build-and-test.yml @@ -0,0 +1,364 @@ +name: Github Autobuild +on: [ push, pull_request ] +env: + CCACHE_COMPRESS: exists means true + CCACHE_SLOPPINESS: include_file_ctime,include_file_mtime,time_macros + # The following are for windows cross-build only: + BOOST_VERSION: 1_69_0 + BOOST_DOTTED_VERSION: 1.69.0 + CURL_VERSION: 7.67.0 + OPENSSL_VERSION: 1.1.1d + ZLIB_VERSION: 1.2.11 +jobs: + test-release: + name: Build and run tests in Release mode + runs-on: ubuntu-latest + services: + elasticsearch: + image: docker://elasticsearch:7.4.2 + options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 + steps: + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y \ + ccache \ + parallel \ + libboost-thread-dev \ + libboost-iostreams-dev \ + libboost-date-time-dev \ + libboost-system-dev \ + libboost-filesystem-dev \ + libboost-program-options-dev \ + libboost-chrono-dev \ + libboost-test-dev \ + libboost-context-dev \ + libboost-regex-dev \ + libboost-coroutine-dev \ + libcurl4-openssl-dev + sudo apt-get auto-remove -y + sudo apt-get clean -y + df -h + - uses: actions/checkout@v2 + with: + submodules: recursive + - name: Configure + run: | + mkdir -p _build + pushd _build + export -n BOOST_ROOT BOOST_INCLUDEDIR BOOST_LIBRARYDIR + cmake -D CMAKE_BUILD_TYPE=Release \ + -D CMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON \ + -D CMAKE_C_COMPILER=gcc \ + -D CMAKE_C_COMPILER_LAUNCHER=ccache \ + -D CMAKE_CXX_COMPILER=g++ \ + -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ + .. 
+ popd + - name: Load Cache + uses: actions/cache@v1 + with: + path: ccache + key: ccache-release-${{ github.ref }}-${{ github.sha }} + restore-keys: | + ccache-release-${{ github.ref }}- + ccache-release- + - name: Build + run: | + export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" + mkdir -p "$CCACHE_DIR" + make -j 2 -C _build + df -h + - name: Unit-Tests + run: | + _build/tests/app_test -l message + _build/tests/es_test -l message + libraries/fc/tests/run-parallel-tests.sh _build/tests/chain_test -l message + libraries/fc/tests/run-parallel-tests.sh _build/tests/cli_test -l message + df -h + - name: Node-Test + run: | + df -h + pushd _build + ../programs/build_helpers/run-node-test + df -h + test-debug: + name: Build and run tests in Debug mode + runs-on: ubuntu-latest + services: + elasticsearch: + image: docker://elasticsearch:7.4.2 + options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 + steps: + - name: Install dependencies + run: | + df -h + sudo apt-get update + sudo apt-get install -y \ + ccache \ + parallel \ + libboost-thread-dev \ + libboost-iostreams-dev \ + libboost-date-time-dev \ + libboost-system-dev \ + libboost-filesystem-dev \ + libboost-program-options-dev \ + libboost-chrono-dev \ + libboost-test-dev \ + libboost-context-dev \ + libboost-regex-dev \ + libboost-coroutine-dev \ + libcurl4-openssl-dev + sudo apt-get auto-remove -y + sudo apt-get clean -y + df -h + sudo du -hs /mnt/* + sudo ls -alr /mnt/ + - uses: actions/checkout@v2 + with: + submodules: recursive + - name: Configure + run: | + pwd + df -h . 
+ mkdir -p _build + sudo mkdir -p /_build/libraries /_build/programs /mnt/_build/tests + sudo chmod a+rwx /_build/libraries /_build/programs /mnt/_build/tests + ln -s /_build/libraries _build/libraries + ln -s /_build/programs _build/programs + ln -s /mnt/_build/tests _build/tests + sudo ln -s /_build/libraries /mnt/_build/libraries + sudo ln -s /_build/programs /mnt/_build/programs + sudo ln -s /mnt/_build/tests /_build/tests + ls -al _build + pushd _build + export -n BOOST_ROOT BOOST_INCLUDEDIR BOOST_LIBRARYDIR + cmake -D CMAKE_BUILD_TYPE=Debug \ + -D CMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON \ + -D CMAKE_C_COMPILER=gcc \ + -D CMAKE_C_COMPILER_LAUNCHER=ccache \ + -D CMAKE_CXX_COMPILER=g++ \ + -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ + .. + popd + - name: Load Cache + uses: actions/cache@v1 + with: + path: ccache + key: ccache-debug-${{ github.ref }}-${{ github.sha }} + restore-keys: | + ccache-debug-${{ github.ref }}- + ccache-debug- + - name: Build + run: | + export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" + mkdir -p "$CCACHE_DIR" + df -h + make -j 2 -C _build + df -h + du -hs _build/libraries/* _build/programs/* _build/tests/* + du -hs _build/* + du -hs /_build/* + - name: Unit-Tests + run: | + _build/tests/app_test -l message + df -h + _build/tests/es_test -l message + df -h + libraries/fc/tests/run-parallel-tests.sh _build/tests/chain_test -l message + libraries/fc/tests/run-parallel-tests.sh _build/tests/cli_test -l message + df -h + - name: Node-Test + run: | + df -h + pushd _build + ../programs/build_helpers/run-node-test + df -h + prepare-mingw64-libs: + name: Build 3rd-party libraries required for windows cross-build + runs-on: ubuntu-latest + steps: + - name: Load Cache + id: cache-libs + uses: actions/cache@v1 + with: + path: libs + key: mingw64-libs-${{ env.BOOST_VERSION }}_${{ env.CURL_VERSION }}_${{ env.OPENSSL_VERSION }}_${{ env.ZLIB_VERSION }} + - name: Install dependencies + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + sudo apt-get update 
+          sudo apt-get install -y \ + g++-mingw-w64-x86-64 \ + mingw-w64-tools + - name: Download library sources + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + curl -LO https://dl.bintray.com/boostorg/release/${{ env.BOOST_DOTTED_VERSION }}/source/boost_${{ env.BOOST_VERSION }}.tar.bz2 + curl -LO https://curl.haxx.se/download/curl-${{ env.CURL_VERSION }}.tar.bz2 + curl -LO https://www.openssl.org/source/openssl-${{ env.OPENSSL_VERSION }}.tar.gz + curl -LO https://zlib.net/zlib-${{ env.ZLIB_VERSION }}.tar.gz + - name: Build zlib + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + LIBS="`pwd`/libs" + ZLIB="`echo zlib-*`" + tar xfz "$ZLIB" + pushd "${ZLIB%.tar.gz}" + CROSS_PREFIX=x86_64-w64-mingw32- ./configure --prefix="$LIBS" --static --64 + make install + - name: Build openssl + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + LIBS="`pwd`/libs" + OPENSSL="`echo openssl-*`" + tar xfz "$OPENSSL" + pushd "${OPENSSL%.tar.gz}" + ./Configure --prefix="$LIBS" --cross-compile-prefix=x86_64-w64-mingw32- \ + no-shared zlib threads \ + mingw64 + make CPPFLAGS="-I$LIBS/include" LDFLAGS="-L$LIBS/lib" build_libs + make -j 2 install_dev + - name: Build curl + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + LIBS="`pwd`/libs" + CURL="`echo curl-*`" + tar xfj "$CURL" + pushd "${CURL%.tar.bz2}" + sed -i 's=-lgdi32=-lcrypt32 \0=' configure + PKG_CONFIG_PATH="$LIBS/lib/pkgconfig" ./configure --host=x86_64-w64-mingw32 \ + --prefix="$LIBS" \ + --disable-shared \ + --disable-tftp \ + --disable-ldap \ + --with-zlib \ + --without-ssl --with-winssl \ + --disable-tftp \ + --disable-ldap + make -j 2 install + - name: Build boost + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + LIBS="`pwd`/libs" + BOOST="`echo boost_*`" + tar xfj "$BOOST" + pushd "${BOOST%.tar.bz2}" + # See https://github.com/boostorg/context/issues/101 + sed -i '/os.\(name\|platform\)/d;/local tmp = /s=elf=pe=;/local tmp = /s=sysv=ms=' libs/context/build/Jamfile.v2
+ ./bootstrap.sh --prefix=$LIBS + echo "using gcc : mingw32 : x86_64-w64-mingw32-g++ ;" > user-config.jam + ./b2 --user-config=user-config.jam \ + --without-python \ + toolset=gcc-mingw32 \ + target-os=windows \ + variant=release \ + link=static \ + threading=multi \ + runtime-link=static \ + address-model=64 \ + abi=ms \ + install + build-mingw64: + name: Cross-build for windows using mingw + runs-on: ubuntu-latest + needs: prepare-mingw64-libs + steps: + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y \ + ccache \ + g++-mingw-w64-x86-64 \ + mingw-w64-tools + sudo apt-get auto-remove -y + sudo apt-get clean -y + df -h + - uses: actions/checkout@v2 + with: + submodules: recursive + - name: Load external libraries + uses: actions/cache@v1 + with: + path: libs + key: mingw64-libs-${{ env.BOOST_VERSION }}_${{ env.CURL_VERSION }}_${{ env.OPENSSL_VERSION }}_${{ env.ZLIB_VERSION }} + - name: Configure + run: | + LIBS="`pwd`/libs" + mkdir -p _build + pushd _build + cmake -D CMAKE_BUILD_TYPE=Release \ + -D CMAKE_C_COMPILER=/usr/bin/x86_64-w64-mingw32-gcc-posix \ + -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ + -D CMAKE_CXX_COMPILER=/usr/bin/x86_64-w64-mingw32-g++-posix \ + -D CMAKE_CXX_FLAGS=-Wa,-mbig-obj \ + -D CMAKE_SYSTEM_NAME=Windows \ + -D CURL_STATICLIB=ON \ + -D CMAKE_EXE_LINKER_FLAGS=--static \ + -D CMAKE_FIND_ROOT_PATH="/usr/lib/gcc/x86_64-w64-mingw32/7.3-win32/;$LIBS" \ + -D CMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER \ + -D CMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY \ + -D CMAKE_FIND_ROOT_PATH_MODE_INCLUDE=ONLY \ + -D GRAPHENE_DISABLE_UNITY_BUILD=ON \ + .. 
+ - name: Load Cache + uses: actions/cache@v1 + with: + path: ccache + key: ccache-mingw64-${{ github.ref }}-${{ github.sha }} + restore-keys: | + ccache-mingw64-${{ github.ref }}- + ccache-mingw64- + - name: Build + run: | + export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" + mkdir -p "$CCACHE_DIR" + make -j 2 -C _build witness_node cli_wallet + build-osx: + name: Build and run tests on OSX + runs-on: macos-latest + steps: + - name: Install dependencies + run: | + brew install autoconf automake libtool + brew install ccache + brew search boost + brew install bitshares/boost160/boost@1.60 + - uses: actions/checkout@v2 + with: + submodules: recursive + - name: Configure + run: | + mkdir -p _build + pushd _build + cmake -D CMAKE_BUILD_TYPE=Release \ + -D CMAKE_C_COMPILER_LAUNCHER=ccache \ + -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ + -D BOOST_ROOT=/usr/local/opt/boost@1.60 \ + -D OPENSSL_ROOT_DIR=/usr/local/opt/openssl \ + .. + - name: Load Cache + uses: actions/cache@v1 + with: + path: ccache + key: ccache-osx-${{ github.ref }}-${{ github.sha }} + restore-keys: | + ccache-osx-${{ github.ref }}- + ccache-osx- + - name: Build + run: | + export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" + mkdir -p "$CCACHE_DIR" + make -j 2 -C _build witness_node cli_wallet app_test cli_test chain_test + df -h + - name: Unit-Tests + run: | + _build/tests/app_test -l message + libraries/fc/tests/run-parallel-tests.sh _build/tests/chain_test -l message + libraries/fc/tests/run-parallel-tests.sh _build/tests/cli_test -l message + df -h + - name: Node-Test + run: | + df -h + pushd _build + ../programs/build_helpers/run-node-test diff --git a/Doxyfile b/Doxyfile index 74f566efbd..5f6f9afc68 100644 --- a/Doxyfile +++ b/Doxyfile @@ -758,7 +758,7 @@ WARN_LOGFILE = # spaces. # Note: If this tag is empty the current directory is searched. 
-INPUT = README.md doc/main.dox libraries/chain libraries/db libraries/app libraries/wallet libraries/protocol libraries/net libraries/plugins libraries/fc libraries/utilities libraries/egenesis +INPUT = README.md doc/main.dox libraries # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses @@ -793,7 +793,7 @@ RECURSIVE = YES # Note that relative paths are relative to the directory from which doxygen is # run. -EXCLUDE = libraries/fc/vendor/editline libraries/fc/vendor/secp256k1-zkp libraries/fc/vendor/websocketpp +EXCLUDE = libraries/fc/vendor libraries/fc/tests # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded diff --git a/README-docker.md b/README-docker.md index 6a9f713a73..ec44a3285d 100644 --- a/README-docker.md +++ b/README-docker.md @@ -17,8 +17,10 @@ The `Dockerfile` performs the following steps: 8. Add a local bitshares user and set `$HOME` to `/var/lib/bitshares` 9. Make `/var/lib/bitshares` and `/etc/bitshares` a docker *volume* 10. Expose ports `8091` and `9091` -11. Add default config from `docker/default_config.ini` and entry point script -12. Run entry point script by default +11. Add default config from `docker/default_config.ini` and + `docker/default_logging.ini` +12. Add an entry point script +13. Run the entry point script by default The entry point simplifies the use of parameters for the `witness_node` (which is run by default when spinning up the container). 
@@ -47,7 +49,7 @@ The default configuration is: rpc-endpoint = 0.0.0.0:8091 bucket-size = [60,300,900,1800,3600,14400,86400] history-per-size = 1000 - max-ops-per-account = 1000 + max-ops-per-account = 100 partial-operations = true # Docker Compose diff --git a/README.md b/README.md index 7b17ce91b3..73c6b1f8f3 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ BitShares Test Network `master` | `develop` | `hardfork` | `testnet` | `bitshares-fc` --- | --- | --- | --- | --- [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=master)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=develop)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=hardfork)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=testnet)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-fc.svg?branch=master)](https://travis-ci.org/bitshares/bitshares-fc) + [![](https://github.com/bitshares/bitshares-core/workflows/Github%20Autobuild/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=branch%3Amaster) | [![](https://github.com/bitshares/bitshares-core/workflows/Github%20Autobuild/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=branch%3Adevelop) | [![](https://github.com/bitshares/bitshares-core/workflows/Github%20Autobuild/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=branch%3Ahardfork) | [![](https://github.com/bitshares/bitshares-core/workflows/Github%20Autobuild/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=branch%3Atestnet) | 
[![](https://github.com/bitshares/bitshares-fc/workflows/Github%20Autobuild/badge.svg?branch=master)](https://github.com/bitshares/bitshares-fc/actions?query=branch%3Amaster) * [Getting Started](#getting-started) * [Support](#support) diff --git a/docker/default_config.ini b/docker/default_config.ini index 7417816afe..e137d69670 100644 --- a/docker/default_config.ini +++ b/docker/default_config.ini @@ -70,6 +70,9 @@ rpc-endpoint = 0.0.0.0:8091 # For database_api_impl::get_full_accounts to set max items to return in the lists # api-limit-get-full-accounts-lists = 500 +# For database_api_impl::get_top_voters to set max limit value +# api-limit-get-top-voters = 200 + # For database_api_impl::get_call_orders and get_call_orders_by_account to set max limit value # api-limit-get-call-orders = 300 diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 1f22020adb..90576296c6 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -308,8 +308,11 @@ namespace graphene { namespace app { return *_custom_operations_api; } - vector history_api::get_fill_order_history( std::string asset_a, std::string asset_b, uint32_t limit )const + vector history_api::get_fill_order_history( std::string asset_a, std::string asset_b, + uint32_t limit )const { + auto market_hist_plugin = _app.get_plugin( "market_history" ); + FC_ASSERT( market_hist_plugin, "Market history plugin is not enabled" ); FC_ASSERT(_app.chain_database()); const auto& db = *_app.chain_database(); asset_id_type a = database_api.get_asset_id_from_string( asset_a ); @@ -476,9 +479,9 @@ namespace graphene { namespace app { flat_set history_api::get_market_history_buckets()const { - auto hist = _app.get_plugin( "market_history" ); - FC_ASSERT( hist ); - return hist->tracked_buckets(); + auto market_hist_plugin = _app.get_plugin( "market_history" ); + FC_ASSERT( market_hist_plugin, "Market history plugin is not enabled" ); + return market_hist_plugin->tracked_buckets(); } 
history_operation_detail history_api::get_account_history_by_operations( const std::string account_id_or_name, @@ -514,7 +517,11 @@ namespace graphene { namespace app { uint32_t bucket_seconds, fc::time_point_sec start, fc::time_point_sec end )const { try { + + auto market_hist_plugin = _app.get_plugin( "market_history" ); + FC_ASSERT( market_hist_plugin, "Market history plugin is not enabled" ); FC_ASSERT(_app.chain_database()); + const auto& db = *_app.chain_database(); asset_id_type a = database_api.get_asset_id_from_string( asset_a ); asset_id_type b = database_api.get_asset_id_from_string( asset_b ); diff --git a/libraries/app/api_objects.cpp b/libraries/app/api_objects.cpp index a999ad99ca..a9be85244c 100644 --- a/libraries/app/api_objects.cpp +++ b/libraries/app/api_objects.cpp @@ -34,6 +34,7 @@ market_ticker::market_ticker(const market_ticker_object& mto, const order_book& orders) { time = now; + mto_id = mto.id; base = asset_base.symbol; quote = asset_quote.symbol; percent_change = "0"; diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index d67f899095..defa850abc 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -191,7 +191,11 @@ void application_impl::reset_websocket_server() if( !_options->count("rpc-endpoint") ) return; - _websocket_server = std::make_shared(); + string proxy_forward_header; + if( _options->count("proxy-forwarded-for-header") ) + proxy_forward_header = _options->at("proxy-forwarded-for-header").as(); + + _websocket_server = std::make_shared( proxy_forward_header ); _websocket_server->on_connection( std::bind(&application_impl::new_connection, this, std::placeholders::_1) ); ilog("Configured websocket rpc to listen on ${ip}", ("ip",_options->at("rpc-endpoint").as())); @@ -209,8 +213,13 @@ void application_impl::reset_websocket_tls_server() return; } + string proxy_forward_header; + if( _options->count("proxy-forwarded-for-header") ) + proxy_forward_header = 
_options->at("proxy-forwarded-for-header").as(); + string password = _options->count("server-pem-password") ? _options->at("server-pem-password").as() : ""; - _websocket_tls_server = std::make_shared( _options->at("server-pem").as(), password ); + _websocket_tls_server = std::make_shared( + _options->at("server-pem").as(), password, proxy_forward_header ); _websocket_tls_server->on_connection( std::bind(&application_impl::new_connection, this, std::placeholders::_1) ); ilog("Configured websocket TLS rpc to listen on ${ip}", ("ip",_options->at("rpc-tls-endpoint").as())); @@ -259,6 +268,9 @@ void application_impl::set_api_limit() { if(_options->count("api-limit-get-full-accounts-lists")) { _app_options.api_limit_get_full_accounts_lists = _options->at("api-limit-get-full-accounts-lists").as(); } + if(_options->count("api-limit-get-top-voters")) { + _app_options.api_limit_get_top_voters = _options->at("api-limit-get-top-voters").as(); + } if(_options->count("api-limit-get-call-orders")) { _app_options.api_limit_get_call_orders = _options->at("api-limit-get-call-orders").as(); } @@ -313,6 +325,9 @@ void application_impl::set_api_limit() { if(_options->count("api-limit-get-withdraw-permissions-by-recipient")) { _app_options.api_limit_get_withdraw_permissions_by_recipient = _options->at("api-limit-get-withdraw-permissions-by-recipient").as(); } + if(_options->count("api-limit-get-liquidity-pools")) { + _app_options.api_limit_get_liquidity_pools = _options->at("api-limit-get-liquidity-pools").as(); + } } void application_impl::startup() @@ -975,6 +990,9 @@ void application::set_program_options(boost::program_options::options_descriptio "Endpoint for TLS websocket RPC to listen on") ("server-pem,p", bpo::value()->implicit_value("server.pem"), "The TLS certificate file for this server") ("server-pem-password,P", bpo::value()->implicit_value(""), "Password for this certificate") + ("proxy-forwarded-for-header", bpo::value()->implicit_value("X-Forwarded-For-Client"), + "A HTTP 
header similar to X-Forwarded-For (XFF), used by the RPC server to extract clients' address info, " + "usually added by a trusted reverse proxy") ("genesis-json", bpo::value(), "File to read Genesis State from") ("dbg-init-key", bpo::value(), "Block signing key to use for init witnesses, overrides genesis file") ("api-access", bpo::value(), "JSON file specifying API permissions") @@ -1004,6 +1022,8 @@ void application::set_program_options(boost::program_options::options_descriptio "For database_api_impl::get_full_accounts to set max accounts to query at once") ("api-limit-get-full-accounts-lists",boost::program_options::value()->default_value(500), "For database_api_impl::get_full_accounts to set max items to return in the lists") + ("api-limit-get-top-voters",boost::program_options::value()->default_value(200), + "For database_api_impl::get_top_voters to set max limit value") ("api-limit-get-call-orders",boost::program_options::value()->default_value(300), "For database_api_impl::get_call_orders and get_call_orders_by_account to set max limit value") ("api-limit-get-settle-orders",boost::program_options::value()->default_value(300), @@ -1038,6 +1058,8 @@ void application::set_program_options(boost::program_options::options_descriptio "For database_api_impl::get_withdraw_permissions_by_giver to set max limit value") ("api-limit-get-withdraw-permissions-by-recipient",boost::program_options::value()->default_value(101), "For database_api_impl::get_withdraw_permissions_by_recipient to set max limit value") + ("api-limit-get-liquidity-pools",boost::program_options::value()->default_value(101), + "For database_api_impl::get_liquidity_pools_* to set max limit value") ; command_line_options.add(configuration_file_options); command_line_options.add_options() diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index ff03ce2e2a..3590bcb0c2 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -461,6 +461,11 @@ std::map 
database_api::get_full_accounts( const vectorget_full_accounts( names_or_ids, subscribe ); } +vector database_api::get_top_voters(uint32_t limit)const +{ + return my->get_top_voters( limit ); +} + std::map database_api_impl::get_full_accounts( const vector& names_or_ids, optional subscribe ) { @@ -640,6 +645,27 @@ std::map database_api_impl::get_full_accounts( const return results; } +vector database_api_impl::get_top_voters(uint32_t limit)const +{ + FC_ASSERT( _app_options, "Internal error" ); + const auto configured_limit = _app_options->api_limit_get_top_voters; + FC_ASSERT( limit <= configured_limit, + "limit can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + vector result; + + auto last_vote_tally_time = _db.get_dynamic_global_properties().last_vote_tally_time; + const auto& idx = _db.get_index_type().indices().get(); + + for(auto itr = idx.begin(); result.size() < limit && itr != idx.end() && itr->vote_tally_time >= last_vote_tally_time; ++itr) + { + result.emplace_back(*itr); + } + + return result; +} + optional database_api::get_account_by_name( string name )const { return my->get_account_by_name( name ); @@ -1710,6 +1736,141 @@ vector database_api_impl::get_trade_history_by_sequence( return result; } +////////////////////////////////////////////////////////////////////// +// // +// Liquidity pools // +// // +////////////////////////////////////////////////////////////////////// + +vector database_api::get_liquidity_pools_by_asset_a( + std::string asset_symbol_or_id, + optional limit, + optional start_id )const +{ + return my->get_liquidity_pools_by_asset_a( + asset_symbol_or_id, + limit, + start_id ); +} + +vector database_api_impl::get_liquidity_pools_by_asset_a( + std::string asset_symbol_or_id, + optional limit, + optional start_id )const +{ + return get_liquidity_pools_by_asset_x( + asset_symbol_or_id, + limit, + start_id ); +} + +vector database_api::get_liquidity_pools_by_asset_b( + std::string 
asset_symbol_or_id, + optional limit, + optional start_id )const +{ + return my->get_liquidity_pools_by_asset_b( + asset_symbol_or_id, + limit, + start_id ); +} + +vector database_api_impl::get_liquidity_pools_by_asset_b( + std::string asset_symbol_or_id, + optional limit, + optional start_id )const +{ + return get_liquidity_pools_by_asset_x( + asset_symbol_or_id, + limit, + start_id ); +} + +vector database_api::get_liquidity_pools_by_both_assets( + std::string asset_symbol_or_id_a, + std::string asset_symbol_or_id_b, + optional limit, + optional start_id )const +{ + return my->get_liquidity_pools_by_both_assets( + asset_symbol_or_id_a, + asset_symbol_or_id_b, + limit, + start_id ); +} + +vector database_api_impl::get_liquidity_pools_by_both_assets( + std::string asset_symbol_or_id_a, + std::string asset_symbol_or_id_b, + optional olimit, + optional ostart_id )const +{ + uint32_t limit = olimit.valid() ? *olimit : 101; + + FC_ASSERT( _app_options, "Internal error" ); + const auto configured_limit = _app_options->api_limit_get_liquidity_pools; + FC_ASSERT( limit <= configured_limit, + "limit can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + vector results; + + asset_id_type asset_id_a = get_asset_from_string(asset_symbol_or_id_a)->id; + asset_id_type asset_id_b = get_asset_from_string(asset_symbol_or_id_b)->id; + if( asset_id_a > asset_id_b ) + std::swap( asset_id_a, asset_id_b ); + + liquidity_pool_id_type start_id = ostart_id.valid() ? 
*ostart_id : liquidity_pool_id_type(); + + const auto& idx = _db.get_index_type().indices().get(); + auto lower_itr = idx.lower_bound( std::make_tuple( asset_id_a, asset_id_b, start_id ) ); + auto upper_itr = idx.upper_bound( std::make_tuple( asset_id_a, asset_id_b ) ); + + results.reserve( limit ); + uint32_t count = 0; + for ( ; lower_itr != upper_itr && count < limit; ++lower_itr, ++count) + { + results.emplace_back( *lower_itr ); + } + + return results; +} + +vector> database_api::get_liquidity_pools_by_share_asset( + const vector& asset_symbols_or_ids, + optional subscribe )const +{ + return my->get_liquidity_pools_by_share_asset( + asset_symbols_or_ids, + subscribe ); +} + +vector> database_api_impl::get_liquidity_pools_by_share_asset( + const vector& asset_symbols_or_ids, + optional subscribe )const +{ + FC_ASSERT( _app_options, "Internal error" ); + const auto configured_limit = _app_options->api_limit_get_liquidity_pools; + FC_ASSERT( asset_symbols_or_ids.size() <= configured_limit, + "size of the querying list can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + bool to_subscribe = get_whether_to_subscribe( subscribe ); + vector> result; result.reserve(asset_symbols_or_ids.size()); + std::transform(asset_symbols_or_ids.begin(), asset_symbols_or_ids.end(), std::back_inserter(result), + [this,to_subscribe](std::string id_or_name) -> optional { + + const asset_object* asset_obj = get_asset_from_string( id_or_name, false ); + if( asset_obj == nullptr || !asset_obj->is_liquidity_pool_share_asset() ) + return {}; + const liquidity_pool_object& lp_obj = (*asset_obj->for_liquidity_pool)(_db); + if( to_subscribe ) + subscribe_to_item( lp_obj.id ); + return lp_obj; + }); + return result; +} + ////////////////////////////////////////////////////////////////////// // // // Witnesses // diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index 4f544da427..c6053eefa8 100644 --- 
a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -72,6 +72,7 @@ class database_api_impl : public std::enable_shared_from_this optional subscribe )const; std::map get_full_accounts( const vector& names_or_ids, optional subscribe ); + vector get_top_voters(uint32_t limit)const; optional get_account_by_name( string name )const; vector get_account_references( const std::string account_id_or_name )const; vector> lookup_account_names(const vector& account_names)const; @@ -137,6 +138,24 @@ class database_api_impl : public std::enable_shared_from_this int64_t start, fc::time_point_sec stop, unsigned limit = 100 )const; + // Liquidity pools + vector get_liquidity_pools_by_asset_a( + std::string asset_symbol_or_id, + optional limit = 101, + optional start_id = optional() )const; + vector get_liquidity_pools_by_asset_b( + std::string asset_symbol_or_id, + optional limit = 101, + optional start_id = optional() )const; + vector get_liquidity_pools_by_both_assets( + std::string asset_symbol_or_id_a, + std::string asset_symbol_or_id_b, + optional limit = 101, + optional start_id = optional() )const; + vector> get_liquidity_pools_by_share_asset( + const vector& asset_symbols_or_ids, + optional subscribe = optional() )const; + // Witnesses vector> get_witnesses(const vector& witness_ids)const; fc::optional get_witness_by_account(const std::string account_id_or_name)const; @@ -238,6 +257,45 @@ class database_api_impl : public std::enable_shared_from_this vector get_limit_orders( const asset_id_type a, const asset_id_type b, const uint32_t limit )const; + //////////////////////////////////////////////// + // Liquidity pools + //////////////////////////////////////////////// + + // template function to reduce duplicate code + template + vector get_liquidity_pools_by_asset_x( + std::string asset_symbol_or_id, + optional olimit, + optional ostart_id )const + { + uint32_t limit = olimit.valid() ? 
*olimit : 101; + + FC_ASSERT( _app_options, "Internal error" ); + const auto configured_limit = _app_options->api_limit_get_liquidity_pools; + FC_ASSERT( limit <= configured_limit, + "limit can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + vector results; + + const asset_id_type asset_id = get_asset_from_string(asset_symbol_or_id)->id; + + liquidity_pool_id_type start_id = ostart_id.valid() ? *ostart_id : liquidity_pool_id_type(); + + const auto& idx = _db.get_index_type().indices().get(); + auto lower_itr = idx.lower_bound( std::make_tuple( asset_id, start_id ) ); + auto upper_itr = idx.upper_bound( asset_id ); + + results.reserve( limit ); + uint32_t count = 0; + for ( ; lower_itr != upper_itr && count < limit; ++lower_itr, ++count) + { + results.emplace_back( *lower_itr ); + } + + return results; + } + //////////////////////////////////////////////// // Subscription //////////////////////////////////////////////// diff --git a/libraries/app/include/graphene/app/api_objects.hpp b/libraries/app/include/graphene/app/api_objects.hpp index 3582baaa1d..00a03f1b75 100644 --- a/libraries/app/include/graphene/app/api_objects.hpp +++ b/libraries/app/include/graphene/app/api_objects.hpp @@ -108,6 +108,7 @@ namespace graphene { namespace app { string percent_change; string base_volume; string quote_volume; + optional mto_id; market_ticker() {} market_ticker(const market_ticker_object& mto, @@ -184,10 +185,12 @@ FC_REFLECT( graphene::app::order, (price)(quote)(base) ); FC_REFLECT( graphene::app::order_book, (base)(quote)(bids)(asks) ); FC_REFLECT( graphene::app::market_ticker, (time)(base)(quote)(latest)(lowest_ask)(lowest_ask_base_size)(lowest_ask_quote_size) - (highest_bid)(highest_bid_base_size)(highest_bid_quote_size)(percent_change)(base_volume)(quote_volume) ); + (highest_bid)(highest_bid_base_size)(highest_bid_quote_size)(percent_change)(base_volume)(quote_volume)(mto_id) ); FC_REFLECT( graphene::app::market_volume, 
(time)(base)(quote)(base_volume)(quote_volume) ); FC_REFLECT( graphene::app::market_trade, (sequence)(date)(price)(amount)(value)(type) (side1_account_id)(side2_account_id)); FC_REFLECT_DERIVED( graphene::app::extended_asset_object, (graphene::chain::asset_object), (total_in_collateral)(total_backing_collateral) ); + + diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index 6446d3b70d..5d9be78b92 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ -53,6 +53,7 @@ namespace graphene { namespace app { uint64_t api_limit_get_htlc_by = 100; uint64_t api_limit_get_full_accounts = 50; uint64_t api_limit_get_full_accounts_lists = 500; + uint64_t api_limit_get_top_voters = 200; uint64_t api_limit_get_call_orders = 300; uint64_t api_limit_get_settle_orders = 300; uint64_t api_limit_get_assets = 101; @@ -71,6 +72,7 @@ namespace graphene { namespace app { uint64_t api_limit_get_trade_history_by_sequence = 100; uint64_t api_limit_get_withdraw_permissions_by_giver = 101; uint64_t api_limit_get_withdraw_permissions_by_recipient = 101; + uint64_t api_limit_get_liquidity_pools = 101; }; class application diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index 4934f9ebc9..6df0d49433 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -118,6 +119,7 @@ class database_api * - lookup_accounts * - get_full_accounts * - get_htlc + * - get_liquidity_pools_by_share_asset * * Note: auto-subscription is enabled by default * @@ -279,6 +281,13 @@ class database_api std::map get_full_accounts( const vector& names_or_ids, optional subscribe = optional() ); + /** + * @brief Returns vector of voting power sorted by reverse vp_active + * 
@param limit Max number of results + * @return Desc Sorted voting power vector + */ + vector get_top_voters(uint32_t limit)const; + /** * @brief Get info of an account by name * @param name Name of the account to retrieve @@ -618,6 +627,81 @@ class database_api unsigned limit = 100 )const; + ///////////////////// + // Liquidity pools // + ///////////////////// + + /** + * @brief Get a list of liquidity pools by the symbol or ID of the first asset in the pool + * @param asset_symbol_or_id symbol name or ID of the asset + * @param limit The limitation of items each query can fetch, not greater than a configured value + * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @return The liquidity pools + * + * @note + * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned + * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools + * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + */ + vector get_liquidity_pools_by_asset_a( + std::string asset_symbol_or_id, + optional limit = 101, + optional start_id = optional() )const; + + /** + * @brief Get a list of liquidity pools by the symbol or ID of the second asset in the pool + * @param asset_symbol_or_id symbol name or ID of the asset + * @param limit The limitation of items each query can fetch, not greater than a configured value + * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @return The liquidity pools + * + * @note + * 1. if @p asset_symbol_or_id cannot be tied to an asset, an error will be returned + * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools + * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle + */ + vector get_liquidity_pools_by_asset_b( + std::string asset_symbol_or_id, + optional limit = 101, + optional start_id = optional() )const; + + /** + * @brief Get a list of liquidity pools by the symbols or IDs of the two assets in the pool + * @param asset_symbol_or_id_a symbol name or ID of one asset + * @param asset_symbol_or_id_b symbol name or ID of the other asset + * @param limit The limitation of items each query can fetch, not greater than a configured value + * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @return The liquidity pools + * + * @note + * 1. if @p asset_symbol_or_id_a or @p asset_symbol_or_id_b cannot be tied to an asset, + * an error will be returned + * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools + * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle + */ + vector get_liquidity_pools_by_both_assets( + std::string asset_symbol_or_id_a, + std::string asset_symbol_or_id_b, + optional limit = 101, + optional start_id = optional() )const; + + /** + * @brief Get a list of liquidity pools by their share asset symbols or IDs + * @param asset_symbols_or_ids symbol names or IDs of the share assets + * @param subscribe @a true to subscribe to the queried objects; @a false to not subscribe; + * @a null to subscribe or not subscribe according to current auto-subscription setting + * (see @ref set_auto_subscription) + * @return The liquidity pools that the assets are for + * + * @note if an asset in the list can not be found or is not a share asset of any liquidity pool, + * the corresponding data in the returned list is null. 
+ */ + vector> get_liquidity_pools_by_share_asset( + const vector& asset_symbols_or_ids, + optional subscribe = optional() )const; /////////////// // Witnesses // @@ -948,6 +1032,7 @@ FC_API(graphene::app::database_api, (get_account_id_from_string) (get_accounts) (get_full_accounts) + (get_top_voters) (get_account_by_name) (get_account_references) (lookup_account_names) @@ -988,6 +1073,12 @@ FC_API(graphene::app::database_api, (get_trade_history) (get_trade_history_by_sequence) + // Liquidity pools + (get_liquidity_pools_by_asset_a) + (get_liquidity_pools_by_asset_b) + (get_liquidity_pools_by_both_assets) + (get_liquidity_pools_by_share_asset) + // Witnesses (get_witnesses) (get_witness_by_account) diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index a0f28dfb46..6843a76029 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -36,6 +36,7 @@ add_library( graphene_chain exceptions.cpp evaluator.cpp + liquidity_pool_evaluator.cpp balance_evaluator.cpp account_evaluator.cpp assert_evaluator.cpp @@ -90,13 +91,10 @@ if(MSVC) set_source_files_properties( ${GRAPHENE_CHAIN_BIG_FILES} PROPERTIES COMPILE_FLAGS "/bigobj" ) else( MSVC ) if( MINGW ) + # Note: Even with the big-obj property and the -Os property set, + # "string table overflow" and "File too big" errors may still occur on database.cpp. + # Can set GRAPHENE_DISABLE_UNITY_BUILD to ON to get around the issue. 
set_source_files_properties( ${GRAPHENE_CHAIN_BIG_FILES} PROPERTIES COMPILE_FLAGS -Wa,-mbig-obj ) - if( (CMAKE_BUILD_TYPE MATCHES Release) OR (CMAKE_BUILD_TYPE MATCHES RelWithDebInfo) ) - # Use -Os to avoid string table overflow - set_source_files_properties( database.cpp PROPERTIES COMPILE_FLAGS -Os ) - set_source_files_properties( database.cpp PROPERTIES LINK_FLAGS -Os ) - set_source_files_properties( database.cpp PROPERTIES STATIC_LIBRARY_FLAGS -Os ) - endif( CMAKE_BUILD_TYPE ) endif( MINGW ) endif(MSVC) diff --git a/libraries/chain/account_object.cpp b/libraries/chain/account_object.cpp index f53046280a..627de8e899 100644 --- a/libraries/chain/account_object.cpp +++ b/libraries/chain/account_object.cpp @@ -66,13 +66,6 @@ void account_statistics_object::process_fees(const account_object& a, database& share_type network_cut = cut_fee(core_fee_total, account.network_fee_percentage); assert( network_cut <= core_fee_total ); -#ifndef NDEBUG - const auto& props = d.get_global_properties(); - - share_type reserveed = cut_fee(network_cut, props.parameters.reserve_percent_of_fee); - share_type accumulated = network_cut - reserveed; - assert( accumulated + reserveed == network_cut ); -#endif share_type lifetime_cut = cut_fee(core_fee_total, account.lifetime_referrer_fee_percentage); share_type referral = core_fee_total - network_cut - lifetime_cut; @@ -90,7 +83,7 @@ void account_statistics_object::process_fees(const account_object& a, database& d.deposit_cashback(d.get(account.referrer), referrer_cut, require_vesting); d.deposit_cashback(d.get(account.registrar), registrar_cut, require_vesting); - assert( referrer_cut + registrar_cut + accumulated + reserveed + lifetime_cut == core_fee_total ); + assert( referrer_cut + registrar_cut + network_cut + lifetime_cut == core_fee_total ); }; pay_out_fees(a, pending_fees, true); @@ -337,6 +330,8 @@ FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::account_statistics_object, (has_cashback_vb) (is_voting) (last_vote_time) + 
(vp_all)(vp_active)(vp_committee)(vp_witness)(vp_worker) + (vote_tally_time) (lifetime_fees_paid) (pending_fees)(pending_vested_fees) ) diff --git a/libraries/chain/asset_evaluator.cpp b/libraries/chain/asset_evaluator.cpp index 530a2c79ce..db05b25de0 100644 --- a/libraries/chain/asset_evaluator.cpp +++ b/libraries/chain/asset_evaluator.cpp @@ -295,6 +295,8 @@ void_result asset_issue_evaluator::do_evaluate( const asset_issue_operation& o ) FC_ASSERT( o.issuer == a.issuer ); FC_ASSERT( !a.is_market_issued(), "Cannot manually issue a market-issued asset." ); + FC_ASSERT( !a.is_liquidity_pool_share_asset(), "Cannot manually issue a liquidity pool share asset." ); + FC_ASSERT( a.can_create_new_supply(), "Can not create new supply" ); to_account = &o.issue_to_account(d); @@ -329,11 +331,21 @@ void_result asset_reserve_evaluator::do_evaluate( const asset_reserve_operation& ("sym", a.symbol) ); - from_account = &o.payer(d); + from_account = fee_paying_account; FC_ASSERT( is_authorized_asset( d, *from_account, a ) ); asset_dyn_data = &a.dynamic_asset_data_id(d); - FC_ASSERT( (asset_dyn_data->current_supply - o.amount_to_reserve.amount) >= 0 ); + if( !a.is_liquidity_pool_share_asset() ) + { + FC_ASSERT( asset_dyn_data->current_supply >= o.amount_to_reserve.amount, + "Can not reserve an amount that is more than the current supply" ); + } + else + { + FC_ASSERT( asset_dyn_data->current_supply > o.amount_to_reserve.amount, + "The asset is a liquidity pool share asset thus can only reserve an amount " + "that is less than the current supply" ); + } return void_result(); } FC_CAPTURE_AND_RETHROW( (o) ) } diff --git a/libraries/chain/db_init.cpp b/libraries/chain/db_init.cpp index dfbbec9132..d88cb53210 100644 --- a/libraries/chain/db_init.cpp +++ b/libraries/chain/db_init.cpp @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -57,6 +58,7 @@ #include #include #include +#include #include #include #include @@ -74,69 +76,6 @@ namespace graphene { 
namespace chain { -// C++ requires that static class variables declared and initialized -// in headers must also have a definition in a single source file, -// else linker errors will occur [1]. -// -// The purpose of this source file is to collect such definitions in -// a single place. -// -// [1] http://stackoverflow.com/questions/8016780/undefined-reference-to-static-constexpr-char - -const uint8_t account_object::space_id; -const uint8_t account_object::type_id; - -const uint8_t asset_object::space_id; -const uint8_t asset_object::type_id; - -const uint8_t block_summary_object::space_id; -const uint8_t block_summary_object::type_id; - -const uint8_t call_order_object::space_id; -const uint8_t call_order_object::type_id; - -const uint8_t committee_member_object::space_id; -const uint8_t committee_member_object::type_id; - -const uint8_t force_settlement_object::space_id; -const uint8_t force_settlement_object::type_id; - -const uint8_t global_property_object::space_id; -const uint8_t global_property_object::type_id; - -const uint8_t limit_order_object::space_id; -const uint8_t limit_order_object::type_id; - -const uint8_t operation_history_object::space_id; -const uint8_t operation_history_object::type_id; - -const uint8_t proposal_object::space_id; -const uint8_t proposal_object::type_id; - -const uint8_t transaction_history_object::space_id; -const uint8_t transaction_history_object::type_id; - -const uint8_t vesting_balance_object::space_id; -const uint8_t vesting_balance_object::type_id; - -const uint8_t withdraw_permission_object::space_id; -const uint8_t withdraw_permission_object::type_id; - -const uint8_t witness_object::space_id; -const uint8_t witness_object::type_id; - -const uint8_t worker_object::space_id; -const uint8_t worker_object::type_id; - -const uint8_t htlc_object::space_id; -const uint8_t htlc_object::type_id; - -const uint8_t custom_authority_object::space_id; -const uint8_t custom_authority_object::type_id; - -const uint8_t 
ticket_object::space_id; -const uint8_t ticket_object::type_id; - void database::initialize_evaluators() { _operation_evaluators.resize(255); @@ -192,6 +131,11 @@ void database::initialize_evaluators() register_evaluator(); register_evaluator(); register_evaluator(); + register_evaluator(); + register_evaluator(); + register_evaluator(); + register_evaluator(); + register_evaluator(); } void database::initialize_indexes() @@ -217,6 +161,7 @@ void database::initialize_indexes() add_index< primary_index< htlc_index> >(); add_index< primary_index< custom_authority_index> >(); add_index< primary_index >(); + add_index< primary_index >(); //Implementation object indexes add_index< primary_index >(); diff --git a/libraries/chain/db_maint.cpp b/libraries/chain/db_maint.cpp index ae1b0ef13b..8ba8d2ac7a 100644 --- a/libraries/chain/db_maint.cpp +++ b/libraries/chain/db_maint.cpp @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -1092,6 +1093,38 @@ void delete_expired_custom_authorities( database& db ) db.remove(*index.begin()); } +/// A one-time data process to set values of existing liquid tickets to zero. 
+void process_hf_2262( database& db ) +{ + for( const auto& ticket_obj : db.get_index_type().indices().get() ) + { + if( ticket_obj.current_type != liquid ) // only update liquid tickets + continue; + db.modify( db.get_account_stats_by_owner( ticket_obj.account ), [&ticket_obj](account_statistics_object& aso) { + aso.total_pol_value -= ticket_obj.value; + }); + db.modify( ticket_obj, []( ticket_object& t ) { + t.value = 0; + }); + } + // Code for testnet, begin + const ticket_object* t15 = db.find( ticket_id_type(15) ); // a ticket whose target is lock_forever + if( t15 && t15->account == account_id_type(3833) ) // its current type should be lock_720_days at hf time + { + db.modify( *t15, [&db]( ticket_object& t ) { + t.next_auto_update_time = db.head_block_time() + fc::seconds(60); + }); + } + const ticket_object* t33 = db.find( ticket_id_type(33) ); // a ticket whose target is lock_720_days + if( t33 && t33->account == account_id_type(3833) ) // its current type should be liquid at hf time + { + db.modify( *t33, [&db]( ticket_object& t ) { + t.next_auto_update_time = db.head_block_time() + fc::seconds(30); + }); + } + // Code for testnet, end +} + namespace detail { struct vote_recalc_times @@ -1170,6 +1203,7 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g { const auto& gpo = get_global_properties(); const auto& dgpo = get_dynamic_global_properties(); + auto last_vote_tally_time = head_block_time(); distribute_fba_balances(*this); create_buyback_orders(*this); @@ -1180,6 +1214,7 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g const dynamic_global_property_object& dprops; const time_point_sec now; const bool hf2103_passed; + const bool hf2262_passed; const bool pob_activated; optional witness_recalc_times; @@ -1188,8 +1223,9 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g optional delegator_recalc_times; vote_tally_helper( database& db ) - : d(db), props( 
d.get_global_properties() ), dprops( d.get_dynamic_global_properties() ), + : d(db), props( d.get_global_properties() ), dprops( d.get_dynamic_global_properties() ), now( d.head_block_time() ), hf2103_passed( HARDFORK_CORE_2103_PASSED( now ) ), + hf2262_passed( HARDFORK_CORE_2262_PASSED( now ) ), pob_activated( dprops.total_pob > 0 || dprops.total_inactive > 0 ) { d._vote_tally_buffer.resize( props.next_available_vote_id, 0 ); @@ -1229,8 +1265,16 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g uint64_t voting_stake[3]; // 0=committee, 1=witness, 2=worker, as in vote_id_type::vote_type uint64_t num_committee_voting_stake; // number of committee members voting_stake[2] = ( pob_activated ? 0 : stats.total_core_in_orders.value ) - + (stake_account.cashback_vb.valid() ? (*stake_account.cashback_vb)(d).balance.amount.value: 0) - + stats.core_in_balance.value; + + ( ( !hf2262_passed && stake_account.cashback_vb.valid() ) ? + (*stake_account.cashback_vb)(d).balance.amount.value : 0 ) + + ( hf2262_passed ? 0 : stats.core_in_balance.value ); + + // voting power stats + uint64_t vp_all = 0; ///< all voting power. + uint64_t vp_active = 0; ///< the voting power of the proxy, if there is no attenuation, it is equal to vp_all. + uint64_t vp_committee = 0; ///< the final voting power for the committees. + uint64_t vp_witness = 0; ///< the final voting power for the witnesses. + uint64_t vp_worker = 0; ///< the final voting power for the workers. //PoB const uint64_t pol_amount = stats.total_core_pol.value; @@ -1281,33 +1325,57 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g if( voting_stake[2] == 0 ) return; + const account_statistics_object& opinion_account_stats = ( directly_voting ? 
stats : opinion_account.statistics( d ) ); + // Recalculate votes if( !hf2103_passed ) { voting_stake[0] = voting_stake[2]; voting_stake[1] = voting_stake[2]; num_committee_voting_stake = voting_stake[2]; + vp_all = vp_active = vp_committee = vp_witness = vp_worker = voting_stake[2]; } else { + vp_all = vp_active = voting_stake[2]; if( !directly_voting ) { - voting_stake[2] = detail::vote_recalc_options::delegator().get_recalced_voting_stake( - voting_stake[2], stats.last_vote_time, *delegator_recalc_times ); + vp_active = voting_stake[2] = detail::vote_recalc_options::delegator().get_recalced_voting_stake( + voting_stake[2], stats.last_vote_time, *delegator_recalc_times ); } - const account_statistics_object& opinion_account_stats = ( directly_voting ? stats - : opinion_account.statistics( d ) ); - voting_stake[1] = detail::vote_recalc_options::witness().get_recalced_voting_stake( - voting_stake[2], opinion_account_stats.last_vote_time, *witness_recalc_times ); - voting_stake[0] = detail::vote_recalc_options::committee().get_recalced_voting_stake( - voting_stake[2], opinion_account_stats.last_vote_time, *committee_recalc_times ); + vp_witness = voting_stake[1] = detail::vote_recalc_options::witness().get_recalced_voting_stake( + voting_stake[2], opinion_account_stats.last_vote_time, *witness_recalc_times ); + vp_committee = voting_stake[0] = detail::vote_recalc_options::committee().get_recalced_voting_stake( + voting_stake[2], opinion_account_stats.last_vote_time, *committee_recalc_times ); num_committee_voting_stake = voting_stake[0]; if( opinion_account.num_committee_voted > 1 ) voting_stake[0] /= opinion_account.num_committee_voted; - voting_stake[2] = detail::vote_recalc_options::worker().get_recalced_voting_stake( - voting_stake[2], opinion_account_stats.last_vote_time, *worker_recalc_times ); + vp_worker = voting_stake[2] = detail::vote_recalc_options::worker().get_recalced_voting_stake( + voting_stake[2], opinion_account_stats.last_vote_time, 
*worker_recalc_times ); } + // update voting power + d.modify( opinion_account_stats, [=]( account_statistics_object& update_stats ) { + if (update_stats.vote_tally_time != now) + { + update_stats.vp_all = vp_all; + update_stats.vp_active = vp_active; + update_stats.vp_committee = vp_committee; + update_stats.vp_witness = vp_witness; + update_stats.vp_worker = vp_worker; + update_stats.vote_tally_time = now; + } + else + { + update_stats.vp_all += vp_all; + update_stats.vp_active += vp_active; + update_stats.vp_committee += vp_committee; + update_stats.vp_witness += vp_witness; + update_stats.vp_worker += vp_worker; + // update_stats.vote_tally_time = now; + } + }); + for( vote_id_type id : opinion_account.options.votes ) { uint32_t offset = id.instance(); @@ -1339,7 +1407,7 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g } tally_helper(*this); perform_account_maintenance( tally_helper ); - + struct clear_canary { clear_canary(vector& target): target(target){} ~clear_canary() { target.clear(); } @@ -1422,8 +1490,13 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g if ( dgpo.next_maintenance_time <= HARDFORK_CORE_2103_TIME && next_maintenance_time > HARDFORK_CORE_2103_TIME ) process_hf_2103(*this); - modify(dgpo, [next_maintenance_time](dynamic_global_property_object& d) { + // Update tickets. 
Note: the new values will take effect only on the next maintenance interval + if ( dgpo.next_maintenance_time <= HARDFORK_CORE_2262_TIME && next_maintenance_time > HARDFORK_CORE_2262_TIME ) + process_hf_2262(*this); + + modify(dgpo, [last_vote_tally_time, next_maintenance_time](dynamic_global_property_object& d) { d.next_maintenance_time = next_maintenance_time; + d.last_vote_tally_time = last_vote_tally_time; d.accounts_registered_this_interval = 0; }); diff --git a/libraries/chain/db_management.cpp b/libraries/chain/db_management.cpp index 9ca657ef6f..c71d85d3d9 100644 --- a/libraries/chain/db_management.cpp +++ b/libraries/chain/db_management.cpp @@ -125,12 +125,15 @@ void database::reindex( fc::path data_dir ) { std::stringstream bysize; std::stringstream bynum; - bysize << std::fixed << std::setprecision(5) << double(std::get<0>(blocks.front())) / total_block_size * 100; - bynum << std::fixed << std::setprecision(5) << double(i*100)/last_block_num; + size_t current_pos = std::get<0>(blocks.front()); + if( current_pos > total_block_size ) + total_block_size = current_pos; + bysize << std::fixed << std::setprecision(5) << double(current_pos) / total_block_size * 100; + bynum << std::fixed << std::setprecision(5) << double(i)*100/last_block_num; ilog( " [by size: ${size}% ${processed} of ${total}] [by num: ${num}% ${i} of ${last}]", ("size", bysize.str()) - ("processed", std::get<0>(blocks.front())) + ("processed", current_pos) ("total", total_block_size) ("num", bynum.str()) ("i", i) diff --git a/libraries/chain/db_market.cpp b/libraries/chain/db_market.cpp index 1f5faacfa4..d24598b372 100644 --- a/libraries/chain/db_market.cpp +++ b/libraries/chain/db_market.cpp @@ -1339,7 +1339,8 @@ void database::pay_order( const account_object& receiver, const asset& receives, adjust_balance(receiver.get_id(), receives); } -asset database::calculate_market_fee( const asset_object& trade_asset, const asset& trade_amount, const bool& is_maker) +asset 
database::calculate_market_fee( const asset_object& trade_asset, const asset& trade_amount, + const bool& is_maker )const { assert( trade_asset.id == trade_amount.asset_id ); @@ -1374,9 +1375,10 @@ asset database::calculate_market_fee( const asset_object& trade_asset, const ass asset database::pay_market_fees(const account_object* seller, const asset_object& recv_asset, const asset& receives, - const bool& is_maker) + const bool& is_maker, const optional& calculated_market_fees ) { - const auto market_fees = calculate_market_fee( recv_asset, receives, is_maker ); + const auto market_fees = ( calculated_market_fees.valid() ? *calculated_market_fees + : calculate_market_fee( recv_asset, receives, is_maker ) ); auto issuer_fees = market_fees; FC_ASSERT( issuer_fees <= receives, "Market fee shouldn't be greater than receives"); //Don't dirty undo state if not actually collecting any fees diff --git a/libraries/chain/db_notify.cpp b/libraries/chain/db_notify.cpp index bea56d7b6a..648ee569a9 100644 --- a/libraries/chain/db_notify.cpp +++ b/libraries/chain/db_notify.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -314,6 +315,26 @@ struct get_impacted_account_visitor { _impacted.insert( op.fee_payer() ); // account } + void operator()( const liquidity_pool_create_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account + } + void operator()( const liquidity_pool_delete_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account + } + void operator()( const liquidity_pool_deposit_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account + } + void operator()( const liquidity_pool_withdraw_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account + } + void operator()( const liquidity_pool_exchange_operation& op ) + { + _impacted.insert( op.fee_payer() ); // account + } }; } // namespace detail @@ -424,6 +445,9 @@ void get_relevant_accounts( const object* obj, flat_set& accoun FC_ASSERT( aobj != 
nullptr ); accounts.insert( aobj->account ); break; + } case liquidity_pool_object_type:{ + // no account info in the object although it does have an owner + break; } } } @@ -562,6 +586,9 @@ void database::notify_changed_objects() GRAPHENE_TRY_NOTIFY( removed_objects, removed_ids, removed, removed_accounts_impacted ) } } +} catch( const graphene::chain::plugin_exception& e ) { + elog( "Caught plugin exception: ${e}", ("e", e.to_detail_string() ) ); + throw; } FC_CAPTURE_AND_LOG( (0) ) } } } // namespace graphene::chain diff --git a/libraries/chain/db_update.cpp b/libraries/chain/db_update.cpp index 889f62739a..7c0fec421c 100644 --- a/libraries/chain/db_update.cpp +++ b/libraries/chain/db_update.cpp @@ -604,6 +604,9 @@ void database::clear_expired_htlcs() generic_operation_result database::process_tickets() { + const auto maint_time = get_dynamic_global_properties().next_maintenance_time; + ticket_version version = ( HARDFORK_CORE_2262_PASSED(maint_time) ? ticket_v2 : ticket_v1 ); + generic_operation_result result; share_type total_delta_pob; share_type total_delta_inactive; @@ -627,8 +630,8 @@ generic_operation_result database::process_tickets() { ticket_type old_type = ticket.current_type; share_type old_value = ticket.value; - modify( ticket, []( ticket_object& o ) { - o.auto_update(); + modify( ticket, [version]( ticket_object& o ) { + o.auto_update( version ); }); result.updated_objects.insert( ticket.id ); diff --git a/libraries/chain/hardfork.d/CORE_2262.hf b/libraries/chain/hardfork.d/CORE_2262.hf new file mode 100644 index 0000000000..9671cb70d7 --- /dev/null +++ b/libraries/chain/hardfork.d/CORE_2262.hf @@ -0,0 +1,5 @@ +// bitshares-core issue #2262: Remove voting power from liquid BTS and tickets +#ifndef HARDFORK_CORE_2262_TIME +#define HARDFORK_CORE_2262_TIME (fc::time_point_sec( 1600520400 )) // testnet, Saturday, Sept. 
19, 2020 13:00:00 UTC +#define HARDFORK_CORE_2262_PASSED(next_maint_time) (next_maint_time > HARDFORK_CORE_2262_TIME) +#endif diff --git a/libraries/chain/hardfork.d/LIQUIDITY_POOL.hf b/libraries/chain/hardfork.d/LIQUIDITY_POOL.hf new file mode 100644 index 0000000000..2a12ecdb47 --- /dev/null +++ b/libraries/chain/hardfork.d/LIQUIDITY_POOL.hf @@ -0,0 +1,5 @@ +// Liquidity pool +#ifndef HARDFORK_LIQUIDITY_POOL_TIME +#define HARDFORK_LIQUIDITY_POOL_TIME (fc::time_point_sec( 1600520400 )) // testnet, Saturday, Sept. 19, 2020 13:00:00 UTC +#define HARDFORK_LIQUIDITY_POOL_PASSED(now) (now >= HARDFORK_LIQUIDITY_POOL_TIME) +#endif diff --git a/libraries/chain/include/graphene/chain/account_object.hpp b/libraries/chain/include/graphene/chain/account_object.hpp index 5b65792265..9bb6fceaf7 100644 --- a/libraries/chain/include/graphene/chain/account_object.hpp +++ b/libraries/chain/include/graphene/chain/account_object.hpp @@ -46,8 +46,8 @@ namespace graphene { namespace chain { class account_statistics_object : public graphene::db::abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_account_statistics_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_account_statistics_object_type; account_id_type owner; @@ -94,6 +94,18 @@ namespace graphene { namespace chain { time_point_sec last_vote_time; ///< last time voted + /// Voting Power Stats + ///@{ + uint64_t vp_all = 0; ///< all voting power. + uint64_t vp_active = 0; ///< active voting power, if there is no attenuation, it is equal to vp_all. + uint64_t vp_committee = 0; ///< the final voting power for the committees. + uint64_t vp_witness = 0; ///< the final voting power for the witnesses. + uint64_t vp_worker = 0; ///< the final voting power for the workers. + /// timestamp of the last count of votes. 
+ /// if there is no statistics, the date is less than `_db.get_dynamic_global_properties().last_vote_tally_time`. + time_point_sec vote_tally_time; + ///@} + /// Whether this account owns some CORE asset and is voting inline bool has_some_core_voting() const { @@ -146,8 +158,8 @@ namespace graphene { namespace chain { class account_balance_object : public abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_account_balance_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_account_balance_object_type; account_id_type owner; asset_id_type asset_type; @@ -170,8 +182,8 @@ namespace graphene { namespace chain { class account_object : public graphene::db::abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = account_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = account_object_type; /** * The time at which this account's membership expires. @@ -212,7 +224,6 @@ namespace graphene { namespace chain { /// operations the account may perform. 
authority active; - typedef account_options options_type; account_options options; /// Pre-calculated for better performance on chain maintenance @@ -420,7 +431,8 @@ namespace graphene { namespace chain { typedef generic_index account_index; struct by_maintenance_seq; - + struct by_voting_power_active; + /** * @ingroup object_index */ @@ -434,6 +446,17 @@ namespace graphene { namespace chain { const_mem_fun, member > + >, + ordered_non_unique< tag, + composite_key< + account_statistics_object, + member, + member + >, + composite_key_compare< + std::greater< time_point_sec >, + std::greater< uint64_t > + > > > > account_stats_multi_index_type; diff --git a/libraries/chain/include/graphene/chain/asset_object.hpp b/libraries/chain/include/graphene/chain/asset_object.hpp index dced0377c0..cb6a8ae386 100644 --- a/libraries/chain/include/graphene/chain/asset_object.hpp +++ b/libraries/chain/include/graphene/chain/asset_object.hpp @@ -58,8 +58,8 @@ namespace graphene { namespace chain { class asset_dynamic_data_object : public abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_asset_dynamic_data_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_asset_dynamic_data_object_type; /// The number of shares currently in existence share_type current_supply; @@ -79,8 +79,8 @@ namespace graphene { namespace chain { class asset_object : public graphene::db::abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = asset_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = asset_object_type; /// This function does not check if any registered asset has this symbol or not; it simply checks whether the /// symbol would be valid. @@ -89,6 +89,8 @@ namespace graphene { namespace chain { /// @return true if this is a market-issued asset; false otherwise. 
bool is_market_issued()const { return bitasset_data_id.valid(); } + /// @return true if this is a share asset of a liquidity pool; false otherwise. + bool is_liquidity_pool_share_asset()const { return for_liquidity_pool.valid(); } /// @return true if users may request force-settlement of this market-issued asset; false otherwise bool can_force_settle()const { return !(options.flags & disable_force_settle); } /// @return true if the issuer of this market-issued asset may globally settle the asset; false otherwise @@ -144,6 +146,9 @@ namespace graphene { namespace chain { optional buyback_account; + /// The ID of the liquidity pool if the asset is the share asset of a liquidity pool + optional for_liquidity_pool; + asset_id_type get_id()const { return id; } void validate()const @@ -249,8 +254,8 @@ namespace graphene { namespace chain { class asset_bitasset_data_object : public abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_asset_bitasset_data_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_asset_bitasset_data_object_type; /// The asset this object belong to asset_id_type asset_id; @@ -419,6 +424,7 @@ FC_REFLECT_DERIVED( graphene::chain::asset_object, (graphene::db::object), (dynamic_asset_data_id) (bitasset_data_id) (buyback_account) + (for_liquidity_pool) ) FC_REFLECT_TYPENAME( graphene::chain::asset_bitasset_data_object ) diff --git a/libraries/chain/include/graphene/chain/balance_object.hpp b/libraries/chain/include/graphene/chain/balance_object.hpp index ef385a0642..478f78be39 100644 --- a/libraries/chain/include/graphene/chain/balance_object.hpp +++ b/libraries/chain/include/graphene/chain/balance_object.hpp @@ -30,8 +30,8 @@ namespace graphene { namespace chain { class balance_object : public abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = balance_object_type; + static 
constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = balance_object_type; bool is_vesting_balance()const { return vesting_policy.valid(); } diff --git a/libraries/chain/include/graphene/chain/block_summary_object.hpp b/libraries/chain/include/graphene/chain/block_summary_object.hpp index 2206843c47..3486b993b4 100644 --- a/libraries/chain/include/graphene/chain/block_summary_object.hpp +++ b/libraries/chain/include/graphene/chain/block_summary_object.hpp @@ -40,8 +40,8 @@ namespace graphene { namespace chain { class block_summary_object : public abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_block_summary_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_block_summary_object_type; block_id_type block_id; }; diff --git a/libraries/chain/include/graphene/chain/budget_record_object.hpp b/libraries/chain/include/graphene/chain/budget_record_object.hpp index 3f8ec3cbcf..15eab0a6cc 100644 --- a/libraries/chain/include/graphene/chain/budget_record_object.hpp +++ b/libraries/chain/include/graphene/chain/budget_record_object.hpp @@ -56,8 +56,8 @@ struct budget_record class budget_record_object : public graphene::db::abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_budget_record_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_budget_record_object_type; fc::time_point_sec time; budget_record record; diff --git a/libraries/chain/include/graphene/chain/buyback_object.hpp b/libraries/chain/include/graphene/chain/buyback_object.hpp index a17f6a9ed3..6724c9453e 100644 --- a/libraries/chain/include/graphene/chain/buyback_object.hpp +++ b/libraries/chain/include/graphene/chain/buyback_object.hpp @@ -43,8 +43,8 @@ namespace graphene { namespace chain { class buyback_object : public 
graphene::db::abstract_object< buyback_object > { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_buyback_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_buyback_object_type; asset_id_type asset_to_buy; }; diff --git a/libraries/chain/include/graphene/chain/chain_property_object.hpp b/libraries/chain/include/graphene/chain/chain_property_object.hpp index 86dba6b8fd..28542c231e 100644 --- a/libraries/chain/include/graphene/chain/chain_property_object.hpp +++ b/libraries/chain/include/graphene/chain/chain_property_object.hpp @@ -33,8 +33,8 @@ namespace graphene { namespace chain { class chain_property_object : public abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_chain_property_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_chain_property_object_type; chain_id_type chain_id; immutable_chain_parameters immutable_parameters; diff --git a/libraries/chain/include/graphene/chain/committee_member_object.hpp b/libraries/chain/include/graphene/chain/committee_member_object.hpp index 8812222fc0..9c0cef0816 100644 --- a/libraries/chain/include/graphene/chain/committee_member_object.hpp +++ b/libraries/chain/include/graphene/chain/committee_member_object.hpp @@ -43,8 +43,8 @@ namespace graphene { namespace chain { class committee_member_object : public abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = committee_member_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = committee_member_object_type; account_id_type committee_member_account; vote_id_type vote_id; diff --git a/libraries/chain/include/graphene/chain/confidential_object.hpp b/libraries/chain/include/graphene/chain/confidential_object.hpp index 
9c8fba2150..02057e46b5 100644 --- a/libraries/chain/include/graphene/chain/confidential_object.hpp +++ b/libraries/chain/include/graphene/chain/confidential_object.hpp @@ -41,8 +41,8 @@ namespace graphene { namespace chain { class blinded_balance_object : public graphene::db::abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_blinded_balance_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_blinded_balance_object_type; fc::ecc::commitment_type commitment; asset_id_type asset_id; diff --git a/libraries/chain/include/graphene/chain/config.hpp b/libraries/chain/include/graphene/chain/config.hpp index 39b44d35ad..ae1c0b195d 100644 --- a/libraries/chain/include/graphene/chain/config.hpp +++ b/libraries/chain/include/graphene/chain/config.hpp @@ -30,7 +30,7 @@ #define GRAPHENE_MAX_NESTED_OBJECTS (200) -#define GRAPHENE_CURRENT_DB_VERSION "20200510" +#define GRAPHENE_CURRENT_DB_VERSION "20200910" #define GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT 4 #define GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT 3 diff --git a/libraries/chain/include/graphene/chain/custom_authority_object.hpp b/libraries/chain/include/graphene/chain/custom_authority_object.hpp index bfa6b7d0f4..d73194a11a 100644 --- a/libraries/chain/include/graphene/chain/custom_authority_object.hpp +++ b/libraries/chain/include/graphene/chain/custom_authority_object.hpp @@ -42,8 +42,8 @@ namespace graphene { namespace chain { mutable optional predicate_cache; public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = custom_authority_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = custom_authority_object_type; account_id_type account; bool enabled; diff --git a/libraries/chain/include/graphene/chain/database.hpp b/libraries/chain/include/graphene/chain/database.hpp index 24ef7b6180..dce314d363 100644 --- 
a/libraries/chain/include/graphene/chain/database.hpp +++ b/libraries/chain/include/graphene/chain/database.hpp @@ -491,9 +491,10 @@ namespace graphene { namespace chain { * @param trade_amount the quantity that the fee calculation is based upon * @param is_maker TRUE if this is the fee for a maker, FALSE if taker */ - asset calculate_market_fee( const asset_object& trade_asset, const asset& trade_amount, const bool& is_maker); + asset calculate_market_fee( const asset_object& trade_asset, const asset& trade_amount, + const bool& is_maker )const; asset pay_market_fees(const account_object* seller, const asset_object& recv_asset, const asset& receives, - const bool& is_maker); + const bool& is_maker, const optional& calculated_market_fees = {}); asset pay_force_settle_fees(const asset_object& collecting_asset, const asset& collat_receives); ///@} diff --git a/libraries/chain/include/graphene/chain/fba_object.hpp b/libraries/chain/include/graphene/chain/fba_object.hpp index 5558b92a62..3b5b2f7eb1 100644 --- a/libraries/chain/include/graphene/chain/fba_object.hpp +++ b/libraries/chain/include/graphene/chain/fba_object.hpp @@ -37,8 +37,8 @@ class database; class fba_accumulator_object : public graphene::db::abstract_object< fba_accumulator_object > { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_fba_accumulator_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_fba_accumulator_object_type; share_type accumulated_fba_fees; optional< asset_id_type > designated_asset; diff --git a/libraries/chain/include/graphene/chain/global_property_object.hpp b/libraries/chain/include/graphene/chain/global_property_object.hpp index 1c4df931a7..a7fe629872 100644 --- a/libraries/chain/include/graphene/chain/global_property_object.hpp +++ b/libraries/chain/include/graphene/chain/global_property_object.hpp @@ -40,8 +40,8 @@ namespace graphene { namespace chain { class 
global_property_object : public graphene::db::abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_global_property_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_global_property_object_type; chain_parameters parameters; optional pending_parameters; @@ -64,14 +64,15 @@ namespace graphene { namespace chain { class dynamic_global_property_object : public abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_dynamic_global_property_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_dynamic_global_property_object_type; uint32_t head_block_number = 0; block_id_type head_block_id; time_point_sec time; witness_id_type current_witness; time_point_sec next_maintenance_time; + time_point_sec last_vote_tally_time; time_point_sec last_budget_time; share_type witness_budget; share_type total_pob; diff --git a/libraries/chain/include/graphene/chain/hardfork_visitor.hpp b/libraries/chain/include/graphene/chain/hardfork_visitor.hpp index 0df9aa13eb..c2f3c97545 100644 --- a/libraries/chain/include/graphene/chain/hardfork_visitor.hpp +++ b/libraries/chain/include/graphene/chain/hardfork_visitor.hpp @@ -49,6 +49,11 @@ struct hardfork_visitor { using BSIP_40_ops = TL::list; using hf2103_ops = TL::list; + using liquidity_pool_ops = TL::list< liquidity_pool_create_operation, + liquidity_pool_delete_operation, + liquidity_pool_deposit_operation, + liquidity_pool_withdraw_operation, + liquidity_pool_exchange_operation >; fc::time_point_sec now; hardfork_visitor(fc::time_point_sec now) : now(now) {} @@ -64,6 +69,9 @@ struct hardfork_visitor { template std::enable_if_t(), bool> visit() { return HARDFORK_CORE_2103_PASSED(now); } + template + std::enable_if_t(), bool> + visit() { return HARDFORK_LIQUIDITY_POOL_PASSED(now); } /// @} /// 
typelist::runtime::dispatch adaptor diff --git a/libraries/chain/include/graphene/chain/htlc_object.hpp b/libraries/chain/include/graphene/chain/htlc_object.hpp index a9f8cf5f43..ab84d58565 100644 --- a/libraries/chain/include/graphene/chain/htlc_object.hpp +++ b/libraries/chain/include/graphene/chain/htlc_object.hpp @@ -40,8 +40,8 @@ namespace graphene { namespace chain { class htlc_object : public graphene::db::abstract_object { public: // uniquely identify this object in the database - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = htlc_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = htlc_object_type; struct transfer_info { account_id_type from; diff --git a/libraries/chain/include/graphene/chain/liquidity_pool_evaluator.hpp b/libraries/chain/include/graphene/chain/liquidity_pool_evaluator.hpp new file mode 100644 index 0000000000..6d8497c189 --- /dev/null +++ b/libraries/chain/include/graphene/chain/liquidity_pool_evaluator.hpp @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2020 Abit More, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#pragma once +#include + +#include + +namespace graphene { namespace chain { + + class asset_object; + class asset_dynamic_data_object; + class liquidity_pool_object; + + class liquidity_pool_create_evaluator : public evaluator + { + public: + typedef liquidity_pool_create_operation operation_type; + + void_result do_evaluate( const liquidity_pool_create_operation& op ); + generic_operation_result do_apply( const liquidity_pool_create_operation& op ); + + const asset_object* _share_asset = nullptr; + }; + + class liquidity_pool_delete_evaluator : public evaluator + { + public: + typedef liquidity_pool_delete_operation operation_type; + + void_result do_evaluate( const liquidity_pool_delete_operation& op ); + generic_operation_result do_apply( const liquidity_pool_delete_operation& op ); + + const liquidity_pool_object* _pool = nullptr; + const asset_object* _share_asset = nullptr; + }; + + class liquidity_pool_deposit_evaluator : public evaluator + { + public: + typedef liquidity_pool_deposit_operation operation_type; + + void_result do_evaluate( const liquidity_pool_deposit_operation& op ); + generic_exchange_operation_result do_apply( const liquidity_pool_deposit_operation& op ); + + const liquidity_pool_object* _pool = nullptr; + const asset_dynamic_data_object* _share_asset_dyn_data = nullptr; + asset _account_receives; + asset _pool_receives_a; + asset _pool_receives_b; + }; + + class liquidity_pool_withdraw_evaluator : public evaluator + { + public: + typedef liquidity_pool_withdraw_operation operation_type; + + void_result do_evaluate( const liquidity_pool_withdraw_operation& op ); + generic_exchange_operation_result do_apply( const liquidity_pool_withdraw_operation& op ); + + const 
liquidity_pool_object* _pool = nullptr; + const asset_dynamic_data_object* _share_asset_dyn_data = nullptr; + asset _pool_pays_a; + asset _pool_pays_b; + }; + + class liquidity_pool_exchange_evaluator : public evaluator + { + public: + typedef liquidity_pool_exchange_operation operation_type; + + void_result do_evaluate( const liquidity_pool_exchange_operation& op ); + generic_exchange_operation_result do_apply( const liquidity_pool_exchange_operation& op ); + + const liquidity_pool_object* _pool = nullptr; + const asset_object* _pool_pays_asset = nullptr; + const asset_object* _pool_receives_asset = nullptr; + asset _pool_pays; + asset _pool_receives; + asset _account_receives; + asset _maker_market_fee; + asset _taker_market_fee; + }; + +} } // graphene::chain diff --git a/libraries/chain/include/graphene/chain/liquidity_pool_object.hpp b/libraries/chain/include/graphene/chain/liquidity_pool_object.hpp new file mode 100644 index 0000000000..82a4b72943 --- /dev/null +++ b/libraries/chain/include/graphene/chain/liquidity_pool_object.hpp @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2020 Abit More, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#pragma once + +#include +#include + +#include +#include + +#include + +namespace graphene { namespace chain { + +using namespace graphene::db; + +/** + * @brief A liquidity pool + * @ingroup object + * @ingroup protocol + * + */ +class liquidity_pool_object : public abstract_object +{ + public: + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = liquidity_pool_object_type; + + asset_id_type asset_a; ///< Type of the first asset in the pool + asset_id_type asset_b; ///< Type of the second asset in the pool + share_type balance_a; ///< The balance of the first asset in the pool + share_type balance_b; ///< The balance of the second asset in the pool + asset_id_type share_asset; ///< Type of the share asset aka the LP token + uint16_t taker_fee_percent = 0; ///< Taker fee percent + uint16_t withdrawal_fee_percent = 0; ///< Withdrawal fee percent + fc::uint128_t virtual_value = 0; ///< Virtual value of the pool + + void update_virtual_value() + { + virtual_value = fc::uint128_t( balance_a.value ) * balance_b.value; + } +}; + +struct by_share_asset; +struct by_asset_a; +struct by_asset_b; +struct by_asset_ab; + +/** +* @ingroup object_index +*/ +typedef multi_index_container< + liquidity_pool_object, + indexed_by< + ordered_unique< tag, member< object, object_id_type, &object::id > >, + ordered_unique< tag, + member< liquidity_pool_object, asset_id_type, &liquidity_pool_object::share_asset > >, + ordered_unique< tag, + composite_key< liquidity_pool_object, + member< liquidity_pool_object, asset_id_type, &liquidity_pool_object::asset_a >, + member< object, object_id_type, &object::id> + > + >, + ordered_unique< tag, + composite_key< liquidity_pool_object, + 
member< liquidity_pool_object, asset_id_type, &liquidity_pool_object::asset_b >, + member< object, object_id_type, &object::id> + > + >, + ordered_unique< tag, + composite_key< liquidity_pool_object, + member< liquidity_pool_object, asset_id_type, &liquidity_pool_object::asset_a >, + member< liquidity_pool_object, asset_id_type, &liquidity_pool_object::asset_b >, + member< object, object_id_type, &object::id> + > + > + > +> liquidity_pool_multi_index_type; + +/** +* @ingroup object_index +*/ +typedef generic_index liquidity_pool_index; + +} } // graphene::chain + +MAP_OBJECT_ID_TO_TYPE( graphene::chain::liquidity_pool_object ) + +FC_REFLECT_TYPENAME( graphene::chain::liquidity_pool_object ) + +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::chain::liquidity_pool_object ) diff --git a/libraries/chain/include/graphene/chain/market_object.hpp b/libraries/chain/include/graphene/chain/market_object.hpp index 365e2c281c..71e9f108a1 100644 --- a/libraries/chain/include/graphene/chain/market_object.hpp +++ b/libraries/chain/include/graphene/chain/market_object.hpp @@ -44,8 +44,8 @@ using namespace graphene::db; class limit_order_object : public abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = limit_order_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = limit_order_object_type; time_point_sec expiration; account_id_type seller; @@ -119,8 +119,8 @@ typedef generic_index limit_or class call_order_object : public abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = call_order_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = call_order_object_type; asset get_collateral()const { return asset( collateral, call_price.base.asset_id ); } asset get_debt()const { return asset( debt, debt_type() ); } @@ -168,8 +168,8 @@ class call_order_object : public abstract_object 
class force_settlement_object : public abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = force_settlement_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = force_settlement_object_type; account_id_type owner; asset balance; @@ -189,8 +189,8 @@ class force_settlement_object : public abstract_object class collateral_bid_object : public abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_collateral_bid_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_collateral_bid_object_type; asset get_additional_collateral()const { return inv_swan_price.base; } asset get_debt_covered()const { return inv_swan_price.quote; } diff --git a/libraries/chain/include/graphene/chain/operation_history_object.hpp b/libraries/chain/include/graphene/chain/operation_history_object.hpp index 097892d33d..2f735698da 100644 --- a/libraries/chain/include/graphene/chain/operation_history_object.hpp +++ b/libraries/chain/include/graphene/chain/operation_history_object.hpp @@ -48,8 +48,8 @@ namespace graphene { namespace chain { class operation_history_object : public abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = operation_history_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = operation_history_object_type; operation_history_object( const operation& o ):op(o){} operation_history_object(){} @@ -90,8 +90,8 @@ namespace graphene { namespace chain { class account_transaction_history_object : public abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_account_transaction_history_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = 
impl_account_transaction_history_object_type; account_id_type account; /// the account this operation applies to operation_history_id_type operation_id; uint64_t sequence = 0; /// the operation position within the given account diff --git a/libraries/chain/include/graphene/chain/proposal_object.hpp b/libraries/chain/include/graphene/chain/proposal_object.hpp index 8d6bc849da..0a49bc05da 100644 --- a/libraries/chain/include/graphene/chain/proposal_object.hpp +++ b/libraries/chain/include/graphene/chain/proposal_object.hpp @@ -40,8 +40,8 @@ namespace graphene { namespace chain { class proposal_object : public abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = proposal_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = proposal_object_type; time_point_sec expiration_time; optional review_period_time; diff --git a/libraries/chain/include/graphene/chain/special_authority_object.hpp b/libraries/chain/include/graphene/chain/special_authority_object.hpp index 1e45bc28dc..cbd8103eb4 100644 --- a/libraries/chain/include/graphene/chain/special_authority_object.hpp +++ b/libraries/chain/include/graphene/chain/special_authority_object.hpp @@ -42,8 +42,8 @@ namespace graphene { namespace chain { class special_authority_object : public graphene::db::abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_special_authority_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_special_authority_object_type; account_id_type account; }; diff --git a/libraries/chain/include/graphene/chain/ticket_object.hpp b/libraries/chain/include/graphene/chain/ticket_object.hpp index 2bf2e2ab1e..d6c18e93e1 100644 --- a/libraries/chain/include/graphene/chain/ticket_object.hpp +++ b/libraries/chain/include/graphene/chain/ticket_object.hpp @@ -46,6 +46,13 @@ enum ticket_status 
TICKET_STATUS_COUNT }; +/// Version of a ticket +enum ticket_version +{ + ticket_v1 = 1, + ticket_v2 = 2 +}; + /** * @brief a ticket for governance voting * @ingroup object @@ -55,8 +62,8 @@ enum ticket_status class ticket_object : public abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = ticket_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = ticket_object_type; account_id_type account; ///< The account who owns the ticket ticket_type target_type; ///< The target type of the ticket @@ -79,31 +86,33 @@ class ticket_object : public abstract_object static constexpr uint32_t _seconds_to_downgrade[] = { 180 * 86400, 180 * 86400, 360 * 86400 }; return _seconds_to_downgrade[ static_cast(i) ]; } - static uint8_t value_multiplier( ticket_type i ) { - static constexpr uint32_t _value_multiplier[] = { 1, 2, 4, 8, 8, 0 }; - return _value_multiplier[ static_cast(i) ]; + static uint8_t value_multiplier( ticket_type i, ticket_version version ) { + static constexpr uint32_t _value_multiplier_v1[] = { 1, 2, 4, 8, 8, 0 }; + static constexpr uint32_t _value_multiplier_v2[] = { 0, 2, 4, 8, 8, 0 }; + return ( version == ticket_v1 ? 
_value_multiplier_v1[ static_cast(i) ] + : _value_multiplier_v2[ static_cast(i) ] ); } /// Initialize member variables for a ticket newly created from account balance void init_new( time_point_sec now, account_id_type new_account, - ticket_type new_target_type, const asset& new_amount ); + ticket_type new_target_type, const asset& new_amount, ticket_version version ); /// Initialize member variables for a ticket split from another ticket void init_split( time_point_sec now, const ticket_object& old_ticket, - ticket_type new_target_type, const asset& new_amount ); + ticket_type new_target_type, const asset& new_amount, ticket_version version ); /// Set a new target type and update member variables accordingly - void update_target_type( time_point_sec now, ticket_type new_target_type ); + void update_target_type( time_point_sec now, ticket_type new_target_type, ticket_version version ); /// Adjust amount and update member variables accordingly - void adjust_amount( const asset& delta_amount ); + void adjust_amount( const asset& delta_amount, ticket_version version ); /// Update the ticket when it's time - void auto_update(); + void auto_update( ticket_version version ); private: /// Recalculate value of the ticket - void update_value(); + void update_value( ticket_version version ); }; diff --git a/libraries/chain/include/graphene/chain/transaction_history_object.hpp b/libraries/chain/include/graphene/chain/transaction_history_object.hpp index 2f0ebe9fc2..b5844c2f82 100644 --- a/libraries/chain/include/graphene/chain/transaction_history_object.hpp +++ b/libraries/chain/include/graphene/chain/transaction_history_object.hpp @@ -44,8 +44,8 @@ namespace graphene { namespace chain { class transaction_history_object : public abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_transaction_history_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = 
impl_transaction_history_object_type; signed_transaction trx; transaction_id_type trx_id; diff --git a/libraries/chain/include/graphene/chain/vesting_balance_object.hpp b/libraries/chain/include/graphene/chain/vesting_balance_object.hpp index eea6c681be..3e91d9cf9c 100644 --- a/libraries/chain/include/graphene/chain/vesting_balance_object.hpp +++ b/libraries/chain/include/graphene/chain/vesting_balance_object.hpp @@ -151,8 +151,8 @@ namespace graphene { namespace chain { class vesting_balance_object : public abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = vesting_balance_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = vesting_balance_object_type; /// Account which owns and may withdraw from this vesting balance account_id_type owner; diff --git a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp index cb954ad7f3..b8f6b1f54f 100644 --- a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp +++ b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp @@ -44,8 +44,8 @@ namespace graphene { namespace chain { class withdraw_permission_object : public graphene::db::abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = withdraw_permission_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = withdraw_permission_object_type; /// The account authorizing @ref authorized_account to withdraw from it account_id_type withdraw_from_account; diff --git a/libraries/chain/include/graphene/chain/witness_object.hpp b/libraries/chain/include/graphene/chain/witness_object.hpp index 0c48f3bc17..29810eed2d 100644 --- a/libraries/chain/include/graphene/chain/witness_object.hpp +++ b/libraries/chain/include/graphene/chain/witness_object.hpp @@ -32,8 +32,8 @@ 
namespace graphene { namespace chain { class witness_object : public abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = witness_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = witness_object_type; account_id_type witness_account; uint64_t last_aslot = 0; diff --git a/libraries/chain/include/graphene/chain/witness_schedule_object.hpp b/libraries/chain/include/graphene/chain/witness_schedule_object.hpp index f481e05852..080e76551d 100644 --- a/libraries/chain/include/graphene/chain/witness_schedule_object.hpp +++ b/libraries/chain/include/graphene/chain/witness_schedule_object.hpp @@ -30,8 +30,8 @@ namespace graphene { namespace chain { class witness_schedule_object : public graphene::db::abstract_object { public: - static const uint8_t space_id = implementation_ids; - static const uint8_t type_id = impl_witness_schedule_object_type; + static constexpr uint8_t space_id = implementation_ids; + static constexpr uint8_t type_id = impl_witness_schedule_object_type; vector< witness_id_type > current_shuffled_witnesses; }; diff --git a/libraries/chain/include/graphene/chain/worker_object.hpp b/libraries/chain/include/graphene/chain/worker_object.hpp index 34528a79ef..3855a9f34f 100644 --- a/libraries/chain/include/graphene/chain/worker_object.hpp +++ b/libraries/chain/include/graphene/chain/worker_object.hpp @@ -107,8 +107,8 @@ typedef static_variant< class worker_object : public abstract_object { public: - static const uint8_t space_id = protocol_ids; - static const uint8_t type_id = worker_object_type; + static constexpr uint8_t space_id = protocol_ids; + static constexpr uint8_t type_id = worker_object_type; /// ID of the account which owns this worker account_id_type worker_account; diff --git a/libraries/chain/liquidity_pool_evaluator.cpp b/libraries/chain/liquidity_pool_evaluator.cpp new file mode 100644 index 0000000000..607cedc7a0 --- /dev/null +++ 
b/libraries/chain/liquidity_pool_evaluator.cpp @@ -0,0 +1,403 @@ +/* + * Copyright (c) 2020 Abit More, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include +#include + +#include + +#include +#include +#include +#include + +#include + +namespace graphene { namespace chain { + +void_result liquidity_pool_create_evaluator::do_evaluate(const liquidity_pool_create_operation& op) +{ try { + const database& d = db(); + const auto block_time = d.head_block_time(); + + FC_ASSERT( HARDFORK_LIQUIDITY_POOL_PASSED(block_time), "Not allowed until the LP hardfork" ); + + op.asset_a(d); // Make sure it exists + op.asset_b(d); // Make sure it exists + _share_asset = &op.share_asset(d); + + FC_ASSERT( _share_asset->issuer == op.account, + "Only the asset owner can set an asset as the share asset of a liquidity pool" ); + + FC_ASSERT( !_share_asset->is_market_issued(), + "Can not specify a market-issued asset as the share asset of a liquidity pool" ); + + FC_ASSERT( !_share_asset->is_liquidity_pool_share_asset(), + "The share asset is already bound to another liquidity pool" ); + + FC_ASSERT( _share_asset->dynamic_data(d).current_supply == 0, + "Current supply of the share asset needs to be zero" ); + + return void_result(); +} FC_CAPTURE_AND_RETHROW( (op) ) } + +generic_operation_result liquidity_pool_create_evaluator::do_apply(const liquidity_pool_create_operation& op) +{ try { + database& d = db(); + generic_operation_result result; + + const auto& new_liquidity_pool_object = d.create([&op](liquidity_pool_object& obj){ + obj.asset_a = op.asset_a; + obj.asset_b = op.asset_b; + obj.share_asset = op.share_asset; + obj.taker_fee_percent = op.taker_fee_percent; + obj.withdrawal_fee_percent = op.withdrawal_fee_percent; + }); + result.new_objects.insert( new_liquidity_pool_object.id ); + + result.updated_objects.insert( _share_asset->id ); + d.modify( *_share_asset, [&new_liquidity_pool_object](asset_object& ao) { + ao.for_liquidity_pool = new_liquidity_pool_object.id; + }); + + return result; +} FC_CAPTURE_AND_RETHROW( (op) ) } + +void_result liquidity_pool_delete_evaluator::do_evaluate(const liquidity_pool_delete_operation& 
op) +{ try { + const database& d = db(); + + _pool = &op.pool(d); + + FC_ASSERT( _pool->balance_a == 0 && _pool->balance_b == 0, "Can not delete a non-empty pool" ); + + _share_asset = &_pool->share_asset(d); + + FC_ASSERT( _share_asset->issuer == op.account, "The account is not the owner of the liquidity pool" ); + + return void_result(); +} FC_CAPTURE_AND_RETHROW( (op) ) } + +generic_operation_result liquidity_pool_delete_evaluator::do_apply(const liquidity_pool_delete_operation& op) +{ try { + database& d = db(); + generic_operation_result result; + + result.updated_objects.insert( _share_asset->id ); + d.modify( *_share_asset, [](asset_object& ao) { + ao.for_liquidity_pool.reset(); + }); + + result.removed_objects.insert( _pool->id ); + d.remove( *_pool ); + + return result; +} FC_CAPTURE_AND_RETHROW( (op) ) } + +void_result liquidity_pool_deposit_evaluator::do_evaluate(const liquidity_pool_deposit_operation& op) +{ try { + const database& d = db(); + + _pool = &op.pool(d); + + FC_ASSERT( op.amount_a.asset_id == _pool->asset_a, "Asset type A mismatch" ); + FC_ASSERT( op.amount_b.asset_id == _pool->asset_b, "Asset type B mismatch" ); + + FC_ASSERT( (_pool->balance_a == 0) == (_pool->balance_b == 0), "Internal error" ); + + const asset_object& share_asset_obj = _pool->share_asset(d); + + FC_ASSERT( share_asset_obj.can_create_new_supply(), "Can not create new supply for the share asset" ); + + if( _pool->balance_a == 0 ) // which implies that _pool->balance_b == 0 + { + FC_ASSERT( share_asset_obj.issuer == op.account, "The initial deposit can only be done by the pool owner" ); + } + + _share_asset_dyn_data = &share_asset_obj.dynamic_data(d); + + FC_ASSERT( (_pool->balance_a == 0) == (_share_asset_dyn_data->current_supply == 0), "Internal error" ); + + FC_ASSERT( _share_asset_dyn_data->current_supply < share_asset_obj.options.max_supply, + "Can not create new supply for the share asset" ); + + FC_ASSERT( is_authorized_asset( d, *fee_paying_account, share_asset_obj 
), + "The account is unauthorized by the share asset" ); + FC_ASSERT( is_authorized_asset( d, *fee_paying_account, _pool->asset_a(d) ), + "The account is unauthorized by asset A" ); + FC_ASSERT( is_authorized_asset( d, *fee_paying_account, _pool->asset_b(d) ), + "The account is unauthorized by asset B" ); + + if( _pool->balance_a == 0 ) + { + share_type share_amount = std::max( op.amount_a.amount.value, op.amount_b.amount.value ); + FC_ASSERT( share_amount <= share_asset_obj.options.max_supply, + "For initial deposit, each amount of the two assets in the pool should not be greater than " + "the maximum supply of the share asset" ); + _pool_receives_a = op.amount_a; + _pool_receives_b = op.amount_b; + _account_receives = asset( share_amount, _pool->share_asset ); + } + else + { + share_type max_new_supply = share_asset_obj.options.max_supply - _share_asset_dyn_data->current_supply; + fc::uint128_t max128( max_new_supply.value ); + fc::uint128_t supply128( _share_asset_dyn_data->current_supply.value ); + fc::uint128_t new_supply_if_a = supply128 * op.amount_a.amount.value / _pool->balance_a.value; + fc::uint128_t new_supply_if_b = supply128 * op.amount_b.amount.value / _pool->balance_b.value; + fc::uint128_t new_supply = std::min( { new_supply_if_a, new_supply_if_b, max128 } ); + + FC_ASSERT( new_supply > 0, "Aborting due to zero outcome" ); + + fc::uint128_t a128 = ( new_supply * _pool->balance_a.value + supply128 - 1 ) / supply128; // round up + FC_ASSERT( a128 <= fc::uint128_t( op.amount_a.amount.value ), "Internal error" ); + _pool_receives_a = asset( static_cast( a128 ), _pool->asset_a ); + + fc::uint128_t b128 = ( new_supply * _pool->balance_b.value + supply128 - 1 ) / supply128; // round up + FC_ASSERT( b128 <= fc::uint128_t( op.amount_b.amount.value ), "Internal error" ); + _pool_receives_b = asset( static_cast( b128 ), _pool->asset_b ); + + _account_receives = asset( static_cast( new_supply ), _pool->share_asset ); + } + + return void_result(); +} 
FC_CAPTURE_AND_RETHROW( (op) ) } + +generic_exchange_operation_result liquidity_pool_deposit_evaluator::do_apply( + const liquidity_pool_deposit_operation& op) +{ try { + database& d = db(); + generic_exchange_operation_result result; + + d.adjust_balance( op.account, -_pool_receives_a ); + d.adjust_balance( op.account, -_pool_receives_b ); + d.adjust_balance( op.account, _account_receives ); + + d.modify( *_pool, [this]( liquidity_pool_object& lpo ){ + lpo.balance_a += _pool_receives_a.amount; + lpo.balance_b += _pool_receives_b.amount; + lpo.update_virtual_value(); + }); + + d.modify( *_share_asset_dyn_data, [this]( asset_dynamic_data_object& data ){ + data.current_supply += _account_receives.amount; + }); + + FC_ASSERT( _pool->balance_a > 0 && _pool->balance_b > 0, "Internal error" ); + FC_ASSERT( _share_asset_dyn_data->current_supply > 0, "Internal error" ); + + result.paid.emplace_back( _pool_receives_a ); + result.paid.emplace_back( _pool_receives_b ); + result.received.emplace_back( _account_receives ); + + return result; +} FC_CAPTURE_AND_RETHROW( (op) ) } + +void_result liquidity_pool_withdraw_evaluator::do_evaluate(const liquidity_pool_withdraw_operation& op) +{ try { + const database& d = db(); + + _pool = &op.pool(d); + + FC_ASSERT( op.share_amount.asset_id == _pool->share_asset, "Share asset type mismatch" ); + + FC_ASSERT( _pool->balance_a > 0 && _pool->balance_b > 0, "The pool has not been initialized" ); + + const asset_object& share_asset_obj = _pool->share_asset(d); + + FC_ASSERT( is_authorized_asset( d, *fee_paying_account, share_asset_obj ), + "The account is unauthorized by the share asset" ); + FC_ASSERT( is_authorized_asset( d, *fee_paying_account, _pool->asset_a(d) ), + "The account is unauthorized by asset A" ); + FC_ASSERT( is_authorized_asset( d, *fee_paying_account, _pool->asset_b(d) ), + "The account is unauthorized by asset B" ); + + _share_asset_dyn_data = &share_asset_obj.dynamic_data(d); + + FC_ASSERT( 
_share_asset_dyn_data->current_supply >= op.share_amount.amount, + "Can not withdraw an amount that is more than the current supply" ); + + if( _share_asset_dyn_data->current_supply == op.share_amount.amount ) + { + _pool_pays_a = asset( _pool->balance_a, _pool->asset_a ); + _pool_pays_b = asset( _pool->balance_b, _pool->asset_b ); + } + else + { + fc::uint128_t share128( op.share_amount.amount.value ); + fc::uint128_t a128 = share128 * _pool->balance_a.value / _share_asset_dyn_data->current_supply.value; + FC_ASSERT( a128 < fc::uint128_t( _pool->balance_a.value ), "Internal error" ); + fc::uint128_t fee_a = a128 * _pool->withdrawal_fee_percent / GRAPHENE_100_PERCENT; + FC_ASSERT( fee_a <= a128, "Withdrawal fee percent of the pool is too high" ); + a128 -= fee_a; + fc::uint128_t b128 = share128 * _pool->balance_b.value / _share_asset_dyn_data->current_supply.value; + FC_ASSERT( b128 < fc::uint128_t( _pool->balance_b.value ), "Internal error" ); + fc::uint128_t fee_b = b128 * _pool->withdrawal_fee_percent / GRAPHENE_100_PERCENT; + FC_ASSERT( fee_b <= b128, "Withdrawal fee percent of the pool is too high" ); + b128 -= fee_b; + FC_ASSERT( a128 > 0 || b128 > 0, "Aborting due to zero outcome" ); + _pool_pays_a = asset( static_cast( a128 ), _pool->asset_a ); + _pool_pays_b = asset( static_cast( b128 ), _pool->asset_b ); + } + + return void_result(); +} FC_CAPTURE_AND_RETHROW( (op) ) } + +generic_exchange_operation_result liquidity_pool_withdraw_evaluator::do_apply( + const liquidity_pool_withdraw_operation& op) +{ try { + database& d = db(); + generic_exchange_operation_result result; + + d.adjust_balance( op.account, -op.share_amount ); + + if( _pool_pays_a.amount > 0 ) + d.adjust_balance( op.account, _pool_pays_a ); + if( _pool_pays_b.amount > 0 ) + d.adjust_balance( op.account, _pool_pays_b ); + + d.modify( *_share_asset_dyn_data, [&op]( asset_dynamic_data_object& data ){ + data.current_supply -= op.share_amount.amount; + }); + + d.modify( *_pool, [this]( 
liquidity_pool_object& lpo ){ + lpo.balance_a -= _pool_pays_a.amount; + lpo.balance_b -= _pool_pays_b.amount; + lpo.update_virtual_value(); + }); + + FC_ASSERT( (_pool->balance_a == 0) == (_pool->balance_b == 0), "Internal error" ); + FC_ASSERT( (_pool->balance_a == 0) == (_share_asset_dyn_data->current_supply == 0), "Internal error" ); + + result.paid.emplace_back( op.share_amount ); + result.received.emplace_back( _pool_pays_a ); + result.received.emplace_back( _pool_pays_b ); + + return result; +} FC_CAPTURE_AND_RETHROW( (op) ) } + +void_result liquidity_pool_exchange_evaluator::do_evaluate(const liquidity_pool_exchange_operation& op) +{ try { + const database& d = db(); + + _pool = &op.pool(d); + + FC_ASSERT( _pool->balance_a > 0 && _pool->balance_b > 0, "The pool has not been initialized" ); + + FC_ASSERT( ( op.amount_to_sell.asset_id == _pool->asset_a && op.min_to_receive.asset_id == _pool->asset_b ) + || ( op.amount_to_sell.asset_id == _pool->asset_b && op.min_to_receive.asset_id == _pool->asset_a ), + "Asset type mismatch" ); + + const asset_object& asset_obj_a = _pool->asset_a(d); + FC_ASSERT( is_authorized_asset( d, *fee_paying_account, asset_obj_a ), + "The account is unauthorized by asset A" ); + + const asset_object& asset_obj_b = _pool->asset_b(d); + FC_ASSERT( is_authorized_asset( d, *fee_paying_account, asset_obj_b ), + "The account is unauthorized by asset B" ); + + _pool_receives_asset = ( op.amount_to_sell.asset_id == _pool->asset_a ? 
&asset_obj_a : &asset_obj_b ); + + _maker_market_fee = d.calculate_market_fee( *_pool_receives_asset, op.amount_to_sell, true ); + FC_ASSERT( _maker_market_fee < op.amount_to_sell, + "Aborting since the maker market fee of the selling asset is too high" ); + _pool_receives = op.amount_to_sell - _maker_market_fee; + + fc::uint128_t delta; + if( op.amount_to_sell.asset_id == _pool->asset_a ) + { + share_type new_balance_a = _pool->balance_a + _pool_receives.amount; + // round up + fc::uint128_t new_balance_b = ( _pool->virtual_value + new_balance_a.value - 1 ) / new_balance_a.value; + FC_ASSERT( new_balance_b <= _pool->balance_b, "Internal error" ); + delta = fc::uint128_t( _pool->balance_b.value ) - new_balance_b; + _pool_pays_asset = &asset_obj_b; + } + else + { + share_type new_balance_b = _pool->balance_b + _pool_receives.amount; + // round up + fc::uint128_t new_balance_a = ( _pool->virtual_value + new_balance_b.value - 1 ) / new_balance_b.value; + FC_ASSERT( new_balance_a <= _pool->balance_a, "Internal error" ); + delta = fc::uint128_t( _pool->balance_a.value ) - new_balance_a; + _pool_pays_asset = &asset_obj_a; + } + + fc::uint128_t pool_taker_fee = delta * _pool->taker_fee_percent / GRAPHENE_100_PERCENT; + FC_ASSERT( pool_taker_fee <= delta, "Taker fee percent of the pool is too high" ); + + _pool_pays = asset( static_cast( delta - pool_taker_fee ), op.min_to_receive.asset_id ); + + _taker_market_fee = d.calculate_market_fee( *_pool_pays_asset, _pool_pays, false ); + FC_ASSERT( _taker_market_fee <= _pool_pays, "Market fee should not be greater than the amount to receive" ); + _account_receives = _pool_pays - _taker_market_fee; + + FC_ASSERT( _account_receives.amount >= op.min_to_receive.amount, "Unable to exchange at expected price" ); + + return void_result(); +} FC_CAPTURE_AND_RETHROW( (op) ) } + +generic_exchange_operation_result liquidity_pool_exchange_evaluator::do_apply( + const liquidity_pool_exchange_operation& op) +{ try { + database& d = db(); + 
generic_exchange_operation_result result; + + d.adjust_balance( op.account, -op.amount_to_sell ); + d.adjust_balance( op.account, _account_receives ); + + // TODO whose registrar and referrer should receive the shared maker market fee? + d.pay_market_fees( &_pool->share_asset(d).issuer(d), *_pool_receives_asset, op.amount_to_sell, true, + _maker_market_fee ); + d.pay_market_fees( fee_paying_account, *_pool_pays_asset, _pool_pays, false, _taker_market_fee ); + + const auto old_virtual_value = _pool->virtual_value; + if( op.amount_to_sell.asset_id == _pool->asset_a ) + { + d.modify( *_pool, [&op,this]( liquidity_pool_object& lpo ){ + lpo.balance_a += _pool_receives.amount; + lpo.balance_b -= _pool_pays.amount; + lpo.update_virtual_value(); + }); + } + else + { + d.modify( *_pool, [&op,this]( liquidity_pool_object& lpo ){ + lpo.balance_b += _pool_receives.amount; + lpo.balance_a -= _pool_pays.amount; + lpo.update_virtual_value(); + }); + } + + FC_ASSERT( _pool->balance_a > 0 && _pool->balance_b > 0, "Internal error" ); + FC_ASSERT( _pool->virtual_value >= old_virtual_value, "Internal error" ); + + result.paid.emplace_back( op.amount_to_sell ); + result.received.emplace_back( _account_receives ); + result.fees.emplace_back( _maker_market_fee ); + result.fees.emplace_back( _taker_market_fee ); + + return result; +} FC_CAPTURE_AND_RETHROW( (op) ) } + +} } // graphene::chain diff --git a/libraries/chain/proposal_evaluator.cpp b/libraries/chain/proposal_evaluator.cpp index 827b92d725..9c5b27c85d 100644 --- a/libraries/chain/proposal_evaluator.cpp +++ b/libraries/chain/proposal_evaluator.cpp @@ -147,6 +147,18 @@ struct proposal_operation_hardfork_visitor FC_ASSERT(!op.new_parameters.current_fees->exists(), "Unable to define fees for ticket operations prior to hardfork 2103"); } + if (!HARDFORK_LIQUIDITY_POOL_PASSED(block_time)) { + FC_ASSERT(!op.new_parameters.current_fees->exists(), + "Unable to define fees for liquidity pool operations prior to the LP hardfork"); + 
FC_ASSERT(!op.new_parameters.current_fees->exists(), + "Unable to define fees for liquidity pool operations prior to the LP hardfork"); + FC_ASSERT(!op.new_parameters.current_fees->exists(), + "Unable to define fees for liquidity pool operations prior to the LP hardfork"); + FC_ASSERT(!op.new_parameters.current_fees->exists(), + "Unable to define fees for liquidity pool operations prior to the LP hardfork"); + FC_ASSERT(!op.new_parameters.current_fees->exists(), + "Unable to define fees for liquidity pool operations prior to the LP hardfork"); + } } void operator()(const graphene::chain::htlc_create_operation &op) const { FC_ASSERT( block_time >= HARDFORK_CORE_1468_TIME, "Not allowed until hardfork 1468" ); @@ -182,6 +194,21 @@ struct proposal_operation_hardfork_visitor void operator()(const graphene::chain::ticket_update_operation &op) const { FC_ASSERT( HARDFORK_CORE_2103_PASSED(block_time), "Not allowed until hardfork 2103" ); } + void operator()(const graphene::chain::liquidity_pool_create_operation &op) const { + FC_ASSERT( HARDFORK_LIQUIDITY_POOL_PASSED(block_time), "Not allowed until the LP hardfork" ); + } + void operator()(const graphene::chain::liquidity_pool_delete_operation &op) const { + FC_ASSERT( HARDFORK_LIQUIDITY_POOL_PASSED(block_time), "Not allowed until the LP hardfork" ); + } + void operator()(const graphene::chain::liquidity_pool_deposit_operation &op) const { + FC_ASSERT( HARDFORK_LIQUIDITY_POOL_PASSED(block_time), "Not allowed until the LP hardfork" ); + } + void operator()(const graphene::chain::liquidity_pool_withdraw_operation &op) const { + FC_ASSERT( HARDFORK_LIQUIDITY_POOL_PASSED(block_time), "Not allowed until the LP hardfork" ); + } + void operator()(const graphene::chain::liquidity_pool_exchange_operation &op) const { + FC_ASSERT( HARDFORK_LIQUIDITY_POOL_PASSED(block_time), "Not allowed until the LP hardfork" ); + } // loop and self visit in proposals void operator()(const graphene::chain::proposal_create_operation &v) const { diff 
--git a/libraries/chain/small_objects.cpp b/libraries/chain/small_objects.cpp index e4e3d09192..8868306ef6 100644 --- a/libraries/chain/small_objects.cpp +++ b/libraries/chain/small_objects.cpp @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -101,6 +102,7 @@ FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::dynamic_global_property_object, (time) (current_witness) (next_maintenance_time) + (last_vote_tally_time) (last_budget_time) (witness_budget) (total_pob) @@ -196,6 +198,18 @@ FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::custom_authority_object, (graph (account)(enabled)(valid_from)(valid_to)(operation_type) (auth)(restrictions)(restriction_counter) ) +FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::liquidity_pool_object, (graphene::db::object), + (asset_a) + (asset_b) + (balance_a) + (balance_b) + (share_asset) + (taker_fee_percent) + (withdrawal_fee_percent) + (virtual_value) + ) + + GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::balance_object ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::block_summary_object ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::budget_record ) @@ -218,3 +232,4 @@ GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::witness_object ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::witness_schedule_object ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::worker_object ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::custom_authority_object ) +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::liquidity_pool_object ) diff --git a/libraries/chain/ticket_evaluator.cpp b/libraries/chain/ticket_evaluator.cpp index 5962e9d28a..24eef0ee33 100644 --- a/libraries/chain/ticket_evaluator.cpp +++ b/libraries/chain/ticket_evaluator.cpp @@ -47,11 +47,14 @@ object_id_type ticket_create_evaluator::do_apply(const ticket_create_operation& { try { database& d = db(); const auto block_time = d.head_block_time(); + const 
auto maint_time = d.get_dynamic_global_properties().next_maintenance_time; + + ticket_version version = ( HARDFORK_CORE_2262_PASSED(maint_time) ? ticket_v2 : ticket_v1 ); d.adjust_balance( op.account, -op.amount ); - const auto& new_ticket_object = d.create([&op,block_time](ticket_object& obj){ - obj.init_new( block_time, op.account, op.target_type, op.amount ); + const auto& new_ticket_object = d.create([&op,block_time,version](ticket_object& obj){ + obj.init_new( block_time, op.account, op.target_type, op.amount, version ); }); // Note: amount.asset_id is checked in validate(), so no check here @@ -88,6 +91,9 @@ generic_operation_result ticket_update_evaluator::do_apply(const ticket_update_o { try { database& d = db(); const auto block_time = d.head_block_time(); + const auto maint_time = d.get_dynamic_global_properties().next_maintenance_time; + + ticket_version version = ( HARDFORK_CORE_2262_PASSED(maint_time) ? ticket_v2 : ticket_v1 ); generic_operation_result result; @@ -97,21 +103,21 @@ generic_operation_result ticket_update_evaluator::do_apply(const ticket_update_o // To partially update the ticket, aka splitting if ( op.amount_for_new_target.valid() && *op.amount_for_new_target < _ticket->amount ) { - const auto& new_ticket_object = d.create([&op,this,block_time](ticket_object& obj){ - obj.init_split( block_time, *_ticket, op.target_type, *op.amount_for_new_target ); + const auto& new_ticket_object = d.create([&op,this,block_time,version](ticket_object& obj){ + obj.init_split( block_time, *_ticket, op.target_type, *op.amount_for_new_target, version ); }); result.new_objects.insert( new_ticket_object.id ); - d.modify( *_ticket, [&op](ticket_object& obj){ - obj.adjust_amount( -(*op.amount_for_new_target) ); + d.modify( *_ticket, [&op,version](ticket_object& obj){ + obj.adjust_amount( -(*op.amount_for_new_target), version ); }); delta_value = new_ticket_object.value + _ticket->value - old_value; } else // To update the whole ticket { - d.modify( *_ticket, 
[&op,block_time](ticket_object& obj){ - obj.update_target_type( block_time, op.target_type ); + d.modify( *_ticket, [&op,block_time,version](ticket_object& obj){ + obj.update_target_type( block_time, op.target_type, version ); }); delta_value = _ticket->value - old_value; } diff --git a/libraries/chain/ticket_object.cpp b/libraries/chain/ticket_object.cpp index f77a69716c..346d4dd34f 100644 --- a/libraries/chain/ticket_object.cpp +++ b/libraries/chain/ticket_object.cpp @@ -29,7 +29,7 @@ using namespace graphene::chain; void ticket_object::init_new( time_point_sec now, account_id_type new_account, - ticket_type new_target_type, const asset& new_amount ) + ticket_type new_target_type, const asset& new_amount, ticket_version version ) { account = new_account; target_type = new_target_type; @@ -40,11 +40,11 @@ void ticket_object::init_new( time_point_sec now, account_id_type new_account, next_auto_update_time = now + seconds_per_charging_step; next_type_downgrade_time = time_point_sec::maximum(); - update_value(); + update_value( version ); } void ticket_object::init_split( time_point_sec now, const ticket_object& old_ticket, - ticket_type new_target_type, const asset& new_amount ) + ticket_type new_target_type, const asset& new_amount, ticket_version version ) { account = old_ticket.account; target_type = old_ticket.target_type; @@ -55,10 +55,10 @@ void ticket_object::init_split( time_point_sec now, const ticket_object& old_tic next_auto_update_time = old_ticket.next_auto_update_time; next_type_downgrade_time = old_ticket.next_type_downgrade_time; - update_target_type( now, new_target_type ); + update_target_type( now, new_target_type, version ); } -void ticket_object::update_target_type( time_point_sec now, ticket_type new_target_type ) +void ticket_object::update_target_type( time_point_sec now, ticket_type new_target_type, ticket_version version ) { if( current_type < new_target_type ) { @@ -89,16 +89,16 @@ void ticket_object::update_target_type( time_point_sec 
now, ticket_type new_targ } target_type = new_target_type; - update_value(); + update_value( version ); } -void ticket_object::adjust_amount( const asset& delta_amount ) +void ticket_object::adjust_amount( const asset& delta_amount, ticket_version version ) { amount += delta_amount; - update_value(); + update_value( version ); } -void ticket_object::auto_update() +void ticket_object::auto_update( ticket_version version ) { if( status == charging ) { @@ -119,7 +119,7 @@ void ticket_object::auto_update() { status = withdrawing; next_auto_update_time += seconds_per_lock_forever_update_step; - value = amount.amount * value_multiplier(current_type); + value = amount.amount * value_multiplier( current_type, version ); } } } @@ -128,7 +128,8 @@ void ticket_object::auto_update() // Note: current_type != liquid, guaranteed by the caller if( current_type == lock_forever ) { - share_type delta_value = amount.amount * value_multiplier(current_type) / lock_forever_update_steps; + share_type delta_value = amount.amount * value_multiplier( current_type, version ) + / lock_forever_update_steps; if( delta_value <= 0 ) delta_value = 1; if( value <= delta_value ) @@ -157,14 +158,14 @@ void ticket_object::auto_update() } } - update_value(); + update_value( version ); } -void ticket_object::update_value() +void ticket_object::update_value( ticket_version version ) { if( current_type != lock_forever ) { - value = amount.amount * value_multiplier(current_type); + value = amount.amount * value_multiplier( current_type, version ); } // else lock forever and to be updated, do nothing here } diff --git a/libraries/db/include/graphene/db/object.hpp b/libraries/db/include/graphene/db/object.hpp index b5a6724a2b..254b43483d 100644 --- a/libraries/db/include/graphene/db/object.hpp +++ b/libraries/db/include/graphene/db/object.hpp @@ -65,10 +65,6 @@ namespace graphene { namespace db { object(){} virtual ~object(){} - static const uint8_t space_id = 0; - static const uint8_t type_id = 0; - - // 
serialized object_id_type id; diff --git a/libraries/egenesis/CMakeLists.txt b/libraries/egenesis/CMakeLists.txt index 9b208974d5..e9e665f127 100644 --- a/libraries/egenesis/CMakeLists.txt +++ b/libraries/egenesis/CMakeLists.txt @@ -1,13 +1,11 @@ -message( STATUS "Generating egenesis" ) - if( GRAPHENE_EGENESIS_JSON ) set( embed_genesis_args "${GRAPHENE_EGENESIS_JSON}" ) else( GRAPHENE_EGENESIS_JSON ) set( embed_genesis_args "genesis.json" ) endif( GRAPHENE_EGENESIS_JSON ) -add_custom_command( - OUTPUT +add_custom_target( build_egenesis_cpp + BYPRODUCTS "${CMAKE_CURRENT_BINARY_DIR}/egenesis_brief.cpp" "${CMAKE_CURRENT_BINARY_DIR}/egenesis_full.cpp" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} @@ -16,6 +14,7 @@ add_custom_command( -DINIT_SOURCE_DIR=${CMAKE_CURRENT_SOURCE_DIR} -Dembed_genesis_args=${embed_genesis_args} -P ${CMAKE_CURRENT_SOURCE_DIR}/embed_genesis.cmake + COMMENT "Generating egenesis" DEPENDS "${GRAPHENE_EGENESIS_JSON}" "${CMAKE_CURRENT_SOURCE_DIR}/egenesis_brief.cpp.tmpl" @@ -26,8 +25,10 @@ add_library( graphene_egenesis_none egenesis_none.cpp include/graphene/egenesis/egenesis.hpp ) add_library( graphene_egenesis_brief "${CMAKE_CURRENT_BINARY_DIR}/egenesis_brief.cpp" include/graphene/egenesis/egenesis.hpp ) +add_dependencies( graphene_egenesis_brief build_egenesis_cpp ) add_library( graphene_egenesis_full "${CMAKE_CURRENT_BINARY_DIR}/egenesis_full.cpp" include/graphene/egenesis/egenesis.hpp ) +add_dependencies( graphene_egenesis_full build_egenesis_cpp ) target_link_libraries( graphene_egenesis_none graphene_chain fc ) target_link_libraries( graphene_egenesis_brief graphene_chain fc ) diff --git a/libraries/fc b/libraries/fc index 73a7f08f00..7d0411252a 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 73a7f08f00456b0984cd431dd8f55bd901282e15 +Subproject commit 7d0411252a76ac57837763200788901cd30c7e6f diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 5f5227ba98..f2fd144488 100644 --- 
a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -332,18 +332,22 @@ namespace graphene { namespace net { namespace detail { ilog( "cleaning up node" ); _node_is_shutting_down = true; - for (const peer_connection_ptr& active_peer : _active_connections) { - fc::optional inbound_endpoint = active_peer->get_endpoint_for_connecting(); - if (inbound_endpoint) - { - fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*inbound_endpoint); - if (updated_peer_record) - { - updated_peer_record->last_seen_time = fc::time_point::now(); - _potential_peer_db.update_entry(*updated_peer_record); - } - } + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr& active_peer : _active_connections) + { + fc::optional inbound_endpoint = active_peer->get_endpoint_for_connecting(); + if (inbound_endpoint) + { + fc::optional updated_peer_record = _potential_peer_db + .lookup_entry_for_endpoint(*inbound_endpoint); + if (updated_peer_record) + { + updated_peer_record->last_seen_time = fc::time_point::now(); + _potential_peer_db.update_entry(*updated_peer_record); + } + } + } } try @@ -567,10 +571,10 @@ namespace graphene { namespace net { namespace detail { std::map > sync_item_requests_to_send; { - ASSERT_TASK_NOT_PREEMPTED(); std::set sync_items_to_request; // for each idle peer that we're syncing with + fc::scoped_lock lock(_active_connections.get_mutex()); for( const peer_connection_ptr& peer : _active_connections ) { if( peer->we_need_sync_items_from_peer && @@ -629,6 +633,7 @@ namespace graphene { namespace net { namespace detail { bool node_impl::is_item_in_any_peers_inventory(const item_id& item) const { + fc::scoped_lock lock(_active_connections.get_mutex()); for( const peer_connection_ptr& peer : _active_connections ) { if (peer->inventory_peer_advertised_to_us.find(item) != peer->inventory_peer_advertised_to_us.end() ) @@ -668,9 +673,12 @@ namespace graphene { namespace net { namespace detail { 
fetch_messages_to_send_set items_by_peer; // initialize the fetch_messages_to_send with an empty set of items for all idle peers - for (const peer_connection_ptr& peer : _active_connections) - if (peer->idle()) - items_by_peer.insert(peer_and_items_to_fetch(peer)); + { + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr& peer : _active_connections) + if (peer->idle()) + items_by_peer.insert(peer_and_items_to_fetch(peer)); + } // now loop over all items we want to fetch for (auto item_iter = _items_to_fetch.begin(); item_iter != _items_to_fetch.end();) @@ -778,9 +786,10 @@ namespace graphene { namespace net { namespace detail { // first, then send them all in a batch (to avoid any fiber interruption points while // we're computing the messages) std::list > inventory_messages_to_send; - - for (const peer_connection_ptr& peer : _active_connections) { + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr& peer : _active_connections) + { // only advertise to peers who are in sync with us idump((peer->peer_needs_sync_items_from_us)); if( !peer->peer_needs_sync_items_from_us ) @@ -822,7 +831,8 @@ namespace graphene { namespace net { namespace detail { inventory_messages_to_send.push_back(std::make_pair(peer, item_ids_inventory_message(items_group.first, items_group.second))); } peer->clear_old_inventory(); - } + } + } // lock_guard for (auto iter = inventory_messages_to_send.begin(); iter != inventory_messages_to_send.end(); ++iter) iter->first->send_message(iter->second); @@ -862,173 +872,197 @@ namespace graphene { namespace net { namespace detail { // them (but they won't have sent us anything since they aren't getting blocks either). // This might not be so bad because it could make us initiate more connections and // reconnect with the rest of the network, or it might just futher isolate us. 
- { - // As usual, the first step is to walk through all our peers and figure out which - // peers need action (disconneting, sending keepalives, etc), then we walk through - // those lists yielding at our leisure later. - ASSERT_TASK_NOT_PREEMPTED(); - - uint32_t handshaking_timeout = _peer_inactivity_timeout; - fc::time_point handshaking_disconnect_threshold = fc::time_point::now() - fc::seconds(handshaking_timeout); - for( const peer_connection_ptr handshaking_peer : _handshaking_connections ) - if( handshaking_peer->connection_initiation_time < handshaking_disconnect_threshold && - handshaking_peer->get_last_message_received_time() < handshaking_disconnect_threshold && - handshaking_peer->get_last_message_sent_time() < handshaking_disconnect_threshold ) - { - wlog( "Forcibly disconnecting from handshaking peer ${peer} due to inactivity of at least ${timeout} seconds", - ( "peer", handshaking_peer->get_remote_endpoint() )("timeout", handshaking_timeout ) ); - wlog("Peer's negotiating status: ${status}, bytes sent: ${sent}, bytes received: ${received}", - ("status", handshaking_peer->negotiation_status) - ("sent", handshaking_peer->get_total_bytes_sent()) - ("received", handshaking_peer->get_total_bytes_received())); - handshaking_peer->connection_closed_error = fc::exception(FC_LOG_MESSAGE(warn, "Terminating handshaking connection due to inactivity of ${timeout} seconds. 
Negotiating status: ${status}, bytes sent: ${sent}, bytes received: ${received}", - ("peer", handshaking_peer->get_remote_endpoint()) - ("timeout", handshaking_timeout) - ("status", handshaking_peer->negotiation_status) - ("sent", handshaking_peer->get_total_bytes_sent()) - ("received", handshaking_peer->get_total_bytes_received()))); - peers_to_disconnect_forcibly.push_back( handshaking_peer ); - } + // As usual, the first step is to walk through all our peers and figure out which + // peers need action (disconneting, sending keepalives, etc), then we walk through + // those lists yielding at our leisure later. - // timeout for any active peers is two block intervals - uint32_t active_disconnect_timeout = 10 * _recent_block_interval_in_seconds; - uint32_t active_send_keepalive_timeout = active_disconnect_timeout / 2; - - // set the ignored request time out to 6 second. When we request a block - // or transaction from a peer, this timeout determines how long we wait for them - // to reply before we give up and ask another peer for the item. - // Ideally this should be significantly shorter than the block interval, because - // we'd like to realize the block isn't coming and fetch it from a different - // peer before the next block comes in. - // Increased to 6 from 1 in #1660 due to heavy load. 
May need to adjust further - // Note: #1660 is https://github.com/steemit/steem/issues/1660 - fc::microseconds active_ignored_request_timeout = fc::seconds(6); - - fc::time_point active_disconnect_threshold = fc::time_point::now() - fc::seconds(active_disconnect_timeout); - fc::time_point active_send_keepalive_threshold = fc::time_point::now() - fc::seconds(active_send_keepalive_timeout); - fc::time_point active_ignored_request_threshold = fc::time_point::now() - active_ignored_request_timeout; - for( const peer_connection_ptr& active_peer : _active_connections ) - { - if( active_peer->connection_initiation_time < active_disconnect_threshold && - active_peer->get_last_message_received_time() < active_disconnect_threshold ) - { - wlog( "Closing connection with peer ${peer} due to inactivity of at least ${timeout} seconds", - ( "peer", active_peer->get_remote_endpoint() )("timeout", active_disconnect_timeout ) ); - peers_to_disconnect_gently.push_back( active_peer ); - } - else - { - bool disconnect_due_to_request_timeout = false; - if (!active_peer->sync_items_requested_from_peer.empty() && - active_peer->last_sync_item_received_time < active_ignored_request_threshold) + uint32_t handshaking_timeout = _peer_inactivity_timeout; + fc::time_point handshaking_disconnect_threshold = fc::time_point::now() - fc::seconds(handshaking_timeout); + { + fc::scoped_lock lock(_handshaking_connections.get_mutex()); + for( const peer_connection_ptr handshaking_peer : _handshaking_connections ) + { + if( handshaking_peer->connection_initiation_time < handshaking_disconnect_threshold && + handshaking_peer->get_last_message_received_time() < handshaking_disconnect_threshold && + handshaking_peer->get_last_message_sent_time() < handshaking_disconnect_threshold ) { - wlog("Disconnecting peer ${peer} because they haven't made any progress on my remaining ${count} sync item requests", - ("peer", active_peer->get_remote_endpoint())("count", 
active_peer->sync_items_requested_from_peer.size())); - disconnect_due_to_request_timeout = true; - } - if (!disconnect_due_to_request_timeout && - active_peer->item_ids_requested_from_peer && - active_peer->item_ids_requested_from_peer->get<1>() < active_ignored_request_threshold) - { - wlog("Disconnecting peer ${peer} because they didn't respond to my request for sync item ids after ${synopsis}", - ("peer", active_peer->get_remote_endpoint()) - ("synopsis", active_peer->item_ids_requested_from_peer->get<0>())); - disconnect_due_to_request_timeout = true; - } - if (!disconnect_due_to_request_timeout) - for (const peer_connection::item_to_time_map_type::value_type& item_and_time : active_peer->items_requested_from_peer) - if (item_and_time.second < active_ignored_request_threshold) - { - wlog("Disconnecting peer ${peer} because they didn't respond to my request for item ${id}", - ("peer", active_peer->get_remote_endpoint())("id", item_and_time.first.item_hash)); - disconnect_due_to_request_timeout = true; - break; - } - if (disconnect_due_to_request_timeout) + wlog( "Forcibly disconnecting from handshaking peer ${peer} due to inactivity of at least ${timeout} seconds", + ( "peer", handshaking_peer->get_remote_endpoint() )("timeout", handshaking_timeout ) ); + wlog("Peer's negotiating status: ${status}, bytes sent: ${sent}, bytes received: ${received}", + ("status", handshaking_peer->negotiation_status) + ("sent", handshaking_peer->get_total_bytes_sent()) + ("received", handshaking_peer->get_total_bytes_received())); + handshaking_peer->connection_closed_error = fc::exception(FC_LOG_MESSAGE(warn, + "Terminating handshaking connection due to inactivity of ${timeout} seconds. 
Negotiating status: ${status}, bytes sent: ${sent}, bytes received: ${received}", + ("peer", handshaking_peer->get_remote_endpoint()) + ("timeout", handshaking_timeout) + ("status", handshaking_peer->negotiation_status) + ("sent", handshaking_peer->get_total_bytes_sent()) + ("received", handshaking_peer->get_total_bytes_received()))); + peers_to_disconnect_forcibly.push_back( handshaking_peer ); + } // if + } // for + } // scoped_lock + // timeout for any active peers is two block intervals + uint32_t active_disconnect_timeout = 10 * _recent_block_interval_in_seconds; + uint32_t active_send_keepalive_timeout = active_disconnect_timeout / 2; + + // set the ignored request time out to 6 second. When we request a block + // or transaction from a peer, this timeout determines how long we wait for them + // to reply before we give up and ask another peer for the item. + // Ideally this should be significantly shorter than the block interval, because + // we'd like to realize the block isn't coming and fetch it from a different + // peer before the next block comes in. + // Increased to 6 from 1 in #1660 due to heavy load. 
May need to adjust further + // Note: #1660 is https://github.com/steemit/steem/issues/1660 + fc::microseconds active_ignored_request_timeout = fc::seconds(6); + + fc::time_point active_disconnect_threshold = fc::time_point::now() - fc::seconds(active_disconnect_timeout); + fc::time_point active_send_keepalive_threshold = fc::time_point::now() - fc::seconds(active_send_keepalive_timeout); + fc::time_point active_ignored_request_threshold = fc::time_point::now() - active_ignored_request_timeout; + { + fc::scoped_lock lock(_active_connections.get_mutex()); + + for( const peer_connection_ptr& active_peer : _active_connections ) + { + if( active_peer->connection_initiation_time < active_disconnect_threshold && + active_peer->get_last_message_received_time() < active_disconnect_threshold ) { - // we should probably disconnect nicely and give them a reason, but right now the logic - // for rescheduling the requests only executes when the connection is fully closed, - // and we want to get those requests rescheduled as soon as possible - peers_to_disconnect_forcibly.push_back(active_peer); + wlog( "Closing connection with peer ${peer} due to inactivity of at least ${timeout} seconds", + ( "peer", active_peer->get_remote_endpoint() )("timeout", active_disconnect_timeout ) ); + peers_to_disconnect_gently.push_back( active_peer ); } - else if (active_peer->connection_initiation_time < active_send_keepalive_threshold && - active_peer->get_last_message_received_time() < active_send_keepalive_threshold) + else { - wlog( "Sending a keepalive message to peer ${peer} who hasn't sent us any messages in the last ${timeout} seconds", - ( "peer", active_peer->get_remote_endpoint() )("timeout", active_send_keepalive_timeout ) ); - peers_to_send_keep_alive.push_back(active_peer); - } - else if (active_peer->we_need_sync_items_from_peer && + bool disconnect_due_to_request_timeout = false; + if (!active_peer->sync_items_requested_from_peer.empty() && + 
active_peer->last_sync_item_received_time < active_ignored_request_threshold) + { + wlog("Disconnecting peer ${peer} because they haven't made any progress on my remaining ${count} sync item requests", + ("peer", active_peer->get_remote_endpoint())("count", + active_peer->sync_items_requested_from_peer.size())); + disconnect_due_to_request_timeout = true; + } + if (!disconnect_due_to_request_timeout && + active_peer->item_ids_requested_from_peer && + active_peer->item_ids_requested_from_peer->get<1>() < active_ignored_request_threshold) + { + wlog("Disconnecting peer ${peer} because they didn't respond to my request for sync item ids after ${synopsis}", + ("peer", active_peer->get_remote_endpoint()) + ("synopsis", active_peer->item_ids_requested_from_peer->get<0>())); + disconnect_due_to_request_timeout = true; + } + if (!disconnect_due_to_request_timeout) + for (const peer_connection::item_to_time_map_type::value_type& item_and_time : active_peer->items_requested_from_peer) + if (item_and_time.second < active_ignored_request_threshold) + { + wlog("Disconnecting peer ${peer} because they didn't respond to my request for item ${id}", + ("peer", active_peer->get_remote_endpoint())("id", item_and_time.first.item_hash)); + disconnect_due_to_request_timeout = true; + break; + } + if (disconnect_due_to_request_timeout) + { + // we should probably disconnect nicely and give them a reason, but right now the logic + // for rescheduling the requests only executes when the connection is fully closed, + // and we want to get those requests rescheduled as soon as possible + peers_to_disconnect_forcibly.push_back(active_peer); + } + else if (active_peer->connection_initiation_time < active_send_keepalive_threshold && + active_peer->get_last_message_received_time() < active_send_keepalive_threshold) + { + wlog( "Sending a keepalive message to peer ${peer} who hasn't sent us any messages in the last ${timeout} seconds", + ( "peer", active_peer->get_remote_endpoint() )("timeout", 
active_send_keepalive_timeout ) ); + peers_to_send_keep_alive.push_back(active_peer); + } + else if (active_peer->we_need_sync_items_from_peer && !active_peer->is_currently_handling_message() && !active_peer->item_ids_requested_from_peer && active_peer->ids_of_items_to_get.empty()) + { + // This is a state we should never get into in the first place, but if we do, we should disconnect the peer + // to re-establish the connection. + fc_wlog(fc::logger::get("sync"), "Disconnecting peer ${peer} because we think we need blocks from them but sync has stalled.", + ("peer", active_peer->get_remote_endpoint())); + wlog("Disconnecting peer ${peer} because we think we need blocks from them but sync has stalled.", + ("peer", active_peer->get_remote_endpoint())); + peers_to_disconnect_forcibly.push_back(active_peer); + } + } // else + } // for + } // scoped_lock + + fc::time_point closing_disconnect_threshold = fc::time_point::now() - fc::seconds(GRAPHENE_NET_PEER_DISCONNECT_TIMEOUT); + { + fc::scoped_lock lock(_closing_connections.get_mutex()); + for( const peer_connection_ptr& closing_peer : _closing_connections ) + { + if( closing_peer->connection_closed_time < closing_disconnect_threshold ) { - // This is a state we should never get into in the first place, but if we do, we should disconnect the peer - // to re-establish the connection. - fc_wlog(fc::logger::get("sync"), "Disconnecting peer ${peer} because we think we need blocks from them but sync has stalled.", - ("peer", active_peer->get_remote_endpoint())); - wlog("Disconnecting peer ${peer} because we think we need blocks from them but sync has stalled.", - ("peer", active_peer->get_remote_endpoint())); - peers_to_disconnect_forcibly.push_back(active_peer); + // we asked this peer to close their connectoin to us at least GRAPHENE_NET_PEER_DISCONNECT_TIMEOUT + // seconds ago, but they haven't done it yet. 
Terminate the connection now + wlog( "Forcibly disconnecting peer ${peer} who failed to close their connection in a timely manner", + ( "peer", closing_peer->get_remote_endpoint() ) ); + peers_to_disconnect_forcibly.push_back( closing_peer ); } - } - } - - fc::time_point closing_disconnect_threshold = fc::time_point::now() - fc::seconds(GRAPHENE_NET_PEER_DISCONNECT_TIMEOUT); - for( const peer_connection_ptr& closing_peer : _closing_connections ) - if( closing_peer->connection_closed_time < closing_disconnect_threshold ) - { - // we asked this peer to close their connectoin to us at least GRAPHENE_NET_PEER_DISCONNECT_TIMEOUT - // seconds ago, but they haven't done it yet. Terminate the connection now - wlog( "Forcibly disconnecting peer ${peer} who failed to close their connection in a timely manner", - ( "peer", closing_peer->get_remote_endpoint() ) ); - peers_to_disconnect_forcibly.push_back( closing_peer ); - } - - uint32_t failed_terminate_timeout_seconds = 120; - fc::time_point failed_terminate_threshold = fc::time_point::now() - fc::seconds(failed_terminate_timeout_seconds); - for (const peer_connection_ptr& peer : _terminating_connections ) - if (peer->get_connection_terminated_time() != fc::time_point::min() && - peer->get_connection_terminated_time() < failed_terminate_threshold) - { - wlog("Terminating connection with peer ${peer}, closing the connection didn't work", ("peer", peer->get_remote_endpoint())); - peers_to_terminate.push_back(peer); - } - - // That's the end of the sorting step; now all peers that require further processing are now in one of the - // lists peers_to_disconnect_gently, peers_to_disconnect_forcibly, peers_to_send_keep_alive, or peers_to_terminate + } // for + } // scoped_lock + uint32_t failed_terminate_timeout_seconds = 120; + fc::time_point failed_terminate_threshold = fc::time_point::now() - fc::seconds(failed_terminate_timeout_seconds); + { + fc::scoped_lock lock(_terminating_connections.get_mutex()); + for (const 
peer_connection_ptr& peer : _terminating_connections ) + { + if (peer->get_connection_terminated_time() != fc::time_point::min() && + peer->get_connection_terminated_time() < failed_terminate_threshold) + { + wlog("Terminating connection with peer ${peer}, closing the connection didn't work", ("peer", peer->get_remote_endpoint())); + peers_to_terminate.push_back(peer); + } + } + } // scoped_lock + // That's the end of the sorting step; now all peers that require further processing are now in one of the + // lists peers_to_disconnect_gently, peers_to_disconnect_forcibly, peers_to_send_keep_alive, or peers_to_terminate - // if we've decided to delete any peers, do it now; in its current implementation this doesn't yield, - // and once we start yielding, we may find that we've moved that peer to another list (closed or active) - // and that triggers assertions, maybe even errors - for (const peer_connection_ptr& peer : peers_to_terminate ) - { - assert(_terminating_connections.find(peer) != _terminating_connections.end()); - _terminating_connections.erase(peer); - schedule_peer_for_deletion(peer); - } - peers_to_terminate.clear(); + // if we've decided to delete any peers, do it now; in its current implementation this doesn't yield, + // and once we start yielding, we may find that we've moved that peer to another list (closed or active) + // and that triggers assertions, maybe even errors + { + fc::scoped_lock lock(_terminating_connections.get_mutex()); + for (const peer_connection_ptr& peer : peers_to_terminate ) + { + assert(_terminating_connections.find(peer) != _terminating_connections.end()); + _terminating_connections.erase(peer); + schedule_peer_for_deletion(peer); + } + } // scoped_lock + peers_to_terminate.clear(); - // if we're going to abruptly disconnect anyone, do it here - // (it doesn't yield). 
I don't think there would be any harm if this were - // moved to the yielding section - for( const peer_connection_ptr& peer : peers_to_disconnect_forcibly ) - { - move_peer_to_terminating_list(peer); - peer->close_connection(); - } - peers_to_disconnect_forcibly.clear(); - } // end ASSERT_TASK_NOT_PREEMPTED() + // if we're going to abruptly disconnect anyone, do it here + // (it doesn't yield). I don't think there would be any harm if this were + // moved to the yielding section + for( const peer_connection_ptr& peer : peers_to_disconnect_forcibly ) + { + move_peer_to_terminating_list(peer); + peer->close_connection(); + } + peers_to_disconnect_forcibly.clear(); // Now process the peers that we need to do yielding functions with (disconnect sends a message with the // disconnect reason, so it may yield) for( const peer_connection_ptr& peer : peers_to_disconnect_gently ) { - fc::exception detailed_error( FC_LOG_MESSAGE(warn, "Disconnecting due to inactivity", - ( "last_message_received_seconds_ago", (peer->get_last_message_received_time() - fc::time_point::now() ).count() / fc::seconds(1 ).count() ) - ( "last_message_sent_seconds_ago", (peer->get_last_message_sent_time() - fc::time_point::now() ).count() / fc::seconds(1 ).count() ) - ( "inactivity_timeout", _active_connections.find(peer ) != _active_connections.end() ? 
_peer_inactivity_timeout * 10 : _peer_inactivity_timeout ) ) ); - disconnect_from_peer( peer.get(), "Disconnecting due to inactivity", false, detailed_error ); + { + fc::scoped_lock lock(_active_connections.get_mutex()); + fc::exception detailed_error( FC_LOG_MESSAGE(warn, "Disconnecting due to inactivity", + ( "last_message_received_seconds_ago", (peer->get_last_message_received_time() + - fc::time_point::now() ).count() / fc::seconds(1 ).count() ) + ( "last_message_sent_seconds_ago", (peer->get_last_message_sent_time() + - fc::time_point::now() ).count() / fc::seconds(1 ).count() ) + ( "inactivity_timeout", _active_connections.find(peer ) != _active_connections.end() + ? _peer_inactivity_timeout * 10 : _peer_inactivity_timeout ) ) ); + disconnect_from_peer( peer.get(), "Disconnecting due to inactivity", false, detailed_error ); + } } peers_to_disconnect_gently.clear(); @@ -1046,23 +1080,27 @@ namespace graphene { namespace net { namespace detail { void node_impl::fetch_updated_peer_lists_loop() { VERIFY_CORRECT_THREAD(); - - std::list original_active_peers(_active_connections.begin(), _active_connections.end()); - for( const peer_connection_ptr& active_peer : original_active_peers ) + { - try - { - active_peer->send_message(address_request_message()); - } - catch ( const fc::canceled_exception& ) - { - throw; - } - catch (const fc::exception& e) - { - dlog("Caught exception while sending address request message to peer ${peer} : ${e}", - ("peer", active_peer->get_remote_endpoint())("e", e)); - } + fc::scoped_lock lock(_active_connections.get_mutex()); + // JMJ 2018-10-22 Unsure why we're making a copy here, but this is probably unnecessary + std::list original_active_peers(_active_connections.begin(), _active_connections.end()); + for( const peer_connection_ptr& active_peer : original_active_peers ) + { + try + { + active_peer->send_message(address_request_message()); + } + catch ( const fc::canceled_exception& ) + { + throw; + } + catch (const fc::exception& e) 
+ { + dlog("Caught exception while sending address request message to peer ${peer} : ${e}", + ("peer", active_peer->get_remote_endpoint())("e", e)); + } + } } // this has nothing to do with updating the peer list, but we need to prune this list @@ -1216,12 +1254,18 @@ namespace graphene { namespace net { namespace detail { peer_connection_ptr node_impl::get_peer_by_node_id(const node_id_t& node_id) { - for (const peer_connection_ptr& active_peer : _active_connections) - if (node_id == active_peer->node_id) - return active_peer; - for (const peer_connection_ptr& handshaking_peer : _handshaking_connections) - if (node_id == handshaking_peer->node_id) - return handshaking_peer; + { + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr& active_peer : _active_connections) + if (node_id == active_peer->node_id) + return active_peer; + } + { + fc::scoped_lock lock(_handshaking_connections.get_mutex()); + for (const peer_connection_ptr& handshaking_peer : _handshaking_connections) + if (node_id == handshaking_peer->node_id) + return handshaking_peer; + } return peer_connection_ptr(); } @@ -1233,18 +1277,26 @@ namespace graphene { namespace net { namespace detail { dlog("is_already_connected_to_id returning true because the peer is us"); return true; } - for (const peer_connection_ptr active_peer : _active_connections) - if (node_id == active_peer->node_id) - { - dlog("is_already_connected_to_id returning true because the peer is already in our active list"); - return true; - } - for (const peer_connection_ptr handshaking_peer : _handshaking_connections) - if (node_id == handshaking_peer->node_id) - { - dlog("is_already_connected_to_id returning true because the peer is already in our handshaking list"); - return true; - } + { + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr active_peer : _active_connections) + { + if (node_id == active_peer->node_id) + { + dlog("is_already_connected_to_id 
returning true because the peer is already in our active list"); + return true; + } + } + } + { + fc::scoped_lock lock(_handshaking_connections.get_mutex()); + for (const peer_connection_ptr handshaking_peer : _handshaking_connections) + if (node_id == handshaking_peer->node_id) + { + dlog("is_already_connected_to_id returning true because the peer is already in our handshaking list"); + return true; + } + } return false; } @@ -1276,19 +1328,25 @@ namespace graphene { namespace net { namespace detail { ("max", _maximum_number_of_connections)); dlog(" my id is ${id}", ("id", _node_id)); - for (const peer_connection_ptr& active_connection : _active_connections) { - dlog(" active: ${endpoint} with ${id} [${direction}]", - ("endpoint", active_connection->get_remote_endpoint()) - ("id", active_connection->node_id) - ("direction", active_connection->direction)); + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr& active_connection : _active_connections) + { + dlog(" active: ${endpoint} with ${id} [${direction}]", + ("endpoint", active_connection->get_remote_endpoint()) + ("id", active_connection->node_id) + ("direction", active_connection->direction)); + } } - for (const peer_connection_ptr& handshaking_connection : _handshaking_connections) { - dlog(" handshaking: ${endpoint} with ${id} [${direction}]", - ("endpoint", handshaking_connection->get_remote_endpoint()) - ("id", handshaking_connection->node_id) - ("direction", handshaking_connection->direction)); + fc::scoped_lock lock(_handshaking_connections.get_mutex()); + for (const peer_connection_ptr& handshaking_connection : _handshaking_connections) + { + dlog(" handshaking: ${endpoint} with ${id} [${direction}]", + ("endpoint", handshaking_connection->get_remote_endpoint()) + ("id", handshaking_connection->node_id) + ("direction", handshaking_connection->direction)); + } } } @@ -1705,6 +1763,7 @@ namespace graphene { namespace net { namespace detail { if 
(!_peer_advertising_disabled) { reply.addresses.reserve(_active_connections.size()); + fc::scoped_lock lock(_active_connections.get_mutex()); for (const peer_connection_ptr& active_peer : _active_connections) { fc::optional updated_peer_record = _potential_peer_db.lookup_entry_for_endpoint(*active_peer->get_remote_endpoint()); @@ -1890,6 +1949,7 @@ namespace graphene { namespace net { namespace detail { { VERIFY_CORRECT_THREAD(); uint32_t max_number_of_unfetched_items = 0; + fc::scoped_lock lock(_active_connections.get_mutex()); for( const peer_connection_ptr& peer : _active_connections ) { uint32_t this_peer_number_of_unfetched_items = (uint32_t)peer->ids_of_items_to_get.size() + peer->number_of_unfetched_item_ids; @@ -2107,17 +2167,22 @@ namespace graphene { namespace net { namespace detail { originating_peer->ids_of_items_to_get.empty()) { bool is_first_item_for_other_peer = false; - for (const peer_connection_ptr& peer : _active_connections) - if (peer != originating_peer->shared_from_this() && - !peer->ids_of_items_to_get.empty() && - peer->ids_of_items_to_get.front() == blockchain_item_ids_inventory_message_received.item_hashes_available.front()) - { - dlog("The item ${newitem} is the first item for peer ${peer}", - ("newitem", blockchain_item_ids_inventory_message_received.item_hashes_available.front()) - ("peer", peer->get_remote_endpoint())); - is_first_item_for_other_peer = true; - break; - } + { + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr& peer : _active_connections) + { + if (peer != originating_peer->shared_from_this() && + !peer->ids_of_items_to_get.empty() && + peer->ids_of_items_to_get.front() == blockchain_item_ids_inventory_message_received.item_hashes_available.front()) + { + dlog("The item ${newitem} is the first item for peer ${peer}", + ("newitem", blockchain_item_ids_inventory_message_received.item_hashes_available.front()) + ("peer", peer->get_remote_endpoint())); + is_first_item_for_other_peer = 
true; + break; + } + } + } dlog("is_first_item_for_other_peer: ${is_first}. item_hashes_received.size() = ${size}", ("is_first", is_first_item_for_other_peer)("size", item_hashes_received.size())); if (!is_first_item_for_other_peer) @@ -2404,15 +2469,18 @@ namespace graphene { namespace net { namespace detail { item_id advertised_item_id(item_ids_inventory_message_received.item_type, item_hash); bool we_advertised_this_item_to_a_peer = false; bool we_requested_this_item_from_a_peer = false; - for (const peer_connection_ptr peer : _active_connections) { - if (peer->inventory_advertised_to_peer.find(advertised_item_id) != peer->inventory_advertised_to_peer.end()) - { - we_advertised_this_item_to_a_peer = true; - break; - } - if (peer->items_requested_from_peer.find(advertised_item_id) != peer->items_requested_from_peer.end()) - we_requested_this_item_from_a_peer = true; + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr peer : _active_connections) + { + if (peer->inventory_advertised_to_peer.find(advertised_item_id) != peer->inventory_advertised_to_peer.end()) + { + we_advertised_this_item_to_a_peer = true; + break; + } + if (peer->items_requested_from_peer.find(advertised_item_id) != peer->items_requested_from_peer.end()) + we_requested_this_item_from_a_peer = true; + } } // if we have already advertised it to a peer, we must have it, no need to do anything else @@ -2639,87 +2707,89 @@ namespace graphene { namespace net { namespace detail { if( client_accepted_block ) { - --_total_number_of_unfetched_items; - dlog("sync: client accpted the block, we now have only ${count} items left to fetch before we're in sync", - ("count", _total_number_of_unfetched_items)); - bool is_fork_block = is_hard_fork_block(block_message_to_send.block.block_num()); - for (const peer_connection_ptr& peer : _active_connections) - { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - bool disconnecting_this_peer = 
false; - if (is_fork_block) - { - // we just pushed a hard fork block. Find out if this peer is running a client - // that will be unable to process future blocks - if (peer->last_known_fork_block_number != 0) + --_total_number_of_unfetched_items; + dlog("sync: client accpted the block, we now have only ${count} items left to fetch before we're in sync", + ("count", _total_number_of_unfetched_items)); + bool is_fork_block = is_hard_fork_block(block_message_to_send.block.block_num()); + { + fc::scoped_lock lock(_active_connections.get_mutex()); + + for (const peer_connection_ptr& peer : _active_connections) { - uint32_t next_fork_block_number = get_next_known_hard_fork_block_number(peer->last_known_fork_block_number); - if (next_fork_block_number != 0 && - next_fork_block_number <= block_message_to_send.block.block_num()) - { - std::ostringstream disconnect_reason_stream; - disconnect_reason_stream << "You need to upgrade your client due to hard fork at block " << block_message_to_send.block.block_num(); - peers_to_disconnect[peer] = std::make_pair(disconnect_reason_stream.str(), - fc::oexception(fc::exception(FC_LOG_MESSAGE(error, "You need to upgrade your client due to hard fork at block ${block_number}", - ("block_number", block_message_to_send.block.block_num()))))); + bool disconnecting_this_peer = false; + if (is_fork_block) + { + // we just pushed a hard fork block. 
Find out if this peer is running a client + // that will be unable to process future blocks + if (peer->last_known_fork_block_number != 0) + { + uint32_t next_fork_block_number = get_next_known_hard_fork_block_number(peer->last_known_fork_block_number); + if (next_fork_block_number != 0 && + next_fork_block_number <= block_message_to_send.block.block_num()) + { + std::ostringstream disconnect_reason_stream; + disconnect_reason_stream << "You need to upgrade your client due to hard fork at block " << block_message_to_send.block.block_num(); + peers_to_disconnect[peer] = std::make_pair(disconnect_reason_stream.str(), + fc::oexception(fc::exception(FC_LOG_MESSAGE(error, "You need to upgrade your client due to hard fork at block ${block_number}", + ("block_number", block_message_to_send.block.block_num()))))); #ifdef ENABLE_DEBUG_ULOGS - ulog("Disconnecting from peer during sync because their version is too old. Their version date: ${date}", ("date", peer->graphene_git_revision_unix_timestamp)); + ulog("Disconnecting from peer during sync because their version is too old. Their version date: ${date}", ("date", peer->graphene_git_revision_unix_timestamp)); #endif - disconnecting_this_peer = true; - } - } - } - if (!disconnecting_this_peer && - peer->ids_of_items_to_get.empty() && peer->ids_of_items_being_processed.empty()) - { - dlog( "Cannot pop first element off peer ${peer}'s list, its list is empty", ("peer", peer->get_remote_endpoint() ) ); - // we don't know for sure that this peer has the item we just received. - // If peer is still syncing to us, we know they will ask us for - // sync item ids at least one more time and we'll notify them about - // the item then, so there's no need to do anything. If we still need items - // from them, we'll be asking them for more items at some point, and - // that will clue them in that they are out of sync. 
If we're fully in sync - // we need to kick off another round of synchronization with them so they can - // find out about the new item. - if (!peer->peer_needs_sync_items_from_us && !peer->we_need_sync_items_from_peer) - { - dlog("We will be restarting synchronization with peer ${peer}", ("peer", peer->get_remote_endpoint())); - peers_we_need_to_sync_to.insert(peer); - } - } - else if (!disconnecting_this_peer) - { - auto items_being_processed_iter = peer->ids_of_items_being_processed.find(block_message_to_send.block_id); - if (items_being_processed_iter != peer->ids_of_items_being_processed.end()) - { - peer->last_block_delegate_has_seen = block_message_to_send.block_id; - peer->last_block_time_delegate_has_seen = block_message_to_send.block.timestamp; - - peer->ids_of_items_being_processed.erase(items_being_processed_iter); - dlog("Removed item from ${endpoint}'s list of items being processed, still processing ${len} blocks", - ("endpoint", peer->get_remote_endpoint())("len", peer->ids_of_items_being_processed.size())); - - // if we just received the last item in our list from this peer, we will want to - // send another request to find out if we are in sync, but we can't do this yet - // (we don't want to allow a fiber swap in the middle of popping items off the list) - if (peer->ids_of_items_to_get.empty() && - peer->number_of_unfetched_item_ids == 0 && - peer->ids_of_items_being_processed.empty()) - peers_with_newly_empty_item_lists.insert(peer); - - // in this case, we know the peer was offering us this exact item, no need to - // try to inform them of its existence - } - } - } + disconnecting_this_peer = true; + } + } + } + if (!disconnecting_this_peer && + peer->ids_of_items_to_get.empty() && peer->ids_of_items_being_processed.empty()) + { + dlog( "Cannot pop first element off peer ${peer}'s list, its list is empty", ("peer", peer->get_remote_endpoint() ) ); + // we don't know for sure that this peer has the item we just received. 
+ // If peer is still syncing to us, we know they will ask us for + // sync item ids at least one more time and we'll notify them about + // the item then, so there's no need to do anything. If we still need items + // from them, we'll be asking them for more items at some point, and + // that will clue them in that they are out of sync. If we're fully in sync + // we need to kick off another round of synchronization with them so they can + // find out about the new item. + if (!peer->peer_needs_sync_items_from_us && !peer->we_need_sync_items_from_peer) + { + dlog("We will be restarting synchronization with peer ${peer}", ("peer", peer->get_remote_endpoint())); + peers_we_need_to_sync_to.insert(peer); + } + } + else if (!disconnecting_this_peer) + { + auto items_being_processed_iter = peer->ids_of_items_being_processed.find(block_message_to_send.block_id); + if (items_being_processed_iter != peer->ids_of_items_being_processed.end()) + { + peer->last_block_delegate_has_seen = block_message_to_send.block_id; + peer->last_block_time_delegate_has_seen = block_message_to_send.block.timestamp; + + peer->ids_of_items_being_processed.erase(items_being_processed_iter); + dlog("Removed item from ${endpoint}'s list of items being processed, still processing ${len} blocks", + ("endpoint", peer->get_remote_endpoint())("len", peer->ids_of_items_being_processed.size())); + + // if we just received the last item in our list from this peer, we will want to + // send another request to find out if we are in sync, but we can't do this yet + // (we don't want to allow a fiber swap in the middle of popping items off the list) + if (peer->ids_of_items_to_get.empty() && + peer->number_of_unfetched_item_ids == 0 && + peer->ids_of_items_being_processed.empty()) + peers_with_newly_empty_item_lists.insert(peer); + + // in this case, we know the peer was offering us this exact item, no need to + // try to inform them of its existence + } + } + } // for + } // lock_guard } else { // invalid 
message received + fc::scoped_lock lock(_active_connections.get_mutex()); for (const peer_connection_ptr& peer : _active_connections) { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - if (peer->ids_of_items_being_processed.find(block_message_to_send.block_id) != peer->ids_of_items_being_processed.end()) { @@ -2821,15 +2891,17 @@ namespace graphene { namespace net { namespace detail { // find out if this block is the next block on the active chain or one of the forks bool potential_first_block = false; - for (const peer_connection_ptr& peer : _active_connections) { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - if (!peer->ids_of_items_to_get.empty() && - peer->ids_of_items_to_get.front() == received_block_iter->block_id) + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr& peer : _active_connections) { - potential_first_block = true; - peer->ids_of_items_to_get.pop_front(); - peer->ids_of_items_being_processed.insert(received_block_iter->block_id); + if (!peer->ids_of_items_to_get.empty() && + peer->ids_of_items_to_get.front() == received_block_iter->block_id) + { + potential_first_block = true; + peer->ids_of_items_to_get.pop_front(); + peer->ids_of_items_being_processed.insert(received_block_iter->block_id); + } } } @@ -2857,6 +2929,7 @@ namespace graphene { namespace net { namespace detail { { dlog("Already received and accepted this block (presumably through normal inventory mechanism), treating it as accepted"); std::vector< peer_connection_ptr > peers_needing_next_batch; + fc::scoped_lock lock(_active_connections.get_mutex()); for (const peer_connection_ptr& peer : _active_connections) { auto items_being_processed_iter = peer->ids_of_items_being_processed.find(received_block_iter->block_id); @@ -2980,22 +3053,22 @@ namespace graphene { namespace net { namespace detail { item_id 
block_message_item_id(core_message_type_enum::block_message_type, message_hash); uint32_t block_number = block_message_to_process.block.block_num(); fc::time_point_sec block_time = block_message_to_process.block.timestamp; - - for (const peer_connection_ptr& peer : _active_connections) { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - - auto iter = peer->inventory_peer_advertised_to_us.find(block_message_item_id); - if (iter != peer->inventory_peer_advertised_to_us.end()) - { - // this peer offered us the item. It will eventually expire from the peer's - // inventory_peer_advertised_to_us list after some time has passed (currently 2 minutes). - // For now, it will remain there, which will prevent us from offering the peer this - // block back when we rebroadcast the block below - peer->last_block_delegate_has_seen = block_message_to_process.block_id; - peer->last_block_time_delegate_has_seen = block_time; - } - peer->clear_old_inventory(); + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr& peer : _active_connections) + { + auto iter = peer->inventory_peer_advertised_to_us.find(block_message_item_id); + if (iter != peer->inventory_peer_advertised_to_us.end()) + { + // this peer offered us the item. It will eventually expire from the peer's + // inventory_peer_advertised_to_us list after some time has passed (currently 2 minutes). 
+ // For now, it will remain there, which will prevent us from offering the peer this + // block back when we rebroadcast the block below + peer->last_block_delegate_has_seen = block_message_to_process.block_id; + peer->last_block_time_delegate_has_seen = block_time; + } + peer->clear_old_inventory(); + } } message_propagation_data propagation_data{message_receive_time, message_validated_time, originating_peer->node_id}; broadcast( block_message_to_process, propagation_data ); @@ -3005,6 +3078,7 @@ namespace graphene { namespace net { namespace detail { { // we just pushed a hard fork block. Find out if any of our peers are running clients // that will be unable to process future blocks + fc::scoped_lock lock(_active_connections.get_mutex()); for (const peer_connection_ptr& peer : _active_connections) { if (peer->last_known_fork_block_number != 0) @@ -3059,6 +3133,7 @@ namespace graphene { namespace net { namespace detail { disconnect_reason = "You offered me a block that I have deemed to be invalid"; peers_to_disconnect.insert( originating_peer->shared_from_this() ); + fc::scoped_lock lock(_active_connections.get_mutex()); for (const peer_connection_ptr& peer : _active_connections) if (!peer->ids_of_items_to_get.empty() && peer->ids_of_items_to_get.front() == block_message_to_process.block_id) peers_to_disconnect.insert(peer); @@ -3182,25 +3257,28 @@ namespace graphene { namespace net { namespace detail { void node_impl::forward_firewall_check_to_next_available_peer(firewall_check_state_data* firewall_check_state) { - for (const peer_connection_ptr& peer : _active_connections) { - if (firewall_check_state->expected_node_id != peer->node_id && // it's not the node who is asking us to test - !peer->firewall_check_state && // the peer isn't already performing a check for another node - firewall_check_state->nodes_already_tested.find(peer->node_id) == firewall_check_state->nodes_already_tested.end() && - peer->core_protocol_version >= 106) - { - wlog("forwarding 
firewall check for node ${to_check} to peer ${checker}", - ("to_check", firewall_check_state->endpoint_to_test) - ("checker", peer->get_remote_endpoint())); - firewall_check_state->nodes_already_tested.insert(peer->node_id); - peer->firewall_check_state = firewall_check_state; - check_firewall_message check_request; - check_request.endpoint_to_check = firewall_check_state->endpoint_to_test; - check_request.node_id = firewall_check_state->expected_node_id; - peer->send_message(check_request); - return; - } - } + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr& peer : _active_connections) + { + if (firewall_check_state->expected_node_id != peer->node_id && // it's not the node who is asking us to test + !peer->firewall_check_state && // the peer isn't already performing a check for another node + firewall_check_state->nodes_already_tested.find(peer->node_id) == firewall_check_state->nodes_already_tested.end() && + peer->core_protocol_version >= 106) + { + wlog("forwarding firewall check for node ${to_check} to peer ${checker}", + ("to_check", firewall_check_state->endpoint_to_test) + ("checker", peer->get_remote_endpoint())); + firewall_check_state->nodes_already_tested.insert(peer->node_id); + peer->firewall_check_state = firewall_check_state; + check_firewall_message check_request; + check_request.endpoint_to_check = firewall_check_state->endpoint_to_test; + check_request.node_id = firewall_check_state->expected_node_id; + peer->send_message(check_request); + return; + } + } + } // lock_guard wlog("Unable to forward firewall check for node ${to_check} to any other peers, returning 'unable'", ("to_check", firewall_check_state->endpoint_to_test)); @@ -3373,10 +3451,9 @@ namespace graphene { namespace net { namespace detail { } fc::time_point now = fc::time_point::now(); + fc::scoped_lock lock(_active_connections.get_mutex()); for (const peer_connection_ptr& peer : _active_connections) { - ASSERT_TASK_NOT_PREEMPTED(); // don't 
yield while iterating over _active_connections - current_connection_data data_for_this_peer; data_for_this_peer.connection_duration = now.sec_since_epoch() - peer->connection_initiation_time.sec_since_epoch(); if (peer->get_remote_endpoint()) // should always be set for anyone we're actively connected to @@ -3518,6 +3595,7 @@ namespace graphene { namespace net { namespace detail { void node_impl::start_synchronizing() { + fc::scoped_lock lock(_active_connections.get_mutex()); for( const peer_connection_ptr& peer : _active_connections ) start_synchronizing_with_peer( peer ); } @@ -3722,9 +3800,19 @@ namespace graphene { namespace net { namespace detail { // the read loop before it gets an EOF). // operate off copies of the lists in case they change during iteration std::list all_peers; - boost::push_back(all_peers, _active_connections); - boost::push_back(all_peers, _handshaking_connections); - boost::push_back(all_peers, _closing_connections); + auto p_back = [&all_peers](const peer_connection_ptr& conn) { all_peers.push_back(conn); }; + { + fc::scoped_lock lock(_active_connections.get_mutex()); + std::for_each(_active_connections.begin(), _active_connections.end(), p_back); + } + { + fc::scoped_lock lock(_handshaking_connections.get_mutex()); + std::for_each(_handshaking_connections.begin(), _handshaking_connections.end(), p_back); + } + { + fc::scoped_lock lock(_closing_connections.get_mutex()); + std::for_each(_closing_connections.begin(), _closing_connections.end(), p_back); + } for (const peer_connection_ptr& peer : all_peers) { @@ -4317,17 +4405,23 @@ namespace graphene { namespace net { namespace detail { peer_connection_ptr node_impl::get_connection_to_endpoint( const fc::ip::endpoint& remote_endpoint ) { VERIFY_CORRECT_THREAD(); - for( const peer_connection_ptr& active_peer : _active_connections ) { - fc::optional endpoint_for_this_peer( active_peer->get_remote_endpoint() ); - if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) - 
return active_peer; + fc::scoped_lock lock(_active_connections.get_mutex()); + for( const peer_connection_ptr& active_peer : _active_connections ) + { + fc::optional endpoint_for_this_peer( active_peer->get_remote_endpoint() ); + if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) + return active_peer; + } } - for( const peer_connection_ptr& handshaking_peer : _handshaking_connections ) { - fc::optional endpoint_for_this_peer( handshaking_peer->get_remote_endpoint() ); - if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) - return handshaking_peer; + fc::scoped_lock lock(_handshaking_connections.get_mutex()); + for( const peer_connection_ptr& handshaking_peer : _handshaking_connections ) + { + fc::optional endpoint_for_this_peer( handshaking_peer->get_remote_endpoint() ); + if( endpoint_for_this_peer && *endpoint_for_this_peer == remote_endpoint ) + return handshaking_peer; + } } return peer_connection_ptr(); } @@ -4372,23 +4466,28 @@ namespace graphene { namespace net { namespace detail { ilog( " number of peers: ${active} active, ${handshaking}, ${closing} closing. 
attempting to maintain ${desired} - ${maximum} peers", ( "active", _active_connections.size() )("handshaking", _handshaking_connections.size() )("closing",_closing_connections.size() ) ( "desired", _desired_number_of_connections )("maximum", _maximum_number_of_connections ) ); - for( const peer_connection_ptr& peer : _active_connections ) { - ilog( " active peer ${endpoint} peer_is_in_sync_with_us:${in_sync_with_us} we_are_in_sync_with_peer:${in_sync_with_them}", - ( "endpoint", peer->get_remote_endpoint() ) - ( "in_sync_with_us", !peer->peer_needs_sync_items_from_us )("in_sync_with_them", !peer->we_need_sync_items_from_peer ) ); - if( peer->we_need_sync_items_from_peer ) - ilog( " above peer has ${count} sync items we might need", ("count", peer->ids_of_items_to_get.size() ) ); - if (peer->inhibit_fetching_sync_blocks) - ilog( " we are not fetching sync blocks from the above peer (inhibit_fetching_sync_blocks == true)" ); + fc::scoped_lock lock(_active_connections.get_mutex()); + for( const peer_connection_ptr& peer : _active_connections ) + { + ilog( " active peer ${endpoint} peer_is_in_sync_with_us:${in_sync_with_us} we_are_in_sync_with_peer:${in_sync_with_them}", + ( "endpoint", peer->get_remote_endpoint() ) + ( "in_sync_with_us", !peer->peer_needs_sync_items_from_us )("in_sync_with_them", !peer->we_need_sync_items_from_peer ) ); + if( peer->we_need_sync_items_from_peer ) + ilog( " above peer has ${count} sync items we might need", ("count", peer->ids_of_items_to_get.size() ) ); + if (peer->inhibit_fetching_sync_blocks) + ilog( " we are not fetching sync blocks from the above peer (inhibit_fetching_sync_blocks == true)" ); + } } - for( const peer_connection_ptr& peer : _handshaking_connections ) { - ilog( " handshaking peer ${endpoint} in state ours(${our_state}) theirs(${their_state})", - ( "endpoint", peer->get_remote_endpoint() )("our_state", peer->our_state )("their_state", peer->their_state ) ); + fc::scoped_lock lock(_handshaking_connections.get_mutex()); 
+ for( const peer_connection_ptr& peer : _handshaking_connections ) + { + ilog( " handshaking peer ${endpoint} in state ours(${our_state}) theirs(${their_state})", + ( "endpoint", peer->get_remote_endpoint() )("our_state", peer->our_state )("their_state", peer->their_state ) ); + } } - ilog( "--------- MEMORY USAGE ------------" ); ilog( "node._active_sync_requests size: ${size}", ("size", _active_sync_requests.size() ) ); ilog( "node._received_sync_items size: ${size}", ("size", _received_sync_items.size() ) ); @@ -4396,6 +4495,7 @@ namespace graphene { namespace net { namespace detail { ilog( "node._items_to_fetch size: ${size}", ("size", _items_to_fetch.size() ) ); ilog( "node._new_inventory size: ${size}", ("size", _new_inventory.size() ) ); ilog( "node._message_cache size: ${size}", ("size", _message_cache.size() ) ); + fc::scoped_lock lock(_active_connections.get_mutex()); for( const peer_connection_ptr& peer : _active_connections ) { ilog( " peer ${endpoint}", ("endpoint", peer->get_remote_endpoint() ) ); @@ -4493,10 +4593,9 @@ namespace graphene { namespace net { namespace detail { { VERIFY_CORRECT_THREAD(); std::vector statuses; + fc::scoped_lock lock(_active_connections.get_mutex()); for (const peer_connection_ptr& peer : _active_connections) { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections - peer_status this_peer_status; this_peer_status.version = 0; fc::optional endpoint = peer->get_remote_endpoint(); @@ -4684,9 +4783,12 @@ namespace graphene { namespace net { namespace detail { _allowed_peers.insert(allowed_peers.begin(), allowed_peers.end()); std::list peers_to_disconnect; if (!_allowed_peers.empty()) - for (const peer_connection_ptr& peer : _active_connections) - if (_allowed_peers.find(peer->node_id) == _allowed_peers.end()) - peers_to_disconnect.push_back(peer); + { + fc::scoped_lock lock(_active_connections.get_mutex()); + for (const peer_connection_ptr& peer : _active_connections) + if 
(_allowed_peers.find(peer->node_id) == _allowed_peers.end()) + peers_to_disconnect.push_back(peer); + } for (const peer_connection_ptr& peer : peers_to_disconnect) disconnect_from_peer(peer.get(), "My allowed_peers list has changed, and you're no longer allowed. Bye."); #endif // ENABLE_P2P_DEBUGGING_API diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index da12718b98..a106cbc019 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -1,5 +1,6 @@ #pragma once #include +#include #include #include #include @@ -11,6 +12,107 @@ namespace graphene { namespace net { namespace detail { +/******* + * A class to wrap std::unordered_set for multithreading + */ +template , class Pred = std::equal_to > +class concurrent_unordered_set : private std::unordered_set +{ +private: + mutable fc::mutex mux; + +public: + // iterations require a lock. This exposes the mutex. Use with care (i.e. lock_guard) + fc::mutex& get_mutex()const { return mux; } + + // insertion + std::pair< typename std::unordered_set::iterator, bool> emplace( Key key) + { + fc::scoped_lock lock(mux); + return std::unordered_set::emplace( key ); + } + std::pair< typename std::unordered_set::iterator, bool> insert (const Key& val) + { + fc::scoped_lock lock(mux); + return std::unordered_set::insert( val ); + } + // size + size_t size() const + { + fc::scoped_lock lock(mux); + return std::unordered_set::size(); + } + bool empty() const noexcept + { + fc::scoped_lock lock(mux); + return std::unordered_set::empty(); + } + // removal + void clear() noexcept + { + fc::scoped_lock lock(mux); + std::unordered_set::clear(); + } + typename std::unordered_set::iterator erase( + typename std::unordered_set::const_iterator itr) + { + fc::scoped_lock lock(mux); + return std::unordered_set::erase( itr); + } + size_t erase( const Key& key) + { + fc::scoped_lock lock(mux); + return std::unordered_set::erase( key ); + } + // iteration + typename std::unordered_set::iterator begin() 
noexcept + { + fc::scoped_lock lock(mux); + return std::unordered_set::begin(); + } + typename std::unordered_set::const_iterator begin() const noexcept + { + fc::scoped_lock lock(mux); + return std::unordered_set::begin(); + } + typename std::unordered_set::local_iterator begin(size_t n) + { + fc::scoped_lock lock(mux); + return std::unordered_set::begin(n); + } + typename std::unordered_set::const_local_iterator begin(size_t n) const + { + fc::scoped_lock lock(mux); + return std::unordered_set::begin(n); + } + typename std::unordered_set::iterator end() noexcept + { + fc::scoped_lock lock(mux); + return std::unordered_set::end(); + } + typename std::unordered_set::const_iterator end() const noexcept + { + fc::scoped_lock lock(mux); + return std::unordered_set::end(); + } + typename std::unordered_set::local_iterator end(size_t n) + { + fc::scoped_lock lock(mux); + return std::unordered_set::end(n); + } + typename std::unordered_set::const_local_iterator end(size_t n) const + { + fc::scoped_lock lock(mux); + return std::unordered_set::end(n); + } + // search + typename std::unordered_set::const_iterator find(Key key) + { + fc::scoped_lock lock(mux); + return std::unordered_set::find(key); + } +}; + // when requesting items from peers, we want to prioritize any blocks before // transactions, but otherwise request items in the order we heard about them struct prioritized_item_id @@ -272,13 +374,13 @@ class node_impl : public peer_connection_delegate /** Stores all connections which have not yet finished key exchange or are still sending initial handshaking messages * back and forth (not yet ready to initiate syncing) */ - std::unordered_set _handshaking_connections; + concurrent_unordered_set _handshaking_connections; /** stores fully established connections we're either syncing with or in normal operation with */ - std::unordered_set _active_connections; + concurrent_unordered_set _active_connections; /** stores connections we've closed (sent closing message, not 
actually closed), but are still waiting for the remote end to close before we delete them */ - std::unordered_set _closing_connections; + concurrent_unordered_set _closing_connections; /** stores connections we've closed, but are still waiting for the OS to notify us that the socket is really closed */ - std::unordered_set _terminating_connections; + concurrent_unordered_set _terminating_connections; boost::circular_buffer _most_recent_blocks_accepted; // the /n/ most recent blocks we've accepted (currently tuned to the max number of connections) diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 2a6e62114e..47878ba006 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -64,9 +64,12 @@ class account_history_plugin_impl account_history_plugin& _self; flat_set _tracked_accounts; + flat_set _extended_history_accounts; + flat_set _extended_history_registrars; bool _partial_operations = false; primary_index< operation_history_index >* _oho_index; uint64_t _max_ops_per_account = -1; + uint64_t _extended_max_ops_per_account = -1; private: /** add one history record, then check and remove the earliest history record */ void add_account_history( const account_id_type account_id, const operation_history_id_type op_id ); @@ -212,9 +215,25 @@ void account_history_plugin_impl::add_account_history( const account_id_type acc obj.most_recent_op = ath.id; obj.total_ops = ath.sequence; }); - // remove the earliest account history entry if too many - // _max_ops_per_account is guaranteed to be non-zero outside - if( stats_obj.total_ops - stats_obj.removed_ops > _max_ops_per_account ) + // Amount of history to keep depends on if account is in the "extended history" list + bool extended_hist = false; + for ( auto eh_account_id : _extended_history_accounts ) { + extended_hist |= (account_id == 
eh_account_id); + } + if ( _extended_history_registrars.size() > 0 ) { + const account_id_type registrar_id = account_id(db).registrar; + for ( auto eh_registrar_id : _extended_history_registrars ) { + extended_hist |= (registrar_id == eh_registrar_id); + } + } + // _max_ops_per_account is guaranteed to be non-zero outside; max_ops_to_keep + // will likewise be non-zero, and also non-negative (it is unsigned). + auto max_ops_to_keep = _max_ops_per_account; + if (extended_hist && _extended_max_ops_per_account > max_ops_to_keep) { + max_ops_to_keep = _extended_max_ops_per_account; + } + // Remove the earliest account history entry if too many. + if( stats_obj.total_ops - stats_obj.removed_ops > max_ops_to_keep ) { // look for the earliest entry const auto& his_idx = db.get_index_type(); @@ -283,9 +302,20 @@ void account_history_plugin::plugin_set_program_options( ) { cli.add_options() - ("track-account", boost::program_options::value>()->composing()->multitoken(), "Account ID to track history for (may specify multiple times)") - ("partial-operations", boost::program_options::value(), "Keep only those operations in memory that are related to account history tracking") - ("max-ops-per-account", boost::program_options::value(), "Maximum number of operations per account will be kept in memory") + ("track-account", boost::program_options::value>()->composing()->multitoken(), + "Account ID to track history for (may specify multiple times; if unset will track all accounts)") + ("partial-operations", boost::program_options::value(), + "Keep only those operations in memory that are related to account history tracking") + ("max-ops-per-account", boost::program_options::value(), + "Maximum number of operations per account that will be kept in memory") + ("extended-max-ops-per-account", boost::program_options::value(), + "Maximum number of operations to keep for accounts for which extended history is kept") + ("extended-history-by-account", + 
boost::program_options::value>()->composing()->multitoken(), + "Track longer history for these accounts (may specify multiple times)") + ("extended-history-by-registrar", + boost::program_options::value>()->composing()->multitoken(), + "Track longer history for accounts with this registrar (may specify multiple times)") ; cfg.add(cli); } @@ -303,6 +333,14 @@ void account_history_plugin::plugin_initialize(const boost::program_options::var if (options.count("max-ops-per-account")) { my->_max_ops_per_account = options["max-ops-per-account"].as(); } + if (options.count("extended-max-ops-per-account")) { + auto emopa = options["extended-max-ops-per-account"].as(); + my->_extended_max_ops_per_account = (emopa > my->_max_ops_per_account) ? emopa : my->_max_ops_per_account; + } + LOAD_VALUE_SET(options, "extended-history-by-account", my->_extended_history_accounts, + graphene::chain::account_id_type); + LOAD_VALUE_SET(options, "extended-history-by-registrar", my->_extended_history_registrars, + graphene::chain::account_id_type); } void account_history_plugin::plugin_startup() diff --git a/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp b/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp index ceea704342..b5f6684c89 100644 --- a/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp +++ b/libraries/plugins/custom_operations/include/graphene/custom_operations/custom_objects.hpp @@ -42,8 +42,8 @@ enum types { struct account_storage_object : public abstract_object { - static const uint8_t space_id = CUSTOM_OPERATIONS_SPACE_ID; - static const uint8_t type_id = account_map; + static constexpr uint8_t space_id = CUSTOM_OPERATIONS_SPACE_ID; + static constexpr uint8_t type_id = account_map; account_id_type account; string catalog; diff --git a/libraries/plugins/delayed_node/delayed_node_plugin.cpp b/libraries/plugins/delayed_node/delayed_node_plugin.cpp index 
01f4e48b31..4f08260207 100644 --- a/libraries/plugins/delayed_node/delayed_node_plugin.cpp +++ b/libraries/plugins/delayed_node/delayed_node_plugin.cpp @@ -63,10 +63,24 @@ void delayed_node_plugin::plugin_set_program_options(bpo::options_description& c void delayed_node_plugin::connect() { + fc::http::websocket_connection_ptr con; + try + { + con = my->client.connect(my->remote_endpoint); + } + catch( const fc::exception& e ) + { + wlog("Error while connecting: ${e}", ("e", e.to_detail_string())); + connection_failed(); + return; + } my->client_connection = std::make_shared( - my->client.connect(my->remote_endpoint), - GRAPHENE_NET_MAX_NESTED_OBJECTS ); + con, GRAPHENE_NET_MAX_NESTED_OBJECTS ); my->database_api = my->client_connection->get_remote_api(0); + my->database_api->set_block_applied_callback([this]( const fc::variant& block_id ) + { + fc::from_variant( block_id, my->last_received_remote_head, GRAPHENE_MAX_NESTED_OBJECTS ); + } ); my->client_connection_closed = my->client_connection->closed.connect([this] { connection_failed(); }); @@ -141,24 +155,12 @@ void delayed_node_plugin::plugin_startup() mainloop(); }); - try - { - connect(); - my->database_api->set_block_applied_callback([this]( const fc::variant& block_id ) - { - fc::from_variant( block_id, my->last_received_remote_head, GRAPHENE_MAX_NESTED_OBJECTS ); - } ); - return; - } - catch (const fc::exception& e) - { - elog("Error during connection: ${e}", ("e", e.to_detail_string())); - } - fc::async([this]{connection_failed();}); + connect(); } void delayed_node_plugin::connection_failed() { + my->last_received_remote_head = my->last_processed_remote_head; elog("Connection to trusted node failed; retrying in 5 seconds..."); fc::schedule([this]{connect();}, fc::time_point::now() + fc::seconds(5)); } diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index b7c0f366b0..80bb62cb81 100644 --- 
a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -173,7 +173,11 @@ bool elasticsearch_plugin_impl::update_account_histories( const signed_block& b for( auto& account_id : impacted ) { if(!add_elasticsearch( account_id, oho, b.block_num() )) + { + elog( "Error adding data to Elastic Search: block num ${b}, account ${a}, data ${d}", + ("b",b.block_num()) ("a",account_id) ("d", oho) ); return false; + } } } // we send bulk at end of block when we are in sync for better real time client experience @@ -184,7 +188,16 @@ bool elasticsearch_plugin_impl::update_account_histories( const signed_block& b { prepare.clear(); if(!graphene::utilities::SendBulk(std::move(es))) + { + // Note: although called with `std::move()`, `es` is not updated in `SendBulk()` + elog( "Error sending ${n} lines of bulk data to Elastic Search, the first lines are:", + ("n",es.bulk_lines.size()) ); + for( size_t i = 0; i < es.bulk_lines.size() && i < 10; ++i ) + { + edump( (es.bulk_lines[i]) ); + } return false; + } else bulk_lines.clear(); } @@ -300,7 +313,16 @@ bool elasticsearch_plugin_impl::add_elasticsearch( const account_id_type account prepare.clear(); populateESstruct(); if(!graphene::utilities::SendBulk(std::move(es))) + { + // Note: although called with `std::move()`, `es` is not updated in `SendBulk()` + elog( "Error sending ${n} lines of bulk data to Elastic Search, the first lines are:", + ("n",es.bulk_lines.size()) ); + for( size_t i = 0; i < es.bulk_lines.size() && i < 10; ++i ) + { + edump( (es.bulk_lines[i]) ); + } return false; + } else bulk_lines.clear(); } @@ -588,7 +610,12 @@ vector elasticsearch_plugin::get_account_history( variant variant_response = fc::json::from_string(response); const auto hits = variant_response["hits"]["total"]; - const auto size = std::min(static_cast(hits.as_uint64()), limit); + uint32_t size; + if( hits.is_object() ) // ES-7 ? 
+ size = static_cast(hits["value"].as_uint64()); + else // probably ES-6 + size = static_cast(hits.as_uint64()); + size = std::min( size, limit ); for(unsigned i=0; i { - static const uint8_t space_id = MARKET_HISTORY_SPACE_ID; - static const uint8_t type_id = bucket_object_type; + static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; + static constexpr uint8_t type_id = bucket_object_type; price high()const { return asset( high_base, key.base ) / asset( high_quote, key.quote ); } price low()const { return asset( low_base, key.base ) / asset( low_quote, key.quote ); } @@ -112,8 +112,8 @@ struct history_key { }; struct order_history_object : public abstract_object { - static const uint8_t space_id = MARKET_HISTORY_SPACE_ID; - static const uint8_t type_id = order_history_object_type; + static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; + static constexpr uint8_t type_id = order_history_object_type; history_key key; fc::time_point_sec time; @@ -137,8 +137,8 @@ struct order_history_object_key_sequence_extractor struct market_ticker_object : public abstract_object { - static const uint8_t space_id = MARKET_HISTORY_SPACE_ID; - static const uint8_t type_id = market_ticker_object_type; + static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; + static constexpr uint8_t type_id = market_ticker_object_type; asset_id_type base; asset_id_type quote; @@ -152,8 +152,8 @@ struct market_ticker_object : public abstract_object struct market_ticker_meta_object : public abstract_object { - static const uint8_t space_id = MARKET_HISTORY_SPACE_ID; - static const uint8_t type_id = market_ticker_meta_object_type; + static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; + static constexpr uint8_t type_id = market_ticker_meta_object_type; object_id_type rolling_min_order_his_id; bool skip_min_order_his_id = false; diff --git a/libraries/protocol/CMakeLists.txt b/libraries/protocol/CMakeLists.txt index 8d6b096e84..74721affd8 100644 --- 
a/libraries/protocol/CMakeLists.txt +++ b/libraries/protocol/CMakeLists.txt @@ -22,6 +22,7 @@ list(APPEND SOURCES account.cpp committee_member.cpp custom.cpp market.cpp + liquidity_pool.cpp ticket.cpp operations.cpp pts_address.cpp diff --git a/libraries/protocol/include/graphene/protocol/account.hpp b/libraries/protocol/include/graphene/protocol/account.hpp index 37bfb03eb9..d0519082a2 100644 --- a/libraries/protocol/include/graphene/protocol/account.hpp +++ b/libraries/protocol/include/graphene/protocol/account.hpp @@ -130,7 +130,7 @@ namespace graphene { namespace protocol { * @brief Update an existing account * * This operation is used to update an existing account. It can be used to update the authorities, or adjust the options on the account. - * See @ref account_object::options_type for the options which may be updated. + * See @ref account_object::options for the options which may be updated. */ struct account_update_operation : public base_operation { diff --git a/libraries/protocol/include/graphene/protocol/base.hpp b/libraries/protocol/include/graphene/protocol/base.hpp index a51990374c..d8a4869000 100644 --- a/libraries/protocol/include/graphene/protocol/base.hpp +++ b/libraries/protocol/include/graphene/protocol/base.hpp @@ -92,7 +92,20 @@ namespace graphene { namespace protocol { flat_set removed_objects; }; - typedef fc::static_variant operation_result; + struct generic_exchange_operation_result + { + vector paid; + vector received; + vector fees; + }; + + typedef fc::static_variant < + void_result, + object_id_type, + asset, + generic_operation_result, + generic_exchange_operation_result + > operation_result; struct base_operation { @@ -135,5 +148,8 @@ FC_REFLECT_TYPENAME( graphene::protocol::operation_result ) FC_REFLECT_TYPENAME( graphene::protocol::future_extensions ) FC_REFLECT( graphene::protocol::void_result, ) FC_REFLECT( graphene::protocol::generic_operation_result, (new_objects)(updated_objects)(removed_objects) ) +FC_REFLECT( 
graphene::protocol::generic_exchange_operation_result, (paid)(received)(fees) ) GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::generic_operation_result ) // impl in operations.cpp +// impl in operations.cpp +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::generic_exchange_operation_result ) diff --git a/libraries/protocol/include/graphene/protocol/liquidity_pool.hpp b/libraries/protocol/include/graphene/protocol/liquidity_pool.hpp new file mode 100644 index 0000000000..d86a498b5e --- /dev/null +++ b/libraries/protocol/include/graphene/protocol/liquidity_pool.hpp @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2020 Abit More, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#pragma once +#include +#include + +namespace graphene { namespace protocol { + + /** + * @brief Create a new liquidity pool + * @ingroup operations + */ + struct liquidity_pool_create_operation : public base_operation + { + struct fee_parameters_type { uint64_t fee = 50 * GRAPHENE_BLOCKCHAIN_PRECISION; }; + + asset fee; ///< Operation fee + account_id_type account; ///< The account who creates the liquidity pool + asset_id_type asset_a; ///< Type of the first asset in the pool + asset_id_type asset_b; ///< Type of the second asset in the pool + asset_id_type share_asset; ///< Type of the share asset aka the LP token + uint16_t taker_fee_percent = 0; ///< Taker fee percent + uint16_t withdrawal_fee_percent = 0; ///< Withdrawal fee percent + + extensions_type extensions; ///< Unused. Reserved for future use. + + account_id_type fee_payer()const { return account; } + void validate()const; + }; + + /** + * @brief Delete a liquidity pool + * @ingroup operations + */ + struct liquidity_pool_delete_operation : public base_operation + { + struct fee_parameters_type { uint64_t fee = 0; }; + + asset fee; ///< Operation fee + account_id_type account; ///< The account who owns the liquidity pool + liquidity_pool_id_type pool; ///< ID of the liquidity pool + + extensions_type extensions; ///< Unused. Reserved for future use. 
+ + account_id_type fee_payer()const { return account; } + void validate()const; + }; + + /** + * @brief Deposit to a liquidity pool + * @ingroup operations + */ + struct liquidity_pool_deposit_operation : public base_operation + { + struct fee_parameters_type { uint64_t fee = GRAPHENE_BLOCKCHAIN_PRECISION / 10; }; + + asset fee; ///< Operation fee + account_id_type account; ///< The account who deposits to the liquidity pool + liquidity_pool_id_type pool; ///< ID of the liquidity pool + asset amount_a; ///< The amount of the first asset to deposit + asset amount_b; ///< The amount of the second asset to deposit + + extensions_type extensions; ///< Unused. Reserved for future use. + + account_id_type fee_payer()const { return account; } + void validate()const; + }; + + /** + * @brief Withdraw from a liquidity pool + * @ingroup operations + */ + struct liquidity_pool_withdraw_operation : public base_operation + { + struct fee_parameters_type { uint64_t fee = 5 * GRAPHENE_BLOCKCHAIN_PRECISION; }; + + asset fee; ///< Operation fee + account_id_type account; ///< The account who withdraws from the liquidity pool + liquidity_pool_id_type pool; ///< ID of the liquidity pool + asset share_amount; ///< The amount of the share asset to use + + extensions_type extensions; ///< Unused. Reserved for future use. 
+ + account_id_type fee_payer()const { return account; } + void validate()const; + }; + + /** + * @brief Exchange with a liquidity pool + * @ingroup operations + */ + struct liquidity_pool_exchange_operation : public base_operation + { + struct fee_parameters_type { uint64_t fee = 1 * GRAPHENE_BLOCKCHAIN_PRECISION; }; + + asset fee; ///< Operation fee + account_id_type account; ///< The account who exchanges with the liquidity pool + liquidity_pool_id_type pool; ///< ID of the liquidity pool + asset amount_to_sell; ///< The amount of one asset type to sell + asset min_to_receive; ///< The minimum amount of the other asset type to receive + + extensions_type extensions; ///< Unused. Reserved for future use. + + account_id_type fee_payer()const { return account; } + void validate()const; + }; + +} } // graphene::protocol + +FC_REFLECT( graphene::protocol::liquidity_pool_create_operation::fee_parameters_type, (fee) ) +FC_REFLECT( graphene::protocol::liquidity_pool_delete_operation::fee_parameters_type, (fee) ) +FC_REFLECT( graphene::protocol::liquidity_pool_deposit_operation::fee_parameters_type, (fee) ) +FC_REFLECT( graphene::protocol::liquidity_pool_withdraw_operation::fee_parameters_type, (fee) ) +FC_REFLECT( graphene::protocol::liquidity_pool_exchange_operation::fee_parameters_type, (fee) ) + +FC_REFLECT( graphene::protocol::liquidity_pool_create_operation, + (fee)(account)(asset_a)(asset_b)(share_asset) + (taker_fee_percent)(withdrawal_fee_percent)(extensions) ) +FC_REFLECT( graphene::protocol::liquidity_pool_delete_operation, + (fee)(account)(pool)(extensions) ) +FC_REFLECT( graphene::protocol::liquidity_pool_deposit_operation, + (fee)(account)(pool)(amount_a)(amount_b)(extensions) ) +FC_REFLECT( graphene::protocol::liquidity_pool_withdraw_operation, + (fee)(account)(pool)(share_amount)(extensions) ) +FC_REFLECT( graphene::protocol::liquidity_pool_exchange_operation, + (fee)(account)(pool)(amount_to_sell)(min_to_receive)(extensions) ) + 
+GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_create_operation::fee_parameters_type ) +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_delete_operation::fee_parameters_type ) +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_deposit_operation::fee_parameters_type ) +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_withdraw_operation::fee_parameters_type ) +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_exchange_operation::fee_parameters_type ) + +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_create_operation ) +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_delete_operation ) +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_deposit_operation ) +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_withdraw_operation ) +GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_exchange_operation ) diff --git a/libraries/protocol/include/graphene/protocol/object_id.hpp b/libraries/protocol/include/graphene/protocol/object_id.hpp index 7f627e4b19..34aa51c481 100644 --- a/libraries/protocol/include/graphene/protocol/object_id.hpp +++ b/libraries/protocol/include/graphene/protocol/object_id.hpp @@ -107,8 +107,8 @@ namespace graphene { namespace db { template struct object_id { - static const uint8_t space_id = SpaceID; - static const uint8_t type_id = TypeID; + static constexpr uint8_t space_id = SpaceID; + static constexpr uint8_t type_id = TypeID; object_id() = default; object_id( unsigned_int i ):instance(i){} diff --git a/libraries/protocol/include/graphene/protocol/operations.hpp b/libraries/protocol/include/graphene/protocol/operations.hpp index e57fec60fd..c963f3c6b2 100644 --- a/libraries/protocol/include/graphene/protocol/operations.hpp +++ 
b/libraries/protocol/include/graphene/protocol/operations.hpp @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -108,7 +109,12 @@ namespace graphene { namespace protocol { custom_authority_update_operation, custom_authority_delete_operation, ticket_create_operation, - ticket_update_operation + ticket_update_operation, + liquidity_pool_create_operation, + liquidity_pool_delete_operation, + liquidity_pool_deposit_operation, + liquidity_pool_withdraw_operation, + liquidity_pool_exchange_operation > operation; /// @} // operations group diff --git a/libraries/protocol/include/graphene/protocol/types.hpp b/libraries/protocol/include/graphene/protocol/types.hpp index 6fd06eb4c5..5e1c3cfc5d 100644 --- a/libraries/protocol/include/graphene/protocol/types.hpp +++ b/libraries/protocol/include/graphene/protocol/types.hpp @@ -314,6 +314,7 @@ GRAPHENE_DEFINE_IDS(protocol, protocol_ids, /*protocol objects are not prefixed* (htlc) (custom_authority) (ticket) + (liquidity_pool) ) FC_REFLECT(graphene::protocol::public_key_type, (key_data)) diff --git a/libraries/protocol/liquidity_pool.cpp b/libraries/protocol/liquidity_pool.cpp new file mode 100644 index 0000000000..956273609d --- /dev/null +++ b/libraries/protocol/liquidity_pool.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2020 Abit More, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include + +#include + +namespace graphene { namespace protocol { + +void liquidity_pool_create_operation::validate()const +{ + FC_ASSERT( fee.amount >= 0, "Fee should not be negative" ); + FC_ASSERT( asset_a < asset_b, "ID of the first asset should be smaller than ID of the second asset" ); + FC_ASSERT( asset_a != share_asset && asset_b != share_asset, + "Share asset can not be the same as one of the assets in the pool" ); + FC_ASSERT( taker_fee_percent <= GRAPHENE_100_PERCENT, "Taker fee percent should not exceed 100%" ); + FC_ASSERT( withdrawal_fee_percent <= GRAPHENE_100_PERCENT, "Withdrawal fee percent should not exceed 100%" ); +} + +void liquidity_pool_delete_operation::validate()const +{ + FC_ASSERT( fee.amount >= 0, "Fee should not be negative" ); +} + +void liquidity_pool_deposit_operation::validate()const +{ + FC_ASSERT( fee.amount >= 0, "Fee should not be negative" ); + FC_ASSERT( amount_a.amount > 0 && amount_b.amount > 0, "Both amounts of the assets should be positive" ); + FC_ASSERT( amount_a.asset_id < amount_b.asset_id, + "ID of the first asset should be smaller than ID of the second asset" ); +} + +void liquidity_pool_withdraw_operation::validate()const +{ + FC_ASSERT( fee.amount >= 0, "Fee should not be negative" ); + FC_ASSERT( share_amount.amount > 0, "Amount of the share asset should be positive" ); +} + +void liquidity_pool_exchange_operation::validate()const +{ + FC_ASSERT( fee.amount >= 0, "Fee should not be negative" ); + FC_ASSERT( 
amount_to_sell.amount > 0, "Amount to sell should be positive" ); + FC_ASSERT( min_to_receive.amount > 0, "Minimum amount to receive should be positive" ); + FC_ASSERT( amount_to_sell.asset_id != min_to_receive.asset_id, + "ID of the two assets should not be the same" ); +} + +} } // graphene::protocol + +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_create_operation::fee_parameters_type ) +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_delete_operation::fee_parameters_type ) +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_deposit_operation::fee_parameters_type ) +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_withdraw_operation::fee_parameters_type ) +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_exchange_operation::fee_parameters_type ) + +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_create_operation ) +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_delete_operation ) +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_deposit_operation ) +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_withdraw_operation ) +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::liquidity_pool_exchange_operation ) diff --git a/libraries/protocol/operations.cpp b/libraries/protocol/operations.cpp index a50bf3a75b..12914d2310 100644 --- a/libraries/protocol/operations.cpp +++ b/libraries/protocol/operations.cpp @@ -112,5 +112,6 @@ void operation_get_required_authorities( const operation& op, } } // namespace graphene::protocol GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::generic_operation_result ) +GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::generic_exchange_operation_result ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::protocol::op_wrapper ) diff --git 
a/libraries/utilities/elasticsearch.cpp b/libraries/utilities/elasticsearch.cpp index 0b94de50c9..27f3b186c0 100644 --- a/libraries/utilities/elasticsearch.cpp +++ b/libraries/utilities/elasticsearch.cpp @@ -100,18 +100,19 @@ bool handleBulkResponse(long http_code, const std::string& CurlReadBuffer) fc::variant j = fc::json::from_string(CurlReadBuffer); bool errors = j["errors"].as_bool(); if(errors == true) { + elog( "ES returned 200 but with errors: ${e}", ("e", CurlReadBuffer) ); return false; } } else { if(http_code == 413) { - elog( "413 error: Can be low disk space" ); + elog( "413 error: Can be low disk space. ${e}", ("e", CurlReadBuffer) ); } else if(http_code == 401) { - elog( "401 error: Unauthorized" ); + elog( "401 error: Unauthorized. ${e}", ("e", CurlReadBuffer) ); } else { - elog( std::to_string(http_code) + " error: Unknown error" ); + elog( "${code} error: ${e}", ("code", std::to_string(http_code)) ("e", CurlReadBuffer) ); } return false; } diff --git a/libraries/wallet/CMakeLists.txt b/libraries/wallet/CMakeLists.txt index 016abb23bf..c268c362a7 100644 --- a/libraries/wallet/CMakeLists.txt +++ b/libraries/wallet/CMakeLists.txt @@ -17,7 +17,7 @@ if( PERL_FOUND AND DOXYGEN_FOUND AND NOT "${CMAKE_GENERATOR}" STREQUAL "Ninja" ) DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/generate_api_documentation.pl ${CMAKE_CURRENT_BINARY_DIR}/doxygen/perlmod/DoxyDocs.pm ) else(MSVC) add_custom_command( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp - COMMAND PERLLIB=${CMAKE_CURRENT_BINARY_DIR} ${PERL_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generate_api_documentation.pl ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new + COMMAND ${PERL_EXECUTABLE} -I "${CMAKE_CURRENT_BINARY_DIR}" ${CMAKE_CURRENT_SOURCE_DIR}/generate_api_documentation.pl ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp COMMAND ${CMAKE_COMMAND} 
-E remove ${CMAKE_CURRENT_BINARY_DIR}/api_documentation.cpp.new DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/generate_api_documentation.pl ${CMAKE_CURRENT_BINARY_DIR}/doxygen/perlmod/DoxyDocs.pm ) diff --git a/libraries/wallet/operation_printer.cpp b/libraries/wallet/operation_printer.cpp index 3b16a4f247..3d880ea18b 100644 --- a/libraries/wallet/operation_printer.cpp +++ b/libraries/wallet/operation_printer.cpp @@ -218,7 +218,13 @@ std::string operation_result_printer::operator()(const asset& a) std::string operation_result_printer::operator()(const generic_operation_result& r) { - return fc::json::to_pretty_string(r); + return fc::json::to_string(r); +} + +std::string operation_result_printer::operator()(const generic_exchange_operation_result& r) +{ + // TODO show pretty amounts instead of raw json + return fc::json::to_string(r); } }}} // graphene::wallet::detail diff --git a/libraries/wallet/operation_printer.hpp b/libraries/wallet/operation_printer.hpp index e12c344311..1af15fd04d 100644 --- a/libraries/wallet/operation_printer.hpp +++ b/libraries/wallet/operation_printer.hpp @@ -48,6 +48,7 @@ struct operation_result_printer std::string operator()(const graphene::protocol::object_id_type& oid); std::string operator()(const graphene::protocol::asset& a); std::string operator()(const graphene::protocol::generic_operation_result& r); + std::string operator()(const graphene::protocol::generic_exchange_operation_result& r); }; // BLOCK TRX OP VOP diff --git a/programs/build_helpers/run-node-test b/programs/build_helpers/run-node-test new file mode 100755 index 0000000000..d85c3c5105 --- /dev/null +++ b/programs/build_helpers/run-node-test @@ -0,0 +1,54 @@ +#!/bin/sh + +DATA_DIR="`mktemp -d`" + +cleanup () { + kill -9 $NODE_PID $CLI_PID + if [ "$1" != 0 ]; then + echo "----- node.log -----" + cat node.log + echo "----- cli.log -----" + cat cli.log + fi + rm -rf node.log cli.log "$DATA_DIR" + exit $1 +} + +echo "Starting witness node..." 
1>&2 +programs/witness_node/witness_node --data-dir "$DATA_DIR" \ + --checkpoint '[131072,"0000000000000000000000000000000000000001"]' \ + --rpc-endpoint 127.0.0.1:8090 --force-validate >node.log 2>&1 & +NODE_PID=$! + +echo "Waiting for cli_wallet start..." 1>&2 & +CLI_PID=$! +sleep 5 +_START="`date +%s`" +while ! ps -p "$CLI_PID" >/dev/null && [ $((`date +%s` - $_START)) -lt 120 ]; do + programs/cli_wallet/cli_wallet -sws://127.0.0.1:8090 -d -H127.0.0.1:8091 >cli.log 2>&1 & + CLI_PID=$! + sleep 10 +done + +if ! ps -p "$CLI_PID" >/dev/null; then + echo "Failed to start?!" 1>&2 + cleanup 1 +fi + +echo "Waiting for head_block 131071..." 1>&2 +touch "$DATA_DIR"/info.json +_START="`date +%s`" +while [ $(( `date +%s` - $_START )) -lt 180 ]; do + sleep 2 + curl --silent -o "$DATA_DIR"/info.json --data '{"id":0,"method":"info","params":[]}' \ + http://127.0.0.1:8091/rpc + tr , '\n' <"$DATA_DIR"/info.json | grep head_block_num + if tr , '\n' <"$DATA_DIR"/info.json | grep head_block_num.*131071; then + echo "Success!" 1>&2 + cleanup 0 + fi +done + +echo "Failed to sync?!" 
1>&2 + +cleanup 1 diff --git a/programs/cli_wallet/main.cpp b/programs/cli_wallet/main.cpp index abe317cd31..dc6249d596 100644 --- a/programs/cli_wallet/main.cpp +++ b/programs/cli_wallet/main.cpp @@ -245,7 +245,7 @@ int main( int argc, char** argv ) std::shared_ptr _websocket_server; if( options.count("rpc-endpoint") ) { - _websocket_server = std::make_shared(); + _websocket_server = std::make_shared(""); _websocket_server->on_connection([&wapi]( const fc::http::websocket_connection_ptr& c ){ auto wsc = std::make_shared(c, GRAPHENE_MAX_NESTED_OBJECTS); wsc->register_api(wapi); @@ -263,7 +263,7 @@ int main( int argc, char** argv ) std::shared_ptr _websocket_tls_server; if( options.count("rpc-tls-endpoint") ) { - _websocket_tls_server = std::make_shared(cert_pem); + _websocket_tls_server = std::make_shared(cert_pem, "", ""); _websocket_tls_server->on_connection([&wapi]( const fc::http::websocket_connection_ptr& c ){ auto wsc = std::make_shared(c, GRAPHENE_MAX_NESTED_OBJECTS); wsc->register_api(wapi); @@ -278,7 +278,7 @@ int main( int argc, char** argv ) std::shared_ptr _http_ws_server; if( options.count("rpc-http-endpoint" ) ) { - _http_ws_server = std::make_shared(); + _http_ws_server = std::make_shared(""); ilog( "Listening for incoming HTTP and WS RPC requests on ${p}", ("p", options.at("rpc-http-endpoint").as()) ); _http_ws_server->on_connection([&wapi]( const fc::http::websocket_connection_ptr& c ){ diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 001c817300..44069f2560 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,4 +1,13 @@ file(GLOB COMMON_SOURCES "common/*.cpp") +file(GLOB COMMON_HEADERS "common/*.hpp") + +add_library( database_fixture + ${COMMON_SOURCES} + ${COMMON_HEADERS} + ) +target_link_libraries( database_fixture PUBLIC graphene_app graphene_es_objects graphene_egenesis_none ) +target_include_directories( database_fixture + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/common" ) find_package( Gperftools QUIET ) if( 
GPERFTOOLS_FOUND ) @@ -7,52 +16,36 @@ if( GPERFTOOLS_FOUND ) endif() file(GLOB UNIT_TESTS "tests/*.cpp") -add_executable( chain_test ${COMMON_SOURCES} ${UNIT_TESTS} ) -target_link_libraries( chain_test - graphene_chain graphene_app graphene_witness graphene_account_history graphene_elasticsearch - graphene_es_objects graphene_egenesis_none graphene_api_helper_indexes graphene_custom_operations - fc graphene_wallet ${PLATFORM_SPECIFIC_LIBS} ) +add_executable( chain_test ${UNIT_TESTS} ) +target_link_libraries( chain_test graphene_app database_fixture + graphene_witness graphene_wallet ${PLATFORM_SPECIFIC_LIBS} ) if(MSVC) set_source_files_properties( tests/serialization_tests.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) set_source_files_properties( tests/common/database_fixture.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) endif(MSVC) file(GLOB PERFORMANCE_TESTS "performance/*.cpp") -add_executable( performance_test ${COMMON_SOURCES} ${PERFORMANCE_TESTS} ) -target_link_libraries( performance_test - graphene_chain graphene_app graphene_account_history graphene_elasticsearch - graphene_es_objects graphene_egenesis_none graphene_api_helper_indexes - graphene_custom_operations - fc ${PLATFORM_SPECIFIC_LIBS} ) - -file(GLOB BENCH_MARKS "benchmarks/*.cpp") -add_executable( chain_bench ${COMMON_SOURCES} ${BENCH_MARKS} ) -target_link_libraries( chain_bench - graphene_chain graphene_app graphene_account_history graphene_elasticsearch - graphene_es_objects graphene_egenesis_none graphene_api_helper_indexes - graphene_custom_operations - fc ${PLATFORM_SPECIFIC_LIBS} ) +add_executable( performance_test ${PERFORMANCE_TESTS} ) +target_link_libraries( performance_test database_fixture ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB APP_SOURCES "app/*.cpp") add_executable( app_test ${APP_SOURCES} ) -target_link_libraries( app_test graphene_app graphene_account_history graphene_net graphene_witness graphene_chain graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( app_test 
graphene_app graphene_witness graphene_egenesis_none + ${PLATFORM_SPECIFIC_LIBS} ) file(GLOB CLI_SOURCES "cli/*.cpp") add_executable( cli_test ${CLI_SOURCES} ) if(WIN32) list(APPEND PLATFORM_SPECIFIC_LIBS ws2_32) endif() -target_link_libraries( cli_test graphene_app graphene_wallet graphene_witness graphene_account_history graphene_net graphene_chain graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( cli_test graphene_app graphene_wallet graphene_witness graphene_egenesis_none + ${PLATFORM_SPECIFIC_LIBS} ) if(MSVC) set_source_files_properties( cli/main.cpp PROPERTIES COMPILE_FLAGS "/bigobj" ) endif(MSVC) file(GLOB ES_SOURCES "elasticsearch/*.cpp") -add_executable( es_test ${COMMON_SOURCES} ${ES_SOURCES} ) -target_link_libraries( es_test - graphene_chain graphene_app graphene_account_history graphene_elasticsearch - graphene_es_objects graphene_egenesis_none graphene_api_helper_indexes - graphene_custom_operations - fc ${PLATFORM_SPECIFIC_LIBS} ) +add_executable( es_test ${ES_SOURCES} ) +target_link_libraries( es_test database_fixture ${PLATFORM_SPECIFIC_LIBS} ) add_subdirectory( generate_empty_blocks ) diff --git a/tests/app/main.cpp b/tests/app/main.cpp index bd7905870d..a0d1b9b168 100644 --- a/tests/app/main.cpp +++ b/tests/app/main.cpp @@ -53,6 +53,17 @@ namespace bpo = boost::program_options; namespace fc { extern std::unordered_map &get_logger_map(); extern std::unordered_map &get_appender_map(); + + /** Waits for F() to return true before max_duration has passed. 
+ */ + template + static void wait_for( const fc::microseconds max_duration, const Functor&& f ) + { + const auto start = fc::time_point::now(); + while( !f() && fc::time_point::now() < start + max_duration ) + fc::usleep(fc::milliseconds(100)); + BOOST_REQUIRE( f() ); + } } BOOST_AUTO_TEST_CASE(load_configuration_options_test_config_logging_files_created) @@ -234,7 +245,15 @@ BOOST_AUTO_TEST_CASE( two_node_network ) app1.initialize(app_dir.path(), cfg); BOOST_TEST_MESSAGE( "Starting app1 and waiting 500 ms" ); app1.startup(); - fc::usleep(fc::milliseconds(500)); + #ifdef NDEBUG + #define NODE_STARTUP_WAIT_TIME (fc::milliseconds(30000)) + #else + #define NODE_STARTUP_WAIT_TIME (fc::milliseconds(120000)) + #endif + fc::wait_for( NODE_STARTUP_WAIT_TIME, [&app1] () { + const auto status = app1.p2p_node()->network_get_info(); + return status["listening_on"].as( 5 ).port() == 3939; + }); BOOST_TEST_MESSAGE( "Creating and initializing app2" ); @@ -245,17 +264,16 @@ BOOST_AUTO_TEST_CASE( two_node_network ) app2.register_plugin< graphene::witness_plugin::witness_plugin >(); app2.register_plugin< graphene::grouped_orders::grouped_orders_plugin>(); app2.startup_plugins(); - auto cfg2 = cfg; - cfg2.erase("p2p-endpoint"); + boost::program_options::variables_map cfg2; cfg2.emplace("p2p-endpoint", boost::program_options::variable_value(string("127.0.0.1:4040"), false)); cfg2.emplace("genesis-json", boost::program_options::variable_value(create_genesis_file(app_dir), false)); - cfg2.emplace("seed-node", boost::program_options::variable_value(vector{"127.0.0.1:3939"}, false)); - cfg2.emplace("seed-nodes", boost::program_options::variable_value(string("[]"), false)); + cfg2.emplace("seed-nodes", boost::program_options::variable_value(string("[\"127.0.0.1:3939\"]"), false)); app2.initialize(app2_dir.path(), cfg2); - BOOST_TEST_MESSAGE( "Starting app2 and waiting 500 ms" ); + BOOST_TEST_MESSAGE( "Starting app2 and waiting for connection" ); app2.startup(); - 
fc::usleep(fc::milliseconds(500)); + + fc::wait_for( NODE_STARTUP_WAIT_TIME, [&app1] () { return app1.p2p_node()->get_connection_count() > 0; } ); BOOST_REQUIRE_EQUAL(app1.p2p_node()->get_connection_count(), 1u); BOOST_CHECK_EQUAL(std::string(app1.p2p_node()->get_connected_peers().front().host.get_address()), "127.0.0.1"); @@ -303,7 +321,14 @@ BOOST_AUTO_TEST_CASE( two_node_network ) BOOST_TEST_MESSAGE( "Broadcasting tx" ); app1.p2p_node()->broadcast(graphene::net::trx_message(trx)); - fc::usleep(fc::milliseconds(500)); + #ifdef NDEBUG + #define BROADCAST_WAIT_TIME (fc::milliseconds(15000)) + #else + #define BROADCAST_WAIT_TIME (fc::milliseconds(60000)) + #endif + fc::wait_for( BROADCAST_WAIT_TIME, [db2] () { + return db2->get_balance( GRAPHENE_NULL_ACCOUNT, asset_id_type() ).amount.value == 1000000; + }); BOOST_CHECK_EQUAL( db1->get_balance( GRAPHENE_NULL_ACCOUNT, asset_id_type() ).amount.value, 1000000 ); BOOST_CHECK_EQUAL( db2->get_balance( GRAPHENE_NULL_ACCOUNT, asset_id_type() ).amount.value, 1000000 ); @@ -317,15 +342,25 @@ BOOST_AUTO_TEST_CASE( two_node_network ) committee_key, database::skip_nothing); + BOOST_CHECK_EQUAL( db1->head_block_num(), 0u ); + BOOST_CHECK_EQUAL( db2->head_block_num(), 1u ); + BOOST_CHECK_EQUAL( block_1.block_num(), 1u ); + BOOST_TEST_MESSAGE( "Broadcasting block" ); app2.p2p_node()->broadcast(graphene::net::block_message( block_1 )); - fc::usleep(fc::milliseconds(500)); + fc::wait_for( BROADCAST_WAIT_TIME, [db1] () { + return db1->head_block_num() == 1; + }); + BOOST_TEST_MESSAGE( "Verifying nodes are still connected" ); BOOST_CHECK_EQUAL(app1.p2p_node()->get_connection_count(), 1u); BOOST_CHECK_EQUAL(app1.chain_database()->head_block_num(), 1u); BOOST_TEST_MESSAGE( "Checking GRAPHENE_NULL_ACCOUNT has balance" ); + BOOST_CHECK_EQUAL( db1->get_balance( GRAPHENE_NULL_ACCOUNT, asset_id_type() ).amount.value, 1000000 ); + BOOST_CHECK_EQUAL( db2->get_balance( GRAPHENE_NULL_ACCOUNT, asset_id_type() ).amount.value, 1000000 ); + } catch( 
fc::exception& e ) { edump((e.to_detail_string())); throw; diff --git a/tests/benchmarks/main.cpp b/tests/benchmarks/main.cpp deleted file mode 100644 index 85e21ee02e..0000000000 --- a/tests/benchmarks/main.cpp +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) 2015 Cryptonomex, Inc., and contributors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -#define BOOST_TEST_MODULE "C++ Benchmarks for BitShares Blockchain Database" -#include - diff --git a/tests/cli/main.cpp b/tests/cli/main.cpp index 0615f8dff6..61f3a516d1 100644 --- a/tests/cli/main.cpp +++ b/tests/cli/main.cpp @@ -150,7 +150,6 @@ std::shared_ptr start_application(fc::temp_directory app1->startup_plugins(); app1->startup(); - fc::usleep(fc::milliseconds(500)); return app1; } @@ -299,16 +298,6 @@ class client_connection struct cli_fixture { - class dummy - { - public: - ~dummy() - { - // wait for everything to finish up - fc::usleep(fc::milliseconds(500)); - } - }; - dummy dmy; int server_port_number; fc::temp_directory app_dir; std::shared_ptr app1; @@ -1001,17 +990,12 @@ BOOST_AUTO_TEST_CASE( cli_multisig_transaction ) } } - // wait for everything to finish up - fc::usleep(fc::seconds(1)); } catch( fc::exception& e ) { edump((e.to_detail_string())); throw; } app1->shutdown(); app1.reset(); - // Intentional delay after app1->shutdown - std::cout << "cli_multisig_transaction conclusion: Intentional delay" << std::endl; - fc::usleep(fc::seconds(1)); } graphene::wallet::plain_keys decrypt_keys( const std::string& password, const vector& cipher_keys ) @@ -1041,6 +1025,7 @@ BOOST_AUTO_TEST_CASE( saving_keys_wallet_test ) { BOOST_CHECK( pk.keys.size() == 1 ); // nathan key BOOST_CHECK( generate_block( cli.app1 ) ); + // Intentional delay fc::usleep( fc::seconds(1) ); wallet = fc::json::from_file( path ).as( 2 * GRAPHENE_MAX_NESTED_OBJECTS ); @@ -1225,17 +1210,12 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc ) BOOST_CHECK(generate_block(app1)); } - // wait for everything to finish up - fc::usleep(fc::seconds(1)); } catch( fc::exception& e ) { edump((e.to_detail_string())); throw; } app1->shutdown(); app1.reset(); - // Intentional delay after app1->shutdown - std::cout << "cli_create_htlc conclusion: Intentional delay" << std::endl; - fc::usleep(fc::seconds(1)); } static string encapsulate( const graphene::wallet::signed_message& msg ) @@ -1807,15 
+1787,10 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc_bsip64 ) } con.wallet_api_ptr->unlock("supersecret"); - // wait for everything to finish up - fc::usleep(fc::seconds(1)); } catch( fc::exception& e ) { edump((e.to_detail_string())); throw; } app1->shutdown(); app1.reset(); - // Intentional delay after app1->shutdown - std::cout << "cli_create_htlc conclusion: Intentional delay" << std::endl; - fc::usleep(fc::seconds(1)); } diff --git a/tests/common/database_fixture.cpp b/tests/common/database_fixture.cpp index 23f490b310..fb3367ee4b 100644 --- a/tests/common/database_fixture.cpp +++ b/tests/common/database_fixture.cpp @@ -576,6 +576,11 @@ void database_fixture::verify_asset_supplies( const database& db ) } total_balances[ to.amount.asset_id ] += to.amount.amount; } + for( const liquidity_pool_object& o : db.get_index_type().indices() ) + { + total_balances[o.asset_a] += o.balance_a; + total_balances[o.asset_b] += o.balance_b; + } total_balances[asset_id_type()] += db.get_dynamic_global_properties().witness_budget; @@ -1388,6 +1393,155 @@ generic_operation_result database_fixture::update_ticket( const ticket_object& t return op_result.get(); } +liquidity_pool_create_operation database_fixture::make_liquidity_pool_create_op( + account_id_type account, asset_id_type asset_a, + asset_id_type asset_b, asset_id_type share_asset, + uint16_t taker_fee_percent, uint16_t withdrawal_fee_percent )const +{ + liquidity_pool_create_operation op; + op.account = account; + op.asset_a = asset_a; + op.asset_b = asset_b; + op.share_asset = share_asset; + op.taker_fee_percent = taker_fee_percent; + op.withdrawal_fee_percent = withdrawal_fee_percent; + return op; +} + +const liquidity_pool_object& database_fixture::create_liquidity_pool( account_id_type account, asset_id_type asset_a, + asset_id_type asset_b, asset_id_type share_asset, + uint16_t taker_fee_percent, uint16_t withdrawal_fee_percent ) +{ + liquidity_pool_create_operation op = make_liquidity_pool_create_op( account, 
asset_a, asset_b, share_asset, + taker_fee_percent, withdrawal_fee_percent ); + trx.operations.clear(); + trx.operations.push_back( op ); + + for( auto& o : trx.operations ) db.current_fee_schedule().set_fee(o); + trx.validate(); + set_expiration( db, trx ); + processed_transaction ptx = PUSH_TX(db, trx, ~0); + const operation_result& op_result = ptx.operation_results.front(); + trx.operations.clear(); + verify_asset_supplies(db); + return db.get( *op_result.get().new_objects.begin() ); +} + +liquidity_pool_delete_operation database_fixture::make_liquidity_pool_delete_op( account_id_type account, + liquidity_pool_id_type pool )const +{ + liquidity_pool_delete_operation op; + op.account = account; + op.pool = pool; + return op; +} + +generic_operation_result database_fixture::delete_liquidity_pool( account_id_type account, + liquidity_pool_id_type pool ) +{ + liquidity_pool_delete_operation op = make_liquidity_pool_delete_op( account, pool ); + trx.operations.clear(); + trx.operations.push_back( op ); + + for( auto& o : trx.operations ) db.current_fee_schedule().set_fee(o); + trx.validate(); + set_expiration( db, trx ); + processed_transaction ptx = PUSH_TX(db, trx, ~0); + const operation_result& op_result = ptx.operation_results.front(); + trx.operations.clear(); + verify_asset_supplies(db); + return op_result.get(); +} + +liquidity_pool_deposit_operation database_fixture::make_liquidity_pool_deposit_op( account_id_type account, + liquidity_pool_id_type pool, const asset& amount_a, + const asset& amount_b )const +{ + liquidity_pool_deposit_operation op; + op.account = account; + op.pool = pool; + op.amount_a = amount_a; + op.amount_b = amount_b; + return op; +} + +generic_exchange_operation_result database_fixture::deposit_to_liquidity_pool( account_id_type account, + liquidity_pool_id_type pool, const asset& amount_a, + const asset& amount_b ) +{ + liquidity_pool_deposit_operation op = make_liquidity_pool_deposit_op( account, pool, amount_a, amount_b ); + 
trx.operations.clear(); + trx.operations.push_back( op ); + + for( auto& o : trx.operations ) db.current_fee_schedule().set_fee(o); + trx.validate(); + set_expiration( db, trx ); + processed_transaction ptx = PUSH_TX(db, trx, ~0); + const operation_result& op_result = ptx.operation_results.front(); + trx.operations.clear(); + verify_asset_supplies(db); + return op_result.get(); +} + +liquidity_pool_withdraw_operation database_fixture::make_liquidity_pool_withdraw_op( account_id_type account, + liquidity_pool_id_type pool, const asset& share_amount )const +{ + liquidity_pool_withdraw_operation op; + op.account = account; + op.pool = pool; + op.share_amount = share_amount; + return op; +} + +generic_exchange_operation_result database_fixture::withdraw_from_liquidity_pool( account_id_type account, + liquidity_pool_id_type pool, const asset& share_amount ) +{ + liquidity_pool_withdraw_operation op = make_liquidity_pool_withdraw_op( account, pool, share_amount ); + trx.operations.clear(); + trx.operations.push_back( op ); + + for( auto& o : trx.operations ) db.current_fee_schedule().set_fee(o); + trx.validate(); + set_expiration( db, trx ); + processed_transaction ptx = PUSH_TX(db, trx, ~0); + const operation_result& op_result = ptx.operation_results.front(); + trx.operations.clear(); + verify_asset_supplies(db); + return op_result.get(); +} + +liquidity_pool_exchange_operation database_fixture::make_liquidity_pool_exchange_op( account_id_type account, + liquidity_pool_id_type pool, const asset& amount_to_sell, + const asset& min_to_receive )const +{ + liquidity_pool_exchange_operation op; + op.account = account; + op.pool = pool; + op.amount_to_sell = amount_to_sell; + op.min_to_receive = min_to_receive; + return op; +} + +generic_exchange_operation_result database_fixture::exchange_with_liquidity_pool( account_id_type account, + liquidity_pool_id_type pool, const asset& amount_to_sell, + const asset& min_to_receive ) +{ + liquidity_pool_exchange_operation op = 
make_liquidity_pool_exchange_op( account, pool, amount_to_sell, + min_to_receive ); + trx.operations.clear(); + trx.operations.push_back( op ); + + for( auto& o : trx.operations ) db.current_fee_schedule().set_fee(o); + trx.validate(); + set_expiration( db, trx ); + processed_transaction ptx = PUSH_TX(db, trx, ~0); + const operation_result& op_result = ptx.operation_results.front(); + trx.operations.clear(); + verify_asset_supplies(db); + return op_result.get(); +} + + void database_fixture::enable_fees() { db.modify(global_property_id_type()(db), [](global_property_object& gpo) diff --git a/tests/common/database_fixture.hpp b/tests/common/database_fixture.hpp index fc57a30077..b4be97852e 100644 --- a/tests/common/database_fixture.hpp +++ b/tests/common/database_fixture.hpp @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -407,6 +408,33 @@ struct database_fixture { const optional& amount )const; generic_operation_result update_ticket( const ticket_object& ticket, ticket_type type, const optional& amount ); + + liquidity_pool_create_operation make_liquidity_pool_create_op( account_id_type account, asset_id_type asset_a, + asset_id_type asset_b, asset_id_type share_asset, + uint16_t taker_fee_percent, uint16_t withdrawal_fee_percent )const; + const liquidity_pool_object& create_liquidity_pool( account_id_type account, asset_id_type asset_a, + asset_id_type asset_b, asset_id_type share_asset, + uint16_t taker_fee_percent, uint16_t withdrawal_fee_percent ); + liquidity_pool_delete_operation make_liquidity_pool_delete_op( account_id_type account, + liquidity_pool_id_type pool )const; + generic_operation_result delete_liquidity_pool( account_id_type account, liquidity_pool_id_type pool ); + liquidity_pool_deposit_operation make_liquidity_pool_deposit_op( account_id_type account, + liquidity_pool_id_type pool, const asset& amount_a, + const asset& amount_b )const; + generic_exchange_operation_result deposit_to_liquidity_pool( account_id_type 
account, + liquidity_pool_id_type pool, const asset& amount_a, + const asset& amount_b ); + liquidity_pool_withdraw_operation make_liquidity_pool_withdraw_op( account_id_type account, + liquidity_pool_id_type pool, const asset& share_amount )const; + generic_exchange_operation_result withdraw_from_liquidity_pool( account_id_type account, + liquidity_pool_id_type pool, const asset& share_amount ); + liquidity_pool_exchange_operation make_liquidity_pool_exchange_op( account_id_type account, + liquidity_pool_id_type pool, const asset& amount_to_sell, + const asset& min_to_receive )const; + generic_exchange_operation_result exchange_with_liquidity_pool( account_id_type account, + liquidity_pool_id_type pool, const asset& amount_to_sell, + const asset& min_to_receive ); + /** * NOTE: This modifies the database directly. You will probably have to call this each time you * finish creating a block diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index 24eb9382e8..5e3b0458f2 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -34,6 +34,12 @@ #define BOOST_TEST_MODULE Elastic Search Database Tests #include +#ifdef NDEBUG + #define ES_WAIT_TIME (fc::milliseconds(1000)) +#else + #define ES_WAIT_TIME (fc::milliseconds(3000)) +#endif + using namespace graphene::chain; using namespace graphene::chain::test; using namespace graphene::app; @@ -54,7 +60,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { // delete all first auto delete_account_history = graphene::utilities::deleteAll(es); - fc::usleep(fc::milliseconds(1000)); // this is because index.refresh_interval, nothing to worry + fc::usleep(ES_WAIT_TIME); // this is because index.refresh_interval, nothing to worry if(delete_account_history) { // all records deleted @@ -64,7 +70,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { auto bob = create_account("bob"); generate_block(); - fc::usleep(fc::milliseconds(1000)); + fc::usleep(ES_WAIT_TIME); // for later use 
//int asset_create_op_id = operation::tag::value; @@ -83,13 +89,13 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { res = graphene::utilities::simpleQuery(es); j = fc::json::from_string(res); auto first_id = j["hits"]["hits"][size_t(0)]["_id"].as_string(); - BOOST_CHECK_EQUAL(first_id, "2.9.1"); // this should be 0? are they inserted in the right order? + BOOST_CHECK_EQUAL(first_id, "2.9.0"); generate_block(); auto willie = create_account("willie"); generate_block(); - fc::usleep(fc::milliseconds(1000)); // index.refresh_interval + fc::usleep(ES_WAIT_TIME); // index.refresh_interval es.endpoint = es.index_prefix + "*/data/_count"; res = graphene::utilities::simpleQuery(es); @@ -104,7 +110,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { transfer(account_id_type()(db), bob, asset(300)); generate_block(); - fc::usleep(fc::milliseconds(1000)); // index.refresh_interval + fc::usleep(ES_WAIT_TIME); // index.refresh_interval res = graphene::utilities::simpleQuery(es); j = fc::json::from_string(res); @@ -145,14 +151,14 @@ BOOST_AUTO_TEST_CASE(elasticsearch_objects) { auto delete_objects = graphene::utilities::deleteAll(es); generate_block(); - fc::usleep(fc::milliseconds(1000)); + fc::usleep(ES_WAIT_TIME); if(delete_objects) { // all records deleted // asset and bitasset create_bitasset("USD", account_id_type()); generate_block(); - fc::usleep(fc::milliseconds(1000)); + fc::usleep(ES_WAIT_TIME); string query = "{ \"query\" : { \"bool\" : { \"must\" : [{\"match_all\": {}}] } } }"; es.endpoint = es.index_prefix + "*/data/_count"; @@ -195,10 +201,10 @@ BOOST_AUTO_TEST_CASE(elasticsearch_suite) { es.elasticsearch_url = "http://localhost:9200/"; es.index_prefix = "bitshares-"; auto delete_account_history = graphene::utilities::deleteAll(es); - fc::usleep(fc::milliseconds(1000)); + fc::usleep(ES_WAIT_TIME); es.index_prefix = "objects-"; auto delete_objects = graphene::utilities::deleteAll(es); - fc::usleep(fc::milliseconds(1000)); + fc::usleep(ES_WAIT_TIME); 
if(delete_account_history && delete_objects) { // all records deleted @@ -224,7 +230,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_history_api) { auto delete_account_history = graphene::utilities::deleteAll(es); generate_block(); - fc::usleep(fc::milliseconds(1000)); + fc::usleep(ES_WAIT_TIME); if(delete_account_history) { @@ -237,7 +243,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_history_api) { create_bitasset("OIL", dan.id); // create op 6 generate_block(); - fc::usleep(fc::milliseconds(1000)); + fc::usleep(ES_WAIT_TIME); graphene::app::history_api hist_api(app); app.enable_plugin("elasticsearch"); @@ -506,7 +512,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_history_api) { create_account("alice"); generate_block(); - fc::usleep(fc::milliseconds(1000)); + fc::usleep(ES_WAIT_TIME); // f(C, 0, 4, 10) = { 7 } histories = hist_api.get_account_history("alice", operation_history_id_type(0), 4, operation_history_id_type(10)); diff --git a/tests/benchmarks/genesis_allocation.cpp b/tests/performance/genesis_allocation.cpp similarity index 57% rename from tests/benchmarks/genesis_allocation.cpp rename to tests/performance/genesis_allocation.cpp index 63e75db568..f3c06fe4a8 100644 --- a/tests/benchmarks/genesis_allocation.cpp +++ b/tests/performance/genesis_allocation.cpp @@ -22,6 +22,7 @@ * THE SOFTWARE. 
*/ #include +#include #include #include @@ -29,6 +30,10 @@ #include +#include + +extern uint32_t GRAPHENE_TESTING_GENESIS_TIMESTAMP; + using namespace graphene::chain; BOOST_AUTO_TEST_CASE( operation_sanity_check ) @@ -47,7 +52,21 @@ BOOST_AUTO_TEST_CASE( operation_sanity_check ) BOOST_AUTO_TEST_CASE( genesis_and_persistence_bench ) { try { + const auto witness_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) ); + const auto witness_pub_key = witness_priv_key.get_public_key(); + genesis_state_type genesis_state; + genesis_state.initial_timestamp = fc::time_point_sec( GRAPHENE_TESTING_GENESIS_TIMESTAMP ); + genesis_state.initial_parameters.get_mutable_fees().zero_all_fees(); + genesis_state.initial_active_witnesses = 10; + genesis_state.initial_chain_id = fc::sha256::hash(string("dummy_id")); + for( unsigned int i = 0; i < genesis_state.initial_active_witnesses; ++i ) + { + auto name = "init"+fc::to_string(i); + genesis_state.initial_accounts.emplace_back(name, witness_pub_key, witness_pub_key, true); + genesis_state.initial_committee_candidates.push_back({name}); + genesis_state.initial_witness_candidates.push_back({name, witness_pub_key}); + } #ifdef NDEBUG ilog("Running in release mode."); @@ -59,18 +78,19 @@ BOOST_AUTO_TEST_CASE( genesis_and_persistence_bench ) const int blocks_to_produce = 1000; #endif + const auto account_pub_key = fc::ecc::private_key::regenerate(fc::digest(account_count)).get_public_key(); for( int i = 0; i < account_count; ++i ) genesis_state.initial_accounts.emplace_back("target"+fc::to_string(i), - public_key_type(fc::ecc::private_key::regenerate(fc::digest(i)).get_public_key())); + public_key_type(account_pub_key)); fc::temp_directory data_dir( graphene::utilities::temp_directory_path() ); { database db; - db.open(data_dir.path(), [&]{return genesis_state;}, "test"); + db.open(data_dir.path(), [&genesis_state]{return genesis_state;}, "test"); for( int i = 11; i < account_count + 11; ++i) - 
BOOST_CHECK(db.get_balance(account_id_type(i), asset_id_type()).amount == GRAPHENE_MAX_SHARE_SUPPLY / account_count); + BOOST_CHECK(db.get_balance(account_id_type(i), asset_id_type()).amount == 0); fc::time_point start_time = fc::time_point::now(); db.close(); @@ -80,46 +100,59 @@ BOOST_AUTO_TEST_CASE( genesis_and_persistence_bench ) database db; fc::time_point start_time = fc::time_point::now(); - db.open(data_dir.path(), [&]{return genesis_state;}, "test"); + db.open(data_dir.path(), [&genesis_state]{return genesis_state;}, "test"); ilog("Opened database in ${t} milliseconds.", ("t", (fc::time_point::now() - start_time).count() / 1000)); for( int i = 11; i < account_count + 11; ++i) - BOOST_CHECK(db.get_balance(account_id_type(i), asset_id_type()).amount == GRAPHENE_MAX_SHARE_SUPPLY / account_count); + BOOST_CHECK(db.get_balance(account_id_type(i), asset_id_type()).amount == 0); int blocks_out = 0; - auto witness_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) ); - auto aw = db.get_global_properties().active_witnesses; - auto b = db.generate_block( db.get_slot_time( 1 ), db.get_scheduled_witness( 1 ), witness_priv_key, ~0 ); + db.generate_block( db.get_slot_time( 1 ), db.get_scheduled_witness( 1 ), witness_priv_key, ~0 ); start_time = fc::time_point::now(); - /* TODO: get this buliding again + transfer_operation top; + top.amount = asset(1); + top.from = account_id_type(); for( int i = 0; i < blocks_to_produce; ++i ) { signed_transaction trx; - trx.operations.emplace_back(transfer_operation(asset(1), account_id_type(i + 11), account_id_type(), asset(1), memo_data())); - db.push_transaction(trx, ~0); - - aw = db.get_global_properties().active_witnesses; - b = db.generate_block( db.get_slot_time( 1 ), db.get_scheduled_witness( 1 ), witness_priv_key, ~0 ); + test::set_expiration( db, trx ); + top.to = account_id_type(i + 11); + trx.operations.push_back( top ); + db.push_transaction(trx, 
~graphene::chain::database::skip_transaction_dupe_check); + db.generate_block( db.get_slot_time( 1 ), db.get_scheduled_witness( 1 ), witness_priv_key, + ~graphene::chain::database::skip_transaction_dupe_check ); } - */ ilog("Pushed ${c} blocks (1 op each, no validation) in ${t} milliseconds.", ("c", blocks_out)("t", (fc::time_point::now() - start_time).count() / 1000)); + for( int i = 0; i < blocks_to_produce; ++i ) + BOOST_CHECK_EQUAL( 1, db.get_balance(account_id_type(i + 11), asset_id_type()).amount.value ); + start_time = fc::time_point::now(); db.close(); ilog("Closed database in ${t} milliseconds.", ("t", (fc::time_point::now() - start_time).count() / 1000)); } { database db; + const auto skip = graphene::chain::database::skip_witness_signature | + graphene::chain::database::skip_block_size_check | + graphene::chain::database::skip_merkle_check | + graphene::chain::database::skip_transaction_signatures | + graphene::chain::database::skip_transaction_dupe_check | + graphene::chain::database::skip_tapos_check | + graphene::chain::database::skip_witness_schedule_check; auto start_time = fc::time_point::now(); wlog( "about to start reindex..." 
); - db.open(data_dir.path(), [&]{return genesis_state;}, "force_wipe"); + graphene::chain::detail::with_skip_flags( db, skip, [&data_dir,&db,&genesis_state] () { + db.open(data_dir.path(), [&genesis_state]{return genesis_state;}, "force_wipe"); + }); + ilog("Replayed database in ${t} milliseconds.", ("t", (fc::time_point::now() - start_time).count() / 1000)); for( int i = 0; i < blocks_to_produce; ++i ) - BOOST_CHECK(db.get_balance(account_id_type(i + 11), asset_id_type()).amount == GRAPHENE_MAX_SHARE_SUPPLY / account_count - 2); + BOOST_CHECK( db.get_balance(account_id_type(i + 11), asset_id_type()).amount == 1 ); } } catch(fc::exception& e) { diff --git a/tests/tests/liquidity_pool_tests.cpp b/tests/tests/liquidity_pool_tests.cpp new file mode 100644 index 0000000000..1fab599a12 --- /dev/null +++ b/tests/tests/liquidity_pool_tests.cpp @@ -0,0 +1,901 @@ +/* + * Copyright (c) 2020 Abit More, and contributors. + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "../common/database_fixture.hpp" + +#include +#include +#include + +#include + +using namespace graphene::chain; +using namespace graphene::chain::test; + +BOOST_FIXTURE_TEST_SUITE( liquidity_pool_tests, database_fixture ) + +BOOST_AUTO_TEST_CASE( hardfork_time_test ) +{ + try { + + // Proceeds to a recent hard fork + generate_blocks( HARDFORK_BSIP_86_TIME ); + generate_block(); + set_expiration( db, trx ); + + ACTORS((sam)); + + auto init_amount = 10000000 * GRAPHENE_BLOCKCHAIN_PRECISION; + fund( sam, asset(init_amount) ); + + const asset_object& core = asset_id_type()(db); + const asset_object& usd = create_user_issued_asset( "MYUSD" ); + const asset_object& lpa = create_user_issued_asset( "LPATEST", sam, charge_market_fee ); + + // Before the hard fork, unable to create a liquidity pool or transact against a liquidity pool, + // or do any of them with proposals + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, lpa.id, 0, 0 ), fc::exception ); + + liquidity_pool_id_type tmp_lp_id; + BOOST_CHECK_THROW( delete_liquidity_pool( sam_id, tmp_lp_id ), fc::exception ); + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, tmp_lp_id, core.amount(100), usd.amount(100) ), + fc::exception ); + BOOST_CHECK_THROW( withdraw_from_liquidity_pool( sam_id, tmp_lp_id, lpa.amount(100) ), + fc::exception ); + BOOST_CHECK_THROW( exchange_with_liquidity_pool( sam_id, tmp_lp_id, core.amount(100), usd.amount(100) ), + fc::exception ); + + liquidity_pool_create_operation cop = + make_liquidity_pool_create_op( sam_id, core.id, usd.id, lpa.id, 0, 0 ); + BOOST_CHECK_THROW( propose( cop ), fc::exception ); + + liquidity_pool_delete_operation delop = make_liquidity_pool_delete_op( 
sam_id, tmp_lp_id ); + BOOST_CHECK_THROW( propose( delop ), fc::exception ); + + liquidity_pool_deposit_operation depop = + make_liquidity_pool_deposit_op( sam_id, tmp_lp_id, core.amount(100), usd.amount(100) ); + BOOST_CHECK_THROW( propose( delop ), fc::exception ); + + liquidity_pool_withdraw_operation wop = + make_liquidity_pool_withdraw_op( sam_id, tmp_lp_id, lpa.amount(100) ); + BOOST_CHECK_THROW( propose( wop ), fc::exception ); + + liquidity_pool_exchange_operation exop = + make_liquidity_pool_exchange_op( sam_id, tmp_lp_id, core.amount(100), usd.amount(100) ); + BOOST_CHECK_THROW( propose( exop ), fc::exception ); + + } catch (fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE( create_delete_proposal_test ) +{ try { + + // Pass the hard fork time + generate_blocks( HARDFORK_LIQUIDITY_POOL_TIME ); + set_expiration( db, trx ); + + ACTORS((sam)(ted)); + + auto init_amount = 10000000 * GRAPHENE_BLOCKCHAIN_PRECISION; + fund( sam, asset(init_amount) ); + fund( ted, asset(init_amount) ); + + const asset_object& core = asset_id_type()(db); + + const asset_object& usd = create_user_issued_asset( "MYUSD" ); + issue_uia( sam, usd.amount(init_amount) ); + issue_uia( ted, usd.amount(init_amount) ); + + const asset_object& lpa = create_user_issued_asset( "LPATEST", sam, charge_market_fee ); + const asset_object& lpa1 = create_user_issued_asset( "LPATESTA", sam, charge_market_fee ); + const asset_object& lpa2 = create_user_issued_asset( "LPATESTB", sam, charge_market_fee ); + const asset_object& lpa3 = create_user_issued_asset( "LPATESTC", sam, charge_market_fee ); + const asset_object& ted_lpa = create_user_issued_asset( "LPATED", ted, charge_market_fee ); + + const asset_object& mpa = create_bitasset( "MPATEST", sam_id ); + const asset_object& pm = create_prediction_market( "PMTEST", sam_id ); + + BOOST_CHECK( !lpa1.is_liquidity_pool_share_asset() ); + + asset_id_type no_asset_id1( pm.id + 100 ); + asset_id_type no_asset_id2( 
pm.id + 200 ); + BOOST_REQUIRE( !db.find( no_asset_id1 ) ); + BOOST_REQUIRE( !db.find( no_asset_id2 ) ); + + // Able to propose + { + liquidity_pool_create_operation cop = + make_liquidity_pool_create_op( sam_id, core.id, usd.id, lpa.id, 0, 0 ); + propose( cop ); + + liquidity_pool_id_type tmp_lp_id; + + liquidity_pool_delete_operation delop = make_liquidity_pool_delete_op( sam_id, tmp_lp_id ); + propose( delop ); + + liquidity_pool_deposit_operation depop = + make_liquidity_pool_deposit_op( sam_id, tmp_lp_id, core.amount(100), usd.amount(100) ); + propose( depop ); + + liquidity_pool_withdraw_operation wop = + make_liquidity_pool_withdraw_op( sam_id, tmp_lp_id, lpa.amount(100) ); + propose( wop ); + + liquidity_pool_exchange_operation exop = + make_liquidity_pool_exchange_op( sam_id, tmp_lp_id, core.amount(100), usd.amount(100) ); + propose( exop ); + } + + // Able to create liquidity pools with valid data + const liquidity_pool_object& lpo1 = create_liquidity_pool( sam_id, core.id, usd.id, lpa1.id, 0, 0 ); + BOOST_CHECK( lpo1.asset_a == core.id ); + BOOST_CHECK( lpo1.asset_b == usd.id ); + BOOST_CHECK( lpo1.balance_a == 0 ); + BOOST_CHECK( lpo1.balance_b == 0 ); + BOOST_CHECK( lpo1.share_asset == lpa1.id ); + BOOST_CHECK( lpo1.taker_fee_percent == 0 ); + BOOST_CHECK( lpo1.withdrawal_fee_percent == 0 ); + BOOST_CHECK( lpo1.virtual_value == 0 ); + + liquidity_pool_id_type lp_id1 = lpo1.id; + BOOST_CHECK( lpa1.is_liquidity_pool_share_asset() ); + BOOST_CHECK( *lpa1.for_liquidity_pool == lp_id1 ); + + const liquidity_pool_object& lpo2 = create_liquidity_pool( sam_id, core.id, usd.id, lpa2.id, 200, 300 ); + BOOST_CHECK( lpo2.asset_a == core.id ); + BOOST_CHECK( lpo2.asset_b == usd.id ); + BOOST_CHECK( lpo2.balance_a == 0 ); + BOOST_CHECK( lpo2.balance_b == 0 ); + BOOST_CHECK( lpo2.share_asset == lpa2.id ); + BOOST_CHECK( lpo2.taker_fee_percent == 200 ); + BOOST_CHECK( lpo2.withdrawal_fee_percent == 300 ); + BOOST_CHECK( lpo2.virtual_value == 0 ); + + 
liquidity_pool_id_type lp_id2 = lpo2.id; + BOOST_CHECK( lpa2.is_liquidity_pool_share_asset() ); + BOOST_CHECK( *lpa2.for_liquidity_pool == lp_id2 ); + + const liquidity_pool_object& lpo3 = create_liquidity_pool( sam_id, usd.id, mpa.id, lpa3.id, 50, 50 ); + + BOOST_CHECK( lpo3.asset_a == usd.id ); + BOOST_CHECK( lpo3.asset_b == mpa.id ); + BOOST_CHECK( lpo3.balance_a == 0 ); + BOOST_CHECK( lpo3.balance_b == 0 ); + BOOST_CHECK( lpo3.share_asset == lpa3.id ); + BOOST_CHECK( lpo3.taker_fee_percent == 50 ); + BOOST_CHECK( lpo3.withdrawal_fee_percent == 50 ); + BOOST_CHECK( lpo3.virtual_value == 0 ); + + liquidity_pool_id_type lp_id3 = lpo3.id; + BOOST_CHECK( lpa3.is_liquidity_pool_share_asset() ); + BOOST_CHECK( *lpa3.for_liquidity_pool == lp_id3 ); + + // Unable to create a liquidity pool with invalid data + // the same assets in pool + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, core.id, lpa.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, usd.id, usd.id, lpa.id, 0, 0 ), fc::exception ); + // ID of the first asset is greater + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, usd.id, core.id, lpa.id, 0, 0 ), fc::exception ); + // the share asset is one of the assets in pool + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, usd.id, lpa.id, lpa.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, lpa.id, pm.id, lpa.id, 0, 0 ), fc::exception ); + // percentage too big + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, lpa.id, 10001, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, lpa.id, 0, 10001 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, lpa.id, 10001, 10001 ), fc::exception ); + // asset does not exist + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, no_asset_id1, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, no_asset_id1, 
lpa.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, no_asset_id1, no_asset_id2, lpa.id, 0, 0 ), fc::exception ); + // the account does not own the share asset + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, ted_lpa.id, 0, 0 ), fc::exception ); + // the share asset is a MPA or a PM + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, mpa.id, 0, 0 ), fc::exception ); + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, pm.id, 0, 0 ), fc::exception ); + // the share asset is already bound to a liquidity pool + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, usd.id, lpa1.id, 0, 0 ), fc::exception ); + // current supply of the share asset is not zero + BOOST_CHECK_THROW( create_liquidity_pool( sam_id, core.id, lpa.id, usd.id, 0, 0 ), fc::exception ); + + // Unable to issue a liquidity pool share asset + BOOST_CHECK_THROW( issue_uia( sam, lpa1.amount(1) ), fc::exception ); + + // Sam is able to delete an empty pool owned by him + generic_operation_result result = delete_liquidity_pool( sam_id, lpo1.id ); + BOOST_CHECK( !db.find( lp_id1 ) ); + BOOST_CHECK( !lpa1.is_liquidity_pool_share_asset() ); + BOOST_CHECK_EQUAL( result.new_objects.size(), 0u ); + BOOST_REQUIRE_EQUAL( result.updated_objects.size(), 1u ); + BOOST_CHECK( *result.updated_objects.begin() == lpa1.id ); + BOOST_REQUIRE_EQUAL( result.removed_objects.size(), 1u ); + BOOST_CHECK( *result.removed_objects.begin() == lp_id1 ); + + // Other pools are still there + BOOST_CHECK( db.find( lp_id2 ) ); + BOOST_CHECK( db.find( lp_id3 ) ); + + // Ted is not able to delete a pool that does not exist + BOOST_CHECK_THROW( delete_liquidity_pool( ted_id, lp_id1 ), fc::exception ); + // Ted is not able to delete a pool owned by sam + BOOST_CHECK_THROW( delete_liquidity_pool( ted_id, lp_id2 ), fc::exception ); + + // the asset is now a simple asset, able to issue + issue_uia( sam, lpa1.amount(1) ); + + generate_block(); + + } catch 
(fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE( deposit_withdrawal_test ) +{ try { + + // Pass the hard fork time + generate_blocks( HARDFORK_LIQUIDITY_POOL_TIME ); + set_expiration( db, trx ); + + ACTORS((sam)(ted)); + + additional_asset_options_t eur_options, usd_options; + eur_options.value.taker_fee_percent = 50; // 0.5% taker fee + usd_options.value.taker_fee_percent = 80; // 0.8% taker fee + + const asset_object& eur = create_user_issued_asset( "MYEUR", sam, charge_market_fee, + price(asset(1, asset_id_type(1)), asset(1)), + 4, 20, eur_options ); // 0.2% maker fee + const asset_object& usd = create_user_issued_asset( "MYUSD", ted, charge_market_fee, + price(asset(1, asset_id_type(1)), asset(1)), + 4, 30, usd_options ); // 0.3% maker fee + const asset_object& lpa = create_user_issued_asset( "LPATEST", sam, charge_market_fee ); + + asset_id_type core_id = asset_id_type(); + asset_id_type eur_id = eur.id; + asset_id_type usd_id = usd.id; + asset_id_type lpa_id = lpa.id; + + int64_t init_amount = 10000000 * GRAPHENE_BLOCKCHAIN_PRECISION; + fund( sam, asset(init_amount) ); + fund( ted, asset(init_amount) ); + issue_uia( sam, eur.amount(init_amount) ); + issue_uia( ted, eur.amount(init_amount) ); + issue_uia( sam, usd.amount(init_amount) ); + issue_uia( ted, usd.amount(init_amount) ); + + int64_t expected_balance_sam_eur = init_amount; + int64_t expected_balance_sam_usd = init_amount; + int64_t expected_balance_sam_lpa = 0; + int64_t expected_balance_ted_eur = init_amount; + int64_t expected_balance_ted_usd = init_amount; + int64_t expected_balance_ted_lpa = 0; + + const auto& check_balances = [&]() { + BOOST_CHECK_EQUAL( db.get_balance( sam_id, eur_id ).amount.value, expected_balance_sam_eur ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, usd_id ).amount.value, expected_balance_sam_usd ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, lpa_id ).amount.value, expected_balance_sam_lpa ); + BOOST_CHECK_EQUAL( 
db.get_balance( ted_id, eur_id ).amount.value, expected_balance_ted_eur ); + BOOST_CHECK_EQUAL( db.get_balance( ted_id, usd_id ).amount.value, expected_balance_ted_usd ); + BOOST_CHECK_EQUAL( db.get_balance( ted_id, lpa_id ).amount.value, expected_balance_ted_lpa ); + }; + + check_balances(); + + int64_t expected_pool_balance_a = 0; + int64_t expected_pool_balance_b = 0; + int64_t expected_lp_supply = 0; + + // create a liquidity pool + const liquidity_pool_object& lpo = create_liquidity_pool( sam_id, eur.id, usd.id, lpa.id, 200, 300 ); + liquidity_pool_id_type lp_id = lpo.id; + + BOOST_CHECK( lpo.asset_a == eur_id ); + BOOST_CHECK( lpo.asset_b == usd_id ); + BOOST_CHECK( lpo.share_asset == lpa_id ); + BOOST_CHECK( lpo.taker_fee_percent == 200 ); + BOOST_CHECK( lpo.withdrawal_fee_percent == 300 ); + + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + BOOST_CHECK( lpa.is_liquidity_pool_share_asset() ); + BOOST_CHECK( *lpa.for_liquidity_pool == lp_id ); + + check_balances(); + + // Unable to deposit to a liquidity pool with invalid data + // non-positive amounts + for( int64_t i = -1; i <= 1; ++i ) + { + for( int64_t j = -1; j <= 1; ++j ) + { + if( i > 0 && j > 0 ) + continue; + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id, asset( i, eur_id ), asset( j, usd_id ) ), + fc::exception ); + } + } + // Insufficient balance + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id, + asset( init_amount + 1, eur_id ), asset( 1, usd_id ) ), fc::exception ); + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id, + asset( 1, eur_id ), asset( init_amount + 1, usd_id ) ), fc::exception ); + // asset ID mismatch + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id, asset( 
1, core_id ), asset( 1, usd_id ) ), + fc::exception ); + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id, asset( 1, eur_id ), asset( 1, lpa_id ) ), + fc::exception ); + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id, asset( 1, usd_id ), asset( 1, eur_id ) ), + fc::exception ); + // non-exist pool + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id+1, asset( 1, eur_id ), asset( 1, usd_id ) ), + fc::exception ); + // pool empty but not owner depositting + BOOST_CHECK_THROW( deposit_to_liquidity_pool( ted_id, lp_id, asset( 1, eur_id ), asset( 1, usd_id ) ), + fc::exception ); + + // The owner is able to do the initial deposit + generic_exchange_operation_result result; + result = deposit_to_liquidity_pool( sam_id, lp_id, asset( 1000, eur_id ), asset( 1200, usd_id ) ); + + BOOST_REQUIRE_EQUAL( result.paid.size(), 2u ); + BOOST_CHECK( result.paid.front() == asset( 1000, eur_id ) ); + BOOST_CHECK( result.paid.back() == asset( 1200, usd_id ) ); + BOOST_REQUIRE_EQUAL( result.received.size(), 1u ); + BOOST_CHECK( result.received.front() == asset( 1200, lpa_id ) ); + BOOST_REQUIRE_EQUAL( result.fees.size(), 0u ); + + expected_pool_balance_a = 1000; + expected_pool_balance_b = 1200; + expected_lp_supply = 1200; + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_balance_sam_eur -= 1000; + expected_balance_sam_usd -= 1200; + expected_balance_sam_lpa += 1200; + check_balances(); + + // unable to delete a pool that is not empty + BOOST_CHECK_THROW( delete_liquidity_pool( sam_id, lp_id ), fc::exception ); + + // Sam tries to deposit more + result = deposit_to_liquidity_pool( sam_id, lp_id, asset( 200, eur_id ), asset( 120, usd_id ) ); + + BOOST_REQUIRE_EQUAL( 
result.paid.size(), 2u ); + BOOST_CHECK( result.paid.front() == asset( 100, eur_id ) ); + BOOST_CHECK( result.paid.back() == asset( 120, usd_id ) ); + BOOST_REQUIRE_EQUAL( result.received.size(), 1u ); + BOOST_CHECK( result.received.front() == asset( 120, lpa_id ) ); + BOOST_REQUIRE_EQUAL( result.fees.size(), 0u ); + + expected_pool_balance_a += 100; + expected_pool_balance_b += 120; + expected_lp_supply += 120; + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_balance_sam_eur -= 100; + expected_balance_sam_usd -= 120; + expected_balance_sam_lpa += 120; + check_balances(); + + // Unable to reserve all the supply of the LP token + BOOST_CHECK_THROW( reserve_asset( sam_id, asset( expected_balance_sam_lpa, lpa_id ) ), fc::exception ); + + // Ted deposits + result = deposit_to_liquidity_pool( ted_id, lp_id, asset( 12347, eur_id ), asset( 56890, usd_id ) ); + + int64_t new_lp_supply = 14816; // 1320 * 12347 / 1100, round down + int64_t new_a = 12347; + int64_t new_b = 14816; + + BOOST_REQUIRE_EQUAL( result.paid.size(), 2u ); + BOOST_CHECK( result.paid.front() == asset( new_a, eur_id ) ); + BOOST_CHECK( result.paid.back() == asset( new_b, usd_id ) ); + BOOST_REQUIRE_EQUAL( result.received.size(), 1u ); + BOOST_CHECK( result.received.front() == asset( new_lp_supply, lpa_id ) ); + BOOST_REQUIRE_EQUAL( result.fees.size(), 0u ); + + expected_pool_balance_a += new_a; // 1100 + 12347 = 13447 + expected_pool_balance_b += new_b; // 1320 + 14816 = 16136 + expected_lp_supply += new_lp_supply; // 16136 + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == 
fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_balance_ted_eur -= new_a; + expected_balance_ted_usd -= new_b; + expected_balance_ted_lpa += new_lp_supply; + check_balances(); + + // Unable to withdraw with invalid data + // non-positive amount + BOOST_CHECK_THROW( withdraw_from_liquidity_pool( ted_id, lp_id, asset( -1, lpa_id ) ), + fc::exception ); + BOOST_CHECK_THROW( withdraw_from_liquidity_pool( ted_id, lp_id, asset( 0, lpa_id ) ), + fc::exception ); + // insufficient balance + BOOST_CHECK_THROW( withdraw_from_liquidity_pool( ted_id, lp_id, asset( expected_balance_ted_lpa + 1, lpa_id ) ), + fc::exception ); + // asset ID mismatch + BOOST_CHECK_THROW( withdraw_from_liquidity_pool( ted_id, lp_id, asset( 10, core_id ) ), + fc::exception ); + BOOST_CHECK_THROW( withdraw_from_liquidity_pool( ted_id, lp_id, asset( 10, usd_id ) ), + fc::exception ); + BOOST_CHECK_THROW( withdraw_from_liquidity_pool( ted_id, lp_id, asset( 10, eur_id ) ), + fc::exception ); + // non-exist pool + BOOST_CHECK_THROW( withdraw_from_liquidity_pool( ted_id, lp_id+1, asset( 10, usd_id ) ), + fc::exception ); + + // Ted reserve some LP token + reserve_asset( ted_id, asset( 14810, lpa_id ) ); + + expected_lp_supply -= 14810; // 16136 - 14810 = 1326 + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_balance_ted_lpa -= 14810; // 6 + check_balances(); + + // Ted fails to deposit with too small amounts + BOOST_CHECK_THROW( deposit_to_liquidity_pool( ted_id, lp_id, asset( 8, eur_id ), asset( 8, usd_id ) ), + fc::exception ); + + // Ted deposits again + result = deposit_to_liquidity_pool( ted_id, lp_id, asset( 12347, eur_id ), asset( 56890, usd_id ) ); + + new_lp_supply = 1217; // 1326 * 12347 / 13447, round down + new_a = 12342; // 1217 * 13447 / 1326, round up + new_b = 14810; // 1217 * 16136 / 1326, round up + + BOOST_REQUIRE_EQUAL( 
result.paid.size(), 2u ); + BOOST_CHECK( result.paid.front() == asset( new_a, eur_id ) ); + BOOST_CHECK( result.paid.back() == asset( new_b, usd_id ) ); + BOOST_REQUIRE_EQUAL( result.received.size(), 1u ); + BOOST_CHECK( result.received.front() == asset( new_lp_supply, lpa_id ) ); + BOOST_REQUIRE_EQUAL( result.fees.size(), 0u ); + + expected_pool_balance_a += new_a; // 13447 + 12342 = 25789 + expected_pool_balance_b += new_b; // 16136 + 14810 = 30946 + expected_lp_supply += new_lp_supply; // 1326 + 1217 = 2543 + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_balance_ted_eur -= new_a; + expected_balance_ted_usd -= new_b; + expected_balance_ted_lpa += new_lp_supply; + check_balances(); + + // Ted withdraws some LP token + result = withdraw_from_liquidity_pool( ted_id, lp_id, asset( 7, lpa_id ) ); + + new_lp_supply = -7; + new_a = -68; // - (7 * 25789 / 2543, round down, = 70, deduct withdrawal fee 70 * 3%, round down, = 2) + new_b = -83; // - (7 * 30946 / 2543, round down, = 85, deduct withdrawal fee 85 * 3%, round down, = 2) + + BOOST_REQUIRE_EQUAL( result.paid.size(), 1u ); + BOOST_CHECK( result.paid.front() == asset( -new_lp_supply, lpa_id ) ); + BOOST_REQUIRE_EQUAL( result.received.size(), 2u ); + BOOST_CHECK( result.received.front() == asset( -new_a, eur_id ) ); + BOOST_CHECK( result.received.back() == asset( -new_b, usd_id ) ); + BOOST_REQUIRE_EQUAL( result.fees.size(), 0u ); + + expected_pool_balance_a += new_a; // 25789 - 68 = 25721 + expected_pool_balance_b += new_b; // 30946 - 83 = 30863 + expected_lp_supply += new_lp_supply; // 2543 - 7 = 2536 + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, 
expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_balance_ted_eur -= new_a; + expected_balance_ted_usd -= new_b; + expected_balance_ted_lpa += new_lp_supply; + check_balances(); + + // Ted reserve the rest LP token + reserve_asset( ted_id, asset( expected_balance_ted_lpa, lpa_id ) ); + + expected_lp_supply -= expected_balance_ted_lpa; // 1320 + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_balance_ted_lpa = 0; + check_balances(); + + // Sam withdraws all + result = withdraw_from_liquidity_pool( sam_id, lp_id, asset( 1320, lpa_id ) ); + + new_lp_supply = -1320; + new_a = -25721; + new_b = -30863; + + BOOST_REQUIRE_EQUAL( result.paid.size(), 1u ); + BOOST_CHECK( result.paid.front() == asset( -new_lp_supply, lpa_id ) ); + BOOST_REQUIRE_EQUAL( result.received.size(), 2u ); + BOOST_CHECK( result.received.front() == asset( -new_a, eur_id ) ); + BOOST_CHECK( result.received.back() == asset( -new_b, usd_id ) ); + BOOST_REQUIRE_EQUAL( result.fees.size(), 0u ); + + expected_pool_balance_a = 0; + expected_pool_balance_b = 0; + expected_lp_supply = 0; + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_balance_sam_eur -= new_a; + expected_balance_sam_usd -= new_b; + expected_balance_sam_lpa += new_lp_supply; // 0 + check_balances(); + + // prepare for asset update + asset_update_operation auop; + auop.issuer = sam_id; + auop.asset_to_update = lpa_id; + auop.new_options = lpa_id(db).options; + + // set max supply to a smaller number + auop.new_options.max_supply = 2000; 
+ trx.operations.clear(); + trx.operations.push_back( auop ); + PUSH_TX(db, trx, ~0); + + BOOST_CHECK_EQUAL( lpa_id(db).options.max_supply.value, 2000 ); + + // Unable to do initial deposit if to create more than the max supply + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id, asset( 2001, eur_id ), asset( 100, usd_id ) ), + fc::exception ); + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id, asset( 100, eur_id ), asset( 2001, usd_id ) ), + fc::exception ); + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id, asset( 2001, eur_id ), asset( 2001, usd_id ) ), + fc::exception ); + + // Able to deposit less + result = deposit_to_liquidity_pool( sam_id, lp_id, asset( 1000, eur_id ), asset( 1200, usd_id ) ); + + BOOST_REQUIRE_EQUAL( result.paid.size(), 2u ); + BOOST_CHECK( result.paid.front() == asset( 1000, eur_id ) ); + BOOST_CHECK( result.paid.back() == asset( 1200, usd_id ) ); + BOOST_REQUIRE_EQUAL( result.received.size(), 1u ); + BOOST_CHECK( result.received.front() == asset( 1200, lpa_id ) ); + BOOST_REQUIRE_EQUAL( result.fees.size(), 0u ); + + expected_pool_balance_a = 1000; + expected_pool_balance_b = 1200; + expected_lp_supply = 1200; + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_balance_sam_eur -= 1000; + expected_balance_sam_usd -= 1200; + expected_balance_sam_lpa += 1200; + check_balances(); + + // Try to deposit more to create more than max supply, will be capped at max supply + result = deposit_to_liquidity_pool( sam_id, lp_id, asset( 1000, eur_id ), asset( 1200, usd_id ) ); + + new_lp_supply = 800; // 2000 - 1200 + new_a = 667; // 800 * 1000 / 1200, round up + new_b = 800; + + BOOST_REQUIRE_EQUAL( result.paid.size(), 2u ); + 
BOOST_CHECK( result.paid.front() == asset( new_a, eur_id ) ); + BOOST_CHECK( result.paid.back() == asset( new_b, usd_id ) ); + BOOST_REQUIRE_EQUAL( result.received.size(), 1u ); + BOOST_CHECK( result.received.front() == asset( new_lp_supply, lpa_id ) ); + BOOST_REQUIRE_EQUAL( result.fees.size(), 0u ); + + expected_pool_balance_a += new_a; + expected_pool_balance_b += new_b; + expected_lp_supply = 2000; + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_balance_sam_eur -= new_a; + expected_balance_sam_usd -= new_b; + expected_balance_sam_lpa += new_lp_supply; + check_balances(); + + // Unable to deposit more + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id, asset( 2, eur_id ), asset( 2, usd_id ) ), + fc::exception ); + + // set max supply to a bigger number + auop.new_options.max_supply = 3000; + trx.operations.clear(); + trx.operations.push_back( auop ); + PUSH_TX(db, trx, ~0); + + BOOST_CHECK_EQUAL( lpa_id(db).options.max_supply.value, 3000 ); + + // Able to deposit more + deposit_to_liquidity_pool( sam_id, lp_id, asset( 2, eur_id ), asset( 2, usd_id ) ); + + // update flag to disable creation of new supply + auop.new_options.flags |= disable_new_supply; + trx.operations.clear(); + trx.operations.push_back( auop ); + PUSH_TX(db, trx, ~0); + + BOOST_CHECK( !lpa_id(db).can_create_new_supply() ); + + // Unable to deposit more + BOOST_CHECK_THROW( deposit_to_liquidity_pool( sam_id, lp_id, asset( 2, eur_id ), asset( 2, usd_id ) ), + fc::exception ); + + generate_block(); + + } catch (fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_CASE( exchange_test ) +{ try { + + // Pass the hard fork time + generate_blocks( 
HARDFORK_LIQUIDITY_POOL_TIME ); + set_expiration( db, trx ); + + ACTORS((sam)(ted)); + + additional_asset_options_t eur_options, usd_options; + eur_options.value.taker_fee_percent = 50; // 0.5% taker fee + usd_options.value.taker_fee_percent = 80; // 0.8% taker fee + + const asset_object& eur = create_user_issued_asset( "MYEUR", sam, charge_market_fee, + price(asset(1, asset_id_type(1)), asset(1)), + 4, 20, eur_options ); // 0.2% maker fee + const asset_object& usd = create_user_issued_asset( "MYUSD", ted, charge_market_fee, + price(asset(1, asset_id_type(1)), asset(1)), + 4, 30, usd_options ); // 0.3% maker fee + const asset_object& lpa = create_user_issued_asset( "LPATEST", sam, charge_market_fee ); + + asset_id_type core_id = asset_id_type(); + asset_id_type eur_id = eur.id; + asset_id_type usd_id = usd.id; + asset_id_type lpa_id = lpa.id; + + int64_t init_amount = 10000000 * GRAPHENE_BLOCKCHAIN_PRECISION; + fund( sam, asset(init_amount) ); + fund( ted, asset(init_amount) ); + issue_uia( sam, eur.amount(init_amount) ); + issue_uia( ted, eur.amount(init_amount) ); + issue_uia( sam, usd.amount(init_amount) ); + issue_uia( ted, usd.amount(init_amount) ); + + int64_t expected_balance_sam_eur = init_amount; + int64_t expected_balance_sam_usd = init_amount; + int64_t expected_balance_sam_lpa = 0; + int64_t expected_balance_ted_eur = init_amount; + int64_t expected_balance_ted_usd = init_amount; + int64_t expected_balance_ted_lpa = 0; + + int64_t expected_accumulated_fees_eur = 0; + int64_t expected_accumulated_fees_usd = 0; + + const auto& check_balances = [&]() { + BOOST_CHECK_EQUAL( db.get_balance( sam_id, eur_id ).amount.value, expected_balance_sam_eur ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, usd_id ).amount.value, expected_balance_sam_usd ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, lpa_id ).amount.value, expected_balance_sam_lpa ); + BOOST_CHECK_EQUAL( db.get_balance( ted_id, eur_id ).amount.value, expected_balance_ted_eur ); + BOOST_CHECK_EQUAL( 
db.get_balance( ted_id, usd_id ).amount.value, expected_balance_ted_usd ); + BOOST_CHECK_EQUAL( db.get_balance( ted_id, lpa_id ).amount.value, expected_balance_ted_lpa ); + }; + + check_balances(); + + int64_t expected_pool_balance_a = 0; + int64_t expected_pool_balance_b = 0; + int64_t expected_lp_supply = 0; + + // create a liquidity pool + const liquidity_pool_object& lpo = create_liquidity_pool( sam_id, eur.id, usd.id, lpa.id, 200, 300 ); + liquidity_pool_id_type lp_id = lpo.id; + + BOOST_CHECK( lpo.asset_a == eur_id ); + BOOST_CHECK( lpo.asset_b == usd_id ); + BOOST_CHECK( lpo.share_asset == lpa_id ); + BOOST_CHECK( lpo.taker_fee_percent == 200 ); + BOOST_CHECK( lpo.withdrawal_fee_percent == 300 ); + + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + BOOST_CHECK( lpa.is_liquidity_pool_share_asset() ); + BOOST_CHECK( *lpa.for_liquidity_pool == lp_id ); + + check_balances(); + + // Unable to exchange if the pool is not initialized + BOOST_CHECK_THROW( exchange_with_liquidity_pool( ted_id, lp_id, asset( 100, eur_id ), asset( 1, usd_id ) ), + fc::exception ); + + // The owner do the initial deposit + generic_exchange_operation_result result; + result = deposit_to_liquidity_pool( sam_id, lp_id, asset( 1000, eur_id ), asset( 1200, usd_id ) ); + + BOOST_REQUIRE_EQUAL( result.paid.size(), 2u ); + BOOST_CHECK( result.paid.front() == asset( 1000, eur_id ) ); + BOOST_CHECK( result.paid.back() == asset( 1200, usd_id ) ); + BOOST_REQUIRE_EQUAL( result.received.size(), 1u ); + BOOST_CHECK( result.received.front() == asset( 1200, lpa_id ) ); + BOOST_REQUIRE_EQUAL( result.fees.size(), 0u ); + + expected_pool_balance_a = 1000; + expected_pool_balance_b = 1200; + expected_lp_supply = 1200; + 
BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_balance_sam_eur -= 1000; + expected_balance_sam_usd -= 1200; + expected_balance_sam_lpa += 1200; + check_balances(); + + // Unable to exchange if data is invalid + // non-positive amounts + for( int64_t i = -1; i <= 1; ++i ) + { + for( int64_t j = -1; j <= 1; ++j ) + { + if( i > 0 && j > 0 ) + continue; + BOOST_CHECK_THROW( exchange_with_liquidity_pool( ted_id, lp_id, asset( i, eur_id ), asset( j, usd_id ) ), + fc::exception ); + } + } + // Insufficient balance + BOOST_CHECK_THROW( exchange_with_liquidity_pool( ted_id, lp_id, + asset( init_amount + 1, eur_id ), asset( 1, usd_id ) ), fc::exception ); + BOOST_CHECK_THROW( exchange_with_liquidity_pool( ted_id, lp_id, + asset( init_amount + 1, usd_id ), asset( 1, eur_id ) ), fc::exception ); + // asset ID mismatch + BOOST_CHECK_THROW( exchange_with_liquidity_pool( ted_id, lp_id, asset( 100, core_id ), asset( 1, usd_id ) ), + fc::exception ); + BOOST_CHECK_THROW( exchange_with_liquidity_pool( ted_id, lp_id, asset( 100, eur_id ), asset( 1, lpa_id ) ), + fc::exception ); + // non-exist pool + BOOST_CHECK_THROW( exchange_with_liquidity_pool( ted_id, lp_id+1, asset( 100, eur_id ), asset( 1, usd_id ) ), + fc::exception ); + + + // trying to buy an amount that is equal to or more than the balance in the pool + BOOST_CHECK_THROW( exchange_with_liquidity_pool( ted_id, lp_id, asset( 9000, eur_id ), asset( 1200, usd_id ) ), + fc::exception ); + BOOST_CHECK_THROW( exchange_with_liquidity_pool( ted_id, lp_id, asset( 9000, usd_id ), asset( 1000, eur_id ) ), + fc::exception ); + + // Calculates if Ted sells 1000 EUR to the pool + int64_t maker_fee = 2; // 1000 * 0.2%, eur + int64_t delta_a = 998; // 
1000 - 2 + // tmp_delta = 1200 - round_up( 1000 * 1200 / (1000+998) ) = 1200 - 601 = 599 + int64_t delta_b = -588; // - ( 599 - round_down(599 * 2%) ) = - ( 599 - 11 ) = -588 + int64_t taker_fee = 4; // 588 * 0.8%, usd + int64_t ted_receives = 584; // 588 - 4 + + // Ted fails to exchange if asks for more + BOOST_CHECK_THROW( exchange_with_liquidity_pool( ted_id, lp_id, asset( 1000, eur_id ), asset( 585, usd_id ) ), + fc::exception ); + + // Ted exchanges with the pool + result = exchange_with_liquidity_pool( ted_id, lp_id, asset( 1000, eur_id ), asset( 584, usd_id ) ); + + BOOST_REQUIRE_EQUAL( result.paid.size(), 1u ); + BOOST_CHECK( result.paid.front() == asset( 1000, eur_id ) ); + BOOST_REQUIRE_EQUAL( result.received.size(), 1u ); + BOOST_CHECK( result.received.front() == asset( ted_receives, usd_id ) ); + BOOST_REQUIRE_EQUAL( result.fees.size(), 2u ); + BOOST_CHECK( result.fees.front() == asset( maker_fee, eur_id ) ); + BOOST_CHECK( result.fees.back() == asset( taker_fee, usd_id ) ); + + expected_pool_balance_a += delta_a; // 1000 + 998 = 1998 + expected_pool_balance_b += delta_b; // 1200 - 588 = 612 + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_accumulated_fees_eur += maker_fee; + expected_accumulated_fees_usd += taker_fee; + BOOST_CHECK_EQUAL( eur.dynamic_data(db).accumulated_fees.value, expected_accumulated_fees_eur ); + BOOST_CHECK_EQUAL( usd.dynamic_data(db).accumulated_fees.value, expected_accumulated_fees_usd ); + + expected_balance_ted_eur -= 1000; + expected_balance_ted_usd += ted_receives; + check_balances(); + + // Calculates if Ted sells 1000 USD to the pool + maker_fee = 3; // 1000 * 0.3%, usd + delta_b = 997; // 1000 - 3 + // tmp_delta = 1998 - round_up( 1998 * 
612 / (612+997) ) = 1998 - 760 = 1238 + delta_a = -1214; // - ( 1238 - round_down(1238 * 2%) ) = - ( 1238 - 24 ) = -1214 + taker_fee = 6; // 1214 * 0.5%, eur + ted_receives = 1208; // 1214 - 6 + + // Ted fails to exchange if asks for more + BOOST_CHECK_THROW( exchange_with_liquidity_pool( ted_id, lp_id, asset( 1000, usd_id ), asset( 1209, eur_id ) ), + fc::exception ); + + // Ted exchanges with the pool + result = exchange_with_liquidity_pool( ted_id, lp_id, asset( 1000, usd_id ), asset( 600, eur_id ) ); + + BOOST_REQUIRE_EQUAL( result.paid.size(), 1u ); + BOOST_CHECK( result.paid.front() == asset( 1000, usd_id ) ); + BOOST_REQUIRE_EQUAL( result.received.size(), 1u ); + BOOST_CHECK( result.received.front() == asset( ted_receives, eur_id ) ); + BOOST_REQUIRE_EQUAL( result.fees.size(), 2u ); + BOOST_CHECK( result.fees.front() == asset( maker_fee, usd_id ) ); + BOOST_CHECK( result.fees.back() == asset( taker_fee, eur_id ) ); + + expected_pool_balance_a += delta_a; // 1998 - 1214 = 784 + expected_pool_balance_b += delta_b; // 612 + 997 = 1609 + BOOST_CHECK_EQUAL( lpo.balance_a.value, expected_pool_balance_a); + BOOST_CHECK_EQUAL( lpo.balance_b.value, expected_pool_balance_b); + BOOST_CHECK( lpo.virtual_value == fc::uint128_t(expected_pool_balance_a) * expected_pool_balance_b ); + BOOST_CHECK_EQUAL( lpa.dynamic_data(db).current_supply.value, expected_lp_supply ); + + expected_accumulated_fees_eur += taker_fee; + expected_accumulated_fees_usd += maker_fee; + BOOST_CHECK_EQUAL( eur.dynamic_data(db).accumulated_fees.value, expected_accumulated_fees_eur ); + BOOST_CHECK_EQUAL( usd.dynamic_data(db).accumulated_fees.value, expected_accumulated_fees_usd ); + + expected_balance_ted_eur += ted_receives; + expected_balance_ted_usd -= 1000; + check_balances(); + + // Generates a block + generate_block(); + + } catch (fc::exception& e) { + edump((e.to_detail_string())); + throw; + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/main.cpp b/tests/tests/main.cpp index 
405e7c1059..0c3e9f7644 100644 --- a/tests/tests/main.cpp +++ b/tests/tests/main.cpp @@ -24,12 +24,14 @@ #include #include #include +#include extern uint32_t GRAPHENE_TESTING_GENESIS_TIMESTAMP; boost::unit_test::test_suite* init_unit_test_suite(int argc, char* argv[]) { - std::srand(time(NULL)); - std::cout << "Random number generator seeded to " << time(NULL) << std::endl; + const auto seed = std::chrono::high_resolution_clock::now().time_since_epoch().count(); + std::srand( seed ); + std::cout << "Random number generator seeded to " << seed << std::endl; const char* genesis_timestamp_str = getenv("GRAPHENE_TESTING_GENESIS_TIMESTAMP"); if( genesis_timestamp_str != nullptr ) { diff --git a/tests/tests/pob_tests.cpp b/tests/tests/pob_tests.cpp index e0048d843f..cf277bfae4 100644 --- a/tests/tests/pob_tests.cpp +++ b/tests/tests/pob_tests.cpp @@ -1511,11 +1511,12 @@ BOOST_AUTO_TEST_CASE( withdraw_lock_180_ticket ) set_expiration( db, trx ); // no change + bool has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); BOOST_CHECK( tick_1_id(db).target_type == liquid ); BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == withdrawing ); BOOST_CHECK( tick_1_id(db).amount == asset(100) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 100 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 
0 : 100 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); // 1 day passed @@ -1569,11 +1570,12 @@ BOOST_AUTO_TEST_CASE( withdraw_lock_360_ticket ) set_expiration( db, trx ); // the ticket should have downgraded + bool has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); BOOST_CHECK( tick_1_id(db).target_type == liquid ); BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == withdrawing ); BOOST_CHECK( tick_1_id(db).amount == asset(100) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 100 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 0 : 100 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); // 179 days passed @@ -1581,11 +1583,12 @@ BOOST_AUTO_TEST_CASE( withdraw_lock_360_ticket ) set_expiration( db, trx ); // no change + has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); BOOST_CHECK( tick_1_id(db).target_type == liquid ); BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == withdrawing ); BOOST_CHECK( tick_1_id(db).amount == asset(100) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 100 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 
0 : 100 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); // 1 day passed @@ -1683,11 +1686,12 @@ BOOST_AUTO_TEST_CASE( withdraw_lock_720_ticket ) set_expiration( db, trx ); // the ticket should have downgraded + bool has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); BOOST_CHECK( tick_1_id(db).target_type == liquid ); BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == withdrawing ); BOOST_CHECK( tick_1_id(db).amount == asset(100) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 100 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 0 : 100 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); // unable to update ticket if not to change target type @@ -1700,11 +1704,12 @@ BOOST_AUTO_TEST_CASE( withdraw_lock_720_ticket ) set_expiration( db, trx ); // no change + has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); BOOST_CHECK( tick_1_id(db).target_type == liquid ); BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == withdrawing ); BOOST_CHECK( tick_1_id(db).amount == asset(100) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 100 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 
0 : 100 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); // unable to update ticket if not to change target type @@ -2753,6 +2758,7 @@ BOOST_AUTO_TEST_CASE( update_from_withdrawing_to_charging_then_withdraw_again ) BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); // downgrade again + bool has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); result = update_ticket( tick_1_id(db), liquid, {} ); BOOST_CHECK_EQUAL( result.new_objects.size(), 0u ); @@ -2760,7 +2766,7 @@ BOOST_AUTO_TEST_CASE( update_from_withdrawing_to_charging_then_withdraw_again ) BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == withdrawing ); BOOST_CHECK( tick_1_id(db).amount == asset(90) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 90 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 0 : 90 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); BOOST_CHECK( tick_1_id(db).next_type_downgrade_time != time_point_sec::maximum() ); @@ -2774,11 +2780,12 @@ BOOST_AUTO_TEST_CASE( update_from_withdrawing_to_charging_then_withdraw_again ) result = update_ticket( tick_1_id(db), lock_720_days, {} ); BOOST_CHECK_EQUAL( result.new_objects.size(), 0u ); + has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); BOOST_CHECK( tick_1_id(db).target_type == lock_720_days ); BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == charging ); BOOST_CHECK( tick_1_id(db).amount == asset(90) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 90 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 
0 : 90 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); BOOST_CHECK( tick_1_id(db).next_type_downgrade_time == down_time ); @@ -2787,11 +2794,12 @@ BOOST_AUTO_TEST_CASE( update_from_withdrawing_to_charging_then_withdraw_again ) result = update_ticket( tick_1_id(db), liquid, {} ); BOOST_CHECK_EQUAL( result.new_objects.size(), 0u ); + has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); BOOST_CHECK( tick_1_id(db).target_type == liquid ); BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == withdrawing ); BOOST_CHECK( tick_1_id(db).amount == asset(90) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 90 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 0 : 90 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); BOOST_CHECK( tick_1_id(db).next_type_downgrade_time == down_time ); @@ -2804,11 +2812,12 @@ BOOST_AUTO_TEST_CASE( update_from_withdrawing_to_charging_then_withdraw_again ) result = update_ticket( tick_1_id(db), lock_720_days, {} ); BOOST_CHECK_EQUAL( result.new_objects.size(), 0u ); + has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); BOOST_CHECK( tick_1_id(db).target_type == lock_720_days ); BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == charging ); BOOST_CHECK( tick_1_id(db).amount == asset(90) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 90 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 
0 : 90 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); BOOST_CHECK( tick_1_id(db).next_type_downgrade_time == down_time ); @@ -2818,11 +2827,12 @@ BOOST_AUTO_TEST_CASE( update_from_withdrawing_to_charging_then_withdraw_again ) set_expiration( db, trx ); // no change + has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); BOOST_CHECK( tick_1_id(db).target_type == lock_720_days ); BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == charging ); BOOST_CHECK( tick_1_id(db).amount == asset(90) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 90 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 0 : 90 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); BOOST_CHECK( tick_1_id(db).next_type_downgrade_time == down_time ); @@ -2842,11 +2852,12 @@ BOOST_AUTO_TEST_CASE( update_from_withdrawing_to_charging_then_withdraw_again ) BOOST_CHECK( !db.find( tick_3_id ) ); // check the remainder + has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); BOOST_CHECK( tick_1_id(db).target_type == lock_720_days ); BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == charging ); BOOST_CHECK( tick_1_id(db).amount == asset(75) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 75 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 
0 : 75 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance + 15 ); BOOST_CHECK( tick_1_id(db).next_type_downgrade_time == down_time ); @@ -2857,11 +2868,12 @@ BOOST_AUTO_TEST_CASE( update_from_withdrawing_to_charging_then_withdraw_again ) generate_block(); // no change + has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); BOOST_CHECK( tick_1_id(db).target_type == lock_720_days ); BOOST_CHECK( tick_1_id(db).current_type == liquid ); BOOST_CHECK( tick_1_id(db).status == charging ); BOOST_CHECK( tick_1_id(db).amount == asset(75) ); - BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 75 ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, has_hf_2262 ? 0 : 75 ); BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance + 15 ); BOOST_CHECK( tick_1_id(db).next_type_downgrade_time == down_time ); @@ -3504,4 +3516,228 @@ BOOST_AUTO_TEST_CASE( multiple_tickets ) } } +BOOST_AUTO_TEST_CASE( hf2262_test ) +{ try { + + // Proceed to a time near the core-2262 hard fork. 
+ // Note: only works if the maintenance interval is less than 14 days + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks( HARDFORK_CORE_2262_TIME - mi ); + set_expiration( db, trx ); + + ACTORS((sam)); + + auto init_amount = 10000000 * GRAPHENE_BLOCKCHAIN_PRECISION; + fund( sam, asset(init_amount) ); + + int64_t sam_balance = init_amount; + + // create a ticket + const ticket_object& tick_1 = create_ticket( sam_id, lock_180_days, asset(100) ); + ticket_id_type tick_1_id = tick_1.id; + + BOOST_CHECK( tick_1_id(db).account == sam_id ); + BOOST_CHECK( tick_1_id(db).target_type == lock_180_days ); + BOOST_CHECK( tick_1_id(db).current_type == liquid ); + BOOST_CHECK( tick_1_id(db).status == charging ); + BOOST_CHECK( tick_1_id(db).amount == asset(100) ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 100 ); + sam_balance -= 100; + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + auto create_time = db.head_block_time(); + + // activate hf2262 + generate_blocks( db.get_dynamic_global_properties().next_maintenance_time ); + generate_block(); + + BOOST_REQUIRE( db.head_block_time() < create_time + fc::days(14) ); + + BOOST_CHECK( tick_1_id(db).account == sam_id ); + BOOST_CHECK( tick_1_id(db).target_type == lock_180_days ); + BOOST_CHECK( tick_1_id(db).current_type == liquid ); + BOOST_CHECK( tick_1_id(db).status == charging ); + BOOST_CHECK( tick_1_id(db).amount == asset(100) ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 0 ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + // 14 days passed + generate_blocks( create_time + fc::days(14) ); + set_expiration( db, trx ); + + // no change + BOOST_CHECK( tick_1_id(db).account == sam_id ); + BOOST_CHECK( tick_1_id(db).target_type == lock_180_days ); + BOOST_CHECK( tick_1_id(db).current_type == liquid ); + BOOST_CHECK( tick_1_id(db).status == charging ); + BOOST_CHECK( tick_1_id(db).amount == 
asset(100) ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 0 ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + // unable to update ticket if not to change target type + BOOST_CHECK_THROW( update_ticket( tick_1_id(db), lock_180_days, {} ), fc::exception ); + BOOST_CHECK_THROW( update_ticket( tick_1_id(db), lock_180_days, asset(1) ), fc::exception ); + BOOST_CHECK_THROW( update_ticket( tick_1_id(db), lock_180_days, asset(100) ), fc::exception ); + + // split ticket 1, cancel some + auto result = update_ticket( tick_1_id(db), liquid, asset(6) ); + + BOOST_CHECK( tick_1_id(db).account == sam_id ); + BOOST_CHECK( tick_1_id(db).target_type == lock_180_days ); + BOOST_CHECK( tick_1_id(db).current_type == liquid ); + BOOST_CHECK( tick_1_id(db).status == charging ); + BOOST_CHECK( tick_1_id(db).amount == asset(94) ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 0 ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + BOOST_REQUIRE_EQUAL( result.new_objects.size(), 1u ); + + ticket_id_type tick_2_id = *result.new_objects.begin(); + BOOST_CHECK( tick_2_id(db).target_type == liquid ); + BOOST_CHECK( tick_2_id(db).current_type == liquid ); + BOOST_CHECK( tick_2_id(db).status == withdrawing ); + BOOST_CHECK( tick_2_id(db).amount == asset(6) ); + BOOST_CHECK_EQUAL( tick_2_id(db).value.value, 0 ); + + // 1 day passed + generate_blocks( db.head_block_time() + fc::days(1) ); + set_expiration( db, trx ); + + // ticket should be stable now + BOOST_CHECK( tick_1_id(db).account == sam_id ); + BOOST_CHECK( tick_1_id(db).target_type == lock_180_days ); + BOOST_CHECK( tick_1_id(db).current_type == lock_180_days ); + BOOST_CHECK( tick_1_id(db).status == stable ); + BOOST_CHECK( tick_1_id(db).next_auto_update_time == time_point_sec::maximum() ); + BOOST_CHECK( tick_1_id(db).next_type_downgrade_time == time_point_sec::maximum() ); + BOOST_CHECK( tick_1_id(db).amount == asset(94) ); + 
BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 94 * 2 ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + // split ticket 1, downgrade some + result = update_ticket( tick_1_id(db), liquid, asset(10) ); + + BOOST_CHECK( tick_1_id(db).account == sam_id ); + BOOST_CHECK( tick_1_id(db).target_type == lock_180_days ); + BOOST_CHECK( tick_1_id(db).current_type == lock_180_days ); + BOOST_CHECK( tick_1_id(db).status == stable ); + BOOST_CHECK( tick_1_id(db).next_auto_update_time == time_point_sec::maximum() ); + BOOST_CHECK( tick_1_id(db).next_type_downgrade_time == time_point_sec::maximum() ); + BOOST_CHECK( tick_1_id(db).amount == asset(84) ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 84 * 2 ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + BOOST_REQUIRE_EQUAL( result.new_objects.size(), 1u ); + + ticket_id_type tick_3_id = *result.new_objects.begin(); + BOOST_CHECK( tick_3_id(db).target_type == liquid ); + BOOST_CHECK( tick_3_id(db).current_type == liquid ); + BOOST_CHECK( tick_3_id(db).status == withdrawing ); + BOOST_CHECK( tick_3_id(db).amount == asset(10) ); + BOOST_CHECK_EQUAL( tick_3_id(db).value.value, 0 ); + + // update ticket 1, downgrade all + update_ticket( tick_1_id(db), liquid, {} ); + + // check new data + BOOST_CHECK( tick_1_id(db).account == sam_id ); + BOOST_CHECK( tick_1_id(db).target_type == liquid ); + BOOST_CHECK( tick_1_id(db).current_type == liquid ); + BOOST_CHECK( tick_1_id(db).status == withdrawing ); + BOOST_CHECK( tick_1_id(db).amount == asset(84) ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 0 ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + // create a new ticket + const ticket_object& tick_4 = create_ticket( sam_id, lock_360_days, asset(200) ); + ticket_id_type tick_4_id = tick_4.id; + + BOOST_CHECK( tick_4_id(db).account == sam_id ); + BOOST_CHECK( tick_4_id(db).target_type == 
lock_360_days ); + BOOST_CHECK( tick_4_id(db).current_type == liquid ); + BOOST_CHECK( tick_4_id(db).status == charging ); + BOOST_CHECK( tick_4_id(db).amount == asset(200) ); + BOOST_CHECK_EQUAL( tick_4_id(db).value.value, 0 ); + sam_balance -= 200; + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE( hf2262_auto_update_test ) +{ try { + + INVOKE( one_lock_360_ticket ); + + // activate hf2262 + auto mi = db.get_global_properties().parameters.maintenance_interval; + generate_blocks( HARDFORK_CORE_2262_TIME - mi ); + generate_blocks( db.get_dynamic_global_properties().next_maintenance_time ); + + GET_ACTOR( sam ); + + int64_t sam_balance = db.get_balance( sam_id, asset_id_type() ).amount.value; + + ticket_id_type tick_1_id; // default value + + // withdraw the ticket + auto result = update_ticket( tick_1_id(db), liquid, {} ); + BOOST_CHECK_EQUAL( result.new_objects.size(), 0u ); + + BOOST_CHECK( tick_1_id(db).target_type == liquid ); + BOOST_CHECK( tick_1_id(db).current_type == lock_180_days ); + BOOST_CHECK( tick_1_id(db).status == withdrawing ); + BOOST_CHECK( tick_1_id(db).amount == asset(100) ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 100 * 2 ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + // 179 days passed + generate_blocks( db.head_block_time() + fc::days(179) ); + set_expiration( db, trx ); + + // no change + BOOST_CHECK( tick_1_id(db).target_type == liquid ); + BOOST_CHECK( tick_1_id(db).current_type == lock_180_days ); + BOOST_CHECK( tick_1_id(db).status == withdrawing ); + BOOST_CHECK( tick_1_id(db).amount == asset(100) ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 100 * 2 ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + // 1 day passed + generate_blocks( db.head_block_time() + fc::days(1) ); + set_expiration( db, trx ); + + // the ticket should have 
downgraded + BOOST_CHECK( tick_1_id(db).target_type == liquid ); + BOOST_CHECK( tick_1_id(db).current_type == liquid ); + BOOST_CHECK( tick_1_id(db).status == withdrawing ); + BOOST_CHECK( tick_1_id(db).amount == asset(100) ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 0 ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + // 179 days passed + generate_blocks( db.head_block_time() + fc::days(179) ); + set_expiration( db, trx ); + + // no change + BOOST_CHECK( tick_1_id(db).target_type == liquid ); + BOOST_CHECK( tick_1_id(db).current_type == liquid ); + BOOST_CHECK( tick_1_id(db).status == withdrawing ); + BOOST_CHECK( tick_1_id(db).amount == asset(100) ); + BOOST_CHECK_EQUAL( tick_1_id(db).value.value, 0 ); + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance ); + + // 1 day passed + generate_blocks( db.head_block_time() + fc::days(1) ); + set_expiration( db, trx ); + + // the ticket should be freed + BOOST_CHECK( !db.find( tick_1_id ) ); + + BOOST_CHECK_EQUAL( db.get_balance( sam_id, asset_id_type() ).amount.value, sam_balance + 100 ); + + } FC_LOG_AND_RETHROW() +} + + BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/tests/voting_tests.cpp b/tests/tests/voting_tests.cpp index fd3cb6d95e..8a0630dc00 100644 --- a/tests/tests/voting_tests.cpp +++ b/tests/tests/voting_tests.cpp @@ -817,9 +817,10 @@ BOOST_AUTO_TEST_CASE( witness_votes_calculation ) generate_blocks( tick_start_time + fc::days(60+180) ); generate_blocks( db.get_dynamic_global_properties().next_maintenance_time ); + bool has_hf_2262 = ( HARDFORK_CORE_2262_PASSED( db.get_dynamic_global_properties().next_maintenance_time ) ); // check votes base4 = 40 * 6 + (114 - 40) - 40; - expected_votes[4] = base4 - base4 * 6 / 8; // 585 days + expected_votes[4] = ( has_hf_2262 ? 
0 : (base4 - base4 * 6 / 8) ); // 585 days base7 = 20 * 8 * 6 + (30 - 20) * 6 + (117 - 30 - 20) - (30 - 20); expected_votes[7] = 0; // 720 days @@ -829,8 +830,11 @@ BOOST_AUTO_TEST_CASE( witness_votes_calculation ) } expected_active_witnesses = original_wits; - expected_active_witnesses.erase( *expected_active_witnesses.rbegin() ); - expected_active_witnesses.insert( wit_ids[4] ); + if( !has_hf_2262 ) + { + expected_active_witnesses.erase( *expected_active_witnesses.rbegin() ); + expected_active_witnesses.insert( wit_ids[4] ); + } BOOST_CHECK( db.get_global_properties().active_witnesses == expected_active_witnesses ); } FC_LOG_AND_RETHROW()