diff --git a/.github/workflows/build-and-test.mac.yml b/.github/workflows/build-and-test.mac.yml new file mode 100644 index 0000000000..a08db11241 --- /dev/null +++ b/.github/workflows/build-and-test.mac.yml @@ -0,0 +1,57 @@ +name: macOS +on: [ push, pull_request ] +env: + CCACHE_COMPRESS: exists means true + CCACHE_SLOPPINESS: include_file_ctime,include_file_mtime,time_macros +jobs: + build-osx: + name: Build and test in macOS + strategy: + matrix: + os: [macos-10.15, macos-11.0] + runs-on: ${{ matrix.os }} + steps: + - name: Install dependencies + run: | + brew install autoconf automake libtool + brew install ccache + brew install parallel + brew install bitshares/boost/boost@1.69 + - uses: actions/checkout@v2 + with: + submodules: recursive + - name: Configure + run: | + mkdir -p _build + pushd _build + cmake -D CMAKE_BUILD_TYPE=Release \ + -D CMAKE_C_COMPILER_LAUNCHER=ccache \ + -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ + -D BOOST_ROOT=/usr/local/opt/boost@1.69 \ + -D OPENSSL_ROOT_DIR=/usr/local/opt/openssl \ + .. 
+ - name: Load Cache + uses: actions/cache@v1 + with: + path: ccache + key: ccache-osx-${{ github.ref }}-${{ github.sha }} + restore-keys: | + ccache-osx-${{ github.ref }}- + ccache-osx- + - name: Build + run: | + export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" + mkdir -p "$CCACHE_DIR" + make -j 2 -C _build witness_node cli_wallet app_test cli_test chain_test + df -h + - name: Unit-Tests + run: | + _build/tests/app_test -l test_suite + libraries/fc/tests/run-parallel-tests.sh _build/tests/chain_test -l test_suite + _build/tests/cli_test -l test_suite + df -h + - name: Node-Test + run: | + df -h + pushd _build + ../programs/build_helpers/run-node-test diff --git a/.github/workflows/build-and-test.ubuntu-debug.yml b/.github/workflows/build-and-test.ubuntu-debug.yml new file mode 100644 index 0000000000..031c644851 --- /dev/null +++ b/.github/workflows/build-and-test.ubuntu-debug.yml @@ -0,0 +1,107 @@ +name: Ubuntu Debug +on: [ push, pull_request ] +env: + CCACHE_COMPRESS: exists means true + CCACHE_SLOPPINESS: include_file_ctime,include_file_mtime,time_macros +jobs: + test-debug: + name: Build and test in Debug mode + strategy: + matrix: + os: [ ubuntu-16.04, ubuntu-18.04 ] + runs-on: ${{ matrix.os }} + services: + elasticsearch: + image: docker://elasticsearch:7.4.2 + options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 + steps: + - name: Install dependencies + run: | + df -h + sudo apt-get update + openssl_ver=`sudo apt-cache madison openssl | grep xenial-updates | awk '{print $3}'` + libssl_ver=`sudo apt-cache madison libssl-dev | grep xenial-updates | awk '{print $3}'` + [ -n "${openssl_ver}" ] && [ -n "${libssl_ver}" ] && \ + sudo apt-get install -y --allow-downgrades openssl=${openssl_ver} libssl-dev=${libssl_ver} + sudo apt-get install -y \ + ccache \ + parallel \ + libboost-thread-dev \ + libboost-iostreams-dev \ + libboost-date-time-dev \ + libboost-system-dev \ + libboost-filesystem-dev \ + libboost-program-options-dev \ + 
libboost-chrono-dev \ + libboost-test-dev \ + libboost-context-dev \ + libboost-regex-dev \ + libboost-coroutine-dev \ + libcurl4-openssl-dev + sudo apt-get auto-remove -y + sudo apt-get clean -y + df -h + sudo du -hs /mnt/* + sudo ls -alr /mnt/ + - uses: actions/checkout@v2 + with: + submodules: recursive + - name: Configure + run: | + pwd + df -h . + mkdir -p _build + sudo mkdir -p /_build/libraries /_build/programs /mnt/_build/tests + sudo chmod a+rwx /_build/libraries /_build/programs /mnt/_build/tests + ln -s /_build/libraries _build/libraries + ln -s /_build/programs _build/programs + ln -s /mnt/_build/tests _build/tests + sudo ln -s /_build/libraries /mnt/_build/libraries + sudo ln -s /_build/programs /mnt/_build/programs + sudo ln -s /mnt/_build/tests /_build/tests + ls -al _build + pushd _build + export -n BOOST_ROOT BOOST_INCLUDEDIR BOOST_LIBRARYDIR + cmake -D CMAKE_BUILD_TYPE=Debug \ + -D CMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON \ + -D CMAKE_C_COMPILER=gcc \ + -D CMAKE_C_COMPILER_LAUNCHER=ccache \ + -D CMAKE_CXX_COMPILER=g++ \ + -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ + .. 
+ popd + - name: Load Cache + uses: actions/cache@v1 + with: + path: ccache + key: ccache-debug-${{ github.ref }}-${{ github.sha }} + restore-keys: | + ccache-debug-${{ github.ref }}- + ccache-debug- + - name: Build + run: | + export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" + mkdir -p "$CCACHE_DIR" + df -h + make -j 2 -C _build chain_test + make -j 2 -C _build cli_wallet + make -j 2 -C _build + df -h + du -hs _build/libraries/* _build/programs/* _build/tests/* + du -hs _build/* + du -hs /_build/* + - name: Unit-Tests + run: | + _build/tests/app_test -l test_suite + df -h + _build/tests/es_test -l test_suite + df -h + libraries/fc/tests/run-parallel-tests.sh _build/tests/chain_test -l test_suite + _build/tests/cli_test -l test_suite + df -h + - name: Node-Test + run: | + df -h + pushd _build + ../programs/build_helpers/run-node-test + df -h diff --git a/.github/workflows/build-and-test.ubuntu-release.yml b/.github/workflows/build-and-test.ubuntu-release.yml new file mode 100644 index 0000000000..5627c005ce --- /dev/null +++ b/.github/workflows/build-and-test.ubuntu-release.yml @@ -0,0 +1,85 @@ +name: Ubuntu Release +on: [ push, pull_request ] +env: + CCACHE_COMPRESS: exists means true + CCACHE_SLOPPINESS: include_file_ctime,include_file_mtime,time_macros +jobs: + test-release: + name: Build and test in Release mode + strategy: + matrix: + os: [ ubuntu-16.04, ubuntu-18.04 ] + runs-on: ${{ matrix.os }} + services: + elasticsearch: + image: docker://elasticsearch:7.4.2 + options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 + steps: + - name: Install dependencies + run: | + sudo apt-get update + openssl_ver=`sudo apt-cache madison openssl | grep xenial-updates | awk '{print $3}'` + libssl_ver=`sudo apt-cache madison libssl-dev | grep xenial-updates | awk '{print $3}'` + [ -n "${openssl_ver}" ] && [ -n "${libssl_ver}" ] && \ + sudo apt-get install -y --allow-downgrades openssl=${openssl_ver} libssl-dev=${libssl_ver} + sudo apt-get install -y \ + 
ccache \ + parallel \ + libboost-thread-dev \ + libboost-iostreams-dev \ + libboost-date-time-dev \ + libboost-system-dev \ + libboost-filesystem-dev \ + libboost-program-options-dev \ + libboost-chrono-dev \ + libboost-test-dev \ + libboost-context-dev \ + libboost-regex-dev \ + libboost-coroutine-dev \ + libcurl4-openssl-dev + sudo apt-get auto-remove -y + sudo apt-get clean -y + df -h + - uses: actions/checkout@v2 + with: + submodules: recursive + - name: Configure + run: | + mkdir -p _build + pushd _build + export -n BOOST_ROOT BOOST_INCLUDEDIR BOOST_LIBRARYDIR + cmake -D CMAKE_BUILD_TYPE=Release \ + -D CMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON \ + -D CMAKE_C_COMPILER=gcc \ + -D CMAKE_C_COMPILER_LAUNCHER=ccache \ + -D CMAKE_CXX_COMPILER=g++ \ + -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ + .. + popd + - name: Load Cache + uses: actions/cache@v1 + with: + path: ccache + key: ccache-release-${{ github.ref }}-${{ github.sha }} + restore-keys: | + ccache-release-${{ github.ref }}- + ccache-release- + - name: Build + run: | + export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" + mkdir -p "$CCACHE_DIR" + make -j 2 -C _build + df -h + - name: Unit-Tests + run: | + _build/tests/app_test -l test_suite + _build/tests/es_test -l test_suite + libraries/fc/tests/run-parallel-tests.sh _build/tests/chain_test -l test_suite + _build/tests/cli_test -l test_suite + df -h + - name: Node-Test + run: | + df -h + pushd _build + ../programs/build_helpers/run-node-test + df -h diff --git a/.github/workflows/build-and-test.win.yml b/.github/workflows/build-and-test.win.yml new file mode 100644 index 0000000000..68bca8940d --- /dev/null +++ b/.github/workflows/build-and-test.win.yml @@ -0,0 +1,152 @@ +name: Windows MinGW64 +on: [ push, pull_request ] +env: + CCACHE_COMPRESS: exists means true + CCACHE_SLOPPINESS: include_file_ctime,include_file_mtime,time_macros + # The following are for windows cross-build only: + BOOST_VERSION: 1_69_0 + BOOST_DOTTED_VERSION: 1.69.0 + CURL_VERSION: 7.67.0 + 
OPENSSL_VERSION: 1.1.1d + ZLIB_VERSION: 1.2.11 +jobs: + prepare-mingw64-libs: + name: Build required 3rd-party libraries + runs-on: ubuntu-latest + steps: + - name: Load Cache + id: cache-libs + uses: actions/cache@v1 + with: + path: libs + key: mingw64-libs-${{ env.BOOST_VERSION }}_${{ env.CURL_VERSION }}_${{ env.OPENSSL_VERSION }}_${{ env.ZLIB_VERSION }} + - name: Install dependencies + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + sudo apt-get update + sudo apt-get install -y \ + g++-mingw-w64-x86-64 \ + mingw-w64-tools + - name: Download library sources + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + curl -LO https://dl.bintray.com/boostorg/release/${{ env.BOOST_DOTTED_VERSION }}/source/boost_${{ env.BOOST_VERSION }}.tar.bz2 + curl -LO https://curl.haxx.se/download/curl-${{ env.CURL_VERSION }}.tar.bz2 + curl -LO https://www.openssl.org/source/openssl-${{ env.OPENSSL_VERSION }}.tar.gz + curl -LO https://zlib.net/zlib-${{ env.ZLIB_VERSION }}.tar.gz + - name: Build zlib + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + LIBS="`pwd`/libs" + ZLIB="`echo zlib-*`" + tar xfz "$ZLIB" + pushd "${ZLIB%.tar.gz}" + CROSS_PREFIX=x86_64-w64-mingw32- ./configure --prefix="$LIBS" --static --64 + make install + - name: Build openssl + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + LIBS="`pwd`/libs" + OPENSSL="`echo openssl-*`" + tar xfz "$OPENSSL" + pushd "${OPENSSL%.tar.gz}" + ./Configure --prefix="$LIBS" --cross-compile-prefix=x86_64-w64-mingw32- \ + no-shared zlib threads \ + mingw64 + make CPPFLAGS="-I$LIBS/include" LDFLAGS="-L$LIBS/lib" build_libs + make -j 2 install_dev + - name: Build curl + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + LIBS="`pwd`/libs" + CURL="`echo curl-*`" + tar xfj "$CURL" + pushd "${CURL%.tar.bz2}" + sed -i 's=-lgdi32=-lcrypt32 \0=' configure + PKG_CONFIG_PATH="$LIBS/lib/pkgconfig" ./configure --host=x86_64-w64-mingw32 \ + --prefix="$LIBS" \ + --disable-shared \ + --disable-tftp \ + 
--disable-ldap \ + --with-zlib \ + --without-ssl --with-winssl \ + --disable-tftp \ + --disable-ldap + make -j 2 install + - name: Build boost + if: steps.cache-libs.outputs.cache-hit != 'true' + run: | + LIBS="`pwd`/libs" + BOOST="`echo boost_*`" + tar xfj "$BOOST" + pushd "${BOOST%.tar.bz2}" + # See https://github.com/boostorg/context/issues/101 + sed -i '/os.\(name\|platform\)/d;/local tmp = /s=elf=pe=;/local tmp = /s=sysv=ms=' libs/context/build/Jamfile.v2 + ./bootstrap.sh --prefix=$LIBS + echo "using gcc : mingw32 : x86_64-w64-mingw32-g++ ;" > user-config.jam + ./b2 --user-config=user-config.jam \ + --without-python \ + toolset=gcc-mingw32 \ + target-os=windows \ + variant=release \ + link=static \ + threading=multi \ + runtime-link=static \ + address-model=64 \ + abi=ms \ + install + build-mingw64: + name: Cross-build for windows using mingw + runs-on: ubuntu-latest + needs: prepare-mingw64-libs + steps: + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y \ + ccache \ + g++-mingw-w64-x86-64 \ + mingw-w64-tools + sudo apt-get auto-remove -y + sudo apt-get clean -y + df -h + - uses: actions/checkout@v2 + with: + submodules: recursive + - name: Load external libraries + uses: actions/cache@v1 + with: + path: libs + key: mingw64-libs-${{ env.BOOST_VERSION }}_${{ env.CURL_VERSION }}_${{ env.OPENSSL_VERSION }}_${{ env.ZLIB_VERSION }} + - name: Configure + run: | + LIBS="`pwd`/libs" + mkdir -p _build + pushd _build + cmake -D CMAKE_BUILD_TYPE=Release \ + -D CMAKE_C_COMPILER=/usr/bin/x86_64-w64-mingw32-gcc-posix \ + -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ + -D CMAKE_CXX_COMPILER=/usr/bin/x86_64-w64-mingw32-g++-posix \ + -D CMAKE_CXX_FLAGS=-Wa,-mbig-obj \ + -D CMAKE_SYSTEM_NAME=Windows \ + -D CURL_STATICLIB=ON \ + -D CMAKE_EXE_LINKER_FLAGS=--static \ + -D CMAKE_FIND_ROOT_PATH="/usr/lib/gcc/x86_64-w64-mingw32/7.3-win32/;$LIBS" \ + -D CMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER \ + -D CMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY \ + -D 
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE=ONLY \ + -D GRAPHENE_DISABLE_UNITY_BUILD=ON \ + .. + - name: Load Cache + uses: actions/cache@v1 + with: + path: ccache + key: ccache-mingw64-${{ github.ref }}-${{ github.sha }} + restore-keys: | + ccache-mingw64-${{ github.ref }}- + ccache-mingw64- + - name: Build + run: | + export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" + mkdir -p "$CCACHE_DIR" + make -j 2 -C _build witness_node cli_wallet diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml deleted file mode 100644 index 43ca35a2cc..0000000000 --- a/.github/workflows/build-and-test.yml +++ /dev/null @@ -1,364 +0,0 @@ -name: Github Autobuild -on: [ push, pull_request ] -env: - CCACHE_COMPRESS: exists means true - CCACHE_SLOPPINESS: include_file_ctime,include_file_mtime,time_macros - # The following are for windows cross-build only: - BOOST_VERSION: 1_69_0 - BOOST_DOTTED_VERSION: 1.69.0 - CURL_VERSION: 7.67.0 - OPENSSL_VERSION: 1.1.1d - ZLIB_VERSION: 1.2.11 -jobs: - test-release: - name: Build and run tests in Release mode - runs-on: ubuntu-latest - services: - elasticsearch: - image: docker://elasticsearch:7.4.2 - options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 - steps: - - name: Install dependencies - run: | - sudo apt-get update - sudo apt-get install -y \ - ccache \ - parallel \ - libboost-thread-dev \ - libboost-iostreams-dev \ - libboost-date-time-dev \ - libboost-system-dev \ - libboost-filesystem-dev \ - libboost-program-options-dev \ - libboost-chrono-dev \ - libboost-test-dev \ - libboost-context-dev \ - libboost-regex-dev \ - libboost-coroutine-dev \ - libcurl4-openssl-dev - sudo apt-get auto-remove -y - sudo apt-get clean -y - df -h - - uses: actions/checkout@v2 - with: - submodules: recursive - - name: Configure - run: | - mkdir -p _build - pushd _build - export -n BOOST_ROOT BOOST_INCLUDEDIR BOOST_LIBRARYDIR - cmake -D CMAKE_BUILD_TYPE=Release \ - -D CMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON \ - -D 
CMAKE_C_COMPILER=gcc \ - -D CMAKE_C_COMPILER_LAUNCHER=ccache \ - -D CMAKE_CXX_COMPILER=g++ \ - -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ - .. - popd - - name: Load Cache - uses: actions/cache@v1 - with: - path: ccache - key: ccache-release-${{ github.ref }}-${{ github.sha }} - restore-keys: | - ccache-release-${{ github.ref }}- - ccache-release- - - name: Build - run: | - export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" - mkdir -p "$CCACHE_DIR" - make -j 2 -C _build - df -h - - name: Unit-Tests - run: | - _build/tests/app_test -l message - _build/tests/es_test -l message - libraries/fc/tests/run-parallel-tests.sh _build/tests/chain_test -l message - libraries/fc/tests/run-parallel-tests.sh _build/tests/cli_test -l message - df -h - - name: Node-Test - run: | - df -h - pushd _build - ../programs/build_helpers/run-node-test - df -h - test-debug: - name: Build and run tests in Debug mode - runs-on: ubuntu-latest - services: - elasticsearch: - image: docker://elasticsearch:7.4.2 - options: --env discovery.type=single-node --publish 9200:9200 --publish 9300:9300 - steps: - - name: Install dependencies - run: | - df -h - sudo apt-get update - sudo apt-get install -y \ - ccache \ - parallel \ - libboost-thread-dev \ - libboost-iostreams-dev \ - libboost-date-time-dev \ - libboost-system-dev \ - libboost-filesystem-dev \ - libboost-program-options-dev \ - libboost-chrono-dev \ - libboost-test-dev \ - libboost-context-dev \ - libboost-regex-dev \ - libboost-coroutine-dev \ - libcurl4-openssl-dev - sudo apt-get auto-remove -y - sudo apt-get clean -y - df -h - sudo du -hs /mnt/* - sudo ls -alr /mnt/ - - uses: actions/checkout@v2 - with: - submodules: recursive - - name: Configure - run: | - pwd - df -h . 
- mkdir -p _build - sudo mkdir -p /_build/libraries /_build/programs /mnt/_build/tests - sudo chmod a+rwx /_build/libraries /_build/programs /mnt/_build/tests - ln -s /_build/libraries _build/libraries - ln -s /_build/programs _build/programs - ln -s /mnt/_build/tests _build/tests - sudo ln -s /_build/libraries /mnt/_build/libraries - sudo ln -s /_build/programs /mnt/_build/programs - sudo ln -s /mnt/_build/tests /_build/tests - ls -al _build - pushd _build - export -n BOOST_ROOT BOOST_INCLUDEDIR BOOST_LIBRARYDIR - cmake -D CMAKE_BUILD_TYPE=Debug \ - -D CMAKE_CXX_OUTPUT_EXTENSION_REPLACE=ON \ - -D CMAKE_C_COMPILER=gcc \ - -D CMAKE_C_COMPILER_LAUNCHER=ccache \ - -D CMAKE_CXX_COMPILER=g++ \ - -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ - .. - popd - - name: Load Cache - uses: actions/cache@v1 - with: - path: ccache - key: ccache-debug-${{ github.ref }}-${{ github.sha }} - restore-keys: | - ccache-debug-${{ github.ref }}- - ccache-debug- - - name: Build - run: | - export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" - mkdir -p "$CCACHE_DIR" - df -h - make -j 2 -C _build - df -h - du -hs _build/libraries/* _build/programs/* _build/tests/* - du -hs _build/* - du -hs /_build/* - - name: Unit-Tests - run: | - _build/tests/app_test -l message - df -h - _build/tests/es_test -l message - df -h - libraries/fc/tests/run-parallel-tests.sh _build/tests/chain_test -l message - libraries/fc/tests/run-parallel-tests.sh _build/tests/cli_test -l message - df -h - - name: Node-Test - run: | - df -h - pushd _build - ../programs/build_helpers/run-node-test - df -h - prepare-mingw64-libs: - name: Build 3rd-party libraries required for windows cross-build - runs-on: ubuntu-latest - steps: - - name: Load Cache - id: cache-libs - uses: actions/cache@v1 - with: - path: libs - key: mingw64-libs-${{ env.BOOST_VERSION }}_${{ env.CURL_VERSION }}_${{ env.OPENSSL_VERSION }}_${{ env.ZLIB_VERSION }} - - name: Install dependencies - if: steps.cache-libs.outputs.cache-hit != 'true' - run: | - sudo apt-get update 
- sudo apt-get install -y \ - g++-mingw-w64-x86-64 \ - mingw-w64-tools - - name: Download library sources - if: steps.cache-libs.outputs.cache-hit != 'true' - run: | - curl -LO https://dl.bintray.com/boostorg/release/${{ env.BOOST_DOTTED_VERSION }}/source/boost_${{ env.BOOST_VERSION }}.tar.bz2 - curl -LO https://curl.haxx.se/download/curl-${{ env.CURL_VERSION }}.tar.bz2 - curl -LO https://www.openssl.org/source/openssl-${{ env.OPENSSL_VERSION }}.tar.gz - curl -LO https://zlib.net/zlib-${{ env.ZLIB_VERSION }}.tar.gz - - name: Build zlib - if: steps.cache-libs.outputs.cache-hit != 'true' - run: | - LIBS="`pwd`/libs" - ZLIB="`echo zlib-*`" - tar xfz "$ZLIB" - pushd "${ZLIB%.tar.gz}" - CROSS_PREFIX=x86_64-w64-mingw32- ./configure --prefix="$LIBS" --static --64 - make install - - name: Build openssl - if: steps.cache-libs.outputs.cache-hit != 'true' - run: | - LIBS="`pwd`/libs" - OPENSSL="`echo openssl-*`" - tar xfz "$OPENSSL" - pushd "${OPENSSL%.tar.gz}" - ./Configure --prefix="$LIBS" --cross-compile-prefix=x86_64-w64-mingw32- \ - no-shared zlib threads \ - mingw64 - make CPPFLAGS="-I$LIBS/include" LDFLAGS="-L$LIBS/lib" build_libs - make -j 2 install_dev - - name: Build curl - if: steps.cache-libs.outputs.cache-hit != 'true' - run: | - LIBS="`pwd`/libs" - CURL="`echo curl-*`" - tar xfj "$CURL" - pushd "${CURL%.tar.bz2}" - sed -i 's=-lgdi32=-lcrypt32 \0=' configure - PKG_CONFIG_PATH="$LIBS/lib/pkgconfig" ./configure --host=x86_64-w64-mingw32 \ - --prefix="$LIBS" \ - --disable-shared \ - --disable-tftpf \ - --disable-ldap \ - --with-zlib \ - --without-ssl --with-winssl \ - --disable-tftp \ - --disable-ldap - make -j 2 install - - name: Build boost - if: steps.cache-libs.outputs.cache-hit != 'true' - run: | - LIBS="`pwd`/libs" - BOOST="`echo boost_*`" - tar xfj "$BOOST" - pushd "${BOOST%.tar.bz2}" - # See https://github.com/boostorg/context/issues/101 - sed -i '/os.\(name\|platform\)/d;/local tmp = /s=elf=pe=;/local tmp = /s=sysv=ms=' libs/context/build/Jamfile.v2 
- ./bootstrap.sh --prefix=$LIBS - echo "using gcc : mingw32 : x86_64-w64-mingw32-g++ ;" > user-config.jam - ./b2 --user-config=user-config.jam \ - --without-python \ - toolset=gcc-mingw32 \ - target-os=windows \ - variant=release \ - link=static \ - threading=multi \ - runtime-link=static \ - address-model=64 \ - abi=ms \ - install - build-mingw64: - name: Cross-build for windows using mingw - runs-on: ubuntu-latest - needs: prepare-mingw64-libs - steps: - - name: Install dependencies - run: | - sudo apt-get update - sudo apt-get install -y \ - ccache \ - g++-mingw-w64-x86-64 \ - mingw-w64-tools - sudo apt-get auto-remove -y - sudo apt-get clean -y - df -h - - uses: actions/checkout@v2 - with: - submodules: recursive - - name: Load external libraries - uses: actions/cache@v1 - with: - path: libs - key: mingw64-libs-${{ env.BOOST_VERSION }}_${{ env.CURL_VERSION }}_${{ env.OPENSSL_VERSION }}_${{ env.ZLIB_VERSION }} - - name: Configure - run: | - LIBS="`pwd`/libs" - mkdir -p _build - pushd _build - cmake -D CMAKE_BUILD_TYPE=Release \ - -D CMAKE_C_COMPILER=/usr/bin/x86_64-w64-mingw32-gcc-posix \ - -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ - -D CMAKE_CXX_COMPILER=/usr/bin/x86_64-w64-mingw32-g++-posix \ - -D CMAKE_CXX_FLAGS=-Wa,-mbig-obj \ - -D CMAKE_SYSTEM_NAME=Windows \ - -D CURL_STATICLIB=ON \ - -D CMAKE_EXE_LINKER_FLAGS=--static \ - -D CMAKE_FIND_ROOT_PATH="/usr/lib/gcc/x86_64-w64-mingw32/7.3-win32/;$LIBS" \ - -D CMAKE_FIND_ROOT_PATH_MODE_PROGRAM=NEVER \ - -D CMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY \ - -D CMAKE_FIND_ROOT_PATH_MODE_INCLUDE=ONLY \ - -D GRAPHENE_DISABLE_UNITY_BUILD=ON \ - .. 
- - name: Load Cache - uses: actions/cache@v1 - with: - path: ccache - key: ccache-mingw64-${{ github.ref }}-${{ github.sha }} - restore-keys: | - ccache-mingw64-${{ github.ref }}- - ccache-mingw64- - - name: Build - run: | - export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" - mkdir -p "$CCACHE_DIR" - make -j 2 -C _build witness_node cli_wallet - build-osx: - name: Build and run tests on OSX - runs-on: macos-latest - steps: - - name: Install dependencies - run: | - brew install autoconf automake libtool - brew install ccache - brew search boost - brew install bitshares/boost160/boost@1.60 - - uses: actions/checkout@v2 - with: - submodules: recursive - - name: Configure - run: | - mkdir -p _build - pushd _build - cmake -D CMAKE_BUILD_TYPE=Release \ - -D CMAKE_C_COMPILER_LAUNCHER=ccache \ - -D CMAKE_CXX_COMPILER_LAUNCHER=ccache \ - -D BOOST_ROOT=/usr/local/opt/boost@1.60 \ - -D OPENSSL_ROOT_DIR=/usr/local/opt/openssl \ - .. - - name: Load Cache - uses: actions/cache@v1 - with: - path: ccache - key: ccache-osx-${{ github.ref }}-${{ github.sha }} - restore-keys: | - ccache-osx-${{ github.ref }}- - ccache-osx- - - name: Build - run: | - export CCACHE_DIR="$GITHUB_WORKSPACE/ccache" - mkdir -p "$CCACHE_DIR" - make -j 2 -C _build witness_node cli_wallet app_test cli_test chain_test - df -h - - name: Unit-Tests - run: | - _build/tests/app_test -l message - libraries/fc/tests/run-parallel-tests.sh _build/tests/chain_test -l message - libraries/fc/tests/run-parallel-tests.sh _build/tests/cli_test -l message - df -h - - name: Node-Test - run: | - df -h - pushd _build - ../programs/build_helpers/run-node-test diff --git a/.mailmap b/.mailmap index 7eba6719ed..7e715a013c 100644 --- a/.mailmap +++ b/.mailmap @@ -1,19 +1,32 @@ +Alexey Frolov Alfredo Garcia Alfredo Garcia +Anonymous +Anton Autushka +BrownBear <-> <-> Christopher Sanborn <23085117+christophersanborn@users.noreply.github.com> Chronos +Dan Notestein +Dan Notestein +Daniel Larimer Daniel Larimer Daniel Larimer Daniel 
Larimer Daniel Larimer +Daniel Larimer +Daniel Larimer Eric Frias Fabian Schuh Fabian Schuh John M. Jones Ken Code Matias Romeo +Michael Vandeberg +Michael Vandeberg Nathan Hourt Nathan Hourt +Nathan Hourt +Nikolai Mushegian OpenLedger OpenLedger <42674402+OpenLedgerApp@users.noreply.github.com> Peter Conrad @@ -23,20 +36,26 @@ Roelandp Ryan R. Fox Ryan R. Fox Sigve Kvalsvik -Valentine Zavgorodnev +Valentine Zavgorodnev +Valentine Zavgorodnev Valera Cogut Vikram Rajkumar Vikram Rajkumar +Vikram Rajkumar William William Xiaodong Li Xiaodong Li Xiaodong Li Xiaodong Li +Yuvaraj Gogoi abitmore albert <393259066@qq.com> <393259066@qq.com> albert <393259066@qq.com> +batmaninpink bitcube btcinshares <33876675+btcinshares@users.noreply.github.com> crazybits crazybits +theoreticalbts +theoreticalbts diff --git a/CMakeLists.txt b/CMakeLists.txt index 2c27d7efce..75028de33c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -77,6 +77,29 @@ endmacro() # Save the old value of CMAKE_REQUIRED_FLAGS set( TEMP_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS} ) +# Check submodules +if(NOT MANUAL_SUBMODULES) + find_package(Git) + if(GIT_FOUND) + function (check_submodule relative_path) + execute_process(COMMAND git rev-parse "HEAD" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${relative_path} OUTPUT_VARIABLE localHead) + execute_process(COMMAND git rev-parse "HEAD:${relative_path}" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE checkedHead) + string(COMPARE EQUAL "${localHead}" "${checkedHead}" upToDate) + if (upToDate) + message(STATUS "Submodule '${relative_path}' is up-to-date") + else() + message(FATAL_ERROR "Submodule '${relative_path}' is not up-to-date. Please update all submodules with\ngit submodule update --init --recursive --force\nor run cmake with -DMANUAL_SUBMODULES=1\n") + endif() + endfunction () + + message(STATUS "Checking submodules") + check_submodule(docs) + check_submodule(libraries/fc) + endif() +endif() +# Make sure to always re-run the test. 
+unset( MANUAL_SUBMODULES CACHE ) + # Fortify source if (CMAKE_COMPILER_IS_GNUCXX) if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") @@ -239,7 +262,7 @@ else( WIN32 ) # Apple AND Linux if( APPLE ) # Apple Specific Options Here message( STATUS "Configuring BitShares on OS X" ) - set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -stdlib=libc++ -Wall" ) + set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -stdlib=libc++ -Wall -fvisibility-inlines-hidden -fvisibility=hidden" ) else( APPLE ) if ( "${CMAKE_SYSTEM_NAME}" STREQUAL "OpenBSD" ) # OpenBSD Specific Options diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt index 8c9d03f1b4..6a0a654748 100644 --- a/CONTRIBUTORS.txt +++ b/CONTRIBUTORS.txt @@ -26,7 +26,7 @@ Sigve Kvalsvik albert <393259066@qq.com> Ryan R. Fox Valentine Zavgorodnev -Michael Vandeberg +Michael Vandeberg James Calfee Alexey Frolov syalon @@ -34,7 +34,7 @@ takaaki7 Nicolas Wack Taconator Qi Xing -Anton Autushka +Anton Autushka Chronos Wei Yang Zapata @@ -45,11 +45,11 @@ Tengfei Niu Tiago Peralta ioBanker <37595908+ioBanker@users.noreply.github.com> Karl Semich <0xloem@gmail.com> -Michael Vandeberg SahkanDesertHawk Scott Howard Tydus William +bangzi1001 <36911788+bangzi1001@users.noreply.github.com> d.yakovitsky ddylko iHashFury @@ -69,12 +69,14 @@ Ken Code Krzysztof Szumny Paul Brossier Roelandp +Semen Martynov Thomas Freedman Troglodactyl VoR0220 alt -bangzi1001 <36911788+bangzi1001@users.noreply.github.com> bitcube lafona liondani lososeg +sinetek +xiao93 <42384581+xiao93@users.noreply.github.com> diff --git a/Doxyfile b/Doxyfile index 66404bd858..3d8c8770a5 100644 --- a/Doxyfile +++ b/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "BitShares-Core" # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = "5.0.0" +PROJECT_NUMBER = "5.1.0" # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a @@ -758,7 +758,7 @@ WARN_LOGFILE = # spaces. # Note: If this tag is empty the current directory is searched. -INPUT = README.md doc/main.dox libraries +INPUT = README.md libraries # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses @@ -1558,7 +1558,7 @@ EXTRA_SEARCH_MAPPINGS = # If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output. # The default value is: YES. -GENERATE_LATEX = YES +GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of diff --git a/LICENSE.txt b/LICENSE.txt index 0ad9985bf9..973952b911 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,5 +1,5 @@ -Copyright (c) 2015-2016 Cryptonomex Inc. -Copyright (c) 2015-2020 contributors, see CONTRIBUTORS.txt +Copyright (c) 2015-2021 Cryptonomex Inc. and +contributors (see CONTRIBUTORS.txt) The MIT License diff --git a/README.md b/README.md index e51827bfb1..b5ce0029d7 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,14 @@ BitShares Core ============== -[Build Status](https://travis-ci.org/bitshares/bitshares-core/branches): +BitShares Core is the BitShares blockchain implementation and command-line interface. +The web browser based wallet is [BitShares UI](https://github.com/bitshares/bitshares-ui). + +Visit [BitShares.org](https://bitshares.org/) to learn about BitShares and join the community at [BitSharesTalk.org](https://bitsharestalk.org/). 
-`master` | `develop` | `hardfork` | `testnet` | `bitshares-fc` - --- | --- | --- | --- | --- - [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=master)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=develop)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=hardfork)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-core.svg?branch=testnet)](https://travis-ci.org/bitshares/bitshares-core) | [![](https://travis-ci.org/bitshares/bitshares-fc.svg?branch=master)](https://travis-ci.org/bitshares/bitshares-fc) - [![](https://github.com/bitshares/bitshares-core/workflows/Github%20Autobuild/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=branch%3Amaster) | [![](https://github.com/bitshares/bitshares-core/workflows/Github%20Autobuild/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=branch%3Adevelop) | [![](https://github.com/bitshares/bitshares-core/workflows/Github%20Autobuild/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=branch%3Ahardfork) | [![](https://github.com/bitshares/bitshares-core/workflows/Github%20Autobuild/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=branch%3Atestnet) | [![](https://github.com/bitshares/bitshares-fc/workflows/Github%20Autobuild/badge.svg?branch=master)](https://github.com/bitshares/bitshares-fc/actions?query=branch%3Amaster) +Information for developers can be found in the [BitShares Developer Portal](https://dev.bitshares.works/). Users interested in how BitShares works can go to the [BitShares Documentation](https://how.bitshares.works/) site. 
+ +Visit [Awesome BitShares](https://github.com/bitshares/awesome-bitshares) to find more resources and links. * [Getting Started](#getting-started) * [Support](#support) @@ -15,13 +17,13 @@ BitShares Core * [FAQ](#faq) * [License](#license) -BitShares Core is the BitShares blockchain implementation and command-line interface. -The web browser based wallet is [BitShares UI](https://github.com/bitshares/bitshares-ui). - -Visit [BitShares.org](https://bitshares.org/) to learn about BitShares and join the community at [BitSharesTalk.org](https://bitsharestalk.org/). - -Information for developers can be found in the [BitShares Developer Portal](https://dev.bitshares.works/). Users interested in how BitShares works can go to the [BitShares Documentation](https://how.bitshares.works/) site. - +|Branch|Build Status| +|---|---| +|`master`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=master)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Amaster)| +|`develop`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Adevelop) 
[![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Adevelop) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Adevelop) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=develop)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Adevelop)| +|`hardfork`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Ahardfork) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Ahardfork) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Ahardfork) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=hardfork)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Ahardfork)| +|`testnet`|[![](https://github.com/bitshares/bitshares-core/workflows/macOS/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"macOS"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Debug/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Atestnet) 
[![](https://github.com/bitshares/bitshares-core/workflows/Ubuntu%20Release/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Ubuntu+Release"+branch%3Atestnet) [![](https://github.com/bitshares/bitshares-core/workflows/Windows%20MinGW64/badge.svg?branch=testnet)](https://github.com/bitshares/bitshares-core/actions?query=workflow%3A"Windows+MinGW64"+branch%3Atestnet)| +|`master` of `bitshares-fc`|[![](https://github.com/bitshares/bitshares-fc/workflows/macOS/badge.svg?branch=master)](https://github.com/bitshares/bitshares-fc/actions?query=workflow%3A"macOS"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-fc/workflows/Ubuntu%20Debug/badge.svg?branch=master)](https://github.com/bitshares/bitshares-fc/actions?query=workflow%3A"Ubuntu+Debug"+branch%3Amaster) [![](https://github.com/bitshares/bitshares-fc/workflows/Ubuntu%20Release/badge.svg?branch=master)](https://github.com/bitshares/bitshares-fc/actions?query=workflow%3A"Ubuntu+Release"+branch%3Amaster)| Getting Started --------------- @@ -30,7 +32,7 @@ Build instructions and additional documentation are available in the ### Build -We recommend building on Ubuntu 16.04 LTS (64-bit) +We recommend building on Ubuntu 16.04 LTS (64-bit) **Build Dependencies:** @@ -66,7 +68,7 @@ We recommend building on Ubuntu 16.04 LTS (64-bit) * Windows (various versions, Visual Studio and MinGW) * OpenBSD (various versions) -* BitShares requires [Boost](http://www.boost.org/) libraries to build, supports version `1.58` to `1.69`. +* BitShares requires [Boost](https://www.boost.org/) libraries to build, supports version `1.58` to `1.69`. Newer versions may work, but have not been tested. If your system came pre-installed with a version of Boost libraries that you do not wish to use, you may manually build your preferred version and use it with BitShares by specifying it on the CMake command line. 
@@ -171,7 +173,7 @@ Use `gethelp ` to see more info about individual commands. E.G. >>> gethelp get_order_book -The definition of all commands is available in the +The definition of all commands is available in the [wallet.hpp](https://github.com/bitshares/bitshares-core/blob/master/libraries/wallet/include/graphene/wallet/wallet.hpp) souce code file. Corresponding documentation can be found in the [Doxygen documentation](https://doxygen.bitshares.org/classgraphene_1_1wallet_1_1wallet__api.html). @@ -327,7 +329,7 @@ FAQ - Is there a way to access methods which require login over HTTP? Yes. Most of the methods can be accessed by specifying the API name instead of an API ID. If an API is protected by a username and a password, it can be accessed by using *basic access authentication*. Please check the ["Accessing restrictable node API's"](#accessing-restrictable-node-apis) section for more info. - + However, HTTP is not really designed for "server push" notifications, and we would have to figure out a way to queue notifications for a polling client. Websockets solves this problem. If you need to access the stateful methods, use Websockets. - What is the meaning of `a.b.c` numbers? @@ -370,7 +372,7 @@ FAQ less fine if your `witness_node` allows the general public to control which p2p nodes it's connecting to. Therefore the API to add p2p connections needs to be set up with proper access controls. - + License ------- BitShares Core is under the MIT license. 
See [LICENSE](https://github.com/bitshares/bitshares-core/blob/master/LICENSE.txt) diff --git a/docs b/docs index 7e42e1a8c5..0a09ebbfd7 160000 --- a/docs +++ b/docs @@ -1 +1 @@ -Subproject commit 7e42e1a8c551eb23542077c9f97a26b530eec32b +Subproject commit 0a09ebbfd71c6e80bb8cf87dbd1794f425a1d300 diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 90576296c6..62cb9317e8 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -546,6 +546,127 @@ namespace graphene { namespace app { return result; } FC_CAPTURE_AND_RETHROW( (asset_a)(asset_b)(bucket_seconds)(start)(end) ) } + vector history_api::get_liquidity_pool_history( + liquidity_pool_id_type pool_id, + optional start, + optional stop, + optional olimit, + optional operation_type )const + { try { + FC_ASSERT( _app.get_options().has_market_history_plugin, "Market history plugin is not enabled." ); + + uint32_t limit = olimit.valid() ? *olimit : 101; + + const auto configured_limit = _app.get_options().api_limit_get_liquidity_pool_history; + FC_ASSERT( limit <= configured_limit, + "limit can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + FC_ASSERT( _app.chain_database(), "Internal error: the chain database is not availalbe" ); + + const auto& db = *_app.chain_database(); + + vector result; + + if( limit == 0 || ( start.valid() && stop.valid() && *start <= *stop ) ) // empty result + return result; + + const auto& hist_idx = db.get_index_type(); + + if( operation_type.valid() ) // one operation type + { + const auto& idx = hist_idx.indices().get(); + auto itr = start.valid() ? idx.lower_bound( boost::make_tuple( pool_id, *operation_type, *start ) ) + : idx.lower_bound( boost::make_tuple( pool_id, *operation_type ) ); + auto itr_stop = stop.valid() ? 
idx.upper_bound( boost::make_tuple( pool_id, *operation_type, *stop ) ) + : idx.upper_bound( boost::make_tuple( pool_id, *operation_type ) ); + while( itr != itr_stop && result.size() < limit ) + { + result.push_back( *itr ); + ++itr; + } + } + else // all operation types + { + const auto& idx = hist_idx.indices().get(); + auto itr = start.valid() ? idx.lower_bound( boost::make_tuple( pool_id, *start ) ) + : idx.lower_bound( pool_id ); + auto itr_stop = stop.valid() ? idx.upper_bound( boost::make_tuple( pool_id, *stop ) ) + : idx.upper_bound( pool_id ); + while( itr != itr_stop && result.size() < limit ) + { + result.push_back( *itr ); + ++itr; + } + } + + return result; + + } FC_CAPTURE_AND_RETHROW( (pool_id)(start)(stop)(olimit)(operation_type) ) } + + vector history_api::get_liquidity_pool_history_by_sequence( + liquidity_pool_id_type pool_id, + optional start, + optional stop, + optional olimit, + optional operation_type )const + { try { + FC_ASSERT( _app.get_options().has_market_history_plugin, "Market history plugin is not enabled." ); + + uint32_t limit = olimit.valid() ? *olimit : 101; + + const auto configured_limit = _app.get_options().api_limit_get_liquidity_pool_history; + FC_ASSERT( limit <= configured_limit, + "limit can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + FC_ASSERT( _app.chain_database(), "Internal error: the chain database is not availalbe" ); + + const auto& db = *_app.chain_database(); + + vector result; + + if( limit == 0 ) // empty result + return result; + + const auto& hist_idx = db.get_index_type(); + + if( operation_type.valid() ) // one operation type + { + const auto& idx = hist_idx.indices().get(); + const auto& idx_t = hist_idx.indices().get(); + auto itr = start.valid() ? idx.lower_bound( boost::make_tuple( pool_id, *operation_type, *start ) ) + : idx.lower_bound( boost::make_tuple( pool_id, *operation_type ) ); + auto itr_temp = stop.valid() ? 
idx_t.upper_bound( boost::make_tuple( pool_id, *operation_type, *stop ) ) + : idx_t.upper_bound( boost::make_tuple( pool_id, *operation_type ) ); + auto itr_stop = ( itr_temp == idx_t.end() ? idx.end() : idx.iterator_to( *itr_temp ) ); + while( itr != itr_stop && result.size() < limit ) + { + result.push_back( *itr ); + ++itr; + } + } + else // all operation types + { + const auto& idx = hist_idx.indices().get(); + const auto& idx_t = hist_idx.indices().get(); + auto itr = start.valid() ? idx.lower_bound( boost::make_tuple( pool_id, *start ) ) + : idx.lower_bound( pool_id ); + auto itr_temp = stop.valid() ? idx_t.upper_bound( boost::make_tuple( pool_id, *stop ) ) + : idx_t.upper_bound( pool_id ); + auto itr_stop = ( itr_temp == idx_t.end() ? idx.end() : idx.iterator_to( *itr_temp ) ); + while( itr != itr_stop && result.size() < limit ) + { + result.push_back( *itr ); + ++itr; + } + } + + return result; + + } FC_CAPTURE_AND_RETHROW( (pool_id)(start)(stop)(olimit)(operation_type) ) } + + crypto_api::crypto_api(){}; commitment_type crypto_api::blind( const blind_factor_type& blind, uint64_t value ) diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp index 48984faa74..a1403e2c7f 100644 --- a/libraries/app/application.cpp +++ b/libraries/app/application.cpp @@ -236,7 +236,58 @@ void application_impl::set_dbg_init_key( graphene::chain::genesis_state_type& ge genesis.initial_witness_candidates[i].block_signing_key = init_pubkey; } +void application_impl::initialize() +{ + if( _options->count("force-validate") > 0 ) + { + ilog( "All transaction signatures will be validated" ); + _force_validate = true; + } + + if ( _options->count("enable-subscribe-to-all") > 0 ) + _app_options.enable_subscribe_to_all = _options->at( "enable-subscribe-to-all" ).as(); + + set_api_limit(); + if( is_plugin_enabled( "market_history" ) ) + _app_options.has_market_history_plugin = true; + else + ilog("Market history plugin is not enabled"); + + if( is_plugin_enabled( 
"api_helper_indexes" ) ) + _app_options.has_api_helper_indexes_plugin = true; + else + ilog("API helper indexes plugin is not enabled"); + + if( _options->count("api-access") > 0 ) + { + + fc::path api_access_file = _options->at("api-access").as(); + + FC_ASSERT( fc::exists(api_access_file), + "Failed to load file from ${path}", ("path", api_access_file) ); + + _apiaccess = fc::json::from_file( api_access_file ).as( 20 ); + ilog( "Using api access file from ${path}", + ("path", api_access_file) ); + } + else + { + // TODO: Remove this generous default access policy + // when the UI logs in properly + _apiaccess = api_access(); + api_access_info wild_access; + wild_access.password_hash_b64 = "*"; + wild_access.password_salt_b64 = "*"; + wild_access.allowed_apis.push_back( "database_api" ); + wild_access.allowed_apis.push_back( "network_broadcast_api" ); + wild_access.allowed_apis.push_back( "history_api" ); + wild_access.allowed_apis.push_back( "orders_api" ); + wild_access.allowed_apis.push_back( "custom_operations_api" ); + _apiaccess.permission_map["*"] = wild_access; + } + +} void application_impl::set_api_limit() { if (_options->count("api-limit-get-account-history-operations")) { @@ -326,9 +377,16 @@ void application_impl::set_api_limit() { if(_options->count("api-limit-get-withdraw-permissions-by-recipient")) { _app_options.api_limit_get_withdraw_permissions_by_recipient = _options->at("api-limit-get-withdraw-permissions-by-recipient").as(); } - if(_options->count("api-limit-get-liquidity-pools")) { + if(_options->count("api-limit-get-tickets") > 0) { + _app_options.api_limit_get_tickets = _options->at("api-limit-get-tickets").as(); + } + if(_options->count("api-limit-get-liquidity-pools") > 0) { _app_options.api_limit_get_liquidity_pools = _options->at("api-limit-get-liquidity-pools").as(); } + if(_options->count("api-limit-get-liquidity-pool-history") > 0) { + _app_options.api_limit_get_liquidity_pool_history = + 
_options->at("api-limit-get-liquidity-pool-history").as(); + } } void application_impl::startup() @@ -353,7 +411,7 @@ void application_impl::startup() modified_genesis = true; ilog( - "Used genesis timestamp: ${timestamp} (PLEASE RECORD THIS)", + "Used genesis timestamp: ${timestamp} (PLEASE RECORD THIS)", ("timestamp", genesis.initial_timestamp.to_iso_string()) ); } @@ -443,50 +501,6 @@ void application_impl::startup() throw; } - if( _options->count("force-validate") ) - { - ilog( "All transaction signatures will be validated" ); - _force_validate = true; - } - - if ( _options->count("enable-subscribe-to-all") ) - _app_options.enable_subscribe_to_all = _options->at( "enable-subscribe-to-all" ).as(); - - set_api_limit(); - - if( _active_plugins.find( "market_history" ) != _active_plugins.end() ) - _app_options.has_market_history_plugin = true; - - if( _active_plugins.find( "api_helper_indexes" ) != _active_plugins.end() ) - _app_options.has_api_helper_indexes_plugin = true; - - if( _options->count("api-access") ) { - - fc::path api_access_file = _options->at("api-access").as(); - - FC_ASSERT( fc::exists(api_access_file), - "Failed to load file from ${path}", ("path", api_access_file) ); - - _apiaccess = fc::json::from_file( api_access_file ).as( 20 ); - ilog( "Using api access file from ${path}", - ("path", api_access_file) ); - } - else - { - // TODO: Remove this generous default access policy - // when the UI logs in properly - _apiaccess = api_access(); - api_access_info wild_access; - wild_access.password_hash_b64 = "*"; - wild_access.password_salt_b64 = "*"; - wild_access.allowed_apis.push_back( "database_api" ); - wild_access.allowed_apis.push_back( "network_broadcast_api" ); - wild_access.allowed_apis.push_back( "history_api" ); - wild_access.allowed_apis.push_back( "orders_api" ); - wild_access.allowed_apis.push_back( "custom_operations_api" ); - _apiaccess.permission_map["*"] = wild_access; - } - reset_p2p_node(_data_dir); reset_websocket_server(); 
reset_websocket_tls_server(); @@ -510,6 +524,11 @@ void application_impl::set_api_access_info(const string& username, api_access_in _apiaccess.permission_map.insert(std::make_pair(username, std::move(permissions))); } +bool application_impl::is_plugin_enabled(const string& name) const +{ + return !(_active_plugins.find(name) == _active_plugins.end()); +} + /** * If delegate has the item, the network has no need to fetch it. */ @@ -1057,8 +1076,12 @@ void application::set_program_options(boost::program_options::options_descriptio "For database_api_impl::get_withdraw_permissions_by_giver to set max limit value") ("api-limit-get-withdraw-permissions-by-recipient",boost::program_options::value()->default_value(101), "For database_api_impl::get_withdraw_permissions_by_recipient to set max limit value") - ("api-limit-get-liquidity-pools",boost::program_options::value()->default_value(101), - "For database_api_impl::get_liquidity_pools_* to set max limit value") + ("api-limit-get-tickets", boost::program_options::value()->default_value(101), + "Set maximum limit value for database APIs which query for tickets") + ("api-limit-get-liquidity-pools", boost::program_options::value()->default_value(101), + "Set maximum limit value for database APIs which query for liquidity pools") + ("api-limit-get-liquidity-pool-history", boost::program_options::value()->default_value(101), + "Set maximum limit value for APIs which query for history of liquidity pools") ; command_line_options.add(configuration_file_options); command_line_options.add_options() @@ -1078,11 +1101,13 @@ void application::initialize(const fc::path& data_dir, const boost::program_opti my->_data_dir = data_dir; my->_options = &options; - if ( options.count("io-threads") ) + if ( options.count("io-threads") > 0 ) { const uint16_t num_threads = options["io-threads"].as(); fc::asio::default_io_service_scope::set_num_threads(num_threads); } + + my->initialize(); } void application::startup() @@ -1117,7 +1142,7 @@ 
std::shared_ptr application::get_plugin(const string& name) con bool application::is_plugin_enabled(const string& name) const { - return !(my->_active_plugins.find(name) == my->_active_plugins.end()); + return my->is_plugin_enabled(name); } net::node_ptr application::p2p_node() @@ -1165,8 +1190,11 @@ void graphene::app::application::add_available_plugin(std::shared_ptr_active_plugins ) + { + ilog( "Stopping plugin ${name}", ( "name", entry.second->plugin_name() ) ); entry.second->plugin_shutdown(); - return; + ilog( "Stopped plugin ${name}", ( "name", entry.second->plugin_name() ) ); + } } void application::shutdown() { @@ -1182,18 +1210,21 @@ void application::shutdown() void application::initialize_plugins( const boost::program_options::variables_map& options ) { for( auto& entry : my->_active_plugins ) + { + ilog( "Initializing plugin ${name}", ( "name", entry.second->plugin_name() ) ); entry.second->plugin_initialize( options ); - return; + ilog( "Initialized plugin ${name}", ( "name", entry.second->plugin_name() ) ); + } } void application::startup_plugins() { for( auto& entry : my->_active_plugins ) { + ilog( "Starting plugin ${name}", ( "name", entry.second->plugin_name() ) ); entry.second->plugin_startup(); - ilog( "Plugin ${name} started", ( "name", entry.second->plugin_name() ) ); + ilog( "Started plugin ${name}", ( "name", entry.second->plugin_name() ) ); } - return; } const application_options& application::get_options() diff --git a/libraries/app/application_impl.hxx b/libraries/app/application_impl.hxx index accc8fe4f1..9218770c7c 100644 --- a/libraries/app/application_impl.hxx +++ b/libraries/app/application_impl.hxx @@ -41,6 +41,7 @@ class application_impl : public net::node_delegate void set_dbg_init_key( graphene::chain::genesis_state_type& genesis, const std::string& init_key ); void set_api_limit(); + void initialize(); void startup(); fc::optional< api_access_info > get_api_access_info(const string& username)const; @@ -55,7 +56,9 @@ class 
application_impl : public net::node_delegate /** * @brief allows the application to validate an item prior to broadcasting to peers. * + * @param blk_msg the message which contains the block * @param sync_mode true if the message was fetched through the sync process, false during normal operation + * @param contained_transaction_message_ids container for the transactions to write back into * @returns true if this message caused the blockchain to switch forks, false if it did not * * @throws exception if error validating the item, otherwise the item is safe to broadcast on. @@ -180,6 +183,9 @@ class application_impl : public net::node_delegate uint8_t get_current_block_interval_in_seconds() const override; + /// Returns whether a plugin is enabled + bool is_plugin_enabled(const string& name) const; + application* _self; fc::path _data_dir; diff --git a/libraries/app/config_util.cpp b/libraries/app/config_util.cpp index 5214d6ee2f..da3e82db0f 100644 --- a/libraries/app/config_util.cpp +++ b/libraries/app/config_util.cpp @@ -214,7 +214,7 @@ static void load_config_file(const fc::path& config_ini_path, const bpo::options { graphene::app::detail::deduplicator dedup; bpo::options_description unique_options("BitShares Witness Node"); - for( const boost::shared_ptr opt : cfg_options.options() ) + for( const boost::shared_ptr& opt : cfg_options.options() ) { const boost::shared_ptr od = dedup.next(opt); if( !od ) continue; @@ -263,7 +263,7 @@ static void create_new_config_file(const fc::path& config_ini_path, const fc::pa graphene::app::detail::deduplicator dedup(modify_option_defaults); std::ofstream out_cfg(config_ini_path.preferred_string()); std::string plugin_header_surrounding( 78, '=' ); - for( const boost::shared_ptr opt : cfg_options.options() ) + for( const boost::shared_ptr& opt : cfg_options.options() ) { const boost::shared_ptr od = dedup.next(opt); if( !od ) continue; diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index 
3590bcb0c2..60bf4980b9 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -713,15 +713,7 @@ vector> database_api::lookup_account_names(const vector vector> database_api_impl::lookup_account_names(const vector& account_names)const { - const auto& accounts_by_name = _db.get_index_type().indices().get(); - vector > result; - result.reserve(account_names.size()); - std::transform(account_names.begin(), account_names.end(), std::back_inserter(result), - [&accounts_by_name](const string& name) -> optional { - auto itr = accounts_by_name.find(name); - return itr == accounts_by_name.end()? optional() : *itr; - }); - return result; + return get_accounts( account_names, false ); } map database_api::lookup_accounts( const string& lower_bound_name, @@ -994,20 +986,7 @@ vector> database_api::lookup_asset_symbols( vector> database_api_impl::lookup_asset_symbols( const vector& symbols_or_ids )const { - const auto& assets_by_symbol = _db.get_index_type().indices().get(); - vector > result; - result.reserve(symbols_or_ids.size()); - std::transform(symbols_or_ids.begin(), symbols_or_ids.end(), std::back_inserter(result), - [this, &assets_by_symbol](const string& symbol_or_id) -> optional { - if( !symbol_or_id.empty() && std::isdigit(symbol_or_id[0]) ) - { - auto ptr = _db.find(variant(symbol_or_id, 1).as(1)); - return ptr == nullptr? optional() : extend_asset( *ptr ); - } - auto itr = assets_by_symbol.find(symbol_or_id); - return itr == assets_by_symbol.end()? 
optional() : extend_asset( *itr ); - }); - return result; + return get_assets( symbols_or_ids, false ); } ////////////////////////////////////////////////////////////////////// @@ -1742,68 +1721,122 @@ vector database_api_impl::get_trade_history_by_sequence( // // ////////////////////////////////////////////////////////////////////// -vector database_api::get_liquidity_pools_by_asset_a( +vector database_api::list_liquidity_pools( + optional limit, + optional start_id, + optional with_statistics )const +{ + return my->list_liquidity_pools( + limit, + start_id, + with_statistics ); +} + +vector database_api_impl::list_liquidity_pools( + optional olimit, + optional ostart_id, + optional with_statistics )const +{ + uint32_t limit = olimit.valid() ? *olimit : 101; + + FC_ASSERT( _app_options, "Internal error" ); + const auto configured_limit = _app_options->api_limit_get_liquidity_pools; + FC_ASSERT( limit <= configured_limit, + "limit can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + bool with_stats = ( with_statistics.valid() && *with_statistics ); + + vector results; + + liquidity_pool_id_type start_id = ostart_id.valid() ? 
*ostart_id : liquidity_pool_id_type(); + + const auto& idx = _db.get_index_type().indices().get(); + auto lower_itr = idx.lower_bound( start_id ); + auto upper_itr = idx.end(); + + results.reserve( limit ); + for ( ; lower_itr != upper_itr && results.size() < limit; ++lower_itr ) + { + results.emplace_back( extend_liquidity_pool( *lower_itr, with_stats ) ); + } + + return results; +} + +vector database_api::get_liquidity_pools_by_asset_a( std::string asset_symbol_or_id, optional limit, - optional start_id )const + optional start_id, + optional with_statistics )const { return my->get_liquidity_pools_by_asset_a( asset_symbol_or_id, limit, - start_id ); + start_id, + with_statistics ); } -vector database_api_impl::get_liquidity_pools_by_asset_a( +vector database_api_impl::get_liquidity_pools_by_asset_a( std::string asset_symbol_or_id, optional limit, - optional start_id )const + optional start_id, + optional with_statistics )const { return get_liquidity_pools_by_asset_x( asset_symbol_or_id, limit, - start_id ); + start_id, + with_statistics ); } -vector database_api::get_liquidity_pools_by_asset_b( +vector database_api::get_liquidity_pools_by_asset_b( std::string asset_symbol_or_id, optional limit, - optional start_id )const + optional start_id, + optional with_statistics )const { return my->get_liquidity_pools_by_asset_b( asset_symbol_or_id, limit, - start_id ); + start_id, + with_statistics ); } -vector database_api_impl::get_liquidity_pools_by_asset_b( +vector database_api_impl::get_liquidity_pools_by_asset_b( std::string asset_symbol_or_id, optional limit, - optional start_id )const + optional start_id, + optional with_statistics )const { return get_liquidity_pools_by_asset_x( asset_symbol_or_id, limit, - start_id ); + start_id, + with_statistics ); } -vector database_api::get_liquidity_pools_by_both_assets( +vector database_api::get_liquidity_pools_by_both_assets( std::string asset_symbol_or_id_a, std::string asset_symbol_or_id_b, optional limit, - optional 
start_id )const + optional start_id, + optional with_statistics )const { return my->get_liquidity_pools_by_both_assets( asset_symbol_or_id_a, asset_symbol_or_id_b, limit, - start_id ); + start_id, + with_statistics ); } -vector database_api_impl::get_liquidity_pools_by_both_assets( +vector database_api_impl::get_liquidity_pools_by_both_assets( std::string asset_symbol_or_id_a, std::string asset_symbol_or_id_b, optional olimit, - optional ostart_id )const + optional ostart_id, + optional with_statistics )const { uint32_t limit = olimit.valid() ? *olimit : 101; @@ -1813,7 +1846,9 @@ vector database_api_impl::get_liquidity_pools_by_both_ass "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); - vector results; + bool with_stats = ( with_statistics.valid() && *with_statistics ); + + vector results; asset_id_type asset_id_a = get_asset_from_string(asset_symbol_or_id_a)->id; asset_id_type asset_id_b = get_asset_from_string(asset_symbol_or_id_b)->id; @@ -1827,27 +1862,75 @@ vector database_api_impl::get_liquidity_pools_by_both_ass auto upper_itr = idx.upper_bound( std::make_tuple( asset_id_a, asset_id_b ) ); results.reserve( limit ); - uint32_t count = 0; - for ( ; lower_itr != upper_itr && count < limit; ++lower_itr, ++count) + for ( ; lower_itr != upper_itr && results.size() < limit; ++lower_itr ) { - results.emplace_back( *lower_itr ); + results.emplace_back( extend_liquidity_pool( *lower_itr, with_stats ) ); } return results; } -vector> database_api::get_liquidity_pools_by_share_asset( +vector> database_api::get_liquidity_pools( + const vector& ids, + optional subscribe, + optional with_statistics )const +{ + return my->get_liquidity_pools( + ids, + subscribe, + with_statistics ); +} + +vector> database_api_impl::get_liquidity_pools( + const vector& ids, + optional subscribe, + optional with_statistics )const +{ + FC_ASSERT( _app_options, "Internal error" ); + const auto configured_limit = 
_app_options->api_limit_get_liquidity_pools; + FC_ASSERT( ids.size() <= configured_limit, + "size of the querying list can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + bool with_stats = ( with_statistics.valid() && *with_statistics ); + + bool to_subscribe = get_whether_to_subscribe( subscribe ); + vector> result; result.reserve(ids.size()); + std::transform(ids.begin(), ids.end(), std::back_inserter(result), + [this,to_subscribe,with_stats](liquidity_pool_id_type id) + -> optional { + + if(auto o = _db.find(id)) + { + auto ext_obj = extend_liquidity_pool( *o, with_stats ); + if( to_subscribe ) + { + subscribe_to_item( id ); + if( ext_obj.statistics.valid() ) + subscribe_to_item( ext_obj.statistics->id ); + } + return ext_obj; + } + return {}; + }); + return result; +} + +vector> database_api::get_liquidity_pools_by_share_asset( const vector& asset_symbols_or_ids, - optional subscribe )const + optional subscribe, + optional with_statistics )const { return my->get_liquidity_pools_by_share_asset( asset_symbols_or_ids, - subscribe ); + subscribe, + with_statistics ); } -vector> database_api_impl::get_liquidity_pools_by_share_asset( +vector> database_api_impl::get_liquidity_pools_by_share_asset( const vector& asset_symbols_or_ids, - optional subscribe )const + optional subscribe, + optional with_statistics )const { FC_ASSERT( _app_options, "Internal error" ); const auto configured_limit = _app_options->api_limit_get_liquidity_pools; @@ -1855,22 +1938,81 @@ vector> database_api_impl::get_liquidity_pools_b "size of the querying list can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); + bool with_stats = ( with_statistics.valid() && *with_statistics ); + bool to_subscribe = get_whether_to_subscribe( subscribe ); - vector> result; result.reserve(asset_symbols_or_ids.size()); + vector> result; result.reserve(asset_symbols_or_ids.size()); std::transform(asset_symbols_or_ids.begin(), 
asset_symbols_or_ids.end(), std::back_inserter(result), - [this,to_subscribe](std::string id_or_name) -> optional { + [this,to_subscribe,with_stats](std::string id_or_name) -> optional { const asset_object* asset_obj = get_asset_from_string( id_or_name, false ); if( asset_obj == nullptr || !asset_obj->is_liquidity_pool_share_asset() ) return {}; const liquidity_pool_object& lp_obj = (*asset_obj->for_liquidity_pool)(_db); + auto ext_obj = extend_liquidity_pool( lp_obj, with_stats ); if( to_subscribe ) + { subscribe_to_item( lp_obj.id ); - return lp_obj; + if( ext_obj.statistics.valid() ) + subscribe_to_item( ext_obj.statistics->id ); + } + return ext_obj; }); return result; } +vector database_api::get_liquidity_pools_by_owner( + std::string account_name_or_id, + optional limit, + optional start_id, + optional with_statistics )const +{ + return my->get_liquidity_pools_by_owner( + account_name_or_id, + limit, + start_id, + with_statistics ); +} + +vector database_api_impl::get_liquidity_pools_by_owner( + std::string account_name_or_id, + optional olimit, + optional ostart_id, + optional with_statistics )const +{ + uint32_t limit = olimit.valid() ? *olimit : 101; + + FC_ASSERT( _app_options, "Internal error" ); + const auto configured_limit = _app_options->api_limit_get_liquidity_pools; + FC_ASSERT( limit <= configured_limit, + "limit can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + bool with_stats = ( with_statistics.valid() && *with_statistics ); + + vector results; + + account_id_type owner = get_account_from_string(account_name_or_id)->id; + + asset_id_type start_id = ostart_id.valid() ? 
*ostart_id : asset_id_type(); + + // get assets owned by account + const auto& idx = _db.get_index_type().indices().get(); + auto lower_itr = idx.lower_bound( std::make_tuple( owner, start_id ) ); + auto upper_itr = idx.upper_bound( owner ); + + results.reserve( limit ); + for ( ; lower_itr != upper_itr && results.size() < limit; ++lower_itr ) + { + const asset_object& asset_obj = *lower_itr; + if( !asset_obj.is_liquidity_pool_share_asset() ) // TODO improve performance + continue; + results.emplace_back( extend_liquidity_pool( (*asset_obj.for_liquidity_pool)(_db), with_stats ) ); + } + + return results; +} + ////////////////////////////////////////////////////////////////////// // // // Witnesses // @@ -2701,6 +2843,95 @@ vector database_api_impl::list_htlcs(const htlc_id_type start, uint return result; } +////////////////////////////////////////////////////////////////////// +// // +// Tickets // +// // +////////////////////////////////////////////////////////////////////// + +vector database_api::list_tickets( + optional limit, + optional start_id )const +{ + return my->list_tickets( + limit, + start_id ); +} + +vector database_api_impl::list_tickets( + optional olimit, + optional ostart_id )const +{ + uint32_t limit = olimit.valid() ? *olimit : 101; + + FC_ASSERT( _app_options, "Internal error" ); + const auto configured_limit = _app_options->api_limit_get_tickets; + FC_ASSERT( limit <= configured_limit, + "limit can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + vector results; + + ticket_id_type start_id = ostart_id.valid() ? 
*ostart_id : ticket_id_type(); + + const auto& idx = _db.get_index_type().indices().get(); + auto lower_itr = idx.lower_bound( start_id ); + auto upper_itr = idx.end(); + + results.reserve( limit ); + uint32_t count = 0; + for ( ; lower_itr != upper_itr && count < limit; ++lower_itr, ++count) + { + results.emplace_back( *lower_itr ); + } + + return results; +} + +vector database_api::get_tickets_by_account( + std::string account_name_or_id, + optional limit, + optional start_id )const +{ + return my->get_tickets_by_account( + account_name_or_id, + limit, + start_id ); +} + +vector database_api_impl::get_tickets_by_account( + std::string account_name_or_id, + optional olimit, + optional ostart_id )const +{ + uint32_t limit = olimit.valid() ? *olimit : 101; + + FC_ASSERT( _app_options, "Internal error" ); + const auto configured_limit = _app_options->api_limit_get_tickets; + FC_ASSERT( limit <= configured_limit, + "limit can not be greater than ${configured_limit}", + ("configured_limit", configured_limit) ); + + vector results; + + account_id_type account = get_account_from_string(account_name_or_id)->id; + + ticket_id_type start_id = ostart_id.valid() ? 
*ostart_id : ticket_id_type(); + + const auto& idx = _db.get_index_type().indices().get(); + auto lower_itr = idx.lower_bound( std::make_tuple( account, start_id ) ); + auto upper_itr = idx.upper_bound( account ); + + results.reserve( limit ); + uint32_t count = 0; + for ( ; lower_itr != upper_itr && count < limit; ++lower_itr, ++count) + { + results.emplace_back( *lower_itr ); + } + + return results; +} + ////////////////////////////////////////////////////////////////////// // // // Private methods // @@ -2975,7 +3206,7 @@ void database_api_impl::on_applied_block() */ default: break; } - if( market.valid() && _market_subscriptions.count(*market) ) + if( market.valid() && _market_subscriptions.count(*market) > 0 ) // FIXME this may cause fill_order_operation be pushed before order creation subscribed_markets_ops[*market].emplace_back(std::make_pair(op.op, op.result)); } diff --git a/libraries/app/database_api_impl.hxx b/libraries/app/database_api_impl.hxx index c6053eefa8..d6d8dd12ed 100644 --- a/libraries/app/database_api_impl.hxx +++ b/libraries/app/database_api_impl.hxx @@ -139,22 +139,39 @@ class database_api_impl : public std::enable_shared_from_this unsigned limit = 100 )const; // Liquidity pools - vector get_liquidity_pools_by_asset_a( + vector list_liquidity_pools( + optional limit = 101, + optional start_id = optional(), + optional with_statistics = false )const; + vector get_liquidity_pools_by_asset_a( std::string asset_symbol_or_id, optional limit = 101, - optional start_id = optional() )const; - vector get_liquidity_pools_by_asset_b( + optional start_id = optional(), + optional with_statistics = false )const; + vector get_liquidity_pools_by_asset_b( std::string asset_symbol_or_id, optional limit = 101, - optional start_id = optional() )const; - vector get_liquidity_pools_by_both_assets( + optional start_id = optional(), + optional with_statistics = false )const; + vector get_liquidity_pools_by_both_assets( std::string asset_symbol_or_id_a, std::string 
asset_symbol_or_id_b, optional limit = 101, - optional start_id = optional() )const; - vector> get_liquidity_pools_by_share_asset( + optional start_id = optional(), + optional with_statistics = false )const; + vector> get_liquidity_pools( + const vector& ids, + optional subscribe = optional(), + optional with_statistics = false )const; + vector> get_liquidity_pools_by_share_asset( const vector& asset_symbols_or_ids, - optional subscribe = optional() )const; + optional subscribe = optional(), + optional with_statistics = false )const; + vector get_liquidity_pools_by_owner( + std::string account_name_or_id, + optional limit = 101, + optional start_id = optional(), + optional with_statistics = false )const; // Witnesses vector> get_witnesses(const vector& witness_ids)const; @@ -216,6 +233,15 @@ class database_api_impl : public std::enable_shared_from_this htlc_id_type start, uint32_t limit) const; vector list_htlcs(const htlc_id_type lower_bound_id, uint32_t limit) const; + // Tickets + vector list_tickets( + optional limit = 101, + optional start_id = optional() )const; + vector get_tickets_by_account( + std::string account_name_or_id, + optional limit = 101, + optional start_id = optional() )const; + //private: //////////////////////////////////////////////// @@ -261,12 +287,28 @@ class database_api_impl : public std::enable_shared_from_this // Liquidity pools //////////////////////////////////////////////// + template + extended_liquidity_pool_object extend_liquidity_pool( LP&& a, bool with_stats )const + { + liquidity_pool_id_type id = a.id; + extended_liquidity_pool_object result = extended_liquidity_pool_object( std::forward( a ) ); + if( with_stats && _app_options && _app_options->has_market_history_plugin ) + { + liquidity_pool_ticker_id_type ticker_id( id.instance ); + const liquidity_pool_ticker_object* ticker = _db.find( ticker_id ); + if( ticker ) + result.statistics = *ticker; + } + return result; + } + // template function to reduce duplicate code 
template - vector get_liquidity_pools_by_asset_x( + vector get_liquidity_pools_by_asset_x( std::string asset_symbol_or_id, optional olimit, - optional ostart_id )const + optional ostart_id, + optional with_statistics )const { uint32_t limit = olimit.valid() ? *olimit : 101; @@ -276,7 +318,9 @@ class database_api_impl : public std::enable_shared_from_this "limit can not be greater than ${configured_limit}", ("configured_limit", configured_limit) ); - vector results; + bool with_stats = ( with_statistics.valid() && *with_statistics ); + + vector results; const asset_id_type asset_id = get_asset_from_string(asset_symbol_or_id)->id; @@ -287,10 +331,9 @@ class database_api_impl : public std::enable_shared_from_this auto upper_itr = idx.upper_bound( asset_id ); results.reserve( limit ); - uint32_t count = 0; - for ( ; lower_itr != upper_itr && count < limit; ++lower_itr, ++count) + for ( ; lower_itr != upper_itr && results.size() < limit; ++lower_itr ) { - results.emplace_back( *lower_itr ); + results.emplace_back( extend_liquidity_pool( *lower_itr, with_stats ) ); } return results; diff --git a/libraries/app/include/graphene/app/api.hpp b/libraries/app/include/graphene/app/api.hpp index 92c08c266a..af2dc3c1fd 100644 --- a/libraries/app/include/graphene/app/api.hpp +++ b/libraries/app/include/graphene/app/api.hpp @@ -127,14 +127,14 @@ namespace graphene { namespace app { /** * @brief Get operations relevant to the specificed account - * @param account_id_or_name The account ID or name whose history should be queried + * @param account_name_or_id The account name or ID whose history should be queried * @param stop ID of the earliest operation to retrieve * @param limit Maximum number of operations to retrieve (must not exceed 100) * @param start ID of the most recent operation to retrieve * @return A list of operations performed by account, ordered from most recent to oldest. 
*/ vector get_account_history( - const std::string account_id_or_name, + const std::string account_name_or_id, operation_history_id_type stop = operation_history_id_type(), unsigned limit = 100, operation_history_id_type start = operation_history_id_type() @@ -142,7 +142,7 @@ namespace graphene { namespace app { /** * @brief Get operations relevant to the specified account filtering by operation type - * @param account_id_or_name The account ID or name whose history should be queried + * @param account_name_or_id The account name or ID whose history should be queried * @param operation_types The IDs of the operation we want to get operations in the account * ( 0 = transfer , 1 = limit order create, ...) * @param start the sequence number where to start looping back throw the history @@ -150,7 +150,7 @@ namespace graphene { namespace app { * @return history_operation_detail */ history_operation_detail get_account_history_by_operations( - const std::string account_id_or_name, + const std::string account_name_or_id, flat_set operation_types, uint32_t start, unsigned limit @@ -158,7 +158,7 @@ namespace graphene { namespace app { /** * @brief Get only asked operations relevant to the specified account - * @param account_id_or_name The account ID or name whose history should be queried + * @param account_name_or_id The account name or ID whose history should be queried * @param operation_type The type of the operation we want to get operations in the account * ( 0 = transfer , 1 = limit order create, ...) * @param stop ID of the earliest operation to retrieve @@ -167,7 +167,7 @@ namespace graphene { namespace app { * @return A list of operations performed by account, ordered from most recent to oldest. 
*/ vector get_account_history_operations( - const std::string account_id_or_name, + const std::string account_name_or_id, int operation_type, operation_history_id_type start = operation_history_id_type(), operation_history_id_type stop = operation_history_id_type(), @@ -178,7 +178,7 @@ namespace graphene { namespace app { * @brief Get operations relevant to the specified account referenced * by an event numbering specific to the account. The current number of operations * for the account can be found in the account statistics (or use 0 for start). - * @param account_id_or_name The account ID or name whose history should be queried + * @param account_name_or_id The account name or ID whose history should be queried * @param stop Sequence number of earliest operation. 0 is default and will * query 'limit' number of operations. * @param limit Maximum number of operations to retrieve (must not exceed 100) @@ -186,7 +186,7 @@ namespace graphene { namespace app { * 0 is default, which will start querying from the most recent operation. * @return A list of operations performed by account, ordered from most recent to oldest. */ - vector get_relative_account_history( const std::string account_id_or_name, + vector get_relative_account_history( const std::string account_name_or_id, uint64_t stop = 0, unsigned limit = 100, uint64_t start = 0) const; @@ -220,6 +220,65 @@ namespace graphene { namespace app { * it means this API server supports OHLCV data aggregated in 5-minute buckets. */ flat_set get_market_history_buckets()const; + + /** + * @brief Get history of a liquidity pool + * @param pool_id ID of the liquidity pool to query + * @param start A UNIX timestamp. Optional. + * If specified, only the operations occurred not later than this time will be returned. + * @param stop A UNIX timestamp. Optional. + * If specified, only the operations occurred later than this time will be returned. + * @param limit Maximum quantity of operations in the history to retrieve. 
+ * Optional. If not specified, at most 101 records will be returned. + * @param operation_type Optional. If specified, only the operations whose type is the specified type + * will be returned. Otherwise all operations will be returned. + * @return operation history of the liquidity pool, ordered by time, most recent first. + * + * @note + * 1. The time must be UTC. The range is (stop, start]. + * 2. In case when there are more than 100 operations occurred in the same second, this API only returns + * the most recent records, the rest records can be retrieved with the + * @ref get_liquidity_pool_history_by_sequence API. + * 3. List of operation type code: 59-creation, 60-deletion, 61-deposit, 62-withdrawal, 63-exchange. + * 4. Can only omit one or more arguments in the end of the list, but not one or more in the middle. + * If need to not specify an individual argument, can specify \c null in the place. + */ + vector get_liquidity_pool_history( + liquidity_pool_id_type pool_id, + optional start = optional(), + optional stop = optional(), + optional limit = 101, + optional operation_type = optional() )const; + + /** + * @brief Get history of a liquidity pool + * @param pool_id ID of the liquidity pool to query + * @param start An Integer. Optional. + * If specified, only the operations whose sequences are not greater than this will be returned. + * @param stop A UNIX timestamp. Optional. + * If specified, only operations occurred later than this time will be returned. + * @param limit Maximum quantity of operations in the history to retrieve. + * Optional. If not specified, at most 101 records will be returned. + * @param operation_type Optional. If specified, only the operations whose type is the specified type + * will be returned. Otherwise all operations will be returned. + * @return operation history of the liquidity pool, ordered by time, most recent first. + * + * @note + * 1. The time must be UTC. The range is (stop, start]. + * 2. 
In case when there are more than 100 operations occurred in the same second, this API only returns + * the most recent records, the rest records can be retrieved by calling this API again + * with a smaller @p start sequence number. + * 3. List of operation type code: 59-creation, 60-deletion, 61-deposit, 62-withdrawal, 63-exchange. + * 4. Can only omit one or more arguments in the end of the list, but not one or more in the middle. + * If need to not specify an individual argument, can specify \c null in the place. + */ + vector get_liquidity_pool_history_by_sequence( + liquidity_pool_id_type pool_id, + optional start = optional(), + optional stop = optional(), + optional limit = 101, + optional operation_type = optional() )const; + private: application& _app; graphene::app::database_api database_api; @@ -503,8 +562,8 @@ namespace graphene { namespace app { /** * @brief Get grouped limit orders in given market. * - * @param base_asset ID or symbol of asset being sold - * @param quote_asset ID or symbol of asset being purchased + * @param base_asset symbol or ID of asset being sold + * @param quote_asset symbol or ID of asset being purchased * @param group Maximum price diff within each order group, have to be one of configured values * @param start Optional price to indicate the first order group to retrieve * @param limit Maximum number of order groups to retrieve (must not exceed 101) @@ -534,12 +593,12 @@ namespace graphene { namespace app { /** * @brief Get all stored objects of an account in a particular catalog * - * @param account The account ID or name to get info from + * @param account_name_or_id The account name or ID to get info from * @param catalog Category classification. Each account can store multiple catalogs. 
* * @return The vector of objects of the account or empty */ - vector get_storage_info(std::string account_id_or_name, std::string catalog)const; + vector get_storage_info(std::string account_name_or_id, std::string catalog)const; private: application& _app; @@ -645,6 +704,8 @@ FC_API(graphene::app::history_api, (get_fill_order_history) (get_market_history) (get_market_history_buckets) + (get_liquidity_pool_history) + (get_liquidity_pool_history_by_sequence) ) FC_API(graphene::app::block_api, (get_blocks) diff --git a/libraries/app/include/graphene/app/api_objects.hpp b/libraries/app/include/graphene/app/api_objects.hpp index 00a03f1b75..328fd0b946 100644 --- a/libraries/app/include/graphene/app/api_objects.hpp +++ b/libraries/app/include/graphene/app/api_objects.hpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -152,6 +153,15 @@ namespace graphene { namespace app { optional total_backing_collateral; }; + struct extended_liquidity_pool_object : liquidity_pool_object + { + extended_liquidity_pool_object() {} + explicit extended_liquidity_pool_object( const liquidity_pool_object& o ) : liquidity_pool_object( o ) {} + explicit extended_liquidity_pool_object( liquidity_pool_object&& o ) : liquidity_pool_object( std::move(o) ) {} + + optional statistics; + }; + } } FC_REFLECT( graphene::app::more_data, @@ -181,16 +191,18 @@ FC_REFLECT( graphene::app::full_account, (more_data_available) ) -FC_REFLECT( graphene::app::order, (price)(quote)(base) ); -FC_REFLECT( graphene::app::order_book, (base)(quote)(bids)(asks) ); +FC_REFLECT( graphene::app::order, (price)(quote)(base) ) +FC_REFLECT( graphene::app::order_book, (base)(quote)(bids)(asks) ) FC_REFLECT( graphene::app::market_ticker, (time)(base)(quote)(latest)(lowest_ask)(lowest_ask_base_size)(lowest_ask_quote_size) - (highest_bid)(highest_bid_base_size)(highest_bid_quote_size)(percent_change)(base_volume)(quote_volume)(mto_id) ); -FC_REFLECT( graphene::app::market_volume, 
(time)(base)(quote)(base_volume)(quote_volume) ); + (highest_bid)(highest_bid_base_size)(highest_bid_quote_size)(percent_change)(base_volume)(quote_volume)(mto_id) ) +FC_REFLECT( graphene::app::market_volume, (time)(base)(quote)(base_volume)(quote_volume) ) FC_REFLECT( graphene::app::market_trade, (sequence)(date)(price)(amount)(value)(type) - (side1_account_id)(side2_account_id)); + (side1_account_id)(side2_account_id) ) FC_REFLECT_DERIVED( graphene::app::extended_asset_object, (graphene::chain::asset_object), - (total_in_collateral)(total_backing_collateral) ); + (total_in_collateral)(total_backing_collateral) ) +FC_REFLECT_DERIVED( graphene::app::extended_liquidity_pool_object, (graphene::chain::liquidity_pool_object), + (statistics) ) diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index 5d9be78b92..f3f54ae54e 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ -72,7 +72,9 @@ namespace graphene { namespace app { uint64_t api_limit_get_trade_history_by_sequence = 100; uint64_t api_limit_get_withdraw_permissions_by_giver = 101; uint64_t api_limit_get_withdraw_permissions_by_recipient = 101; + uint64_t api_limit_get_tickets = 101; uint64_t api_limit_get_liquidity_pools = 101; + uint64_t api_limit_get_liquidity_pool_history = 101; }; class application diff --git a/libraries/app/include/graphene/app/database_api.hpp b/libraries/app/include/graphene/app/database_api.hpp index 6df0d49433..6b397f1882 100644 --- a/libraries/app/include/graphene/app/database_api.hpp +++ b/libraries/app/include/graphene/app/database_api.hpp @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -119,6 +120,7 @@ class database_api * - lookup_accounts * - get_full_accounts * - get_htlc + * - get_liquidity_pools * - get_liquidity_pools_by_share_asset * * Note: auto-subscription is enabled by default @@ -597,30 +599,31 @@ class 
database_api vector get_top_markets(uint32_t limit)const; /** - * @brief Returns recent trades for the market base:quote, ordered by time, most recent first. - * Note: Currently, timezone offsets are not supported. The time must be UTC. The range is [stop, start). - * In case when there are more than 100 trades occurred in the same second, this API only returns - * the first 100 records, can use another API @ref get_trade_history_by_sequence to query for the rest. + * @brief Get market transactions occurred in the market base:quote, ordered by time, most recent first. * @param base symbol or ID of the base asset * @param quote symbol or ID of the quote asset - * @param start Start time as a UNIX timestamp, the latest trade to retrieve - * @param stop Stop time as a UNIX timestamp, the earliest trade to retrieve - * @param limit Number of trasactions to retrieve, capped at 100. - * @return Recent transactions in the market + * @param start Start time as a UNIX timestamp, the latest transactions to retrieve + * @param stop Stop time as a UNIX timestamp, the earliest transactions to retrieve + * @param limit Maximum quantity of transactions to retrieve, capped at 100. + * @return Transactions in the market + * @note The time must be UTC, timezone offsets are not supported. The range is [stop, start]. + * In case when there are more than 100 transactions occurred in the same second, + * this API only returns the most recent 100 records, the rest records can be retrieved + * with the @ref get_trade_history_by_sequence API. */ vector get_trade_history( const string& base, const string& quote, fc::time_point_sec start, fc::time_point_sec stop, unsigned limit = 100 )const; /** - * @brief Returns trades for the market base:quote, ordered by time, most recent first. - * Note: Currently, timezone offsets are not supported. The time must be UTC. The range is [stop, start). + * @brief Get market transactions occurred in the market base:quote, ordered by time, most recent first. 
* @param base symbol or ID of the base asset * @param quote symbol or ID of the quote asset - * @param start Start sequence as an Integer, the latest trade to retrieve - * @param stop Stop time as a UNIX timestamp, the earliest trade to retrieve - * @param limit Number of trasactions to retrieve, capped at 100 + * @param start Start sequence as an Integer, the latest transaction to retrieve + * @param stop Stop time as a UNIX timestamp, the earliest transactions to retrieve + * @param limit Maximum quantity of transactions to retrieve, capped at 100 * @return Transactions in the market + * @note The time must be UTC, timezone offsets are not supported. The range is [stop, start]. */ vector get_trade_history_by_sequence( const string& base, const string& quote, int64_t start, fc::time_point_sec stop, @@ -631,11 +634,29 @@ class database_api // Liquidity pools // ///////////////////// + /** + * @brief Get a list of liquidity pools + * @param limit The limitation of items each query can fetch, not greater than a configured value + * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @param with_statistics Whether to return statistics + * @return The liquidity pools + * + * @note + * 1. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of pools + * 3. 
can only omit one or more arguments in the end of the list, but not one or more in the middle + */ + vector list_liquidity_pools( + optional limit = 101, + optional start_id = optional(), + optional with_statistics = false )const; + /** * @brief Get a list of liquidity pools by the symbol or ID of the first asset in the pool * @param asset_symbol_or_id symbol name or ID of the asset * @param limit The limitation of items each query can fetch, not greater than a configured value * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @param with_statistics Whether to return statistics * @return The liquidity pools * * @note @@ -644,16 +665,18 @@ class database_api * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle */ - vector get_liquidity_pools_by_asset_a( + vector get_liquidity_pools_by_asset_a( std::string asset_symbol_or_id, optional limit = 101, - optional start_id = optional() )const; + optional start_id = optional(), + optional with_statistics = false )const; /** * @brief Get a list of liquidity pools by the symbol or ID of the second asset in the pool * @param asset_symbol_or_id symbol name or ID of the asset * @param limit The limitation of items each query can fetch, not greater than a configured value * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @param with_statistics Whether to return statistics * @return The liquidity pools * * @note @@ -662,10 +685,11 @@ class database_api * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle */ - vector get_liquidity_pools_by_asset_b( + vector get_liquidity_pools_by_asset_b( std::string asset_symbol_or_id, optional limit = 101, - optional start_id = optional() )const; + optional start_id = optional(), + optional with_statistics = false )const; /** * @brief Get a list of liquidity pools by the symbols or IDs of the two assets in the pool @@ -673,6 +697,7 @@ class database_api * @param asset_symbol_or_id_b symbol name or ID of the other asset * @param limit The limitation of items each query can fetch, not greater than a configured value * @param start_id Start liquidity pool id, fetch pools whose IDs are greater than or equal to this ID + * @param with_statistics Whether to return statistics * @return The liquidity pools * * @note @@ -682,11 +707,29 @@ class database_api * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools * 4. can only omit one or more arguments in the end of the list, but not one or more in the middle */ - vector get_liquidity_pools_by_both_assets( + vector get_liquidity_pools_by_both_assets( std::string asset_symbol_or_id_a, std::string asset_symbol_or_id_b, optional limit = 101, - optional start_id = optional() )const; + optional start_id = optional(), + optional with_statistics = false )const; + + /** + * @brief Get a list of liquidity pools by their IDs + * @param ids IDs of the liquidity pools + * @param subscribe @a true to subscribe to the queried objects; @a false to not subscribe; + * @a null to subscribe or not subscribe according to current auto-subscription setting + * (see @ref set_auto_subscription) + * @param with_statistics Whether to return statistics + * @return The liquidity pools + * + * @note if an ID in the list can not be found, + * the corresponding data in the returned list is null. 
+ */ + vector> get_liquidity_pools( + const vector& ids, + optional subscribe = optional(), + optional with_statistics = false )const; /** * @brief Get a list of liquidity pools by their share asset symbols or IDs @@ -694,14 +737,36 @@ class database_api * @param subscribe @a true to subscribe to the queried objects; @a false to not subscribe; * @a null to subscribe or not subscribe according to current auto-subscription setting * (see @ref set_auto_subscription) + * @param with_statistics Whether to return statistics * @return The liquidity pools that the assets are for * * @note if an asset in the list can not be found or is not a share asset of any liquidity pool, * the corresponding data in the returned list is null. */ - vector> get_liquidity_pools_by_share_asset( + vector> get_liquidity_pools_by_share_asset( const vector& asset_symbols_or_ids, - optional subscribe = optional() )const; + optional subscribe = optional(), + optional with_statistics = false )const; + + /** + * @brief Get a list of liquidity pools by the name or ID of the owner account + * @param account_name_or_id name or ID of the owner account + * @param limit The limitation of items each query can fetch, not greater than a configured value + * @param start_id Start share asset id, fetch pools whose share asset IDs are greater than or equal to this ID + * @param with_statistics Whether to return statistics + * @return The liquidity pools + * + * @note + * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned + * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of pools + * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle + */ + vector get_liquidity_pools_by_owner( + std::string account_name_or_id, + optional limit = 101, + optional start_id = optional(), + optional with_statistics = false )const; /////////////// // Witnesses // @@ -991,6 +1056,43 @@ class database_api vector list_htlcs(const htlc_id_type start, uint32_t limit) const; + ///////////// + // Tickets // + ///////////// + + /** + * @brief Get a list of tickets + * @param limit The limitation of items each query can fetch, not greater than a configured value + * @param start_id Start ticket id, fetch tickets whose IDs are greater than or equal to this ID + * @return The tickets + * + * @note + * 1. @p limit can be omitted or be null, if so the default value 101 will be used + * 2. @p start_id can be omitted or be null, if so the api will return the "first page" of tickets + * 3. can only omit one or more arguments in the end of the list, but not one or more in the middle + */ + vector list_tickets( + optional limit = 101, + optional start_id = optional() )const; + + /** + * @brief Get a list of tickets by the name or ID of the owner account + * @param account_name_or_id name or ID of the owner account + * @param limit The limitation of items each query can fetch, not greater than a configured value + * @param start_id Start ticket id, fetch tickets whose IDs are greater than or equal to this ID + * @return The tickets + * + * @note + * 1. if @p account_name_or_id cannot be tied to an account, an error will be returned + * 2. @p limit can be omitted or be null, if so the default value 101 will be used + * 3. @p start_id can be omitted or be null, if so the api will return the "first page" of tickets + * 4. 
can only omit one or more arguments in the end of the list, but not one or more in the middle + */ + vector get_tickets_by_account( + std::string account_name_or_id, + optional limit = 101, + optional start_id = optional() )const; + private: std::shared_ptr< database_api_impl > my; }; @@ -1074,10 +1176,13 @@ FC_API(graphene::app::database_api, (get_trade_history_by_sequence) // Liquidity pools + (list_liquidity_pools) (get_liquidity_pools_by_asset_a) (get_liquidity_pools_by_asset_b) (get_liquidity_pools_by_both_assets) + (get_liquidity_pools) (get_liquidity_pools_by_share_asset) + (get_liquidity_pools_by_owner) // Witnesses (get_witnesses) @@ -1125,4 +1230,8 @@ FC_API(graphene::app::database_api, (get_htlc_by_from) (get_htlc_by_to) (list_htlcs) + + // Tickets + (list_tickets) + (get_tickets_by_account) ) diff --git a/libraries/app/include/graphene/app/plugin.hpp b/libraries/app/include/graphene/app/plugin.hpp index 45336f677c..b833d97feb 100644 --- a/libraries/app/include/graphene/app/plugin.hpp +++ b/libraries/app/include/graphene/app/plugin.hpp @@ -42,7 +42,7 @@ class abstract_plugin * * Plugins MUST supply a method initialize() which will be called early in the application startup. This method * should contain early setup code such as initializing variables, adding indexes to the database, registering - * callback methods from the database, adding APIs, etc., as well as applying any options in the @ref options map + * callback methods from the database, adding APIs, etc., as well as applying any options in the @p options map * * This method is called BEFORE the database is open, therefore any routines which require any chain state MUST * NOT be called by this method. These routines should be performed in startup() instead. 
@@ -120,7 +120,7 @@ class plugin : public abstract_plugin application* _app = nullptr; }; -/// @group Some useful tools for boost::program_options arguments using vectors of JSON strings +/// @ingroup Some useful tools for boost::program_options arguments using vectors of JSON strings /// @{ template T dejsonify(const string& s, uint32_t max_depth) @@ -138,7 +138,7 @@ namespace impl { #define DEFAULT_VALUE_VECTOR(value) default_value({fc::json::to_string(value)}, fc::json::to_string(value)) #define LOAD_VALUE_SET(options, name, container, type) \ -if( options.count(name) ) { \ +if( options.count(name) > 0 ) { \ const std::vector& ops = options[name].as>(); \ std::transform(ops.begin(), ops.end(), std::inserter(container, container.end()), &graphene::app::impl::dejsonify); \ } diff --git a/libraries/chain/asset_evaluator.cpp b/libraries/chain/asset_evaluator.cpp index 24cb4c4d8b..b48af41d1b 100644 --- a/libraries/chain/asset_evaluator.cpp +++ b/libraries/chain/asset_evaluator.cpp @@ -946,13 +946,13 @@ void_result asset_update_feed_producers_evaluator::do_apply(const asset_update_f //First, remove any old publishers who are no longer publishers for( auto itr = a.feeds.begin(); itr != a.feeds.end(); ) { - if( !o.new_feed_producers.count(itr->first) ) + if( o.new_feed_producers.count(itr->first) == 0 ) itr = a.feeds.erase(itr); else ++itr; } //Now, add any new publishers - for( const account_id_type acc : o.new_feed_producers ) + for( const account_id_type& acc : o.new_feed_producers ) { a.feeds[acc]; } @@ -1126,17 +1126,17 @@ void_result asset_publish_feeds_evaluator::do_evaluate(const asset_publish_feed_ //Verify that the publisher is authoritative to publish a feed if( base.options.flags & witness_fed_asset ) { - FC_ASSERT( d.get(GRAPHENE_WITNESS_ACCOUNT).active.account_auths.count(o.publisher), + FC_ASSERT( d.get(GRAPHENE_WITNESS_ACCOUNT).active.account_auths.count(o.publisher) > 0, "Only active witnesses are allowed to publish price feeds for this asset" ); } else 
if( base.options.flags & committee_fed_asset ) { - FC_ASSERT( d.get(GRAPHENE_COMMITTEE_ACCOUNT).active.account_auths.count(o.publisher), + FC_ASSERT( d.get(GRAPHENE_COMMITTEE_ACCOUNT).active.account_auths.count(o.publisher) > 0, "Only active committee members are allowed to publish price feeds for this asset" ); } else { - FC_ASSERT( bitasset.feeds.count(o.publisher), + FC_ASSERT( bitasset.feeds.count(o.publisher) > 0, "The account is not in the set of allowed price feed producers of this asset" ); } diff --git a/libraries/chain/db_maint.cpp b/libraries/chain/db_maint.cpp index ee9a9ecd79..8e7de32c85 100644 --- a/libraries/chain/db_maint.cpp +++ b/libraries/chain/db_maint.cpp @@ -412,6 +412,7 @@ void database::initialize_budget_record( fc::time_point_sec now, budget_record& rec.from_initial_reserve = core.reserved(*this); rec.from_accumulated_fees = core_dd.accumulated_fees; rec.from_unused_witness_budget = dpo.witness_budget; + rec.max_supply = core.options.max_supply; if( (dpo.last_budget_time == fc::time_point_sec()) || (now <= dpo.last_budget_time) ) @@ -536,6 +537,7 @@ void database::process_budget() _dpo.last_budget_time = now; }); + rec.current_supply = core.current_supply; create< budget_record_object >( [&]( budget_record_object& _rec ) { _rec.time = head_block_time(); diff --git a/libraries/chain/include/graphene/chain/asset_object.hpp b/libraries/chain/include/graphene/chain/asset_object.hpp index cb6a8ae386..2b364521cf 100644 --- a/libraries/chain/include/graphene/chain/asset_object.hpp +++ b/libraries/chain/include/graphene/chain/asset_object.hpp @@ -34,7 +34,7 @@ * A prediction market is a specialized BitAsset such that total debt and total collateral are always equal amounts * (although asset IDs differ). No margin calls or force settlements may be performed on a prediction market asset. 
A * prediction market is globally settled by the issuer after the event being predicted resolves, thus a prediction - * market must always have the @ref global_settle permission enabled. The maximum price for global settlement or short + * market must always have the @c global_settle permission enabled. The maximum price for global settlement or short * sale of a prediction market asset is 1-to-1. */ diff --git a/libraries/chain/include/graphene/chain/budget_record_object.hpp b/libraries/chain/include/graphene/chain/budget_record_object.hpp index 15eab0a6cc..84ada7a816 100644 --- a/libraries/chain/include/graphene/chain/budget_record_object.hpp +++ b/libraries/chain/include/graphene/chain/budget_record_object.hpp @@ -31,26 +31,36 @@ struct budget_record { uint64_t time_since_last_budget = 0; - // sources of budget + /// Sources of budget + ///@{ share_type from_initial_reserve = 0; share_type from_accumulated_fees = 0; share_type from_unused_witness_budget = 0; + ///@} - // witness budget requested by the committee + /// Witness budget requested by the committee share_type requested_witness_budget = 0; - // funds that can be released from reserve at maximum rate + /// Funds that can be released from reserve at maximum rate share_type total_budget = 0; - // sinks of budget, should sum up to total_budget + /// Sinks of budget, should sum up to total_budget + ///@{ share_type witness_budget = 0; share_type worker_budget = 0; + ///@} - // unused budget + /// Unused budget share_type leftover_worker_funds = 0; - // change in supply due to budget operations + /// Change in supply due to budget operations share_type supply_delta = 0; + + /// Maximum supply + share_type max_supply; + + /// Current supply + share_type current_supply; }; class budget_record_object : public graphene::db::abstract_object diff --git a/libraries/chain/include/graphene/chain/config.hpp b/libraries/chain/include/graphene/chain/config.hpp index 19c86354d9..cb0b381dc7 100644 --- 
a/libraries/chain/include/graphene/chain/config.hpp +++ b/libraries/chain/include/graphene/chain/config.hpp @@ -30,7 +30,7 @@ #define GRAPHENE_MAX_NESTED_OBJECTS (200) -#define GRAPHENE_CURRENT_DB_VERSION "20200910" +#define GRAPHENE_CURRENT_DB_VERSION "20201105" #define GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT 4 #define GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT 3 diff --git a/libraries/chain/include/graphene/chain/database.hpp b/libraries/chain/include/graphene/chain/database.hpp index dce314d363..3429625598 100644 --- a/libraries/chain/include/graphene/chain/database.hpp +++ b/libraries/chain/include/graphene/chain/database.hpp @@ -109,6 +109,7 @@ namespace graphene { namespace chain { /** * @brief Rebuild object graph from block history and open detabase + * @param data_dir the path to store the database * * This method may be called after or instead of @ref database::open, and will rebuild the object graph by * replaying blockchain history. When this method exits successfully, the database will be open. @@ -117,6 +118,7 @@ namespace graphene { namespace chain { /** * @brief wipe Delete database from disk, and potentially the raw chain as well. + * @param data_dir the path to store the database * @param include_blocks If true, delete the raw chain as well as the database. * * Will close the database before wiping. Database will be closed when this function returns. 
@@ -330,7 +332,7 @@ namespace graphene { namespace chain { void deposit_market_fee_vesting_balance(const account_id_type &account_id, const asset &delta); /** * @brief Retrieve a particular account's market fee vesting balance in a given asset - * @param owner Account whose balance should be retrieved + * @param account_id Account whose balance should be retrieved * @param asset_id ID of the asset to get balance in * @return owner's balance in asset */ @@ -370,7 +372,8 @@ namespace graphene { namespace chain { //////////////////// db_market.cpp //////////////////// - /// @{ @group Market Helpers + /// @ingroup Market Helpers + /// @{ void globally_settle_asset( const asset_object& bitasset, const price& settle_price ); void cancel_settle_order(const force_settlement_object& order, bool create_virtual_op = true); void cancel_limit_order(const limit_order_object& order, bool create_virtual_op = true, bool skip_cancel_fee = false); @@ -387,7 +390,8 @@ namespace graphene { namespace chain { public: /** * @brief Process a new limit order through the markets - * @param order The new order to process + * @param new_order_object The new order to process + * @param allow_black_swan whether to allow a black swan event * @return true if order was completely filled; false otherwise * * This function takes a new limit order, and runs the markets attempting to match it with existing orders @@ -686,23 +690,4 @@ namespace graphene { namespace chain { ///@} }; - namespace detail - { - template - struct seq { }; - - template - struct gen_seq : gen_seq { }; - - template - struct gen_seq<0, Is...> : seq { }; - - template - void for_each(T&& t, const account_object& a, seq) - { - auto l = { (std::get(t)(a), 0)... 
}; - (void)l; - } - } - } } diff --git a/libraries/chain/include/graphene/chain/liquidity_pool_evaluator.hpp b/libraries/chain/include/graphene/chain/liquidity_pool_evaluator.hpp index 6d8497c189..e7de47be76 100644 --- a/libraries/chain/include/graphene/chain/liquidity_pool_evaluator.hpp +++ b/libraries/chain/include/graphene/chain/liquidity_pool_evaluator.hpp @@ -82,6 +82,8 @@ namespace graphene { namespace chain { const asset_dynamic_data_object* _share_asset_dyn_data = nullptr; asset _pool_pays_a; asset _pool_pays_b; + asset _fee_a; + asset _fee_b; }; class liquidity_pool_exchange_evaluator : public evaluator @@ -100,6 +102,7 @@ namespace graphene { namespace chain { asset _account_receives; asset _maker_market_fee; asset _taker_market_fee; + asset _pool_taker_fee; }; } } // graphene::chain diff --git a/libraries/chain/include/graphene/chain/liquidity_pool_object.hpp b/libraries/chain/include/graphene/chain/liquidity_pool_object.hpp index 82a4b72943..90fffeac47 100644 --- a/libraries/chain/include/graphene/chain/liquidity_pool_object.hpp +++ b/libraries/chain/include/graphene/chain/liquidity_pool_object.hpp @@ -107,6 +107,15 @@ typedef generic_index li MAP_OBJECT_ID_TO_TYPE( graphene::chain::liquidity_pool_object ) -FC_REFLECT_TYPENAME( graphene::chain::liquidity_pool_object ) +FC_REFLECT_DERIVED( graphene::chain::liquidity_pool_object, (graphene::db::object), + (asset_a) + (asset_b) + (balance_a) + (balance_b) + (share_asset) + (taker_fee_percent) + (withdrawal_fee_percent) + (virtual_value) + ) GRAPHENE_DECLARE_EXTERNAL_SERIALIZATION( graphene::chain::liquidity_pool_object ) diff --git a/libraries/chain/include/graphene/chain/ticket_object.hpp b/libraries/chain/include/graphene/chain/ticket_object.hpp index d6c18e93e1..16610dccf7 100644 --- a/libraries/chain/include/graphene/chain/ticket_object.hpp +++ b/libraries/chain/include/graphene/chain/ticket_object.hpp @@ -117,7 +117,7 @@ class ticket_object : public abstract_object }; struct by_next_update; -struct 
by_account_type; +struct by_account; /** * @ingroup object_index @@ -132,10 +132,9 @@ typedef multi_index_container< member< object, object_id_type, &object::id> > >, - ordered_unique< tag, + ordered_unique< tag, composite_key< ticket_object, member< ticket_object, account_id_type, &ticket_object::account>, - member< ticket_object, ticket_type, &ticket_object::current_type>, member< object, object_id_type, &object::id> > > diff --git a/libraries/chain/include/graphene/chain/types.hpp b/libraries/chain/include/graphene/chain/types.hpp index ec3e788219..1ec4a4a0ce 100644 --- a/libraries/chain/include/graphene/chain/types.hpp +++ b/libraries/chain/include/graphene/chain/types.hpp @@ -27,22 +27,24 @@ namespace graphene { namespace chain { using namespace protocol; } } +/// Object types in the Implementation Space (enum impl_object_type (2.x.x)) GRAPHENE_DEFINE_IDS(chain, implementation_ids, impl_, - (global_property) - (dynamic_global_property) - (reserved0) - (asset_dynamic_data) - (asset_bitasset_data) - (account_balance) - (account_statistics) - (transaction_history) - (block_summary) - (account_transaction_history) - (blinded_balance) - (chain_property) - (witness_schedule) - (budget_record) - (special_authority) - (buyback) - (fba_accumulator) - (collateral_bid)) + /* 2.0.x */ (global_property) + /* 2.1.x */ (dynamic_global_property) + /* 2.2.x */ (reserved0) // unused, but can not be simply deleted due to API compatibility + /* 2.3.x */ (asset_dynamic_data) + /* 2.4.x */ (asset_bitasset_data) + /* 2.5.x */ (account_balance) + /* 2.6.x */ (account_statistics) + /* 2.7.x */ (transaction_history) + /* 2.8.x */ (block_summary) + /* 2.9.x */ (account_transaction_history) + /* 2.10.x */ (blinded_balance) + /* 2.11.x */ (chain_property) + /* 2.12.x */ (witness_schedule) + /* 2.13.x */ (budget_record) + /* 2.14.x */ (special_authority) + /* 2.15.x */ (buyback) + /* 2.16.x */ (fba_accumulator) + /* 2.17.x */ (collateral_bid) + ) diff --git 
a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp index b8f6b1f54f..a16bcd1156 100644 --- a/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp +++ b/libraries/chain/include/graphene/chain/withdraw_permission_object.hpp @@ -36,7 +36,8 @@ namespace graphene { namespace chain { * @brief Grants another account authority to withdraw a limited amount of funds per interval * * The primary purpose of this object is to enable recurring payments on the blockchain. An account which wishes to - * process a recurring payment may use a @ref withdraw_permission_claim_operation to reference an object of this type + * process a recurring payment may use a @ref graphene::protocol::withdraw_permission_claim_operation + * to reference an object of this type * and withdraw up to @ref withdrawal_limit from @ref withdraw_from_account. Only @ref authorized_account may do * this. Any number of withdrawals may be made so long as the total amount withdrawn per period does not exceed the * limit for any given period. diff --git a/libraries/chain/include/graphene/chain/worker_object.hpp b/libraries/chain/include/graphene/chain/worker_object.hpp index 3855a9f34f..2836b0a382 100644 --- a/libraries/chain/include/graphene/chain/worker_object.hpp +++ b/libraries/chain/include/graphene/chain/worker_object.hpp @@ -48,8 +48,8 @@ class database; * To create a new worker type, define a my_new_worker_type struct with a pay_worker method which updates the * my_new_worker_type object and/or the database. Create a my_new_worker_type::initializer struct with an init * method and any data members necessary to create a new worker of this type. Reflect my_new_worker_type and - * my_new_worker_type::initializer into FC's type system, and add them to @ref worker_type and @ref - * worker_initializer respectively. 
Make sure the order of types in @ref worker_type and @ref worker_initializer + * my_new_worker_type::initializer into FC's type system, and add them to @ref worker_type and @c + * worker_initializer respectively. Make sure the order of types in @ref worker_type and @c worker_initializer * remains the same. * @{ */ diff --git a/libraries/chain/is_authorized_asset.cpp b/libraries/chain/is_authorized_asset.cpp index a8c3dc71a7..8eeff66a1c 100644 --- a/libraries/chain/is_authorized_asset.cpp +++ b/libraries/chain/is_authorized_asset.cpp @@ -52,7 +52,7 @@ bool _is_authorized_asset( // must still pass other checks even if it is in allowed_assets } - for( const auto id : acct.blacklisting_accounts ) + for( const auto& id : acct.blacklisting_accounts ) { if( asset_obj.options.blacklist_authorities.find(id) != asset_obj.options.blacklist_authorities.end() ) return false; @@ -61,7 +61,7 @@ bool _is_authorized_asset( if( asset_obj.options.whitelist_authorities.size() == 0 ) return true; - for( const auto id : acct.whitelisting_accounts ) + for( const auto& id : acct.whitelisting_accounts ) { if( asset_obj.options.whitelist_authorities.find(id) != asset_obj.options.whitelist_authorities.end() ) return true; diff --git a/libraries/chain/liquidity_pool_evaluator.cpp b/libraries/chain/liquidity_pool_evaluator.cpp index 607cedc7a0..07fba7d53c 100644 --- a/libraries/chain/liquidity_pool_evaluator.cpp +++ b/libraries/chain/liquidity_pool_evaluator.cpp @@ -241,6 +241,8 @@ void_result liquidity_pool_withdraw_evaluator::do_evaluate(const liquidity_pool_ { _pool_pays_a = asset( _pool->balance_a, _pool->asset_a ); _pool_pays_b = asset( _pool->balance_b, _pool->asset_b ); + _fee_a = asset( 0, _pool->asset_a ); + _fee_b = asset( 0, _pool->asset_b ); } else { @@ -258,6 +260,8 @@ void_result liquidity_pool_withdraw_evaluator::do_evaluate(const liquidity_pool_ FC_ASSERT( a128 > 0 || b128 > 0, "Aborting due to zero outcome" ); _pool_pays_a = asset( static_cast( a128 ), _pool->asset_a ); 
_pool_pays_b = asset( static_cast( b128 ), _pool->asset_b ); + _fee_a = asset( static_cast( fee_a ), _pool->asset_a ); + _fee_b = asset( static_cast( fee_b ), _pool->asset_b ); } return void_result(); @@ -292,6 +296,8 @@ generic_exchange_operation_result liquidity_pool_withdraw_evaluator::do_apply( result.paid.emplace_back( op.share_amount ); result.received.emplace_back( _pool_pays_a ); result.received.emplace_back( _pool_pays_b ); + result.fees.emplace_back( _fee_a ); + result.fees.emplace_back( _fee_b ); return result; } FC_CAPTURE_AND_RETHROW( (op) ) } @@ -354,6 +360,8 @@ void_result liquidity_pool_exchange_evaluator::do_evaluate(const liquidity_pool_ FC_ASSERT( _account_receives.amount >= op.min_to_receive.amount, "Unable to exchange at expected price" ); + _pool_taker_fee = asset( static_cast( pool_taker_fee ), op.min_to_receive.asset_id ); + return void_result(); } FC_CAPTURE_AND_RETHROW( (op) ) } @@ -374,7 +382,7 @@ generic_exchange_operation_result liquidity_pool_exchange_evaluator::do_apply( const auto old_virtual_value = _pool->virtual_value; if( op.amount_to_sell.asset_id == _pool->asset_a ) { - d.modify( *_pool, [&op,this]( liquidity_pool_object& lpo ){ + d.modify( *_pool, [this]( liquidity_pool_object& lpo ){ lpo.balance_a += _pool_receives.amount; lpo.balance_b -= _pool_pays.amount; lpo.update_virtual_value(); @@ -382,7 +390,7 @@ generic_exchange_operation_result liquidity_pool_exchange_evaluator::do_apply( } else { - d.modify( *_pool, [&op,this]( liquidity_pool_object& lpo ){ + d.modify( *_pool, [this]( liquidity_pool_object& lpo ){ lpo.balance_b += _pool_receives.amount; lpo.balance_a -= _pool_pays.amount; lpo.update_virtual_value(); @@ -396,6 +404,7 @@ generic_exchange_operation_result liquidity_pool_exchange_evaluator::do_apply( result.received.emplace_back( _account_receives ); result.fees.emplace_back( _maker_market_fee ); result.fees.emplace_back( _taker_market_fee ); + result.fees.emplace_back( _pool_taker_fee ); return result; } 
FC_CAPTURE_AND_RETHROW( (op) ) } diff --git a/libraries/chain/proposal_evaluator.cpp b/libraries/chain/proposal_evaluator.cpp index f3db44aaf2..fa4576a68a 100644 --- a/libraries/chain/proposal_evaluator.cpp +++ b/libraries/chain/proposal_evaluator.cpp @@ -306,8 +306,8 @@ void_result proposal_create_evaluator::do_evaluate( const proposal_create_operat "Proposals containing operations requiring non-account authorities are not yet implemented." ); // If we're dealing with the committee authority, make sure this transaction has a sufficient review period. - if( _required_active_auths.count( GRAPHENE_COMMITTEE_ACCOUNT ) || - _required_owner_auths.count( GRAPHENE_COMMITTEE_ACCOUNT ) ) + if( _required_active_auths.count( GRAPHENE_COMMITTEE_ACCOUNT ) > 0 || + _required_owner_auths.count( GRAPHENE_COMMITTEE_ACCOUNT ) > 0 ) { GRAPHENE_ASSERT( o.review_period_seconds.valid(), proposal_create_review_period_required, diff --git a/libraries/chain/small_objects.cpp b/libraries/chain/small_objects.cpp index 8868306ef6..238fc8e84e 100644 --- a/libraries/chain/small_objects.cpp +++ b/libraries/chain/small_objects.cpp @@ -63,6 +63,8 @@ FC_REFLECT_DERIVED_NO_TYPENAME( (worker_budget) (leftover_worker_funds) (supply_delta) + (max_supply) + (current_supply) ) FC_REFLECT_DERIVED_NO_TYPENAME( @@ -198,17 +200,6 @@ FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::custom_authority_object, (graph (account)(enabled)(valid_from)(valid_to)(operation_type) (auth)(restrictions)(restriction_counter) ) -FC_REFLECT_DERIVED_NO_TYPENAME( graphene::chain::liquidity_pool_object, (graphene::db::object), - (asset_a) - (asset_b) - (balance_a) - (balance_b) - (share_asset) - (taker_fee_percent) - (withdrawal_fee_percent) - (virtual_value) - ) - GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::balance_object ) GRAPHENE_IMPLEMENT_EXTERNAL_SERIALIZATION( graphene::chain::block_summary_object ) diff --git a/libraries/chain/ticket_evaluator.cpp b/libraries/chain/ticket_evaluator.cpp index 
24eef0ee33..9e4deeee06 100644 --- a/libraries/chain/ticket_evaluator.cpp +++ b/libraries/chain/ticket_evaluator.cpp @@ -138,9 +138,9 @@ generic_operation_result ticket_update_evaluator::do_apply(const ticket_update_o generic_operation_result process_result = d.process_tickets(); result.removed_objects.insert( process_result.removed_objects.begin(), process_result.removed_objects.end() ); result.updated_objects.insert( process_result.updated_objects.begin(), process_result.updated_objects.end() ); - for( const auto id : result.new_objects ) + for( const auto& id : result.new_objects ) result.updated_objects.erase( id ); - for( const auto id : result.removed_objects ) + for( const auto& id : result.removed_objects ) result.updated_objects.erase( id ); return result; diff --git a/libraries/db/undo_database.cpp b/libraries/db/undo_database.cpp index 3e340728e3..ff078f34b6 100644 --- a/libraries/db/undo_database.cpp +++ b/libraries/db/undo_database.cpp @@ -90,18 +90,18 @@ void undo_database::on_remove( const object& obj ) if( _stack.empty() ) _stack.emplace_back(); undo_state& state = _stack.back(); - if( state.new_ids.count(obj.id) ) + if( state.new_ids.count(obj.id) > 0 ) { state.new_ids.erase(obj.id); return; } - if( state.old_values.count(obj.id) ) + if( state.old_values.count(obj.id) > 0 ) { state.removed[obj.id] = std::move(state.old_values[obj.id]); state.old_values.erase(obj.id); return; } - if( state.removed.count(obj.id) ) return; + if( state.removed.count(obj.id) > 0 ) return; state.removed[obj.id] = obj.clone(); } diff --git a/libraries/egenesis/seed-nodes.txt b/libraries/egenesis/seed-nodes.txt index a64ddd193a..cace834c9e 100644 --- a/libraries/egenesis/seed-nodes.txt +++ b/libraries/egenesis/seed-nodes.txt @@ -2,10 +2,9 @@ "seed01.liondani.com:1776", // liondani (Germany) "bts.lafona.net:1776", // lafona (France) "bts-seed1.abit-more.com:62015", // abit (China) -"seed.blckchnd.com:4243", // blckchnd (Germany) "seed.roelandp.nl:1776", // roelandp (Canada) 
-"seed.bts.bangzi.info:55501", // Bangzi (Germany) "seed1.xbts.io:1776", // xbts.io (Germany) "seed2.xbts.io:1776", // xbts.io (Germany) +"seed1.bitshares.im:1776", // clone (USA) "seed.bitshares.org:666", // bitshares.org (France) "seeds.btsnodes.com:1776", // Community diff --git a/libraries/fc b/libraries/fc index 7d0411252a..07ef37736a 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 7d0411252a76ac57837763200788901cd30c7e6f +Subproject commit 07ef37736a5685b09443b51175bf291a38931f7b diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index 43aa94a6e9..0d4d5159d9 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -71,7 +71,9 @@ namespace graphene { namespace net { /** * @brief Called when a new block comes in from the network * + * @param blk_msg the message which contains the block * @param sync_mode true if the message was fetched through the sync process, false during normal operation + * @param contained_transaction_message_ids container for the transactions to write back into * @returns true if this message caused the blockchain to switch forks, false if it did not * * @throws exception if error validating the item, otherwise the item is diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index a00e43dcbf..88fdba62f5 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -339,4 +339,4 @@ FC_REFLECT_ENUM(graphene::net::peer_connection::connection_negotiation_status, ( (closing) (closed) ) -FC_REFLECT( graphene::net::peer_connection::timestamped_item_id, (item)(timestamp)); +FC_REFLECT( graphene::net::peer_connection::timestamped_item_id, (item)(timestamp) ) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index f2fd144488..16d16e5468 100644 --- 
a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -880,7 +880,7 @@ namespace graphene { namespace net { namespace detail { fc::time_point handshaking_disconnect_threshold = fc::time_point::now() - fc::seconds(handshaking_timeout); { fc::scoped_lock lock(_handshaking_connections.get_mutex()); - for( const peer_connection_ptr handshaking_peer : _handshaking_connections ) + for( const peer_connection_ptr& handshaking_peer : _handshaking_connections ) { if( handshaking_peer->connection_initiation_time < handshaking_disconnect_threshold && handshaking_peer->get_last_message_received_time() < handshaking_disconnect_threshold && @@ -1279,7 +1279,7 @@ namespace graphene { namespace net { namespace detail { } { fc::scoped_lock lock(_active_connections.get_mutex()); - for (const peer_connection_ptr active_peer : _active_connections) + for (const peer_connection_ptr& active_peer : _active_connections) { if (node_id == active_peer->node_id) { @@ -1290,7 +1290,7 @@ namespace graphene { namespace net { namespace detail { } { fc::scoped_lock lock(_handshaking_connections.get_mutex()); - for (const peer_connection_ptr handshaking_peer : _handshaking_connections) + for (const peer_connection_ptr& handshaking_peer : _handshaking_connections) if (node_id == handshaking_peer->node_id) { dlog("is_already_connected_to_id returning true because the peer is already in our handshaking list"); @@ -2471,7 +2471,7 @@ namespace graphene { namespace net { namespace detail { bool we_requested_this_item_from_a_peer = false; { fc::scoped_lock lock(_active_connections.get_mutex()); - for (const peer_connection_ptr peer : _active_connections) + for (const peer_connection_ptr& peer : _active_connections) { if (peer->inventory_advertised_to_peer.find(advertised_item_id) != peer->inventory_advertised_to_peer.end()) { diff --git a/libraries/plugins/README.md b/libraries/plugins/README.md index 68414c32cd..e7e2f9b499 100644 --- a/libraries/plugins/README.md +++ b/libraries/plugins/README.md @@ 
-4,7 +4,9 @@ The bitshares plugins are a collection of tools that brings new functionality wi The main source of I/O of the bitshares blockchain is the API. Plugins are a more powerful alternative to build more complex developments for when the current API is not enough. -Plugins are optional to run by node operator according to their needs. However, all plugins here will be compiled. There are plans for optional build of plugins at: [Issue 533](https://github.com/bitshares/bitshares-core/issues/533) +Plugins are optional to run by node operator according to their needs. However, all plugins here will be compiled. There are plans for optional build of plugins at: [Issue 533](https://github.com/bitshares/bitshares-core/issues/533). + +The [make_new_plugin.sh](make_new_plugin.sh) script can be used to create a skeleton of a new plugin quickly from a [template](template_plugin). # Available Plugins diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index 47878ba006..5f9e4c67f6 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -327,13 +327,13 @@ void account_history_plugin::plugin_initialize(const boost::program_options::var database().add_index< primary_index< account_transaction_history_index > >(); LOAD_VALUE_SET(options, "track-account", my->_tracked_accounts, graphene::chain::account_id_type); - if (options.count("partial-operations")) { + if (options.count("partial-operations") > 0) { my->_partial_operations = options["partial-operations"].as(); } - if (options.count("max-ops-per-account")) { + if (options.count("max-ops-per-account") > 0) { my->_max_ops_per_account = options["max-ops-per-account"].as(); } - if (options.count("extended-max-ops-per-account")) { + if (options.count("extended-max-ops-per-account") > 0) { auto emopa = 
options["extended-max-ops-per-account"].as(); my->_extended_max_ops_per_account = (emopa > my->_max_ops_per_account) ? emopa : my->_max_ops_per_account; } diff --git a/libraries/plugins/custom_operations/custom_evaluators.cpp b/libraries/plugins/custom_operations/custom_evaluators.cpp index 0b300b5369..bcc183f612 100644 --- a/libraries/plugins/custom_operations/custom_evaluators.cpp +++ b/libraries/plugins/custom_operations/custom_evaluators.cpp @@ -77,7 +77,7 @@ vector custom_generic_evaluator::do_apply(const account_storage_ else { try { - _db->modify(*itr, [&op, this, &row](account_storage_object &aso) { + _db->modify(*itr, [&row](account_storage_object &aso) { if(row.second.valid()) aso.value = fc::json::from_string(*row.second); else diff --git a/libraries/plugins/custom_operations/custom_operations_plugin.cpp b/libraries/plugins/custom_operations/custom_operations_plugin.cpp index 64d5959d90..70f898fa49 100644 --- a/libraries/plugins/custom_operations/custom_operations_plugin.cpp +++ b/libraries/plugins/custom_operations/custom_operations_plugin.cpp @@ -140,7 +140,7 @@ void custom_operations_plugin::plugin_initialize(const boost::program_options::v { database().add_index< primary_index< account_storage_index > >(); - if (options.count("custom-operations-start-block")) { + if (options.count("custom-operations-start-block") > 0) { my->_start_block = options["custom-operations-start-block"].as(); } diff --git a/libraries/plugins/debug_witness/debug_witness.cpp b/libraries/plugins/debug_witness/debug_witness.cpp index 7268006d3b..ea7f209fa6 100644 --- a/libraries/plugins/debug_witness/debug_witness.cpp +++ b/libraries/plugins/debug_witness/debug_witness.cpp @@ -62,7 +62,7 @@ void debug_witness_plugin::plugin_initialize(const boost::program_options::varia ilog("debug_witness plugin: plugin_initialize() begin"); _options = &options; - if( options.count("debug-private-key") ) + if( options.count("debug-private-key") > 0 ) { const std::vector 
key_id_to_wif_pair_strings = options["debug-private-key"].as>(); for (const std::string& key_id_to_wif_pair_string : key_id_to_wif_pair_strings) diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 80bb62cb81..2860057f3a 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -483,34 +483,34 @@ void elasticsearch_plugin::plugin_initialize(const boost::program_options::varia my->_oho_index = database().add_index< primary_index< operation_history_index > >(); database().add_index< primary_index< account_transaction_history_index > >(); - if (options.count("elasticsearch-node-url")) { + if (options.count("elasticsearch-node-url") > 0) { my->_elasticsearch_node_url = options["elasticsearch-node-url"].as(); } - if (options.count("elasticsearch-bulk-replay")) { + if (options.count("elasticsearch-bulk-replay") > 0) { my->_elasticsearch_bulk_replay = options["elasticsearch-bulk-replay"].as(); } - if (options.count("elasticsearch-bulk-sync")) { + if (options.count("elasticsearch-bulk-sync") > 0) { my->_elasticsearch_bulk_sync = options["elasticsearch-bulk-sync"].as(); } - if (options.count("elasticsearch-visitor")) { + if (options.count("elasticsearch-visitor") > 0) { my->_elasticsearch_visitor = options["elasticsearch-visitor"].as(); } - if (options.count("elasticsearch-basic-auth")) { + if (options.count("elasticsearch-basic-auth") > 0) { my->_elasticsearch_basic_auth = options["elasticsearch-basic-auth"].as(); } - if (options.count("elasticsearch-index-prefix")) { + if (options.count("elasticsearch-index-prefix") > 0) { my->_elasticsearch_index_prefix = options["elasticsearch-index-prefix"].as(); } - if (options.count("elasticsearch-operation-object")) { + if (options.count("elasticsearch-operation-object") > 0) { my->_elasticsearch_operation_object = options["elasticsearch-operation-object"].as(); } - if 
(options.count("elasticsearch-start-es-after-block")) { + if (options.count("elasticsearch-start-es-after-block") > 0) { my->_elasticsearch_start_es_after_block = options["elasticsearch-start-es-after-block"].as(); } - if (options.count("elasticsearch-operation-string")) { + if (options.count("elasticsearch-operation-string") > 0) { my->_elasticsearch_operation_string = options["elasticsearch-operation-string"].as(); } - if (options.count("elasticsearch-mode")) { + if (options.count("elasticsearch-mode") > 0) { const auto option_number = options["elasticsearch-mode"].as(); if(option_number > mode::all) FC_THROW_EXCEPTION(graphene::chain::plugin_exception, "Elasticsearch mode not valid"); diff --git a/libraries/plugins/es_objects/es_objects.cpp b/libraries/plugins/es_objects/es_objects.cpp index a2c332e9f1..9aec0f574d 100644 --- a/libraries/plugins/es_objects/es_objects.cpp +++ b/libraries/plugins/es_objects/es_objects.cpp @@ -320,43 +320,43 @@ void es_objects_plugin::plugin_set_program_options( void es_objects_plugin::plugin_initialize(const boost::program_options::variables_map& options) { - if (options.count("es-objects-elasticsearch-url")) { + if (options.count("es-objects-elasticsearch-url") > 0) { my->_es_objects_elasticsearch_url = options["es-objects-elasticsearch-url"].as(); } - if (options.count("es-objects-auth")) { + if (options.count("es-objects-auth") > 0) { my->_es_objects_auth = options["es-objects-auth"].as(); } - if (options.count("es-objects-bulk-replay")) { + if (options.count("es-objects-bulk-replay") > 0) { my->_es_objects_bulk_replay = options["es-objects-bulk-replay"].as(); } - if (options.count("es-objects-bulk-sync")) { + if (options.count("es-objects-bulk-sync") > 0) { my->_es_objects_bulk_sync = options["es-objects-bulk-sync"].as(); } - if (options.count("es-objects-proposals")) { + if (options.count("es-objects-proposals") > 0) { my->_es_objects_proposals = options["es-objects-proposals"].as(); } - if 
(options.count("es-objects-accounts")) { + if (options.count("es-objects-accounts") > 0) { my->_es_objects_accounts = options["es-objects-accounts"].as(); } - if (options.count("es-objects-assets")) { + if (options.count("es-objects-assets") > 0) { my->_es_objects_assets = options["es-objects-assets"].as(); } - if (options.count("es-objects-balances")) { + if (options.count("es-objects-balances") > 0) { my->_es_objects_balances = options["es-objects-balances"].as(); } - if (options.count("es-objects-limit-orders")) { + if (options.count("es-objects-limit-orders") > 0) { my->_es_objects_limit_orders = options["es-objects-limit-orders"].as(); } - if (options.count("es-objects-asset-bitasset")) { + if (options.count("es-objects-asset-bitasset") > 0) { my->_es_objects_asset_bitasset = options["es-objects-asset-bitasset"].as(); } - if (options.count("es-objects-index-prefix")) { + if (options.count("es-objects-index-prefix") > 0) { my->_es_objects_index_prefix = options["es-objects-index-prefix"].as(); } - if (options.count("es-objects-keep-only-current")) { + if (options.count("es-objects-keep-only-current") > 0) { my->_es_objects_keep_only_current = options["es-objects-keep-only-current"].as(); } - if (options.count("es-objects-start-es-after-block")) { + if (options.count("es-objects-start-es-after-block") > 0) { my->_es_objects_start_es_after_block = options["es-objects-start-es-after-block"].as(); } @@ -405,4 +405,4 @@ void es_objects_plugin::plugin_startup() ilog("elasticsearch OBJECTS: plugin_startup() begin"); } -} } \ No newline at end of file +} } diff --git a/libraries/plugins/grouped_orders/grouped_orders_plugin.cpp b/libraries/plugins/grouped_orders/grouped_orders_plugin.cpp index 2e2542bef3..96705ca22d 100644 --- a/libraries/plugins/grouped_orders/grouped_orders_plugin.cpp +++ b/libraries/plugins/grouped_orders/grouped_orders_plugin.cpp @@ -270,7 +270,7 @@ void grouped_orders_plugin::plugin_set_program_options( void 
grouped_orders_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { - if( options.count( "tracked-groups" ) ) + if( options.count( "tracked-groups" ) > 0 ) { const std::string& groups = options["tracked-groups"].as(); my->_tracked_groups = fc::json::from_string(groups).as>( 2 ); diff --git a/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp b/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp index e777136764..4007b11d15 100644 --- a/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp +++ b/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp @@ -25,6 +25,7 @@ #include #include +#include #include #include @@ -53,7 +54,10 @@ enum market_history_object_type order_history_object_type = 0, bucket_object_type = 1, market_ticker_object_type = 2, - market_ticker_meta_object_type = 3 + market_ticker_meta_object_type = 3, + liquidity_pool_history_object_type = 4, + liquidity_pool_ticker_meta_object_type = 5, + liquidity_pool_ticker_object_type = 6 }; struct bucket_key @@ -217,6 +221,152 @@ typedef generic_index hist typedef generic_index market_ticker_index; +/** Stores operation histories related to liquidity pools */ +struct liquidity_pool_history_object : public abstract_object +{ + static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; + static constexpr uint8_t type_id = liquidity_pool_history_object_type; + + liquidity_pool_id_type pool; + uint64_t sequence = 0; + fc::time_point_sec time; + int64_t op_type; + operation_history_object op; +}; + +struct by_pool_seq; +struct by_pool_time; +struct by_pool_op_type_seq; +struct by_pool_op_type_time; + +typedef multi_index_container< + liquidity_pool_history_object, + indexed_by< + ordered_unique< tag, member< object, object_id_type, &object::id > >, + ordered_unique< tag, + composite_key< liquidity_pool_history_object, + member, 
+ member + >, + composite_key_compare< + std::less< liquidity_pool_id_type >, + std::greater< uint64_t > + > + >, + ordered_unique< tag, + composite_key< liquidity_pool_history_object, + member, + member, + member + >, + composite_key_compare< + std::less< liquidity_pool_id_type >, + std::greater< time_point_sec >, + std::greater< uint64_t > + > + >, + ordered_unique< tag, + composite_key< liquidity_pool_history_object, + member, + member, + member + >, + composite_key_compare< + std::less< liquidity_pool_id_type >, + std::less< int64_t >, + std::greater< uint64_t > + > + >, + ordered_unique< tag, + composite_key< liquidity_pool_history_object, + member, + member, + member, + member + >, + composite_key_compare< + std::less< liquidity_pool_id_type >, + std::less< int64_t >, + std::greater< time_point_sec >, + std::greater< uint64_t > + > + > + > +> liquidity_pool_history_multi_index_type; + +typedef generic_index< liquidity_pool_history_object, + liquidity_pool_history_multi_index_type > liquidity_pool_history_index; + + +/// Stores meta data for liquidity pool tickers +struct liquidity_pool_ticker_meta_object : public abstract_object +{ + static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; + static constexpr uint8_t type_id = liquidity_pool_ticker_meta_object_type; + + object_id_type rolling_min_lp_his_id; + bool skip_min_lp_his_id = false; +}; + +using liquidity_pool_ticker_id_type = object_id; + +/// Stores ticker data for liquidity pools +struct liquidity_pool_ticker_object : public abstract_object +{ + static constexpr uint8_t space_id = MARKET_HISTORY_SPACE_ID; + static constexpr uint8_t type_id = liquidity_pool_ticker_object_type; + + uint32_t _24h_deposit_count = 0; + fc::uint128_t _24h_deposit_amount_a = 0; + fc::uint128_t _24h_deposit_amount_b = 0; + fc::uint128_t _24h_deposit_share_amount = 0; + uint32_t _24h_withdrawal_count = 0; + fc::uint128_t _24h_withdrawal_amount_a = 0; + fc::uint128_t _24h_withdrawal_amount_b = 0; + fc::uint128_t 
_24h_withdrawal_share_amount = 0; + fc::uint128_t _24h_withdrawal_fee_a = 0; + fc::uint128_t _24h_withdrawal_fee_b = 0; + uint32_t _24h_exchange_a2b_count = 0; + fc::uint128_t _24h_exchange_a2b_amount_a = 0; + fc::uint128_t _24h_exchange_a2b_amount_b = 0; + uint32_t _24h_exchange_b2a_count = 0; + fc::uint128_t _24h_exchange_b2a_amount_a = 0; + fc::uint128_t _24h_exchange_b2a_amount_b = 0; + fc::uint128_t _24h_exchange_fee_a = 0; + fc::uint128_t _24h_exchange_fee_b = 0; + share_type _24h_balance_delta_a; + share_type _24h_balance_delta_b; + uint64_t total_deposit_count = 0; + fc::uint128_t total_deposit_amount_a = 0; + fc::uint128_t total_deposit_amount_b = 0; + fc::uint128_t total_deposit_share_amount = 0; + uint64_t total_withdrawal_count = 0; + fc::uint128_t total_withdrawal_amount_a = 0; + fc::uint128_t total_withdrawal_amount_b = 0; + fc::uint128_t total_withdrawal_share_amount = 0; + fc::uint128_t total_withdrawal_fee_a = 0; + fc::uint128_t total_withdrawal_fee_b = 0; + uint64_t total_exchange_a2b_count = 0; + fc::uint128_t total_exchange_a2b_amount_a = 0; + fc::uint128_t total_exchange_a2b_amount_b = 0; + uint64_t total_exchange_b2a_count = 0; + fc::uint128_t total_exchange_b2a_amount_a = 0; + fc::uint128_t total_exchange_b2a_amount_b = 0; + fc::uint128_t total_exchange_fee_a = 0; + fc::uint128_t total_exchange_fee_b = 0; +}; + +typedef multi_index_container< + liquidity_pool_ticker_object, + indexed_by< + ordered_unique< tag, member< object, object_id_type, &object::id > > + > +> liquidity_pool_ticker_multi_index_type; + +typedef generic_index< liquidity_pool_ticker_object, + liquidity_pool_ticker_multi_index_type > liquidity_pool_ticker_index; + + namespace detail { class market_history_plugin_impl; @@ -270,3 +420,47 @@ FC_REFLECT_DERIVED( graphene::market_history::market_ticker_object, (graphene::d (base_volume)(quote_volume) ) FC_REFLECT_DERIVED( graphene::market_history::market_ticker_meta_object, (graphene::db::object), 
(rolling_min_order_his_id)(skip_min_order_his_id) ) +FC_REFLECT_DERIVED( graphene::market_history::liquidity_pool_history_object, (graphene::db::object), + (pool)(sequence)(time)(op_type)(op) ) +FC_REFLECT_DERIVED( graphene::market_history::liquidity_pool_ticker_meta_object, (graphene::db::object), + (rolling_min_lp_his_id)(skip_min_lp_his_id) ) +FC_REFLECT_DERIVED( graphene::market_history::liquidity_pool_ticker_object, (graphene::db::object), + (_24h_deposit_count) + (_24h_deposit_amount_a) + (_24h_deposit_amount_b) + (_24h_deposit_share_amount) + (_24h_withdrawal_count) + (_24h_withdrawal_amount_a) + (_24h_withdrawal_amount_b) + (_24h_withdrawal_share_amount) + (_24h_withdrawal_fee_a) + (_24h_withdrawal_fee_b) + (_24h_exchange_a2b_count) + (_24h_exchange_a2b_amount_a) + (_24h_exchange_a2b_amount_b) + (_24h_exchange_b2a_count) + (_24h_exchange_b2a_amount_a) + (_24h_exchange_b2a_amount_b) + (_24h_exchange_fee_a) + (_24h_exchange_fee_b) + (_24h_balance_delta_a) + (_24h_balance_delta_b) + (total_deposit_count) + (total_deposit_amount_a) + (total_deposit_amount_b) + (total_deposit_share_amount) + (total_withdrawal_count) + (total_withdrawal_amount_a) + (total_withdrawal_amount_b) + (total_withdrawal_share_amount) + (total_withdrawal_fee_a) + (total_withdrawal_fee_b) + (total_exchange_a2b_count) + (total_exchange_a2b_amount_a) + (total_exchange_a2b_amount_b) + (total_exchange_b2a_count) + (total_exchange_b2a_amount_a) + (total_exchange_b2a_amount_b) + (total_exchange_fee_a) + (total_exchange_fee_b) + ) diff --git a/libraries/plugins/market_history/market_history_plugin.cpp b/libraries/plugins/market_history/market_history_plugin.cpp index 6b4a3558c4..ff69b7f144 100644 --- a/libraries/plugins/market_history/market_history_plugin.cpp +++ b/libraries/plugins/market_history/market_history_plugin.cpp @@ -52,6 +52,10 @@ class market_history_plugin_impl */ void update_market_histories( const signed_block& b ); + /// process all operations related to liquidity pools + void 
update_liquidity_pool_histories( time_point_sec time, const operation_history_object& oho, + const liquidity_pool_ticker_meta_object*& lp_meta ); + graphene::chain::database& database() { return _self.database(); @@ -80,7 +84,7 @@ struct operation_process_fill_order template void operator()( const T& )const{} - void operator()( const fill_order_operation& o )const + void operator()( const fill_order_operation& o )const { //ilog( "processing ${o}", ("o",o) ); auto& db = _plugin.database(); @@ -92,7 +96,7 @@ struct operation_process_fill_order history_key hkey; hkey.base = o.pays.asset_id; hkey.quote = o.receives.asset_id; - if( hkey.base > hkey.quote ) + if( hkey.base > hkey.quote ) std::swap( hkey.base, hkey.quote ); hkey.sequence = std::numeric_limits::min(); @@ -296,19 +300,29 @@ market_history_plugin_impl::~market_history_plugin_impl() void market_history_plugin_impl::update_market_histories( const signed_block& b ) { graphene::chain::database& db = database(); + const market_ticker_meta_object* _meta = nullptr; const auto& meta_idx = db.get_index_type>(); if( meta_idx.size() > 0 ) _meta = &( *meta_idx.begin() ); + + const liquidity_pool_ticker_meta_object* _lp_meta = nullptr; + const auto& lp_meta_idx = db.get_index_type>(); + if( lp_meta_idx.size() > 0 ) + _lp_meta = &( *lp_meta_idx.begin() ); + const vector >& hist = db.get_applied_operations(); for( const optional< operation_history_object >& o_op : hist ) { if( o_op.valid() ) { + // process market history try { o_op->op.visit( operation_process_fill_order( _self, b.timestamp, _meta ) ); } FC_CAPTURE_AND_LOG( (o_op) ) + // process liquidity pool history + update_liquidity_pool_histories( b.timestamp, *o_op, _lp_meta ); } } // roll out expired data from ticker @@ -381,8 +395,339 @@ void market_history_plugin_impl::update_market_histories( const signed_block& b } } } + // roll out expired data from LP ticker + if( _lp_meta != nullptr ) + { + time_point_sec last_day = b.timestamp - 86400; + object_id_type 
last_min_his_id = _lp_meta->rolling_min_lp_his_id; + bool skip = _lp_meta->skip_min_lp_his_id; + + const auto& history_idx = db.get_index_type().indices().get(); + auto history_itr = history_idx.lower_bound( _lp_meta->rolling_min_lp_his_id ); + while( history_itr != history_idx.end() && history_itr->time < last_day ) + { + if( skip && history_itr->id == _lp_meta->rolling_min_lp_his_id ) + skip = false; + else + { + liquidity_pool_ticker_id_type ticker_id( history_itr->pool.instance ); + const liquidity_pool_ticker_object* ticker = db.find( ticker_id ); + if( ticker != nullptr ) // should always be true + { + const operation_history_object& oho = history_itr->op; + if( oho.op.is_type< liquidity_pool_deposit_operation >() ) + { + auto& op = oho.op.get< liquidity_pool_deposit_operation >(); + auto& result = oho.result.get< generic_exchange_operation_result >(); + db.modify( *ticker, [&op,&result]( liquidity_pool_ticker_object& t ) { + t._24h_deposit_count -= 1; + t._24h_deposit_amount_a -= op.amount_a.amount.value; + t._24h_deposit_amount_b -= op.amount_b.amount.value; + t._24h_deposit_share_amount -= result.received.front().amount.value; + t._24h_balance_delta_a -= op.amount_a.amount.value; + t._24h_balance_delta_b -= op.amount_b.amount.value; + }); + } + else if( oho.op.is_type< liquidity_pool_withdraw_operation >() ) + { + auto& op = oho.op.get< liquidity_pool_withdraw_operation >(); + auto& result = oho.result.get< generic_exchange_operation_result >(); + db.modify( *ticker, [&op,&result]( liquidity_pool_ticker_object& t ) { + t._24h_withdrawal_count -= 1; + t._24h_withdrawal_amount_a -= result.received.front().amount.value; + t._24h_withdrawal_amount_b -= result.received.back().amount.value; + t._24h_withdrawal_share_amount -= op.share_amount.amount.value; + t._24h_withdrawal_fee_a -= result.fees.front().amount.value; + t._24h_withdrawal_fee_b -= result.fees.back().amount.value; + t._24h_balance_delta_a += result.received.front().amount.value; + 
t._24h_balance_delta_b += result.received.back().amount.value; + }); + } + else if( oho.op.is_type< liquidity_pool_exchange_operation >() ) + { + auto& op = oho.op.get< liquidity_pool_exchange_operation >(); + auto& result = oho.result.get< generic_exchange_operation_result >(); + db.modify( *ticker, [&op,&result]( liquidity_pool_ticker_object& t ) { + auto amount_in = op.amount_to_sell.amount - result.fees.front().amount; + auto amount_out = result.received.front().amount + result.fees.at(1).amount; + if( op.amount_to_sell.asset_id < op.min_to_receive.asset_id ) // pool got a, paid b + { + t._24h_exchange_a2b_count -= 1; + t._24h_exchange_a2b_amount_a -= amount_in.value; + t._24h_exchange_a2b_amount_b -= amount_out.value; + t._24h_exchange_fee_b -= result.fees.back().amount.value; + t._24h_balance_delta_a -= amount_in.value; + t._24h_balance_delta_b += amount_out.value; + } + else // pool got b, paid a + { + t._24h_exchange_b2a_count -= 1; + t._24h_exchange_b2a_amount_a -= amount_out.value; + t._24h_exchange_b2a_amount_b -= amount_in.value; + t._24h_exchange_fee_a -= result.fees.back().amount.value; + t._24h_balance_delta_a += amount_out.value; + t._24h_balance_delta_b -= amount_in.value; + } + }); + } + } + } + last_min_his_id = history_itr->id; + ++history_itr; + } + // update meta + if( history_itr != history_idx.end() ) // if still has some data rolling + { + if( history_itr->id != _lp_meta->rolling_min_lp_his_id ) // if rolled out some + { + db.modify( *_lp_meta, [history_itr]( liquidity_pool_ticker_meta_object& mtm ) { + mtm.rolling_min_lp_his_id = history_itr->id; + mtm.skip_min_lp_his_id = false; + }); + } + } + else // if all data are rolled out + { + if( !_lp_meta->skip_min_lp_his_id + || last_min_his_id != _lp_meta->rolling_min_lp_his_id ) // if rolled out some + { + db.modify( *_lp_meta, [last_min_his_id]( liquidity_pool_ticker_meta_object& mtm ) { + mtm.rolling_min_lp_his_id = last_min_his_id; + mtm.skip_min_lp_his_id = true; + }); + } + } + } } 
+struct get_liquidity_pool_id_visitor +{ + typedef optional result_type; + + /** do nothing for other operation types */ + template + result_type operator()( const T& )const + { + return {}; + } + + result_type operator()( const liquidity_pool_delete_operation& o )const + { + return o.pool; + } + + result_type operator()( const liquidity_pool_deposit_operation& o )const + { + return o.pool; + } + + result_type operator()( const liquidity_pool_withdraw_operation& o )const + { + return o.pool; + } + + result_type operator()( const liquidity_pool_exchange_operation& o )const + { + return o.pool; + } + +}; + +void market_history_plugin_impl::update_liquidity_pool_histories( + time_point_sec time, const operation_history_object& oho, + const liquidity_pool_ticker_meta_object*& lp_meta ) +{ try { + + optional pool; + uint64_t sequence = 0; + if( oho.op.is_type< liquidity_pool_create_operation >() ) + { + pool = *oho.result.get().new_objects.begin(); + sequence = 1; + } + else + { + pool = oho.op.visit( get_liquidity_pool_id_visitor() ); + } + + if( pool.valid() ) + { + auto& db = database(); + const auto& his_index = db.get_index_type().indices(); + const auto& his_seq_idx = his_index.get(); + const auto& his_time_idx = his_index.get(); + + if( sequence == 0 ) + { + auto itr = his_seq_idx.lower_bound( *pool ); + if( itr != his_seq_idx.end() && itr->pool == *pool ) + sequence = itr->sequence + 1; + else + sequence = 2; + } + + // To save new data + const auto& new_his_obj = db.create( [&pool,sequence,time,&oho]( + liquidity_pool_history_object& ho ) { + ho.pool = *pool; + ho.sequence = sequence; + ho.time = time; + ho.op_type = oho.op.which(); + ho.op = oho; + }); + + // save a reference to the ticker meta object + if( lp_meta == nullptr ) + { + const auto& lp_meta_idx = db.get_index_type>(); + if( lp_meta_idx.size() == 0 ) + lp_meta = &db.create( [&new_his_obj]( + liquidity_pool_ticker_meta_object& lptm ) { + lptm.rolling_min_lp_his_id = new_his_obj.id; + 
lptm.skip_min_lp_his_id = false; + }); + else + lp_meta = &( *lp_meta_idx.begin() ); + } + + // To remove old history data + if( sequence > _max_order_his_records_per_market ) + { + const auto min_seq = sequence - _max_order_his_records_per_market; + auto itr = his_seq_idx.lower_bound( std::make_tuple( *pool, min_seq ) ); + if( itr != his_seq_idx.end() && itr->pool == *pool ) + { + fc::time_point_sec min_time; + if( min_time + _max_order_his_seconds_per_market < time ) + min_time = time - _max_order_his_seconds_per_market; + auto time_itr = his_time_idx.lower_bound( std::make_tuple( *pool, min_time ) ); + if( time_itr != his_time_idx.end() && time_itr->pool == *pool ) + { + if( itr->sequence <= time_itr->sequence ) + { + while( itr != his_seq_idx.end() && itr->pool == *pool ) + { + auto old_itr = itr; + ++itr; + db.remove( *old_itr ); + } + } + else + { + while( time_itr != his_time_idx.end() && time_itr->pool == *pool ) + { + auto old_itr = time_itr; + ++time_itr; + db.remove( *old_itr ); + } + } + } + } + } + + // To update ticker data + if( sequence == 1 ) // create + { + const liquidity_pool_ticker_object* ticker = nullptr; + do { + ticker = &db.create( []( liquidity_pool_ticker_object& lpt ) { + }); + } while( ticker->id.instance() < pool->instance ); + } + else + { + liquidity_pool_ticker_id_type ticker_id( pool->instance ); + const liquidity_pool_ticker_object* ticker = db.find( ticker_id ); + if( ticker != nullptr ) + { + if( oho.op.is_type< liquidity_pool_deposit_operation >() ) + { + auto& op = oho.op.get< liquidity_pool_deposit_operation >(); + auto& result = oho.result.get< generic_exchange_operation_result >(); + + db.modify( *ticker, [&op,&result]( liquidity_pool_ticker_object& t ) { + t._24h_deposit_count += 1; + t._24h_deposit_amount_a += op.amount_a.amount.value; + t._24h_deposit_amount_b += op.amount_b.amount.value; + t._24h_deposit_share_amount += result.received.front().amount.value; + t._24h_balance_delta_a += op.amount_a.amount.value; + 
t._24h_balance_delta_b += op.amount_b.amount.value; + t.total_deposit_count += 1; + t.total_deposit_amount_a += op.amount_a.amount.value; + t.total_deposit_amount_b += op.amount_b.amount.value; + t.total_deposit_share_amount += result.received.front().amount.value; + }); + + } + else if( oho.op.is_type< liquidity_pool_withdraw_operation >() ) + { + auto& op = oho.op.get< liquidity_pool_withdraw_operation >(); + auto& result = oho.result.get< generic_exchange_operation_result >(); + + db.modify( *ticker, [&op,&result]( liquidity_pool_ticker_object& t ) { + t._24h_withdrawal_count += 1; + t._24h_withdrawal_amount_a += result.received.front().amount.value; + t._24h_withdrawal_amount_b += result.received.back().amount.value; + t._24h_withdrawal_share_amount += op.share_amount.amount.value; + t._24h_withdrawal_fee_a += result.fees.front().amount.value; + t._24h_withdrawal_fee_b += result.fees.back().amount.value; + t._24h_balance_delta_a -= result.received.front().amount.value; + t._24h_balance_delta_b -= result.received.back().amount.value; + t.total_withdrawal_count += 1; + t.total_withdrawal_amount_a += result.received.front().amount.value; + t.total_withdrawal_amount_b += result.received.back().amount.value; + t.total_withdrawal_share_amount += op.share_amount.amount.value; + t.total_withdrawal_fee_a += result.fees.front().amount.value; + t.total_withdrawal_fee_b += result.fees.back().amount.value; + }); + + } + else if( oho.op.is_type< liquidity_pool_exchange_operation >() ) + { + auto& op = oho.op.get< liquidity_pool_exchange_operation >(); + auto& result = oho.result.get< generic_exchange_operation_result >(); + + db.modify( *ticker, [&op,&result]( liquidity_pool_ticker_object& t ) { + auto amount_in = op.amount_to_sell.amount - result.fees.front().amount; + auto amount_out = result.received.front().amount + result.fees.at(1).amount; + if( op.amount_to_sell.asset_id < op.min_to_receive.asset_id ) // pool got a, paid b + { + t._24h_exchange_a2b_count += 1; + 
t._24h_exchange_a2b_amount_a += amount_in.value; + t._24h_exchange_a2b_amount_b += amount_out.value; + t._24h_exchange_fee_b += result.fees.back().amount.value; + t._24h_balance_delta_a += amount_in.value; + t._24h_balance_delta_b -= amount_out.value; + t.total_exchange_a2b_count += 1; + t.total_exchange_a2b_amount_a += amount_in.value; + t.total_exchange_a2b_amount_b += amount_out.value; + t.total_exchange_fee_b += result.fees.back().amount.value; + } + else // pool got b, paid a + { + t._24h_exchange_b2a_count += 1; + t._24h_exchange_b2a_amount_a += amount_out.value; + t._24h_exchange_b2a_amount_b += amount_in.value; + t._24h_exchange_fee_a += result.fees.back().amount.value; + t._24h_balance_delta_a -= amount_out.value; + t._24h_balance_delta_b += amount_in.value; + t.total_exchange_b2a_count += 1; + t.total_exchange_b2a_amount_a += amount_out.value; + t.total_exchange_b2a_amount_b += amount_in.value; + t.total_exchange_fee_a += result.fees.back().amount.value; + } + }); + + } + } + } + + + } + +} FC_CAPTURE_AND_LOG( (time)(oho) ) } + + } // end namespace detail @@ -411,13 +756,20 @@ void market_history_plugin::plugin_set_program_options( { cli.add_options() ("bucket-size", boost::program_options::value()->default_value("[60,300,900,1800,3600,14400,86400]"), - "Track market history by grouping orders into buckets of equal size measured in seconds specified as a JSON array of numbers") + "Track market history by grouping orders into buckets of equal size measured " + "in seconds specified as a JSON array of numbers") ("history-per-size", boost::program_options::value()->default_value(1000), - "How far back in time to track history for each bucket size, measured in the number of buckets (default: 1000)") + "How far back in time to track history for each bucket size, " + "measured in the number of buckets (default: 1000)") ("max-order-his-records-per-market", boost::program_options::value()->default_value(1000), - "Will only store this amount of matched orders for 
each market in order history for querying, or those meet the other option, which has more data (default: 1000)") + "Will only store this amount of matched orders for each market in order history for querying, " + "or those meet the other option, which has more data (default: 1000). " + "This parameter is reused for liquidity pools as maximum operations per pool in history.") ("max-order-his-seconds-per-market", boost::program_options::value()->default_value(259200), - "Will only store matched orders in last X seconds for each market in order history for querying, or those meet the other option, which has more data (default: 259200 (3 days))") + "Will only store matched orders in last X seconds for each market in order history for querying, " + "or those meet the other option, which has more data (default: 259200 (3 days)). " + "This parameter is reused for liquidity pools as operations in last X seconds per pool in history. " + "Note: this parameter need to be greater than 24 hours to be able to serve market ticker data correctly.") ; cfg.add(cli); } @@ -425,22 +777,27 @@ void market_history_plugin::plugin_set_program_options( void market_history_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { database().applied_block.connect( [this]( const signed_block& b){ my->update_market_histories(b); } ); + database().add_index< primary_index< bucket_index > >(); database().add_index< primary_index< history_index > >(); database().add_index< primary_index< market_ticker_index > >(); database().add_index< primary_index< simple_index< market_ticker_meta_object > > >(); - if( options.count( "bucket-size" ) ) + database().add_index< primary_index< liquidity_pool_history_index > >(); + database().add_index< primary_index< simple_index< liquidity_pool_ticker_meta_object > > >(); + database().add_index< primary_index< liquidity_pool_ticker_index, 8 > >(); // 256 pools per chunk + + if( options.count( "bucket-size" ) > 0 ) { const std::string& 
buckets = options["bucket-size"].as(); my->_tracked_buckets = fc::json::from_string(buckets).as>(2); my->_tracked_buckets.erase( 0 ); } - if( options.count( "history-per-size" ) ) + if( options.count( "history-per-size" ) > 0 ) my->_maximum_history_per_bucket_size = options["history-per-size"].as(); - if( options.count( "max-order-his-records-per-market" ) ) + if( options.count( "max-order-his-records-per-market" ) > 0 ) my->_max_order_his_records_per_market = options["max-order-his-records-per-market"].as(); - if( options.count( "max-order-his-seconds-per-market" ) ) + if( options.count( "max-order-his-seconds-per-market" ) > 0 ) my->_max_order_his_seconds_per_market = options["max-order-his-seconds-per-market"].as(); } FC_CAPTURE_AND_RETHROW() } diff --git a/libraries/plugins/snapshot/snapshot.cpp b/libraries/plugins/snapshot/snapshot.cpp index f74ad5894a..a7e9d5138c 100644 --- a/libraries/plugins/snapshot/snapshot.cpp +++ b/libraries/plugins/snapshot/snapshot.cpp @@ -63,20 +63,22 @@ void snapshot_plugin::plugin_initialize(const boost::program_options::variables_ { try { ilog("snapshot plugin: plugin_initialize() begin"); - if( options.count(OPT_BLOCK_NUM) || options.count(OPT_BLOCK_TIME) ) + if( options.count(OPT_BLOCK_NUM) > 0 || options.count(OPT_BLOCK_TIME) > 0 ) { - FC_ASSERT( options.count(OPT_DEST), "Must specify snapshot-to in addition to snapshot-at-block or snapshot-at-time!" ); + FC_ASSERT( options.count(OPT_DEST) > 0, + "Must specify snapshot-to in addition to snapshot-at-block or snapshot-at-time!" 
); dest = options[OPT_DEST].as(); - if( options.count(OPT_BLOCK_NUM) ) + if( options.count(OPT_BLOCK_NUM) > 0 ) snapshot_block = options[OPT_BLOCK_NUM].as(); - if( options.count(OPT_BLOCK_TIME) ) + if( options.count(OPT_BLOCK_TIME) > 0 ) snapshot_time = fc::time_point_sec::from_iso_string( options[OPT_BLOCK_TIME].as() ); database().applied_block.connect( [&]( const graphene::chain::signed_block& b ) { check_snapshot( b ); }); } else - FC_ASSERT( !options.count("snapshot-to"), "Must specify snapshot-at-block or snapshot-at-time in addition to snapshot-to!" ); + ilog("snapshot plugin is not enabled because neither snapshot-at-block nor snapshot-at-time is specified"); + ilog("snapshot plugin: plugin_initialize() end"); } FC_LOG_AND_RETHROW() } diff --git a/libraries/plugins/template_plugin/template_plugin.cpp b/libraries/plugins/template_plugin/template_plugin.cpp index 2296f25cef..7c2cfbc831 100644 --- a/libraries/plugins/template_plugin/template_plugin.cpp +++ b/libraries/plugins/template_plugin/template_plugin.cpp @@ -99,7 +99,7 @@ void template_plugin::plugin_initialize(const boost::program_options::variables_ my->onBlock(b); } ); - if (options.count("template_plugin")) { + if (options.count("template_plugin") > 0) { my->_plugin_option = options["template_plugin"].as(); } } diff --git a/libraries/plugins/witness/witness.cpp b/libraries/plugins/witness/witness.cpp index d2609625ab..88e1499086 100644 --- a/libraries/plugins/witness/witness.cpp +++ b/libraries/plugins/witness/witness.cpp @@ -122,7 +122,7 @@ void witness_plugin::plugin_initialize(const boost::program_options::variables_m _options = &options; LOAD_VALUE_SET(options, "witness-id", _witnesses, chain::witness_id_type) - if( options.count("private-key") ) + if( options.count("private-key") > 0 ) { const std::vector key_id_to_wif_pair_strings = options["private-key"].as>(); for (const std::string& key_id_to_wif_pair_string : key_id_to_wif_pair_strings) @@ -130,7 +130,7 @@ void 
witness_plugin::plugin_initialize(const boost::program_options::variables_m add_private_key(key_id_to_wif_pair_string); } } - if (options.count("private-key-file")) + if (options.count("private-key-file") > 0) { const std::vector key_id_to_wif_pair_files = options["private-key-file"].as>(); @@ -154,7 +154,7 @@ void witness_plugin::plugin_initialize(const boost::program_options::variables_m } } } - if(options.count("required-participation")) + if(options.count("required-participation") > 0) { auto required_participation = options["required-participation"].as(); FC_ASSERT(required_participation <= 100); @@ -217,7 +217,7 @@ void witness_plugin::stop_block_production() void witness_plugin::refresh_witness_key_cache() { const auto& db = database(); - for( const chain::witness_id_type wit_id : _witnesses ) + for( const chain::witness_id_type& wit_id : _witnesses ) { const chain::witness_object* wit_obj = db.find( wit_id ); if( wit_obj ) diff --git a/libraries/protocol/account.cpp b/libraries/protocol/account.cpp index 72c3adf5ed..e1822308be 100644 --- a/libraries/protocol/account.cpp +++ b/libraries/protocol/account.cpp @@ -29,20 +29,24 @@ namespace graphene { namespace protocol { /** * Names must comply with the following grammar (RFC 1035): + * @code * ::= | " " * ::=