diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index d4bc7244193..e20657a2db6 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -97,9 +97,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":darwin: Tests" agents: - "role=macos-tester" @@ -107,7 +104,7 @@ steps: - "mongod.log" - "build/genesis.json" - "build/config.ini" - timeout: 60 + timeout: 100 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -117,9 +114,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":ubuntu: Tests" agents: - "role=linux-tester" @@ -131,7 +125,7 @@ steps: docker#v1.4.0: image: "eosio/ci:ubuntu" workdir: /data/job - timeout: 60 + timeout: 100 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -141,9 +135,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":ubuntu: 18.04 Tests" agents: - "role=linux-tester" @@ -155,7 +146,7 @@ steps: docker#v1.4.0: image: "eosio/ci:ubuntu18" workdir: /data/job - timeout: 60 + timeout: 100 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -165,9 +156,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":fedora: Tests" agents: - "role=linux-tester" @@ -179,7 +167,7 @@ steps: docker#v1.4.0: image: "eosio/ci:fedora" 
workdir: /data/job - timeout: 60 + timeout: 100 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -189,9 +177,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":centos: Tests" agents: - "role=linux-tester" @@ -203,7 +188,7 @@ steps: docker#v1.4.0: image: "eosio/ci:centos" workdir: /data/job - timeout: 60 + timeout: 100 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -213,9 +198,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":aws: Tests" agents: - "role=linux-tester" @@ -227,4 +209,4 @@ steps: docker#v1.4.0: image: "eosio/ci:amazonlinux" workdir: /data/job - timeout: 60 + timeout: 100 diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index b26a20f569b..d35e401f7b6 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -97,9 +97,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - retry: - automatic: - limit: 1 label: ":darwin: Tests" agents: - "role=macos-tester" @@ -117,9 +114,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":darwin: NP Tests" agents: - "role=macos-tester" @@ -137,9 +131,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - retry: - automatic: - limit: 1 
label: ":ubuntu: Tests" agents: - "role=linux-tester" @@ -161,9 +152,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":ubuntu: NP Tests" agents: - "role=linux-tester" @@ -185,9 +173,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - retry: - automatic: - limit: 1 label: ":ubuntu: 18.04 Tests" agents: - "role=linux-tester" @@ -209,9 +194,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":ubuntu: 18.04 NP Tests" agents: - "role=linux-tester" @@ -233,9 +215,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - retry: - automatic: - limit: 1 label: ":fedora: Tests" agents: - "role=linux-tester" @@ -257,9 +236,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":fedora: NP Tests" agents: - "role=linux-tester" @@ -281,9 +257,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - retry: - automatic: - limit: 1 label: ":centos: Tests" agents: - "role=linux-tester" @@ -305,9 +278,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - retry: - 
automatic: - limit: 1 label: ":centos: NP Tests" agents: - "role=linux-tester" @@ -329,9 +299,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - retry: - automatic: - limit: 1 label: ":aws: Tests" agents: - "role=linux-tester" @@ -353,9 +320,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":aws: NP Tests" agents: - "role=linux-tester" diff --git a/.buildkite/sanitizers.yml b/.buildkite/sanitizers.yml new file mode 100644 index 00000000000..db479d25697 --- /dev/null +++ b/.buildkite/sanitizers.yml @@ -0,0 +1,66 @@ +steps: + - command: | + echo "--- :hammer: Building with Undefined Sanitizer" && \ + /usr/bin/cmake -GNinja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_CXX_COMPILER=clang++-4.0 \ + -DCMAKE_C_COMPILER=clang-4.0 \ + -DBOOST_ROOT="${BOOST_ROOT}" \ + -DWASM_ROOT="${WASM_ROOT}" \ + -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ + -DBUILD_MONGO_DB_PLUGIN=true \ + -DENABLE_COVERAGE_TESTING=true\ + -DBUILD_DOXYGEN=false -DCMAKE_CXX_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ + -DCMAKE_C_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ + -DCMAKE_EXE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" \ + -DCMAKE_MODULE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" && \ + echo "--- :shinto_shrine: Running ninja" && \ + /usr/bin/ninja | tee ninja.log && \ + echo "--- :compression: Compressing build directory" && \ + tar -pczf build.tar.gz * + echo "--- :beers: Done" + label: ":_: Undefined Sanitizer" + agents: + - "role=automation-builder-large" + artifact_paths: + - "build.tar.gz" + - "ninja.log" + plugins: + 
docker#v1.4.0: + image: "eosio/ci:ubuntu18" + command: ["--privileged"] + workdir: /data/job + mounts: + - /etc/buildkite-agent/config:/config + environment: + - BOOST_ROOT=/root/opt/boost + - OPENSSL_ROOT_DIR=/usr/include/openssl + - WASM_ROOT=/root/opt/wasm + - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin + - CI=true + - UBSAN_OPTIONS=print_stacktrace=1 + timeout: 60 + + - wait + + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":_: Undefined Sanitizer" && \ + tar -zxf build.tar.gz --no-same-owner && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + ctest -j8 -LE _tests -V -O sanitizer.log + label: ":_: Undefined Sanitizer Tests" + agents: + - "role=automation-builder-large" + artifact_paths: + - "mongod.log" + - "sanitizer.log" + plugins: + docker#v1.4.0: + image: "eosio/ci:ubuntu18" + workdir: /data/job + mounts: + - /etc/buildkite-agent/config:/config + timeout: 120 \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..9ffbb5ffb6b --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,20 @@ + + + +**Change Description** + + + +**Consensus Changes** + + + + +**API Changes** + + + + +**Documentation Additions** + + diff --git a/CMakeLists.txt b/CMakeLists.txt index 6f021d90f0d..3ec2ba0d0bc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,8 +26,8 @@ set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) -set(VERSION_MINOR 3) -set(VERSION_PATCH 2) +set(VERSION_MINOR 4) +set(VERSION_PATCH 0) set( CLI_CLIENT_EXECUTABLE_NAME cleos ) set( NODE_EXECUTABLE_NAME nodeos ) @@ -186,6 +186,7 @@ if(ENABLE_COVERAGE_TESTING) find_program( GENHTML_PATH NAMES genhtml) endif() +include(utils) add_subdirectory( externals ) if 
("${CORE_SYMBOL_NAME}" STREQUAL "") @@ -207,7 +208,6 @@ endif() message( STATUS "Using '${EOSIO_ROOT_KEY}' as public key for 'eosio' account" ) include(wasm) - add_subdirectory( libraries ) add_subdirectory( contracts ) add_subdirectory( plugins ) @@ -228,10 +228,38 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/testnet.template ${CMAKE_CURRENT_BINA configure_file(${CMAKE_CURRENT_SOURCE_DIR}/eosio.version.in ${CMAKE_CURRENT_BINARY_DIR}/eosio.version.hpp) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/eosio.version.hpp DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules/EosioTester.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/EosioTester.cmake @ONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules/EosioTesterBuild.cmake.in ${CMAKE_BINARY_DIR}/lib/cmake/EosioTester.cmake @ONLY) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/EosioTester.cmake DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/cmake/) - -include(installer) - +set(EOS_ROOT_DIR ${CMAKE_BINARY_DIR}) +configure_file(${CMAKE_SOURCE_DIR}/CMakeModules/eosio-config.cmake.in ${CMAKE_BINARY_DIR}/lib/cmake/eosio/eosio-config.cmake @ONLY) +configure_file(${CMAKE_SOURCE_DIR}/CMakeModules/EosioTesterBuild.cmake.in ${CMAKE_BINARY_DIR}/lib/cmake/eosio/EosioTester.cmake @ONLY) + +set(EOS_ROOT_DIR ${CMAKE_INSTALL_PREFIX}) +configure_file(${CMAKE_SOURCE_DIR}/CMakeModules/eosio-config.cmake.in ${CMAKE_BINARY_DIR}/modules/eosio-config.cmake @ONLY) +install(FILES ${CMAKE_BINARY_DIR}/modules/eosio-config.cmake DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/cmake/eosio) +configure_file(${CMAKE_SOURCE_DIR}/CMakeModules/EosioTester.cmake.in ${CMAKE_BINARY_DIR}/modules/EosioTester.cmake @ONLY) +install(FILES ${CMAKE_BINARY_DIR}/modules/EosioTester.cmake DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/cmake/eosio) + +configure_file(${CMAKE_SOURCE_DIR}/LICENSE + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/libraries/wabt/LICENSE + 
${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.wabt COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/libraries/softfloat/COPYING.txt + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.softfloat COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/libraries/wasm-jit/LICENSE + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.wavm COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/libraries/fc/secp256k1/upstream/COPYING + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.secp256k1 COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/libraries/fc/src/network/LICENSE.go + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.go COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/externals/binaryen/LICENSE + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.binaryen COPYONLY) + +install(FILES LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/) +install(FILES libraries/wabt/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.wabt) +install(FILES libraries/softfloat/COPYING.txt DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.softfloat) +install(FILES libraries/wasm-jit/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.wavm) +install(FILES libraries/fc/secp256k1/upstream/COPYING DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.secp256k1) +install(FILES externals/binaryen/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.binaryen) +install(FILES libraries/fc/src/network/LICENSE.go DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ ) + +include(package) include(doxygen) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index ed177f0f432..f47743fe5cb 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -1,4 +1,5 @@ cmake_minimum_required( VERSION 3.5 ) +message(STATUS "Setting up Eosio Tester @VERSION_MAJOR@.@VERSION_MINOR@.@VERSION_PATCH@ at @EOS_ROOT_DIR@") set(CMAKE_CXX_COMPILER 
@CMAKE_CXX_COMPILER@) set(CMAKE_C_COMPILER @CMAKE_C_COMPILER@) @@ -49,7 +50,6 @@ else() find_library(libsecp256k1 secp256k1 @CMAKE_INSTALL_FULL_LIBDIR@) endif() -find_library(libbinaryen binaryen @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libwasm WASM @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libwast WAST @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libwabt wabt @CMAKE_INSTALL_FULL_LIBDIR@) @@ -76,7 +76,6 @@ macro(add_eosio_test test_name) ${libtester} ${libchain} ${libfc} - ${libbinaryen} ${libwast} ${libwasm} ${libwabt} diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 6f619632fe0..5618fe0d149 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -1,4 +1,5 @@ cmake_minimum_required( VERSION 3.5 ) +message(STATUS "Setting up Eosio Tester @VERSION_MAJOR@.@VERSION_MINOR@.@VERSION_PATCH@ at @EOS_ROOT_DIR@") set(CMAKE_CXX_COMPILER @CMAKE_CXX_COMPILER@) set(CMAKE_C_COMPILER @CMAKE_C_COMPILER@) @@ -50,7 +51,6 @@ else() find_library(libsecp256k1 secp256k1 @CMAKE_BINARY_DIR@/libraries/fc/secp256k1) endif() -find_library(libbinaryen binaryen @CMAKE_BINARY_DIR@/externals/binaryen/lib) find_library(libwasm WASM @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/WASM) find_library(libwast WAST @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/WAST) find_library(libir IR @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/IR) @@ -77,7 +77,6 @@ macro(add_eosio_test test_name) ${libtester} ${libchain} ${libfc} - ${libbinaryen} ${libwast} ${libwasm} ${libwabt} @@ -175,7 +174,7 @@ if(ENABLE_COVERAGE_TESTING) # Run tests COMMAND ./tools/ctestwrapper.sh -R ${ctest_tests} -E ${ctest_exclude_tests} - COMMAND ${LCOV_PATH} --directory . --capture --gcov-tool ./tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info + COMMAND ${LCOV_PATH} --directory . 
--capture --gcov-tool ${CMAKE_SOURCE_DIR}/tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info COMMAND ${LCOV_PATH} -remove ${Coverage_NAME}.info '*/boost/*' '/usr/lib/*' '/usr/include/*' '*/externals/*' '*/fc/*' '*/wasm-jit/*' --output-file ${Coverage_NAME}_filtered.info diff --git a/CMakeModules/eosio-config.cmake.in b/CMakeModules/eosio-config.cmake.in new file mode 100644 index 00000000000..97de49c4568 --- /dev/null +++ b/CMakeModules/eosio-config.cmake.in @@ -0,0 +1,96 @@ +if(EOSIO_ROOT STREQUAL "" OR NOT EOSIO_ROOT) + set(EOSIO_ROOT "@EOS_ROOT_DIR@") +endif() +list(APPEND CMAKE_MODULE_PATH ${EOSIO_ROOT}/lib/cmake/eosio) +include(EosioTester) + +function(EXTRACT_MAJOR_MINOR_FROM_VERSION version success major minor) + string(REGEX REPLACE "^([0-9]+)\\..+$" "\\1" _major "${version}") + if("${_major}" STREQUAL "${version}") + set(${success} FALSE PARENT_SCOPE) + return() + endif() + + string(REGEX REPLACE "^[0-9]+\\.([0-9]+)(\\..*)?$" "\\1" _minor "${version}") + if("${_minor}" STREQUAL "${version}") + set(success FALSE PARENT_SCOPE) + return() + endif() + + set(${major} ${_major} PARENT_SCOPE) + set(${minor} ${_minor} PARENT_SCOPE) + set(${success} TRUE PARENT_SCOPE) +endfunction(EXTRACT_MAJOR_MINOR_FROM_VERSION) + +function(EOSIO_CHECK_VERSION output version hard_min soft_max hard_max) # optional 6th argument for error message + set(${output} "INVALID" PARENT_SCOPE) + + EXTRACT_MAJOR_MINOR_FROM_VERSION("${version}" success major minor) + if(NOT success) + if(${ARGC} GREATER 5) + set(${ARGV5} "version '${version}' is invalid" PARENT_SCOPE) + endif() + return() + endif() + + EXTRACT_MAJOR_MINOR_FROM_VERSION("${hard_min}" success hard_min_major hard_min_minor) + if(NOT success) + if(${ARGC} GREATER 5) + set(${ARGV5} "hard minimum version '${hard_min}' is invalid" PARENT_SCOPE) + endif() + return() + endif() + + if( "${major}.${minor}" VERSION_LESS "${hard_min_major}.${hard_min_minor}" ) + set(${output} "MISMATCH" PARENT_SCOPE) + if(${ARGC} GREATER 5) + 
set(${ARGV5} "version '${version}' does not meet hard minimum version requirement of ${hard_min_major}.${hard_min_minor}" PARENT_SCOPE) + endif() + return() + endif() + + if(NOT hard_max STREQUAL "") + EXTRACT_MAJOR_MINOR_FROM_VERSION("${hard_max}" success hard_max_major hard_max_minor) + if(NOT success) + if(${ARGC} GREATER 5) + set(${ARGV5} "hard maximum version '${hard_max}' is invalid" PARENT_SCOPE) + endif() + return() + endif() + + if( "${major}.${minor}" VERSION_GREATER "${hard_max_major}.${hard_max_minor}" ) + set(${output} "MISMATCH" PARENT_SCOPE) + if(${ARGC} GREATER 5) + set(${ARGV5} "version '${version}' does not meet hard maximum version requirement of ${hard_max_major}.${hard_max_minor}" PARENT_SCOPE) + endif() + return() + endif() + endif() + + EXTRACT_MAJOR_MINOR_FROM_VERSION("${soft_max}" success soft_max_major soft_max_minor) + if(NOT success) + set(${output} "MISMATCH" PARENT_SCOPE) + if(${ARGC} GREATER 5) + set(${ARGV5} "soft maximum version '${soft_max}' is invalid" PARENT_SCOPE) + endif() + return() + endif() + + if( ${major} GREATER ${soft_max_major} ) + set(${output} "MISMATCH" PARENT_SCOPE) + if(${ARGC} GREATER 5) + set(${ARGV5} "version '${version}' must have the same major version as the soft maximum version (${soft_max_major})" PARENT_SCOPE) + endif() + return() + endif() + + if( "${major}.${minor}" VERSION_GREATER "${soft_max_major}.${soft_max_minor}" ) + set(${output} "WARN" PARENT_SCOPE) + if(${ARGC} GREATER 5) + set(${ARGV5} "version '${version}' matches requirements but is greater than the soft maximum version of ${soft_max_major}.${soft_max_minor}" PARENT_SCOPE) + endif() + return() + endif() + + set(${output} "MATCH" PARENT_SCOPE) +endfunction(EOSIO_CHECK_VERSION) diff --git a/CMakeModules/package.cmake b/CMakeModules/package.cmake new file mode 100644 index 00000000000..895ce5459f3 --- /dev/null +++ b/CMakeModules/package.cmake @@ -0,0 +1,11 @@ +set(VENDOR "block.one") +set(PROJECT_NAME "eosio") +set(DESC "Software for the EOS.IO 
network") +set(URL "https://github.com/eosio/eos") +set(EMAIL "support@block.one") + +configure_file(${CMAKE_SOURCE_DIR}/scripts/generate_package.sh.in ${CMAKE_BINARY_DIR}/packages/generate_package.sh @ONLY) +configure_file(${CMAKE_SOURCE_DIR}/scripts/generate_bottle.sh ${CMAKE_BINARY_DIR}/packages/generate_bottle.sh COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/scripts/generate_deb.sh ${CMAKE_BINARY_DIR}/packages/generate_deb.sh COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/scripts/generate_rpm.sh ${CMAKE_BINARY_DIR}/packages/generate_rpm.sh COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/scripts/generate_tarball.sh ${CMAKE_BINARY_DIR}/packages/generate_tarball.sh COPYONLY) diff --git a/CMakeModules/utils.cmake b/CMakeModules/utils.cmake new file mode 100644 index 00000000000..2b15f3d7d28 --- /dev/null +++ b/CMakeModules/utils.cmake @@ -0,0 +1,4 @@ +macro( copy_bin file ) + add_custom_command( TARGET ${file} POST_BUILD COMMAND mkdir -p ${CMAKE_BINARY_DIR}/bin ) + add_custom_command( TARGET ${file} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_BINARY_DIR}/${file} ${CMAKE_BINARY_DIR}/bin/ ) +endmacro( copy_bin ) diff --git a/Docker/README.md b/Docker/README.md index 55982cc7ee4..ae6c0add6fd 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.3.2 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.4.0 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.3.2 --build-arg branch=v1.3.2 . 
+docker build -t eosio/eos:v1.4.0 --build-arg branch=v1.4.0 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. diff --git a/Docker/config.ini b/Docker/config.ini index c821437ab96..d9871858f19 100644 --- a/Docker/config.ini +++ b/Docker/config.ini @@ -151,12 +151,6 @@ keosd-provider-timeout = 5 # Lag in number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible Block) (eosio::txn_test_gen_plugin) txn-reference-block-lag = 0 -# The path of the wallet files (absolute path or relative to application data dir) (eosio::wallet_plugin) -wallet-dir = "." - -# Timeout for unlocked wallet in seconds (default 900 (15 minutes)). Wallets will automatically lock after specified number of seconds of inactivity. Activity is defined as any wallet command e.g. list-wallets. (eosio::wallet_plugin) -unlock-timeout = 900 - # eosio key that will be imported automatically when a wallet is created. 
(eosio::wallet_plugin) # eosio-key = diff --git a/contracts/CMakeLists.txt b/contracts/CMakeLists.txt index c6eb0903d63..bd84d155e85 100644 --- a/contracts/CMakeLists.txt +++ b/contracts/CMakeLists.txt @@ -12,6 +12,7 @@ add_subdirectory(eosio.token) add_subdirectory(eosio.msig) add_subdirectory(eosio.sudo) add_subdirectory(multi_index_test) +add_subdirectory(snapshot_test) add_subdirectory(eosio.system) add_subdirectory(identity) add_subdirectory(stltest) @@ -30,7 +31,6 @@ add_subdirectory(test_ram_limit) #add_subdirectory(social) add_subdirectory(eosio.bios) add_subdirectory(noop) -add_subdirectory(dice) add_subdirectory(tic_tac_toe) add_subdirectory(payloadless) add_subdirectory(integration_test) diff --git a/contracts/dice/README.md b/contracts/dice/README.md deleted file mode 100644 index 0edd7d5ca34..00000000000 --- a/contracts/dice/README.md +++ /dev/null @@ -1,269 +0,0 @@ -DICE ------------------ - -This contract implements a simple DICE game between two players with 50/50 odds of winning. - -Before playing all players deposit funds into their @dice account just like the @exchange contract - -1. Player 1 proposes to bet 1 EOS and submits SHA256(secret1) -2. Player 2 proposes to bet 1 EOS and submits SHA256(secret2) - -Because Player 1 and 2 bet equal amounts their orders are matched and the game begins. - -3. A Player reveales their secret -4. A 5 minute deadline starts whereby the first to reveal automatically wins unless the other player reveals -5. The other player reveals and a winner is chosen and paid based upon the value of sha256( cat(secret1,secret2) ) -6. After the deadline anyone can trigger a default claim and the rewards - - -Economic Incentive for Interface Developers ------------------ - -A variation on this game would be to add an additional information on offer creation that will get paid -a commission when the player wins. 
With this commission in place there is financial incentive for a -service provider to continue to execute the game in a timely manner as well as provide quality and -entertaining interfaces on top of this game. - - -Other Games ------------ -This same basic model can be used to build more robust games. - - -Potential Vulnerabilities -------- -1. Block Producers may exclude reveal transaction -2. Losers may force winner to wait 5 minutes to get rewards -3. Service providers may fail to auto-reveal on your behalf -4. You may lose internet connectivity mid-game -5. A blockhain reorganization could cause some havock if secrets are revealed too quickly - - @dice could protect users by rejecting reveals until a game creation is irreversible (about 45 seconds max) - - users could take risk themselves by deciding how many confirmations are required - - for small amounts it probably doesn't matter - - under normal operation of DPOS chains there are few if any chain reorganizations - - -Example game session using cleos -------- -#### Prerequisites -* Wallet must be unlock and have at least the following private keys - - **5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3** - **5Jmsawgsp1tQ3GD6JyGCwy1dcvqKZgX6ugMVMdjirx85iv5VyPR** - -##### Upload bios contract -````bash -cleos set contract eosio build/contracts/eosio.bios -p eosio -```` - -##### Ceate eosio.token account -````bash -cleos create account eosio eosio.token EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 -```` - -##### Set eosio.token contract to eosio.token account -````bash -cleos set contract eosio.token build/contracts/eosio.token -p eosio.token -```` - -##### Create dice account -````bash -cleos create account eosio dice EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 -```` - -##### Set dice contract to dice account -````bash -cleos set contract dice build/contracts/dice -p dice -```` - 
-##### Create native EOS token -````bash -cleos push action eosio.token create '[ "eosio", "1000000000.0000 EOS", 0, 0, 0]' -p eosio.token -```` - -##### Create alice account -````bash -cleos create account eosio alice EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 -```` - -##### Create bob account -````bash -cleos create account eosio bob EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 -```` - -##### Issue 1000 EOS to alice -````bash -cleos push action eosio.token issue '[ "alice", "1000.0000 EOS", "" ]' -p eosio -```` - -##### Issue 1000 EOS to bob -````bash -cleos push action eosio.token issue '[ "bob", "1000.0000 EOS", "" ]' -p eosio -```` - -##### Allow dice contract to make transfers on alice behalf (deposit) -````bash -cleos set account permission alice active '{"threshold": 1,"keys": [{"key": "EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4","weight": 1}],"accounts": [{"permission":{"actor":"dice","permission":"active"},"weight":1}]}' owner -p alice -```` - -##### Allow dice contract to make transfers on bob behalf (deposit) -````bash -cleos set account permission bob active '{"threshold": 1,"keys": [{"key": "EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4","weight": 1}],"accounts": [{"permission":{"actor":"dice","permission":"active"},"weight":1}]}' owner -p bob -```` - -##### Alice deposits 100 EOS into the dice contract -````bash -cleos push action dice deposit '[ "alice", "100.0000 EOS" ]' -p alice -```` - -##### Bob deposits 100 EOS into the dice contract -````bash -cleos push action dice deposit '[ "bob", "100.0000 EOS" ]' -p bob -```` - -##### Alice generates a secret -````bash -openssl rand 32 -hex -28349b1d4bcdc9905e4ef9719019e55743c84efa0c5e9a0b077f0b54fcd84905 -```` - -##### Alice generates sha256(secret) -````bash -echo -n '28349b1d4bcdc9905e4ef9719019e55743c84efa0c5e9a0b077f0b54fcd84905' | xxd -r -p | sha256sum -b 
| awk '{print $1}' -d533f24d6f28ddcef3f066474f7b8355383e485681ba8e793e037f5cf36e4883 -```` - -##### Alice bets 3 EOS -````bash -cleos push action dice offerbet '[ "3.0000 EOS", "alice", "d533f24d6f28ddcef3f066474f7b8355383e485681ba8e793e037f5cf36e4883" ]' -p alice -```` - -##### Bob generates a secret -````bash -openssl rand 32 -hex -15fe76d25e124b08feb835f12e00a879bd15666a33786e64b655891fba7d6c12 -```` - -##### Bob generates sha256(secret) -````bash -echo -n '15fe76d25e124b08feb835f12e00a879bd15666a33786e64b655891fba7d6c12' | xxd -r -p | sha256sum -b | awk '{print $1}' -50ed53fcdaf27f88d51ea4e835b1055efe779bb87e6cfdff47d28c88ffb27129 -```` - -##### Bob also bets 3 EOS (a game is started) -````bash -cleos push action dice offerbet '[ "3.0000 EOS", "bob", "50ed53fcdaf27f88d51ea4e835b1055efe779bb87e6cfdff47d28c88ffb27129" ]' -p bob -```` - -##### Dice contract tables right after the game started -````bash -cleos get table dice dice account -```` -````json -{ - "rows": [{ - "owner": "alice", - "eos_balance": "97.0000 EOS", - "open_offers": 0, - "open_games": 1 - },{ - "owner": "bob", - "eos_balance": "97.0000 EOS", - "open_offers": 0, - "open_games": 1 - } - ], - "more": false -} -```` - -````bash -cleos get table dice dice game -```` -````json -{ - "rows": [{ - "id": 1, - "bet": "3.0000 EOS", - "deadline": "1970-01-01T00:00:00", - "player1": { - "commitment": "d533f24d6f28ddcef3f066474f7b8355383e485681ba8e793e037f5cf36e4883", - "reveal": "0000000000000000000000000000000000000000000000000000000000000000" - }, - "player2": { - "commitment": "50ed53fcdaf27f88d51ea4e835b1055efe779bb87e6cfdff47d28c88ffb27129", - "reveal": "0000000000000000000000000000000000000000000000000000000000000000" - } - } - ], - "more": false -} -```` - -##### Bob reveals his secret -````bash -cleos push action dice reveal '[ "50ed53fcdaf27f88d51ea4e835b1055efe779bb87e6cfdff47d28c88ffb27129", "15fe76d25e124b08feb835f12e00a879bd15666a33786e64b655891fba7d6c12" ]' -p bob -```` - -##### Game table 
after bob revealed (now the game has a deadline for alice to reveal) -````bash -cleos get table dice dice game -```` -````json -{ - "rows": [{ - "id": 1, - "bet": "3.0000 EOS", - "deadline": "2018-04-17T07:45:49", - "player1": { - "commitment": "d533f24d6f28ddcef3f066474f7b8355383e485681ba8e793e037f5cf36e4883", - "reveal": "0000000000000000000000000000000000000000000000000000000000000000" - }, - "player2": { - "commitment": "50ed53fcdaf27f88d51ea4e835b1055efe779bb87e6cfdff47d28c88ffb27129", - "reveal": "15fe76d25e124b08feb835f12e00a879bd15666a33786e64b655891fba7d6c12" - } - } - ], - "more": false -} -```` - -##### Alice reveals her secret (the winner is determined, the game is removed) -````bash -cleos push action dice reveal '[ "d533f24d6f28ddcef3f066474f7b8355383e485681ba8e793e037f5cf36e4883", "28349b1d4bcdc9905e4ef9719019e55743c84efa0c5e9a0b077f0b54fcd84905" ]' -p alice -```` - -##### Balance of the accounts after game ends -````bash -cleos get table dice dice account -```` -````json -{ - "rows": [{ - "owner": "alice", - "eos_balance": "103.0000 EOS", - "open_offers": 0, - "open_games": 0 - },{ - "owner": "bob", - "eos_balance": "97.0000 EOS", - "open_offers": 0, - "open_games": 0 - } - ], - "more": false -} -```` - -##### Alice withdraw from her dice account 103 EOS -````bash -cleos push action dice withdraw '[ "alice", "103.0000 EOS" ]' -p alice -```` - -##### Balance of alice after withdraw -````bash -cleos get currency balance eosio.token alice eos -1003.0000 EOS -```` - diff --git a/contracts/dice/dice.abi b/contracts/dice/dice.abi deleted file mode 100644 index ba47085f1db..00000000000 --- a/contracts/dice/dice.abi +++ /dev/null @@ -1,221 +0,0 @@ -{ - "version": "eosio::abi/1.0", - "types": [{ - "new_type_name": "account_name", - "type": "name" - }], - "structs": [{ - "name": "offer", - "base": "", - "fields": [{ - "name": "id", - "type": "uint64" - },{ - "name": "owner", - "type": "account_name" - },{ - "name": "bet", - "type": "asset" - },{ - "name": 
"commitment", - "type": "checksum256" - },{ - "name": "gameid", - "type": "uint64" - } - ] - },{ - "name": "player", - "base": "", - "fields": [{ - "name": "commitment", - "type": "checksum256" - },{ - "name": "reveal", - "type": "checksum256" - } - ] - },{ - "name": "game", - "base": "", - "fields": [{ - "name": "id", - "type": "uint64" - },{ - "name": "bet", - "type": "asset" - },{ - "name": "deadline", - "type": "time_point_sec" - },{ - "name": "player1", - "type": "player" - },{ - "name": "player2", - "type": "player" - } - ] - },{ - "name": "global_dice", - "base": "", - "fields": [{ - "name": "id", - "type": "uint64" - },{ - "name": "nextgameid", - "type": "uint64" - } - ] - },{ - "name": "account", - "base": "", - "fields": [{ - "name": "owner", - "type": "account_name" - },{ - "name": "eos_balance", - "type": "asset" - },{ - "name": "open_offers", - "type": "uint32" - },{ - "name": "open_games", - "type": "uint32" - } - ] - },{ - "name": "offerbet", - "base": "", - "fields": [{ - "name": "bet", - "type": "asset" - },{ - "name": "player", - "type": "account_name" - },{ - "name": "commitment", - "type": "checksum256" - } - ] - },{ - "name": "canceloffer", - "base": "", - "fields": [{ - "name": "commitment", - "type": "checksum256" - } - ] - },{ - "name": "reveal", - "base": "", - "fields": [{ - "name": "commitment", - "type": "checksum256" - },{ - "name": "source", - "type": "checksum256" - } - ] - },{ - "name": "claimexpired", - "base": "", - "fields": [{ - "name": "gameid", - "type": "uint64" - } - ] - },{ - "name": "deposit", - "base": "", - "fields": [{ - "name": "from", - "type": "account_name" - },{ - "name": "a", - "type": "asset" - } - ] - },{ - "name": "withdraw", - "base": "", - "fields": [{ - "name": "to", - "type": "account_name" - },{ - "name": "a", - "type": "asset" - } - ] - } - ], - "actions": [{ - "name": "offerbet", - "type": "offerbet", - "ricardian_contract": "" - },{ - "name": "canceloffer", - "type": "canceloffer", - 
"ricardian_contract": "" - },{ - "name": "reveal", - "type": "reveal", - "ricardian_contract": "" - },{ - "name": "claimexpired", - "type": "claimexpired", - "ricardian_contract": "" - },{ - "name": "deposit", - "type": "deposit", - "ricardian_contract": "" - },{ - "name": "withdraw", - "type": "withdraw", - "ricardian_contract": "" - } - ], - "tables": [{ - "name": "offer", - "index_type": "i64", - "key_names": [ - "id" - ], - "key_types": [ - "uint64" - ], - "type": "offer" - },{ - "name": "game", - "index_type": "i64", - "key_names": [ - "id" - ], - "key_types": [ - "uint64" - ], - "type": "game" - },{ - "name": "global", - "index_type": "i64", - "key_names": [ - "id" - ], - "key_types": [ - "uint64" - ], - "type": "global_dice" - },{ - "name": "account", - "index_type": "i64", - "key_names": [ - "owner" - ], - "key_types": [ - "account_name" - ], - "type": "account" - } - ], - "ricardian_clauses": [], - "abi_extensions": [] -} diff --git a/contracts/dice/dice.cpp b/contracts/dice/dice.cpp deleted file mode 100644 index 5dbe32b012a..00000000000 --- a/contracts/dice/dice.cpp +++ /dev/null @@ -1,391 +0,0 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ -#include -#include -#include -#include -#include -#include -#include -#include - -using eosio::key256; -using eosio::indexed_by; -using eosio::const_mem_fun; -using eosio::asset; -using eosio::permission_level; -using eosio::action; -using eosio::print; -using eosio::name; - -class dice : public eosio::contract { - public: - const uint32_t FIVE_MINUTES = 5*60; - - dice(account_name self) - :eosio::contract(self), - offers(_self, _self), - games(_self, _self), - global_dices(_self, _self), - accounts(_self, _self) - {} - - //@abi action - void offerbet(const asset& bet, const account_name player, const checksum256& commitment) { - - eosio_assert( bet.symbol == CORE_SYMBOL, "only core token allowed" ); - eosio_assert( bet.is_valid(), "invalid bet" ); - eosio_assert( bet.amount > 0, "must bet positive 
quantity" ); - - eosio_assert( !has_offer( commitment ), "offer with this commitment already exist" ); - require_auth( player ); - - auto cur_player_itr = accounts.find( player ); - eosio_assert(cur_player_itr != accounts.end(), "unknown account"); - - // Store new offer - auto new_offer_itr = offers.emplace(_self, [&](auto& offer){ - offer.id = offers.available_primary_key(); - offer.bet = bet; - offer.owner = player; - offer.commitment = commitment; - offer.gameid = 0; - }); - - // Try to find a matching bet - auto idx = offers.template get_index(); - auto matched_offer_itr = idx.lower_bound( (uint64_t)new_offer_itr->bet.amount ); - - if( matched_offer_itr == idx.end() - || matched_offer_itr->bet != new_offer_itr->bet - || matched_offer_itr->owner == new_offer_itr->owner ) { - - // No matching bet found, update player's account - accounts.modify( cur_player_itr, 0, [&](auto& acnt) { - eosio_assert( acnt.eos_balance >= bet, "insufficient balance" ); - acnt.eos_balance -= bet; - acnt.open_offers++; - }); - - } else { - // Create global game counter if not exists - auto gdice_itr = global_dices.begin(); - if( gdice_itr == global_dices.end() ) { - gdice_itr = global_dices.emplace(_self, [&](auto& gdice){ - gdice.nextgameid=0; - }); - } - - // Increment global game counter - global_dices.modify(gdice_itr, 0, [&](auto& gdice){ - gdice.nextgameid++; - }); - - // Create a new game - auto game_itr = games.emplace(_self, [&](auto& new_game){ - new_game.id = gdice_itr->nextgameid; - new_game.bet = new_offer_itr->bet; - new_game.deadline = eosio::time_point_sec(0); - - new_game.player1.commitment = matched_offer_itr->commitment; - memset(&new_game.player1.reveal, 0, sizeof(checksum256)); - - new_game.player2.commitment = new_offer_itr->commitment; - memset(&new_game.player2.reveal, 0, sizeof(checksum256)); - }); - - // Update player's offers - idx.modify(matched_offer_itr, 0, [&](auto& offer){ - offer.bet.amount = 0; - offer.gameid = game_itr->id; - }); - - 
offers.modify(new_offer_itr, 0, [&](auto& offer){ - offer.bet.amount = 0; - offer.gameid = game_itr->id; - }); - - // Update player's accounts - accounts.modify( accounts.find( matched_offer_itr->owner ), 0, [&](auto& acnt) { - acnt.open_offers--; - acnt.open_games++; - }); - - accounts.modify( cur_player_itr, 0, [&](auto& acnt) { - eosio_assert( acnt.eos_balance >= bet, "insufficient balance" ); - acnt.eos_balance -= bet; - acnt.open_games++; - }); - } - } - - //@abi action - void canceloffer( const checksum256& commitment ) { - - auto idx = offers.template get_index(); - auto offer_itr = idx.find( offer::get_commitment(commitment) ); - - eosio_assert( offer_itr != idx.end(), "offer does not exists" ); - eosio_assert( offer_itr->gameid == 0, "unable to cancel offer" ); - require_auth( offer_itr->owner ); - - auto acnt_itr = accounts.find(offer_itr->owner); - accounts.modify(acnt_itr, 0, [&](auto& acnt){ - acnt.open_offers--; - acnt.eos_balance += offer_itr->bet; - }); - - idx.erase(offer_itr); - } - - //@abi action - void reveal( const checksum256& commitment, const checksum256& source ) { - - assert_sha256( (char *)&source, sizeof(source), (const checksum256 *)&commitment ); - - auto idx = offers.template get_index(); - auto curr_revealer_offer = idx.find( offer::get_commitment(commitment) ); - - eosio_assert(curr_revealer_offer != idx.end(), "offer not found"); - eosio_assert(curr_revealer_offer->gameid > 0, "unable to reveal"); - - auto game_itr = games.find( curr_revealer_offer->gameid ); - - player curr_reveal = game_itr->player1; - player prev_reveal = game_itr->player2; - - if( !is_equal(curr_reveal.commitment, commitment) ) { - std::swap(curr_reveal, prev_reveal); - } - - eosio_assert( is_zero(curr_reveal.reveal) == true, "player already revealed"); - - if( !is_zero(prev_reveal.reveal) ) { - - checksum256 result; - sha256( (char *)&game_itr->player1, sizeof(player)*2, &result); - - auto prev_revealer_offer = idx.find( 
offer::get_commitment(prev_reveal.commitment) ); - - int winner = result.hash[1] < result.hash[0] ? 0 : 1; - - if( winner ) { - pay_and_clean(*game_itr, *curr_revealer_offer, *prev_revealer_offer); - } else { - pay_and_clean(*game_itr, *prev_revealer_offer, *curr_revealer_offer); - } - - } else { - games.modify(game_itr, 0, [&](auto& game){ - - if( is_equal(curr_reveal.commitment, game.player1.commitment) ) - game.player1.reveal = source; - else - game.player2.reveal = source; - - game.deadline = eosio::time_point_sec(now() + FIVE_MINUTES); - }); - } - } - - //@abi action - void claimexpired( const uint64_t gameid ) { - - auto game_itr = games.find(gameid); - - eosio_assert(game_itr != games.end(), "game not found"); - eosio_assert(game_itr->deadline != eosio::time_point_sec(0) && eosio::time_point_sec(now()) > game_itr->deadline, "game not expired"); - - auto idx = offers.template get_index(); - auto player1_offer = idx.find( offer::get_commitment(game_itr->player1.commitment) ); - auto player2_offer = idx.find( offer::get_commitment(game_itr->player2.commitment) ); - - if( !is_zero(game_itr->player1.reveal) ) { - eosio_assert( is_zero(game_itr->player2.reveal), "game error"); - pay_and_clean(*game_itr, *player1_offer, *player2_offer); - } else { - eosio_assert( is_zero(game_itr->player1.reveal), "game error"); - pay_and_clean(*game_itr, *player2_offer, *player1_offer); - } - - } - - //@abi action - void deposit( const account_name from, const asset& quantity ) { - - eosio_assert( quantity.is_valid(), "invalid quantity" ); - eosio_assert( quantity.amount > 0, "must deposit positive quantity" ); - - auto itr = accounts.find(from); - if( itr == accounts.end() ) { - itr = accounts.emplace(_self, [&](auto& acnt){ - acnt.owner = from; - }); - } - - action( - permission_level{ from, N(active) }, - N(eosio.token), N(transfer), - std::make_tuple(from, _self, quantity, std::string("")) - ).send(); - - accounts.modify( itr, 0, [&]( auto& acnt ) { - acnt.eos_balance += 
quantity; - }); - } - - //@abi action - void withdraw( const account_name to, const asset& quantity ) { - require_auth( to ); - - eosio_assert( quantity.is_valid(), "invalid quantity" ); - eosio_assert( quantity.amount > 0, "must withdraw positive quantity" ); - - auto itr = accounts.find( to ); - eosio_assert(itr != accounts.end(), "unknown account"); - - accounts.modify( itr, 0, [&]( auto& acnt ) { - eosio_assert( acnt.eos_balance >= quantity, "insufficient balance" ); - acnt.eos_balance -= quantity; - }); - - action( - permission_level{ _self, N(active) }, - N(eosio.token), N(transfer), - std::make_tuple(_self, to, quantity, std::string("")) - ).send(); - - if( itr->is_empty() ) { - accounts.erase(itr); - } - } - - private: - //@abi table offer i64 - struct offer { - uint64_t id; - account_name owner; - asset bet; - checksum256 commitment; - uint64_t gameid = 0; - - uint64_t primary_key()const { return id; } - - uint64_t by_bet()const { return (uint64_t)bet.amount; } - - key256 by_commitment()const { return get_commitment(commitment); } - - static key256 get_commitment(const checksum256& commitment) { - const uint64_t *p64 = reinterpret_cast(&commitment); - return key256::make_from_word_sequence(p64[0], p64[1], p64[2], p64[3]); - } - - EOSLIB_SERIALIZE( offer, (id)(owner)(bet)(commitment)(gameid) ) - }; - - typedef eosio::multi_index< N(offer), offer, - indexed_by< N(bet), const_mem_fun >, - indexed_by< N(commitment), const_mem_fun > - > offer_index; - - struct player { - checksum256 commitment; - checksum256 reveal; - - EOSLIB_SERIALIZE( player, (commitment)(reveal) ) - }; - - //@abi table game i64 - struct game { - uint64_t id; - asset bet; - eosio::time_point_sec deadline; - player player1; - player player2; - - uint64_t primary_key()const { return id; } - - EOSLIB_SERIALIZE( game, (id)(bet)(deadline)(player1)(player2) ) - }; - - typedef eosio::multi_index< N(game), game> game_index; - - //@abi table global i64 - struct global_dice { - uint64_t id = 0; - 
uint64_t nextgameid = 0; - - uint64_t primary_key()const { return id; } - - EOSLIB_SERIALIZE( global_dice, (id)(nextgameid) ) - }; - - typedef eosio::multi_index< N(global), global_dice> global_dice_index; - - //@abi table account i64 - struct account { - account( account_name o = account_name() ):owner(o){} - - account_name owner; - asset eos_balance; - uint32_t open_offers = 0; - uint32_t open_games = 0; - - bool is_empty()const { return !( eos_balance.amount | open_offers | open_games ); } - - uint64_t primary_key()const { return owner; } - - EOSLIB_SERIALIZE( account, (owner)(eos_balance)(open_offers)(open_games) ) - }; - - typedef eosio::multi_index< N(account), account> account_index; - - offer_index offers; - game_index games; - global_dice_index global_dices; - account_index accounts; - - bool has_offer( const checksum256& commitment )const { - auto idx = offers.template get_index(); - auto itr = idx.find( offer::get_commitment(commitment) ); - return itr != idx.end(); - } - - bool is_equal(const checksum256& a, const checksum256& b)const { - return memcmp((void *)&a, (const void *)&b, sizeof(checksum256)) == 0; - } - - bool is_zero(const checksum256& a)const { - const uint64_t *p64 = reinterpret_cast(&a); - return p64[0] == 0 && p64[1] == 0 && p64[2] == 0 && p64[3] == 0; - } - - void pay_and_clean(const game& g, const offer& winner_offer, - const offer& loser_offer) { - - // Update winner account balance and game count - auto winner_account = accounts.find(winner_offer.owner); - accounts.modify( winner_account, 0, [&]( auto& acnt ) { - acnt.eos_balance += 2*g.bet; - acnt.open_games--; - }); - - // Update losser account game count - auto loser_account = accounts.find(loser_offer.owner); - accounts.modify( loser_account, 0, [&]( auto& acnt ) { - acnt.open_games--; - }); - - if( loser_account->is_empty() ) { - accounts.erase(loser_account); - } - - games.erase(g); - offers.erase(winner_offer); - offers.erase(loser_offer); - } -}; - -EOSIO_ABI( dice, 
(offerbet)(canceloffer)(reveal)(claimexpired)(deposit)(withdraw) ) diff --git a/contracts/eosio.bios/eosio.bios.abi b/contracts/eosio.bios/eosio.bios.abi index c81d774c689..2dd3310fc67 100644 --- a/contracts/eosio.bios/eosio.bios.abi +++ b/contracts/eosio.bios/eosio.bios.abi @@ -53,6 +53,28 @@ {"name":"accounts", "type":"permission_level_weight[]"}, {"name":"waits", "type":"wait_weight[]"} ] + },{ + "name": "blockchain_parameters", + "base": "", + "fields": [ + {"name":"max_block_net_usage", "type":"uint64"}, + {"name":"target_block_net_usage_pct", "type":"uint32"}, + {"name":"max_transaction_net_usage", "type":"uint32"}, + {"name":"base_per_transaction_net_usage", "type":"uint32"}, + {"name":"net_usage_leeway", "type":"uint32"}, + {"name":"context_free_discount_net_usage_num", "type":"uint32"}, + {"name":"context_free_discount_net_usage_den", "type":"uint32"}, + {"name":"max_block_cpu_usage", "type":"uint32"}, + {"name":"target_block_cpu_usage_pct", "type":"uint32"}, + {"name":"max_transaction_cpu_usage", "type":"uint32"}, + {"name":"min_transaction_cpu_usage", "type":"uint32"}, + {"name":"max_transaction_lifetime", "type":"uint32"}, + {"name":"deferred_trx_expiration_window", "type":"uint32"}, + {"name":"max_transaction_delay", "type":"uint32"}, + {"name":"max_inline_action_size", "type":"uint32"}, + {"name":"max_inline_action_depth", "type":"uint16"}, + {"name":"max_authority_depth", "type":"uint16"} + ] },{ "name": "newaccount", "base": "", @@ -160,6 +182,12 @@ "fields": [ {"name":"schedule", "type":"producer_key[]"} ] + },{ + "name": "setparams", + "base": "", + "fields": [ + {"name":"params", "type":"blockchain_parameters"} + ] },{ "name": "require_auth", "base": "", @@ -219,6 +247,10 @@ "name": "setprods", "type": "set_producers", "ricardian_contract": "" + },{ + "name": "setparams", + "type": "setparams", + "ricardian_contract": "" },{ "name": "reqauth", "type": "require_auth", diff --git a/contracts/eosio.bios/eosio.bios.cpp 
b/contracts/eosio.bios/eosio.bios.cpp index 70279d6e460..66d70f0c47e 100644 --- a/contracts/eosio.bios/eosio.bios.cpp +++ b/contracts/eosio.bios/eosio.bios.cpp @@ -1,3 +1,3 @@ #include -EOSIO_ABI( eosio::bios, (setpriv)(setalimits)(setglimits)(setprods)(reqauth) ) +EOSIO_ABI( eosio::bios, (setpriv)(setalimits)(setglimits)(setprods)(setparams)(reqauth) ) diff --git a/contracts/eosio.bios/eosio.bios.hpp b/contracts/eosio.bios/eosio.bios.hpp index 99807d811c1..0abca64c90e 100644 --- a/contracts/eosio.bios/eosio.bios.hpp +++ b/contracts/eosio.bios/eosio.bios.hpp @@ -34,6 +34,11 @@ namespace eosio { set_proposed_producers(buffer, size); } + void setparams( const eosio::blockchain_parameters& params ) { + require_auth( _self ); + set_blockchain_parameters( params ); + } + void reqauth( action_name from ) { require_auth( from ); } diff --git a/contracts/multi_index_test/multi_index_test.cpp b/contracts/multi_index_test/multi_index_test.cpp index 4b3c5a96702..531984f28fe 100644 --- a/contracts/multi_index_test/multi_index_test.cpp +++ b/contracts/multi_index_test/multi_index_test.cpp @@ -30,7 +30,7 @@ struct limit_order { EOSLIB_SERIALIZE( test_k256, (id)(val) ) }; - class multi_index_test { + class snapshot_test { public: ACTION(N(multitest), trigger) { @@ -170,7 +170,7 @@ namespace multi_index_test { /// The apply method implements the dispatch of events to this contract void apply( uint64_t /* receiver */, uint64_t code, uint64_t action ) { require_auth(code); - eosio_assert(eosio::dispatch(code, action), + eosio_assert(eosio::dispatch(code, action), "Could not dispatch"); } } diff --git a/contracts/dice/CMakeLists.txt b/contracts/snapshot_test/CMakeLists.txt similarity index 84% rename from contracts/dice/CMakeLists.txt rename to contracts/snapshot_test/CMakeLists.txt index 3caf729a2d5..81af479e479 100644 --- a/contracts/dice/CMakeLists.txt +++ b/contracts/snapshot_test/CMakeLists.txt @@ -1,8 +1,8 @@ file(GLOB ABI_FILES "*.abi") configure_file("${ABI_FILES}" 
"${CMAKE_CURRENT_BINARY_DIR}" COPYONLY) - -add_wast_executable(TARGET dice +add_wast_executable(TARGET snapshot_test INCLUDE_FOLDERS "${STANDARD_INCLUDE_FOLDERS}" LIBRARIES libc++ libc eosiolib DESTINATION_FOLDER ${CMAKE_CURRENT_BINARY_DIR} ) + diff --git a/contracts/snapshot_test/snapshot_test.abi b/contracts/snapshot_test/snapshot_test.abi new file mode 100644 index 00000000000..0bddc7293ce --- /dev/null +++ b/contracts/snapshot_test/snapshot_test.abi @@ -0,0 +1,21 @@ +{ + "version": "eosio::abi/1.0", + "types": [], + "structs": [{ + "name": "increment", + "base": "", + "fields": [ + {"name": "value", "type": "uint32" } + ] + } + ], + "actions": [{ + "name": "increment", + "type": "increment", + "ricaridian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "abi_extensions": [] +} diff --git a/contracts/snapshot_test/snapshot_test.cpp b/contracts/snapshot_test/snapshot_test.cpp new file mode 100644 index 00000000000..0ef6939f07e --- /dev/null +++ b/contracts/snapshot_test/snapshot_test.cpp @@ -0,0 +1,79 @@ +#include +#include + +using namespace eosio; + +namespace snapshot_test { + + struct main_record { + uint64_t id; + double index_f64 = 0.0; + long double index_f128 = 0.0L; + uint64_t index_i64 = 0ULL; + uint128_t index_i128 = 0ULL; + key256 index_i256 = key256(); + + auto primary_key() const { return id; } + + auto get_index_f64 () const { return index_f64 ; } + auto get_index_f128 () const { return index_f128; } + auto get_index_i64 () const { return index_i64 ; } + auto get_index_i128 () const { return index_i128; } + const key256& get_index_i256 () const { return index_i256; } + + EOSLIB_SERIALIZE( main_record, (id)(index_f64)(index_f128)(index_i64)(index_i128)(index_i256) ) + }; + + struct increment { + increment(): value(0) {} + increment(uint32_t v): value(v) {} + + uint32_t value; + + EOSLIB_SERIALIZE(increment, (value)) + }; + + using multi_index_type = eosio::multi_index>, + indexed_by< N(byff), const_mem_fun>, + indexed_by< N(byi ), 
const_mem_fun>, + indexed_by< N(byii), const_mem_fun>, + indexed_by< N(byiiii), const_mem_fun> + >; + + static void exec( uint64_t self, uint32_t value ) { + multi_index_type data(self, self); + auto current = data.begin( ); + if( current == data.end() ) { + data.emplace( self, [&]( auto& r ) { + r.id = value; + r.index_f64 = value; + r.index_f128 = value; + r.index_i64 = value; + r.index_i128 = value; + r.index_i256.data()[0] = value; + }); + + } else { + data.modify( current, self, [&]( auto& r ) { + r.index_f64 += value; + r.index_f128 += value; + r.index_i64 += value; + r.index_i128 += value; + r.index_i256.data()[0] += value; + }); + } + } + +} /// multi_index_test + +namespace multi_index_test { + extern "C" { + /// The apply method implements the dispatch of events to this contract + void apply( uint64_t self, uint64_t code, uint64_t action ) { + require_auth(code); + eosio_assert(action == N(increment), "unsupported action"); + snapshot_test::exec(self, unpack_action_data().value); + } + } +} diff --git a/eosio_build.sh b/eosio_build.sh index f24e80d4600..b1988d74f0c 100755 --- a/eosio_build.sh +++ b/eosio_build.sh @@ -85,7 +85,7 @@ DOXYGEN=true ;; s) - if [ "${#OPTARG}" -gt 6 ] || [ -z "${#OPTARG}" ]; then + if [ "${#OPTARG}" -gt 7 ] || [ -z "${#OPTARG}" ]; then printf "\\n\\tInvalid argument: %s\\n" "${OPTARG}" 1>&2 usage exit 1 @@ -265,7 +265,7 @@ -DCMAKE_C_COMPILER="${C_COMPILER}" -DWASM_ROOT="${WASM_ROOT}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ - -DCMAKE_INSTALL_PREFIX="/usr/local/eosio" "${SOURCE_DIR}" + -DCMAKE_INSTALL_PREFIX="/usr/local/eosio" ${LOCAL_CMAKE_FLAGS} "${SOURCE_DIR}" then printf "\\n\\t>>>>>>>>>>>>>>>>>>>> CMAKE building EOSIO has exited with the above error.\\n\\n" exit -1 diff --git a/eosio_install.sh b/eosio_install.sh index e442ce91373..9ed195df7d0 100755 --- 
a/eosio_install.sh +++ b/eosio_install.sh @@ -57,6 +57,13 @@ fi popd &> /dev/null } + create_cmake_symlink() { + mkdir -p /usr/local/lib/cmake/eosio + pushd /usr/local/lib/cmake/eosio &> /dev/null + ln -sf ../../../eosio/lib/cmake/eosio/$1 $1 + popd &> /dev/null + } + install_symlinks() { printf "\\n\\tInstalling EOSIO Binary Symlinks\\n\\n" create_symlink "cleos" @@ -94,6 +101,7 @@ fi popd &> /dev/null install_symlinks + create_cmake_symlink "eosio-config.cmake" printf "\n\n${bldred}\t _______ _______ _______ _________ _______\n" printf '\t( ____ \( ___ )( ____ \\\\__ __/( ___ )\n' diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index cf4c1be184d..a8eeadaa94a 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -35,9 +35,9 @@ add_library( eosio_chain apply_context.cpp abi_serializer.cpp asset.cpp + snapshot.cpp webassembly/wavm.cpp - webassembly/binaryen.cpp webassembly/wabt.cpp # get_config.cpp @@ -51,12 +51,11 @@ add_library( eosio_chain ) target_link_libraries( eosio_chain eos_utilities fc chainbase Logging IR WAST WASM Runtime - wasm asmjs passes cfg ast emscripten-optimizer support softfloat builtins wabt + softfloat builtins wabt ) target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../wasm-jit/Include" - "${CMAKE_CURRENT_SOURCE_DIR}/../../externals/binaryen/src" "${CMAKE_SOURCE_DIR}/libraries/wabt" "${CMAKE_BINARY_DIR}/libraries/wabt" ) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 02c519f4ae1..b5f67e51059 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -104,7 +104,8 @@ namespace eosio { namespace chain { } void abi_serializer::set_abi(const abi_def& abi, const fc::microseconds& max_serialization_time) { - const fc::time_point deadline = fc::time_point::now() + max_serialization_time; + impl::abi_traverse_context 
ctx(max_serialization_time); + EOS_ASSERT(starts_with(abi.version, "eosio::abi/1."), unsupported_abi_version_exception, "ABI has an unsupported version"); typedefs.clear(); @@ -118,8 +119,8 @@ namespace eosio { namespace chain { structs[st.name] = st; for( const auto& td : abi.types ) { - EOS_ASSERT(_is_type(td.type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "invalid type ${type}", ("type",td.type)); - EOS_ASSERT(!_is_type(td.new_type_name, 0, deadline, max_serialization_time), duplicate_abi_type_def_exception, "type already exists", ("new_type_name",td.new_type_name)); + EOS_ASSERT(_is_type(td.type, ctx), invalid_type_inside_abi, "invalid type ${type}", ("type",td.type)); + EOS_ASSERT(!_is_type(td.new_type_name, ctx), duplicate_abi_type_def_exception, "type already exists", ("new_type_name",td.new_type_name)); typedefs[td.new_type_name] = td.type; } @@ -146,7 +147,7 @@ namespace eosio { namespace chain { EOS_ASSERT( error_messages.size() == abi.error_messages.size(), duplicate_abi_err_msg_def_exception, "duplicate error message definition detected" ); EOS_ASSERT( variants.size() == abi.variants.value.size(), duplicate_abi_variant_def_exception, "duplicate variant definition detected" ); - validate(deadline, max_serialization_time); + validate(ctx); } bool abi_serializer::is_builtin_type(const type_name& type)const { @@ -180,6 +181,11 @@ namespace eosio { namespace chain { return ends_with(string(type), "?"); } + bool abi_serializer::is_type(const type_name& type, const fc::microseconds& max_serialization_time)const { + impl::abi_traverse_context ctx(max_serialization_time); + return _is_type(type, ctx); + } + type_name abi_serializer::fundamental_type(const type_name& type)const { if( is_array(type) ) { return type_name(string(type).substr(0, type.size()-2)); @@ -197,12 +203,11 @@ namespace eosio { namespace chain { return type; } - bool abi_serializer::_is_type(const type_name& rtype, size_t recursion_depth, const fc::time_point& deadline, 
const fc::microseconds& max_serialization_time)const { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - if( ++recursion_depth > max_recursion_depth) return false; + bool abi_serializer::_is_type(const type_name& rtype, impl::abi_traverse_context& ctx )const { + auto h = ctx.enter_scope(); auto type = fundamental_type(rtype); if( built_in_types.find(type) != built_in_types.end() ) return true; - if( typedefs.find(type) != typedefs.end() ) return _is_type(typedefs.find(type)->second, recursion_depth, deadline, max_serialization_time); + if( typedefs.find(type) != typedefs.end() ) return _is_type(typedefs.find(type)->second, ctx); if( structs.find(type) != structs.end() ) return true; if( variants.find(type) != variants.end() ) return true; return false; @@ -214,26 +219,26 @@ namespace eosio { namespace chain { return itr->second; } - void abi_serializer::validate(const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const { + void abi_serializer::validate( impl::abi_traverse_context& ctx )const { for( const auto& t : typedefs ) { try { vector types_seen{t.first, t.second}; auto itr = typedefs.find(t.second); while( itr != typedefs.end() ) { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + ctx.check_deadline(); EOS_ASSERT( find(types_seen.begin(), types_seen.end(), itr->second) == types_seen.end(), abi_circular_def_exception, "Circular reference in type ${type}", ("type",t.first) ); types_seen.emplace_back(itr->second); itr = typedefs.find(itr->second); } } FC_CAPTURE_AND_RETHROW( (t) ) } for( const auto& t : typedefs ) { try { - EOS_ASSERT(_is_type(t.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "${type}", ("type",t.second) ); + EOS_ASSERT(_is_type(t.second, ctx), invalid_type_inside_abi, 
"${type}", ("type",t.second) ); } FC_CAPTURE_AND_RETHROW( (t) ) } for( const auto& s : structs ) { try { if( s.second.base != type_name() ) { struct_def current = s.second; vector types_seen{current.name}; while( current.base != type_name() ) { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + ctx.check_deadline(); const auto& base = get_struct(current.base); //<-- force struct to inherit from another struct EOS_ASSERT( find(types_seen.begin(), types_seen.end(), base.name) == types_seen.end(), abi_circular_def_exception, "Circular reference in struct ${type}", ("type",s.second.name) ); types_seen.emplace_back(base.name); @@ -241,24 +246,24 @@ namespace eosio { namespace chain { } } for( const auto& field : s.second.fields ) { try { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - EOS_ASSERT(_is_type(_remove_bin_extension(field.type), 0, deadline, max_serialization_time), invalid_type_inside_abi, "${type}", ("type",field.type) ); + ctx.check_deadline(); + EOS_ASSERT(_is_type(_remove_bin_extension(field.type), ctx), invalid_type_inside_abi, "${type}", ("type",field.type) ); } FC_CAPTURE_AND_RETHROW( (field) ) } } FC_CAPTURE_AND_RETHROW( (s) ) } for( const auto& s : variants ) { try { for( const auto& type : s.second.types ) { try { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - EOS_ASSERT(_is_type(type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "${type}", ("type",type) ); + ctx.check_deadline(); + EOS_ASSERT(_is_type(type, ctx), invalid_type_inside_abi, "${type}", ("type",type) ); } FC_CAPTURE_AND_RETHROW( (type) ) } } FC_CAPTURE_AND_RETHROW( (s) ) } for( const auto& a : actions ) { try { - EOS_ASSERT( 
fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - EOS_ASSERT(_is_type(a.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "${type}", ("type",a.second) ); + ctx.check_deadline(); + EOS_ASSERT(_is_type(a.second, ctx), invalid_type_inside_abi, "${type}", ("type",a.second) ); } FC_CAPTURE_AND_RETHROW( (a) ) } for( const auto& t : tables ) { try { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - EOS_ASSERT(_is_type(t.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "${type}", ("type",t.second) ); + ctx.check_deadline(); + EOS_ASSERT(_is_type(t.second, ctx), invalid_type_inside_abi, "${type}", ("type",t.second) ); } FC_CAPTURE_AND_RETHROW( (t) ) } } @@ -275,156 +280,247 @@ namespace eosio { namespace chain { } void abi_serializer::_binary_to_variant( const type_name& type, fc::datastream& stream, - fc::mutable_variant_object& obj, size_t recursion_depth, - const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const + fc::mutable_variant_object& obj, impl::binary_to_variant_context& ctx )const { - EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - const auto& st = get_struct(type); + auto h = ctx.enter_scope(); + auto s_itr = structs.find(type); + EOS_ASSERT( s_itr != structs.end(), invalid_type_inside_abi, "Unknown type ${type}", ("type",ctx.maybe_shorten(type)) ); + ctx.hint_struct_type_if_in_array( s_itr ); + const auto& st = s_itr->second; if( st.base != type_name() ) { - _binary_to_variant(resolve_type(st.base), 
stream, obj, recursion_depth, deadline, max_serialization_time); + _binary_to_variant(resolve_type(st.base), stream, obj, ctx); } - for( const auto& field : st.fields ) { - if( !stream.remaining() && ends_with(field.type, "$") ) - continue; - obj( field.name, _binary_to_variant(resolve_type(_remove_bin_extension(field.type)), stream, recursion_depth, deadline, max_serialization_time) ); + bool encountered_extension = false; + for( uint32_t i = 0; i < st.fields.size(); ++i ) { + const auto& field = st.fields[i]; + bool extension = ends_with(field.type, "$"); + encountered_extension |= extension; + if( !stream.remaining() ) { + if( extension ) { + continue; + } + if( encountered_extension ) { + EOS_THROW( abi_exception, "Encountered field '${f}' without binary extension designation while processing struct '${p}'", + ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); + } + EOS_THROW( unpack_exception, "Stream unexpectedly ended; unable to unpack field '${f}' of struct '${p}'", + ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); + + } + auto h1 = ctx.push_to_path( impl::field_path_item{ .parent_struct_itr = s_itr, .field_ordinal = i } ); + obj( field.name, _binary_to_variant(resolve_type( extension ? 
_remove_bin_extension(field.type) : field.type ), stream, ctx) ); } } fc::variant abi_serializer::_binary_to_variant( const type_name& type, fc::datastream& stream, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const + impl::binary_to_variant_context& ctx )const { - EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); type_name rtype = resolve_type(type); auto ftype = fundamental_type(rtype); auto btype = built_in_types.find(ftype ); if( btype != built_in_types.end() ) { - return btype->second.first(stream, is_array(rtype), is_optional(rtype)); + try { + return btype->second.first(stream, is_array(rtype), is_optional(rtype)); + } EOS_RETHROW_EXCEPTIONS( unpack_exception, "Unable to unpack ${class} type '${type}' while processing '${p}'", + ("class", is_array(rtype) ? "array of built-in" : is_optional(rtype) ? 
"optional of built-in" : "built-in") + ("type", ftype)("p", ctx.get_path_string()) ) } if ( is_array(rtype) ) { - fc::unsigned_int size; - fc::raw::unpack(stream, size); - vector vars; - for( decltype(size.value) i = 0; i < size; ++i ) { - auto v = _binary_to_variant(ftype, stream, recursion_depth, deadline, max_serialization_time); - EOS_ASSERT( !v.is_null(), unpack_exception, "Invalid packed array" ); - vars.emplace_back(std::move(v)); - } - EOS_ASSERT( vars.size() == size.value, - unpack_exception, - "packed size does not match unpacked array size, packed size ${p} actual size ${a}", - ("p", size)("a", vars.size()) ); - return fc::variant( std::move(vars) ); + ctx.hint_array_type_if_in_array(); + fc::unsigned_int size; + try { + fc::raw::unpack(stream, size); + } EOS_RETHROW_EXCEPTIONS( unpack_exception, "Unable to unpack size of array '${p}'", ("p", ctx.get_path_string()) ) + vector vars; + auto h1 = ctx.push_to_path( impl::array_index_path_item{} ); + for( decltype(size.value) i = 0; i < size; ++i ) { + ctx.set_array_index_of_path_back(i); + auto v = _binary_to_variant(ftype, stream, ctx); + // QUESTION: Is it actually desired behavior to require the returned variant to not be null? + // This would disallow arrays of optionals in general (though if all optionals in the array were present it would be allowed). + // Is there any scenario in which the returned variant would be null other than in the case of an empty optional? + EOS_ASSERT( !v.is_null(), unpack_exception, "Invalid packed array '${p}'", ("p", ctx.get_path_string()) ); + vars.emplace_back(std::move(v)); + } + // QUESTION: Why would the assert below ever fail? + EOS_ASSERT( vars.size() == size.value, + unpack_exception, + "packed size does not match unpacked array size, packed size ${p} actual size ${a}", + ("p", size)("a", vars.size()) ); + return fc::variant( std::move(vars) ); } else if ( is_optional(rtype) ) { - char flag; - fc::raw::unpack(stream, flag); - return flag ? 
_binary_to_variant(ftype, stream, recursion_depth, deadline, max_serialization_time) : fc::variant(); + char flag; + try { + fc::raw::unpack(stream, flag); + } EOS_RETHROW_EXCEPTIONS( unpack_exception, "Unable to unpack presence flag of optional '${p}'", ("p", ctx.get_path_string()) ) + return flag ? _binary_to_variant(ftype, stream, ctx) : fc::variant(); } else { - auto v = variants.find(rtype); - if( v != variants.end() ) { + auto v_itr = variants.find(rtype); + if( v_itr != variants.end() ) { + ctx.hint_variant_type_if_in_array( v_itr ); fc::unsigned_int select; - fc::raw::unpack(stream, select); - EOS_ASSERT( (size_t)select < v->second.types.size(), unpack_exception, "Invalid packed variant" ); - return vector{v->second.types[select], _binary_to_variant(v->second.types[select], stream, recursion_depth, deadline, max_serialization_time)}; + try { + fc::raw::unpack(stream, select); + } EOS_RETHROW_EXCEPTIONS( unpack_exception, "Unable to unpack tag of variant '${p}'", ("p", ctx.get_path_string()) ) + EOS_ASSERT( (size_t)select < v_itr->second.types.size(), unpack_exception, + "Unpacked invalid tag (${select}) for variant '${p}'", ("select", select.value)("p",ctx.get_path_string()) ); + auto h1 = ctx.push_to_path( impl::variant_path_item{ .variant_itr = v_itr, .variant_ordinal = static_cast(select) } ); + return vector{v_itr->second.types[select], _binary_to_variant(v_itr->second.types[select], stream, ctx)}; } } fc::mutable_variant_object mvo; - _binary_to_variant(rtype, stream, mvo, recursion_depth, deadline, max_serialization_time); - EOS_ASSERT( mvo.size() > 0, unpack_exception, "Unable to unpack stream ${type}", ("type", type) ); + _binary_to_variant(rtype, stream, mvo, ctx); + // QUESTION: Is this assert actually desired? It disallows unpacking empty structs from datastream. 
+ EOS_ASSERT( mvo.size() > 0, unpack_exception, "Unable to unpack '${p}' from stream", ("p", ctx.get_path_string()) ); return fc::variant( std::move(mvo) ); } - fc::variant abi_serializer::_binary_to_variant( const type_name& type, const bytes& binary, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const + fc::variant abi_serializer::_binary_to_variant( const type_name& type, const bytes& binary, impl::binary_to_variant_context& ctx )const { - EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); fc::datastream ds( binary.data(), binary.size() ); - return _binary_to_variant(type, ds, recursion_depth, deadline, max_serialization_time); + return _binary_to_variant(type, ds, ctx); } - void abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, bool allow_extensions, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const + fc::variant abi_serializer::binary_to_variant( const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time, bool short_path )const { + impl::binary_to_variant_context ctx(*this, max_serialization_time, type); + ctx.short_path = short_path; + return _binary_to_variant(type, binary, ctx); + } + + fc::variant abi_serializer::binary_to_variant( const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time, bool short_path )const { + impl::binary_to_variant_context ctx(*this, max_serialization_time, type); + ctx.short_path = short_path; + return _binary_to_variant(type, binary, ctx); + } + + void abi_serializer::_variant_to_binary( 
const type_name& type, const fc::variant& var, fc::datastream& ds, impl::variant_to_binary_context& ctx )const { try { - EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); auto rtype = resolve_type(type); + auto v_itr = variants.end(); + auto s_itr = structs.end(); + auto btype = built_in_types.find(fundamental_type(rtype)); if( btype != built_in_types.end() ) { btype->second.second(var, ds, is_array(rtype), is_optional(rtype)); } else if ( is_array(rtype) ) { + ctx.hint_array_type_if_in_array(); vector vars = var.get_array(); fc::raw::pack(ds, (fc::unsigned_int)vars.size()); + + auto h1 = ctx.push_to_path( impl::array_index_path_item{} ); + auto h2 = ctx.disallow_extensions_unless(false); + + int64_t i = 0; for (const auto& var : vars) { - _variant_to_binary(fundamental_type(rtype), var, ds, false, recursion_depth, deadline, max_serialization_time); + ctx.set_array_index_of_path_back(i); + _variant_to_binary(fundamental_type(rtype), var, ds, ctx); + ++i; } - } else if ( variants.find(rtype) != variants.end() ) { - EOS_ASSERT( var.is_array() && var.size() == 2 && var[size_t(0)].is_string(), abi_exception, "expected array containing variant" ); - auto& v = variants.find(rtype)->second; - auto it = find(v.types.begin(), v.types.end(), var[size_t(0)].get_string()); - EOS_ASSERT( it != v.types.end(), abi_exception, "type is not valid within this variant" ); + } else if( (v_itr = variants.find(rtype)) != variants.end() ) { + ctx.hint_variant_type_if_in_array( v_itr ); + auto& v = v_itr->second; + EOS_ASSERT( var.is_array() && var.size() == 2, pack_exception, + "Expected input to be an array of two items while processing variant '${p}'", ("p", ctx.get_path_string()) ); 
+ EOS_ASSERT( var[size_t(0)].is_string(), pack_exception, + "Encountered non-string as first item of input array while processing variant '${p}'", ("p", ctx.get_path_string()) ); + auto variant_type_str = var[size_t(0)].get_string(); + auto it = find(v.types.begin(), v.types.end(), variant_type_str); + EOS_ASSERT( it != v.types.end(), pack_exception, + "Specified type '${t}' in input array is not valid within the variant '${p}'", + ("t", ctx.maybe_shorten(variant_type_str))("p", ctx.get_path_string()) ); fc::raw::pack(ds, fc::unsigned_int(it - v.types.begin())); - _variant_to_binary( *it, var[size_t(1)], ds, allow_extensions, recursion_depth, deadline, max_serialization_time ); - } else { - const auto& st = get_struct(rtype); + auto h1 = ctx.push_to_path( impl::variant_path_item{ .variant_itr = v_itr, .variant_ordinal = static_cast(it - v.types.begin()) } ); + _variant_to_binary( *it, var[size_t(1)], ds, ctx ); + } else if( (s_itr = structs.find(rtype)) != structs.end() ) { + ctx.hint_struct_type_if_in_array( s_itr ); + const auto& st = s_itr->second; if( var.is_object() ) { const auto& vo = var.get_object(); if( st.base != type_name() ) { - _variant_to_binary(resolve_type(st.base), var, ds, false, recursion_depth, deadline, max_serialization_time); + auto h2 = ctx.disallow_extensions_unless(false); + _variant_to_binary(resolve_type(st.base), var, ds, ctx); } - bool missing_extension = false; - for( const auto& field : st.fields ) { + bool disallow_additional_fields = false; + for( uint32_t i = 0; i < st.fields.size(); ++i ) { + const auto& field = st.fields[i]; if( vo.contains( string(field.name).c_str() ) ) { - if( missing_extension ) - EOS_THROW( pack_exception, "Unexpected '${f}' in variant object", ("f",field.name) ); - _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, allow_extensions && &field == &st.fields.back(), recursion_depth, deadline, max_serialization_time); - } else if( ends_with(field.type, "$") && allow_extensions ) { - 
missing_extension = true; + if( disallow_additional_fields ) + EOS_THROW( pack_exception, "Unexpected field '${f}' found in input object while processing struct '${p}'", + ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); + { + auto h1 = ctx.push_to_path( impl::field_path_item{ .parent_struct_itr = s_itr, .field_ordinal = i } ); + auto h2 = ctx.disallow_extensions_unless( &field == &st.fields.back() ); + _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, ctx); + } + } else if( ends_with(field.type, "$") && ctx.extensions_allowed() ) { + disallow_additional_fields = true; + } else if( disallow_additional_fields ) { + EOS_THROW( abi_exception, "Encountered field '${f}' without binary extension designation while processing struct '${p}'", + ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); } else { - EOS_THROW( pack_exception, "Missing '${f}' in variant object", ("f",field.name) ); + EOS_THROW( pack_exception, "Missing field '${f}' in input object while processing struct '${p}'", + ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); } } } else if( var.is_array() ) { const auto& va = var.get_array(); - EOS_ASSERT( st.base == type_name(), invalid_type_inside_abi, "support for base class as array not yet implemented" ); - uint32_t i = 0; - for( const auto& field : st.fields ) { - if( va.size() > i ) - _variant_to_binary(_remove_bin_extension(field.type), va[i], ds, allow_extensions && &field == &st.fields.back(), recursion_depth, deadline, max_serialization_time); - else if( ends_with(field.type, "$") && allow_extensions ) + EOS_ASSERT( st.base == type_name(), invalid_type_inside_abi, + "Using input array to specify the fields of the derived struct '${p}'; input arrays are currently only allowed for structs without a base", + ("p",ctx.get_path_string()) ); + for( uint32_t i = 0; i < st.fields.size(); ++i ) { + const auto& field = st.fields[i]; + if( va.size() > i ) { + auto h1 = ctx.push_to_path( 
impl::field_path_item{ .parent_struct_itr = s_itr, .field_ordinal = i } ); + auto h2 = ctx.disallow_extensions_unless( &field == &st.fields.back() ); + _variant_to_binary(_remove_bin_extension(field.type), va[i], ds, ctx); + } else if( ends_with(field.type, "$") && ctx.extensions_allowed() ) { break; - else - EOS_THROW( pack_exception, "Early end to array specifying the fields of struct '${t}'; require input for field '${f}'", - ("t", st.name)("f", field.name) ); - ++i; + } else { + EOS_THROW( pack_exception, "Early end to input array specifying the fields of struct '${p}'; require input for field '${f}'", + ("p", ctx.get_path_string())("f", ctx.maybe_shorten(field.name)) ); + } } } else { - EOS_THROW( pack_exception, "Failed to serialize struct '${t}' in variant object", ("t", st.name)); + EOS_THROW( pack_exception, "Unexpected input encountered while processing struct '${p}'", ("p",ctx.get_path_string()) ); } + } else { + EOS_THROW( invalid_type_inside_abi, "Unknown type ${type}", ("type",ctx.maybe_shorten(type)) ); } } FC_CAPTURE_AND_RETHROW( (type)(var) ) } - bytes abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, bool allow_extensions, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const + bytes abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, impl::variant_to_binary_context& ctx )const { try { - EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - if( !_is_type(type, recursion_depth, deadline, max_serialization_time) ) { + auto h = ctx.enter_scope(); + if( !_is_type(type, ctx) ) { return var.as(); } bytes temp( 1024*1024 ); fc::datastream ds(temp.data(), 
temp.size() ); - _variant_to_binary(type, var, ds, allow_extensions, recursion_depth, deadline, max_serialization_time); + _variant_to_binary(type, var, ds, ctx); temp.resize(ds.tellp()); return temp; } FC_CAPTURE_AND_RETHROW( (type)(var) ) } + bytes abi_serializer::variant_to_binary( const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time, bool short_path )const { + impl::variant_to_binary_context ctx(*this, max_serialization_time, type); + ctx.short_path = short_path; + return _variant_to_binary(type, var, ctx); + } + + void abi_serializer::variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time, bool short_path )const { + impl::variant_to_binary_context ctx(*this, max_serialization_time, type); + ctx.short_path = short_path; + _variant_to_binary(type, var, ds, ctx); + } + type_name abi_serializer::get_action_type(name action)const { auto itr = actions.find(action); if( itr != actions.end() ) return itr->second; @@ -444,4 +540,276 @@ namespace eosio { namespace chain { return itr->second; } + namespace impl { + + void abi_traverse_context::check_deadline()const { + EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, + "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + } + + fc::scoped_exit> abi_traverse_context::enter_scope() { + std::function callback = [old_recursion_depth=recursion_depth, this](){ + recursion_depth = old_recursion_depth; + }; + + ++recursion_depth; + EOS_ASSERT( recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, + "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); + + check_deadline(); + + return {std::move(callback)}; + } + + void abi_traverse_context_with_path::set_path_root( const type_name& type ) { + auto rtype = abis.resolve_type(type); + + if( abis.is_array(rtype) ) { + root_of_path = 
array_type_path_root{}; + } else { + auto itr1 = abis.structs.find(rtype); + if( itr1 != abis.structs.end() ) { + root_of_path = struct_type_path_root{ .struct_itr = itr1 }; + } else { + auto itr2 = abis.variants.find(rtype); + if( itr2 != abis.variants.end() ) { + root_of_path = variant_type_path_root{ .variant_itr = itr2 }; + } + } + } + } + + fc::scoped_exit> abi_traverse_context_with_path::push_to_path( const path_item& item ) { + std::function callback = [this](){ + EOS_ASSERT( path.size() > 0, abi_exception, + "invariant failure in variant_to_binary_context: path is empty on scope exit" ); + path.pop_back(); + }; + + path.push_back( item ); + + return {std::move(callback)}; + } + + void abi_traverse_context_with_path::set_array_index_of_path_back( uint32_t i ) { + EOS_ASSERT( path.size() > 0, abi_exception, "path is empty" ); + + auto& b = path.back(); + + EOS_ASSERT( b.contains(), abi_exception, "trying to set array index without first pushing new array index item" ); + + b.get().array_index = i; + } + + void abi_traverse_context_with_path::hint_array_type_if_in_array() { + if( path.size() == 0 || !path.back().contains() ) + return; + + path.back().get().type_hint = array_type_path_root{}; + } + + void abi_traverse_context_with_path::hint_struct_type_if_in_array( const map::const_iterator& itr ) { + if( path.size() == 0 || !path.back().contains() ) + return; + + path.back().get().type_hint = struct_type_path_root{ .struct_itr = itr }; + } + + void abi_traverse_context_with_path::hint_variant_type_if_in_array( const map::const_iterator& itr ) { + if( path.size() == 0 || !path.back().contains() ) + return; + + path.back().get().type_hint = variant_type_path_root{ .variant_itr = itr }; + } + + constexpr size_t const_strlen( const char* str ) + { + return (*str == 0) ? 
0 : const_strlen(str + 1) + 1; + } + + void output_name( std::ostream& s, const string& str, bool shorten, size_t max_length = 64 ) { + constexpr size_t min_num_characters_at_ends = 4; + constexpr size_t preferred_num_tail_end_characters = 6; + constexpr const char* fill_in = "..."; + + static_assert( min_num_characters_at_ends <= preferred_num_tail_end_characters, + "preferred number of tail end characters cannot be less than the imposed absolute minimum" ); + + constexpr size_t fill_in_length = const_strlen( fill_in ); + constexpr size_t min_length = fill_in_length + 2*min_num_characters_at_ends; + constexpr size_t preferred_min_length = fill_in_length + 2*preferred_num_tail_end_characters; + + max_length = std::max( max_length, min_length ); + + if( !shorten || str.size() <= max_length ) { + s << str; + return; + } + + size_t actual_num_tail_end_characters = preferred_num_tail_end_characters; + if( max_length < preferred_min_length ) { + actual_num_tail_end_characters = min_num_characters_at_ends + (max_length - min_length)/2; + } + + s.write( str.data(), max_length - fill_in_length - actual_num_tail_end_characters ); + s.write( fill_in, fill_in_length ); + s.write( str.data() + (str.size() - actual_num_tail_end_characters), actual_num_tail_end_characters ); + } + + struct generate_path_string_visitor { + using result_type = void; + + generate_path_string_visitor( bool shorten_names, bool track_only ) + : shorten_names(shorten_names), track_only( track_only ) + {} + + std::stringstream s; + bool shorten_names = false; + bool track_only = false; + path_item last_path_item; + + void add_dot() { + s << "."; + } + + void operator()( const empty_path_item& item ) { + } + + void operator()( const array_index_path_item& item ) { + if( track_only ) { + last_path_item = item; + return; + } + + s << "[" << item.array_index << "]"; + } + + void operator()( const field_path_item& item ) { + if( track_only ) { + last_path_item = item; + return; + } + + const auto& str = 
item.parent_struct_itr->second.fields.at(item.field_ordinal).name; + output_name( s, str, shorten_names ); + } + + void operator()( const variant_path_item& item ) { + if( track_only ) { + last_path_item = item; + return; + } + + s << ""; + } + + void operator()( const empty_path_root& item ) { + } + + void operator()( const array_type_path_root& item ) { + s << "ARRAY"; + } + + void operator()( const struct_type_path_root& item ) { + const auto& str = item.struct_itr->first; + output_name( s, str, shorten_names ); + } + + void operator()( const variant_type_path_root& item ) { + const auto& str = item.variant_itr->first; + output_name( s, str, shorten_names ); + } + }; + + struct path_item_type_visitor { + using result_type = void; + + path_item_type_visitor( std::stringstream& s, bool shorten_names ) + : s(s), shorten_names(shorten_names) + {} + + std::stringstream& s; + bool shorten_names = false; + + void operator()( const empty_path_item& item ) { + } + + void operator()( const array_index_path_item& item ) { + const auto& th = item.type_hint; + if( th.contains() ) { + const auto& str = th.get().struct_itr->first; + output_name( s, str, shorten_names ); + } else if( th.contains() ) { + const auto& str = th.get().variant_itr->first; + output_name( s, str, shorten_names ); + } else if( th.contains() ) { + s << "ARRAY"; + } else { + s << "UNKNOWN"; + } + } + + void operator()( const field_path_item& item ) { + const auto& str = item.parent_struct_itr->second.fields.at(item.field_ordinal).type; + output_name( s, str, shorten_names ); + } + + void operator()( const variant_path_item& item ) { + const auto& str = item.variant_itr->second.types.at(item.variant_ordinal); + output_name( s, str, shorten_names ); + } + }; + + string abi_traverse_context_with_path::get_path_string()const { + bool full_path = !short_path; + bool shorten_names = short_path; + + generate_path_string_visitor visitor(shorten_names, !full_path); + if( full_path ) + root_of_path.visit( visitor 
); + for( size_t i = 0, n = path.size(); i < n; ++i ) { + if( full_path && !path[i].contains() ) + visitor.add_dot(); + + path[i].visit( visitor ); + + } + + if( !full_path ) { + if( visitor.last_path_item.contains() ) { + root_of_path.visit( visitor ); + } else { + path_item_type_visitor vis2(visitor.s, shorten_names); + visitor.last_path_item.visit(vis2); + } + } + + return visitor.s.str(); + } + + string abi_traverse_context_with_path::maybe_shorten( const string& str ) { + if( !short_path ) + return str; + + std::stringstream s; + output_name( s, str, true ); + return s.str(); + } + + fc::scoped_exit> variant_to_binary_context::disallow_extensions_unless( bool condition ) { + std::function callback = [old_allow_extensions=allow_extensions, this](){ + allow_extensions = old_allow_extensions; + }; + + if( !condition ) { + allow_extensions = false; + } + + return {std::move(callback)}; + } + } + } } diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index f7513debf62..de1450013d8 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -29,81 +29,95 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { } } -action_trace apply_context::exec_one() +void apply_context::exec_one( action_trace& trace ) { auto start = fc::time_point::now(); + action_receipt r; + r.receiver = receiver; + r.act_digest = digest_type::hash(act); + + trace.trx_id = trx_context.id; + trace.block_num = control.pending_block_state()->block_num; + trace.block_time = control.pending_block_time(); + trace.producer_block_id = control.pending_producer_block_id(); + trace.act = act; + trace.context_free = context_free; + const auto& cfg = control.get_global_properties().configuration; try { - const auto& a = control.get_account( receiver ); - privileged = a.privileged; - auto native = control.find_apply_handler( receiver, act.account, act.name ); - if( native ) { - if( trx_context.can_subjectively_fail && 
control.is_producing_block()) { - control.check_contract_list( receiver ); - control.check_action_list( act.account, act.name ); + try { + const auto& a = control.get_account( receiver ); + privileged = a.privileged; + auto native = control.find_apply_handler( receiver, act.account, act.name ); + if( native ) { + if( trx_context.can_subjectively_fail && control.is_producing_block() ) { + control.check_contract_list( receiver ); + control.check_action_list( act.account, act.name ); + } + (*native)( *this ); } - (*native)( *this ); - } - if( a.code.size() > 0 - && !(act.account == config::system_account_name && act.name == N( setcode ) && - receiver == config::system_account_name)) { - if( trx_context.can_subjectively_fail && control.is_producing_block()) { - control.check_contract_list( receiver ); - control.check_action_list( act.account, act.name ); + if( a.code.size() > 0 + && !(act.account == config::system_account_name && act.name == N( setcode ) && + receiver == config::system_account_name) ) { + if( trx_context.can_subjectively_fail && control.is_producing_block() ) { + control.check_contract_list( receiver ); + control.check_action_list( act.account, act.name ); + } + try { + control.get_wasm_interface().apply( a.code_version, a.code, *this ); + } catch( const wasm_exit& ) {} } - try { - control.get_wasm_interface().apply( a.code_version, a.code, *this ); - } catch( const wasm_exit& ) {} - } - - } FC_RETHROW_EXCEPTIONS(warn, "pending console output: ${console}", ("console", _pending_console_output.str())) + } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output.str()) ) + } catch( fc::exception& e ) { + trace.receipt = r; // fill with known data + trace.except = e; + finalize_trace( trace, start ); + throw; + } - action_receipt r; - r.receiver = receiver; - r.act_digest = digest_type::hash(act); r.global_sequence = next_global_sequence(); r.recv_sequence = next_recv_sequence( receiver ); const auto& 
account_sequence = db.get(act.account); - r.code_sequence = account_sequence.code_sequence; - r.abi_sequence = account_sequence.abi_sequence; + r.code_sequence = account_sequence.code_sequence; // could be modified by action execution above + r.abi_sequence = account_sequence.abi_sequence; // could be modified by action execution above for( const auto& auth : act.authorization ) { r.auth_sequence[auth.actor] = next_auth_sequence( auth.actor ); } - action_trace t(r); - t.trx_id = trx_context.id; - t.block_num = control.pending_block_state()->block_num; - t.block_time = control.pending_block_time(); - t.producer_block_id = control.pending_producer_block_id(); - t.account_ram_deltas = std::move( _account_ram_deltas ); - _account_ram_deltas.clear(); - t.act = act; - t.context_free = context_free; - t.console = _pending_console_output.str(); + trace.receipt = r; trx_context.executed.emplace_back( move(r) ); + finalize_trace( trace, start ); + if ( control.contracts_console() ) { - print_debug(receiver, t); + print_debug(receiver, trace); } +} + +void apply_context::finalize_trace( action_trace& trace, const fc::time_point& start ) +{ + trace.account_ram_deltas = std::move( _account_ram_deltas ); + _account_ram_deltas.clear(); + trace.console = _pending_console_output.str(); reset_console(); - t.elapsed = fc::time_point::now() - start; - return t; + trace.elapsed = fc::time_point::now() - start; } -void apply_context::exec() +void apply_context::exec( action_trace& trace ) { _notified.push_back(receiver); - trace = exec_one(); + exec_one( trace ); for( uint32_t i = 1; i < _notified.size(); ++i ) { receiver = _notified[i]; - trace.inline_traces.emplace_back( exec_one() ); + trace.inline_traces.emplace_back( ); + exec_one( trace.inline_traces.back() ); } if( _cfa_inline_actions.size() > 0 || _inline_actions.size() > 0 ) { @@ -272,8 +286,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a } uint32_t trx_size = 0; - auto& d = control.db(); - 
if ( auto ptr = d.find(boost::make_tuple(receiver, sender_id)) ) { + if ( auto ptr = db.find(boost::make_tuple(receiver, sender_id)) ) { EOS_ASSERT( replace_existing, deferred_tx_duplicate, "deferred transaction with the same sender_id and payer already exists" ); // TODO: Remove the following subjective check when the deferred trx replacement RAM bug has been fixed with a hard fork. @@ -283,7 +296,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a // TODO: The logic of the next line needs to be incorporated into the next hard fork. // add_ram_usage( ptr->payer, -(config::billable_size_v + ptr->packed_trx.size()) ); - d.modify( *ptr, [&]( auto& gtx ) { + db.modify( *ptr, [&]( auto& gtx ) { gtx.sender = receiver; gtx.sender_id = sender_id; gtx.payer = payer; @@ -294,7 +307,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a trx_size = gtx.set( trx ); }); } else { - d.create( [&]( auto& gtx ) { + db.create( [&]( auto& gtx ) { gtx.trx_id = trx.id(); gtx.sender = receiver; gtx.sender_id = sender_id; diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index 02207578d98..e03783c8f9d 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -13,22 +13,134 @@ #include #include #include +#include + namespace eosio { namespace chain { + using authorization_index_set = index_set< + permission_index, + permission_usage_index, + permission_link_index + >; + authorization_manager::authorization_manager(controller& c, database& d) :_control(c),_db(d){} void authorization_manager::add_indices() { - _db.add_index(); - _db.add_index(); - _db.add_index(); + authorization_index_set::add_indices(_db); } void authorization_manager::initialize_database() { _db.create([](auto&){}); /// reserve perm 0 (used else where) } + void authorization_manager::calculate_integrity_hash( fc::sha256::encoder& enc ) const { + 
authorization_index_set::walk_indices([this, &enc]( auto utils ){ + decltype(utils)::walk(_db, [&enc]( const auto &row ) { + fc::raw::pack(enc, row); + }); + }); + } + + namespace detail { + template<> + struct snapshot_row_traits { + using value_type = permission_object; + using snapshot_type = snapshot_permission_object; + + static snapshot_permission_object to_snapshot_row(const permission_object& value, const chainbase::database& db) { + snapshot_permission_object res; + res.name = value.name; + res.owner = value.owner; + res.last_updated = value.last_updated; + res.auth = value.auth.to_authority(); + + // lookup parent name + const auto& parent = db.get(value.parent); + res.parent = parent.name; + + // lookup the usage object + const auto& usage = db.get(value.usage_id); + res.last_used = usage.last_used; + + return res; + }; + + static void from_snapshot_row(snapshot_permission_object&& row, permission_object& value, chainbase::database& db) { + value.name = row.name; + value.owner = row.owner; + value.last_updated = row.last_updated; + value.auth = row.auth; + + value.parent = 0; + if (value.id == 0) { + EOS_ASSERT(row.parent == permission_name(), snapshot_exception, "Unexpected parent name on reserved permission 0"); + EOS_ASSERT(row.name == permission_name(), snapshot_exception, "Unexpected permission name on reserved permission 0"); + EOS_ASSERT(row.owner == name(), snapshot_exception, "Unexpected owner name on reserved permission 0"); + EOS_ASSERT(row.auth.accounts.size() == 0, snapshot_exception, "Unexpected auth accounts on reserved permission 0"); + EOS_ASSERT(row.auth.keys.size() == 0, snapshot_exception, "Unexpected auth keys on reserved permission 0"); + EOS_ASSERT(row.auth.waits.size() == 0, snapshot_exception, "Unexpected auth waits on reserved permission 0"); + EOS_ASSERT(row.auth.threshold == 0, snapshot_exception, "Unexpected auth threshold on reserved permission 0"); + EOS_ASSERT(row.last_updated == time_point(), snapshot_exception, 
"Unexpected auth last updated on reserved permission 0"); + value.parent = 0; + } else if ( row.parent != permission_name()){ + const auto& parent = db.get(boost::make_tuple(row.owner, row.parent)); + + EOS_ASSERT(parent.id != 0, snapshot_exception, "Unexpected mapping to reserved permission 0"); + value.parent = parent.id; + } + + if (value.id != 0) { + // create the usage object + const auto& usage = db.create([&](auto& p) { + p.last_used = row.last_used; + }); + value.usage_id = usage.id; + } else { + value.usage_id = 0; + } + } + }; + } + + void authorization_manager::add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { + authorization_index_set::walk_indices([this, &snapshot]( auto utils ){ + using section_t = typename decltype(utils)::index_t::value_type; + + // skip the permission_usage_index as its inlined with permission_index + if (std::is_same::value) { + return; + } + + snapshot->write_section([this]( auto& section ){ + decltype(utils)::walk(_db, [this, §ion]( const auto &row ) { + section.add_row(row, _db); + }); + }); + }); + } + + void authorization_manager::read_from_snapshot( const snapshot_reader_ptr& snapshot ) { + authorization_index_set::walk_indices([this, &snapshot]( auto utils ){ + using section_t = typename decltype(utils)::index_t::value_type; + + // skip the permission_usage_index as its inlined with permission_index + if (std::is_same::value) { + return; + } + + snapshot->read_section([this]( auto& section ) { + bool more = !section.empty(); + while(more) { + decltype(utils)::create(_db, [this, §ion, &more]( auto &row ) { + more = section.read_row(row, _db); + }); + } + }); + }); + } + const permission_object& authorization_manager::create_permission( account_name account, permission_name name, permission_id_type parent, diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index fc96d309354..dc769cc9612 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -12,7 +12,15 @@ 
namespace eosio { namespace chain { - const uint32_t block_log::supported_version = 1; + const uint32_t block_log::min_supported_version = 1; + + /** + * History: + * Version 1: complete block log from genesis + * Version 2: adds optional partial block log, cannot be used for replay without snapshot + * this is in the form of an first_block_num that is written immediately after the version + */ + const uint32_t block_log::max_supported_version = 2; namespace detail { class block_log_impl { @@ -26,6 +34,8 @@ namespace eosio { namespace chain { bool block_write; bool index_write; bool genesis_written_to_block_log = false; + uint32_t version = 0; + uint32_t first_block_num = 0; inline void check_block_read() { if (block_write) { @@ -124,14 +134,23 @@ namespace eosio { namespace chain { ilog("Log is nonempty"); my->check_block_read(); my->block_stream.seekg( 0 ); - uint32_t version = 0; - my->block_stream.read( (char*)&version, sizeof(version) ); - EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly with genesis information." ); - EOS_ASSERT( version == block_log::supported_version, block_log_unsupported_version, - "Unsupported version of block log. Block log version is ${version} while code supports version ${supported}", - ("version", version)("supported", block_log::supported_version) ); + my->version = 0; + my->block_stream.read( (char*)&my->version, sizeof(my->version) ); + EOS_ASSERT( my->version > 0, block_log_exception, "Block log was not setup properly" ); + EOS_ASSERT( my->version >= min_supported_version && my->version <= max_supported_version, block_log_unsupported_version, + "Unsupported version of block log. Block log version is ${version} while code supports version(s) [${min},${max}]", + ("version", my->version)("min", block_log::min_supported_version)("max", block_log::max_supported_version) ); + my->genesis_written_to_block_log = true; // Assume it was constructed properly. 
+ if (my->version > 1){ + my->first_block_num = 0; + my->block_stream.read( (char*)&my->first_block_num, sizeof(my->first_block_num) ); + EOS_ASSERT(my->first_block_num > 0, block_log_exception, "Block log is malformed, first recorded block number is 0 but must be greater than or equal to 1"); + } else { + my->first_block_num = 1; + } + my->head = read_head(); my->head_id = my->head->id(); @@ -176,11 +195,11 @@ namespace eosio { namespace chain { my->check_index_write(); uint64_t pos = my->block_stream.tellp(); - EOS_ASSERT(my->index_stream.tellp() == sizeof(uint64_t) * (b->block_num() - 1), + EOS_ASSERT(my->index_stream.tellp() == sizeof(uint64_t) * (b->block_num() - my->first_block_num), block_log_append_fail, "Append to index file occuring at wrong position.", ("position", (uint64_t) my->index_stream.tellp()) - ("expected", (b->block_num() - 1) * sizeof(uint64_t))); + ("expected", (b->block_num() - my->first_block_num) * sizeof(uint64_t))); auto data = fc::raw::pack(*b); my->block_stream.write(data.data(), data.size()); my->block_stream.write((char*)&pos, sizeof(pos)); @@ -200,44 +219,50 @@ namespace eosio { namespace chain { my->index_stream.flush(); } - uint64_t block_log::reset_to_genesis( const genesis_state& gs, const signed_block_ptr& genesis_block ) { - if( my->block_stream.is_open() ) + void block_log::reset( const genesis_state& gs, const signed_block_ptr& first_block, uint32_t first_block_num ) { + if (my->block_stream.is_open()) my->block_stream.close(); - if( my->index_stream.is_open() ) + if (my->index_stream.is_open()) my->index_stream.close(); - fc::remove_all( my->block_file ); - fc::remove_all( my->index_file ); + fc::remove_all(my->block_file); + fc::remove_all(my->index_file); my->block_stream.open(my->block_file.generic_string().c_str(), LOG_WRITE); my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE); my->block_write = true; my->index_write = true; - auto data = fc::raw::pack( gs ); - uint32_t version = 0; // version of 
0 is invalid; it indicates that the genesis was not properly written to the block log - my->block_stream.write( (char*)&version, sizeof(version) ); - my->block_stream.write( data.data(), data.size() ); + auto data = fc::raw::pack(gs); + my->version = 0; // version of 0 is invalid; it indicates that the genesis was not properly written to the block log + my->first_block_num = first_block_num; + my->block_stream.write((char*)&my->version, sizeof(my->version)); + my->block_stream.write((char*)&my->first_block_num, sizeof(my->first_block_num)); + my->block_stream.write(data.data(), data.size()); my->genesis_written_to_block_log = true; - auto ret = append( genesis_block ); + // append a totem to indicate the division between blocks and header + auto totem = npos; + my->block_stream.write((char*)&totem, sizeof(totem)); + + if (first_block) { + append(first_block); + } auto pos = my->block_stream.tellp(); my->block_stream.close(); my->block_stream.open(my->block_file.generic_string().c_str(), std::ios::in | std::ios::out | std::ios::binary ); // Bypass append-only writing just once - static_assert( block_log::supported_version > 0, "a version number of zero is not supported" ); - version = block_log::supported_version; + static_assert( block_log::max_supported_version > 0, "a version number of zero is not supported" ); + my->version = block_log::max_supported_version; my->block_stream.seekp( 0 ); - my->block_stream.write( (char*)&version, sizeof(version) ); // Finally write actual version to disk. + my->block_stream.write( (char*)&my->version, sizeof(my->version) ); my->block_stream.seekp( pos ); flush(); my->block_write = false; my->check_block_write(); // Reset to append-only writing. 
- - return ret; } std::pair block_log::read_block(uint64_t pos)const { @@ -266,10 +291,9 @@ namespace eosio { namespace chain { uint64_t block_log::get_block_pos(uint32_t block_num) const { my->check_index_read(); - - if (!(my->head && block_num <= block_header::num_from_id(my->head_id) && block_num > 0)) + if (!(my->head && block_num <= block_header::num_from_id(my->head_id) && block_num >= my->first_block_num)) return npos; - my->index_stream.seekg(sizeof(uint64_t) * (block_num - 1)); + my->index_stream.seekg(sizeof(uint64_t) * (block_num - my->first_block_num)); uint64_t pos; my->index_stream.read((char*)&pos, sizeof(pos)); return pos; @@ -287,13 +311,21 @@ namespace eosio { namespace chain { my->block_stream.seekg(-sizeof(pos), std::ios::end); my->block_stream.read((char*)&pos, sizeof(pos)); - return read_block(pos).first; + if (pos != npos) { + return read_block(pos).first; + } else { + return {}; + } } const signed_block_ptr& block_log::head()const { return my->head; } + uint32_t block_log::first_block_num() const { + return my->first_block_num; + } + void block_log::construct_index() { ilog("Reconstructing Block Log Index..."); my->index_stream.close(); @@ -308,12 +340,23 @@ namespace eosio { namespace chain { my->block_stream.read((char*)&end_pos, sizeof(end_pos)); signed_block tmp; - uint64_t pos = 4; // Skip version which should have already been checked. + uint64_t pos = 0; + if (my->version == 1) { + pos = 4; // Skip version which should have already been checked. 
+ } else { + pos = 8; // Skip version and first block offset which should have already been checked + } my->block_stream.seekg(pos); genesis_state gs; fc::raw::unpack(my->block_stream, gs); + // skip the totem + if (my->version > 1) { + uint64_t totem; + my->block_stream.read((char*) &totem, sizeof(totem)); + } + while( pos < end_pos ) { fc::raw::unpack(my->block_stream, tmp); my->block_stream.read((char*)&pos, sizeof(pos)); @@ -361,18 +404,37 @@ namespace eosio { namespace chain { uint32_t version = 0; old_block_stream.read( (char*)&version, sizeof(version) ); - EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly with genesis information." ); - EOS_ASSERT( version == block_log::supported_version, block_log_unsupported_version, - "Unsupported version of block log. Block log version is ${version} while code supports version ${supported}", - ("version", version)("supported", block_log::supported_version) ); + EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly" ); + EOS_ASSERT( version >= min_supported_version && version <= max_supported_version, block_log_unsupported_version, + "Unsupported version of block log. 
Block log version is ${version} while code supports version(s) [${min},${max}]", + ("version", version)("min", block_log::min_supported_version)("max", block_log::max_supported_version) ); + + new_block_stream.write( (char*)&version, sizeof(version) ); + + uint32_t first_block_num = 1; + if (version != 1) { + old_block_stream.read ( (char*)&first_block_num, sizeof(first_block_num) ); + new_block_stream.write( (char*)&first_block_num, sizeof(first_block_num) ); + } genesis_state gs; fc::raw::unpack(old_block_stream, gs); auto data = fc::raw::pack( gs ); - new_block_stream.write( (char*)&version, sizeof(version) ); new_block_stream.write( data.data(), data.size() ); + if (version != 1) { + auto expected_totem = npos; + std::decay_t actual_totem; + old_block_stream.read ( (char*)&actual_totem, sizeof(actual_totem) ); + + EOS_ASSERT(actual_totem == expected_totem, block_log_exception, + "Expected separator between block log header and blocks was not found( expected: ${e}, actual: ${a} )", + ("e", fc::to_hex((char*)&expected_totem, sizeof(expected_totem) ))("a", fc::to_hex((char*)&actual_totem, sizeof(actual_totem) ))); + + new_block_stream.write( (char*)&actual_totem, sizeof(actual_totem) ); + } + std::exception_ptr except_ptr; vector incomplete_block_data; optional bad_block; @@ -472,10 +534,15 @@ namespace eosio { namespace chain { uint32_t version = 0; block_stream.read( (char*)&version, sizeof(version) ); - EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly with genesis information." ); - EOS_ASSERT( version == block_log::supported_version, block_log_unsupported_version, - "Unsupported version of block log. Block log version is ${version} while code supports version ${supported}", - ("version", version)("supported", block_log::supported_version) ); + EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly." 
); + EOS_ASSERT( version >= min_supported_version && version <= max_supported_version, block_log_unsupported_version, + "Unsupported version of block log. Block log version is ${version} while code supports version(s) [${min},${max}]", + ("version", version)("min", block_log::min_supported_version)("max", block_log::max_supported_version) ); + + uint32_t first_block_num = 1; + if (version != 1) { + block_stream.read ( (char*)&first_block_num, sizeof(first_block_num) ); + } genesis_state gs; fc::raw::unpack(block_stream, gs); diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 8c7e00ec783..c2063c1316c 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -15,17 +15,40 @@ #include #include +#include #include #include #include +#include + #include namespace eosio { namespace chain { using resource_limits::resource_limits_manager; +using controller_index_set = index_set< + account_index, + account_sequence_index, + global_property_multi_index, + dynamic_global_property_multi_index, + block_summary_multi_index, + transaction_multi_index, + generated_transaction_multi_index, + table_id_multi_index +>; + +using contract_database_index_set = index_set< + key_value_index, + index64_index, + index128_index, + index256_index, + index_double_index, + index_long_double_index +>; + class maybe_session { public: maybe_session() = default; @@ -109,6 +132,7 @@ struct controller_impl { bool in_trx_requiring_checks = false; ///< if true, checks that are normally skipped on replay (e.g. 
auth checks) cannot be skipped optional subjective_cpu_leeway; bool trusted_producer_light_validation = false; + uint32_t snapshot_head_block = 0; typedef pair handler_key; map< account_name, map > apply_handlers; @@ -130,6 +154,7 @@ struct controller_impl { } if ( read_mode == db_read_mode::SPECULATIVE ) { + EOS_ASSERT( head->block && head->block->transactions.size() == head->trxs.size(), block_validate_exception, "attempting to pop a block that was sparsely loaded from a snapshot"); for( const auto& t : head->trxs ) unapplied_transactions[t->signed_id] = t; } @@ -216,21 +241,31 @@ struct controller_impl { blog.read_head(); const auto& log_head = blog.head(); - EOS_ASSERT( log_head, block_log_exception, "block log head can not be found" ); - auto lh_block_num = log_head->block_num(); + bool append_to_blog = false; + if (!log_head) { + if (s->block) { + EOS_ASSERT(s->block_num == blog.first_block_num(), block_log_exception, "block log has no blocks and is appending the wrong first block. 
Expected ${expecgted}, but received: ${actual}", + ("expected", blog.first_block_num())("actual", s->block_num)); + append_to_blog = true; + } else { + EOS_ASSERT(s->block_num == blog.first_block_num() - 1, block_log_exception, "block log has no blocks and is not properly set up to start after the snapshot"); + } + } else { + auto lh_block_num = log_head->block_num(); + if (s->block_num > lh_block_num) { + EOS_ASSERT(s->block_num - 1 == lh_block_num, unlinkable_block_exception, "unlinkable block", ("s->block_num", s->block_num)("lh_block_num", lh_block_num)); + EOS_ASSERT(s->block->previous == log_head->id(), unlinkable_block_exception, "irreversible doesn't link to block log head"); + append_to_blog = true; + } + } + db.commit( s->block_num ); - if( s->block_num <= lh_block_num ) { -// edump((s->block_num)("double call to on_irr")); -// edump((s->block_num)(s->block->previous)(log_head->id())); - return; + if( append_to_blog ) { + blog.append(s->block); } - EOS_ASSERT( s->block_num - 1 == lh_block_num, unlinkable_block_exception, "unlinkable block", ("s->block_num",s->block_num)("lh_block_num", lh_block_num) ); - EOS_ASSERT( s->block->previous == log_head->id(), unlinkable_block_exception, "irreversible doesn't link to block log head" ); - blog.append(s->block); - const auto& ubi = reversible_blocks.get_index(); auto objitr = ubi.begin(); while( objitr != ubi.end() && objitr->blocknum <= s->block_num ) { @@ -238,64 +273,91 @@ struct controller_impl { objitr = ubi.begin(); } - if ( read_mode == db_read_mode::IRREVERSIBLE ) { - apply_block( s->block, controller::block_status::complete ); - fork_db.mark_in_current_chain( s, true ); - fork_db.set_validity( s, true ); - head = s; + // the "head" block when a snapshot is loaded is virtual and has no block data, all of its effects + // should already have been loaded from the snapshot so, it cannot be applied + if (s->block) { + if (read_mode == db_read_mode::IRREVERSIBLE) { + // when applying a snapshot, head may not be 
present + // when not applying a snapshot, make sure this is the next block + if (!head || s->block_num == head->block_num + 1) { + apply_block(s->block, controller::block_status::complete); + head = s; + } else { + // otherwise, assert the one odd case where initializing a chain + // from genesis creates and applies the first block automatically. + // when syncing from another chain, this is pushed in again + EOS_ASSERT(!head || head->block_num == 1, block_validate_exception, "Attempting to re-apply an irreversible block that was not the implied genesis block"); + } + + fork_db.mark_in_current_chain(head, true); + fork_db.set_validity(head, true); + } + emit(self.irreversible_block, s); } - emit( self.irreversible_block, s ); } - void init() { + void replay() { + auto blog_head = blog.read_head(); + auto blog_head_time = blog_head->timestamp.to_time_point(); + replaying = true; + replay_head_time = blog_head_time; + ilog( "existing block log, attempting to replay ${n} blocks", ("n",blog_head->block_num()) ); + + auto start = fc::time_point::now(); + while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) { + self.push_block( next, controller::block_status::irreversible ); + if( next->block_num() % 100 == 0 ) { + std::cerr << std::setw(10) << next->block_num() << " of " << blog_head->block_num() <<"\r"; + } + } + std::cerr<< "\n"; + ilog( "${n} blocks replayed", ("n", head->block_num) ); + + // if the irreverible log is played without undo sessions enabled, we need to sync the + // revision ordinal to the appropriate expected value here. + if( self.skip_db_sessions( controller::block_status::irreversible ) ) + db.set_revision(head->block_num); + + int rev = 0; + while( auto obj = reversible_blocks.find(head->block_num+1) ) { + ++rev; + self.push_block( obj->get_block(), controller::block_status::validated ); + } - /** - * The fork database needs an initial block_state to be set before - * it can accept any new blocks. 
This initial block state can be found - * in the database (whose head block state should be irreversible) or - * it would be the genesis state. - */ - if( !head ) { + ilog( "${n} reversible blocks replayed", ("n",rev) ); + auto end = fc::time_point::now(); + ilog( "replayed ${n} blocks in ${duration} seconds, ${mspb} ms/block", + ("n", head->block_num)("duration", (end-start).count()/1000000) + ("mspb", ((end-start).count()/1000.0)/head->block_num) ); + replaying = false; + replay_head_time.reset(); + } + + void init(const snapshot_reader_ptr& snapshot) { + + if (snapshot) { + EOS_ASSERT(!head, fork_database_exception, ""); + snapshot->validate(); + + read_from_snapshot(snapshot); + + auto end = blog.read_head(); + if( !end ) { + blog.reset(conf.genesis, signed_block_ptr(), head->block_num + 1); + } else if ( end->block_num() > head->block_num) { + replay(); + } else { + EOS_ASSERT(end->block_num() == head->block_num, fork_database_exception, + "Block log is provided with snapshot but does not contain the head block from the snapshot"); + } + } else if( !head ) { initialize_fork_db(); // set head to genesis state auto end = blog.read_head(); if( end && end->block_num() > 1 ) { - auto end_time = end->timestamp.to_time_point(); - replaying = true; - replay_head_time = end_time; - ilog( "existing block log, attempting to replay ${n} blocks", ("n",end->block_num()) ); - - auto start = fc::time_point::now(); - while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) { - self.push_block( next, controller::block_status::irreversible ); - if( next->block_num() % 100 == 0 ) { - std::cerr << std::setw(10) << next->block_num() << " of " << end->block_num() <<"\r"; - } - } - std::cerr<< "\n"; - ilog( "${n} blocks replayed", ("n", head->block_num) ); - - // if the irreverible log is played without undo sessions enabled, we need to sync the - // revision ordinal to the appropriate expected value here. 
- if( self.skip_db_sessions( controller::block_status::irreversible ) ) - db.set_revision(head->block_num); - - int rev = 0; - while( auto obj = reversible_blocks.find(head->block_num+1) ) { - ++rev; - self.push_block( obj->get_block(), controller::block_status::validated ); - } - - ilog( "${n} reversible blocks replayed", ("n",rev) ); - auto end = fc::time_point::now(); - ilog( "replayed ${n} blocks in ${duration} seconds, ${mspb} ms/block", - ("n", head->block_num)("duration", (end-start).count()/1000000) - ("mspb", ((end-start).count()/1000.0)/head->block_num) ); - replaying = false; - replay_head_time.reset(); - + replay(); } else if( !end ) { - blog.reset_to_genesis( conf.genesis, head->block ); + blog.reset( conf.genesis, head->block ); } } @@ -307,7 +369,7 @@ struct controller_impl { ("head",head->block_num)("unconfimed", objitr->blocknum) ); } else { auto end = blog.read_head(); - EOS_ASSERT( end && end->block_num() == head->block_num, fork_database_exception, + EOS_ASSERT( !end || end->block_num() == head->block_num, fork_database_exception, "fork database exists but reversible block database does not, replay blockchain", ("blog_head",end->block_num())("head",head->block_num) ); } @@ -324,6 +386,8 @@ struct controller_impl { db.undo(); } + ilog( "database initialized with hash: ${hash}", ("hash", calculate_integrity_hash())); + } ~controller_impl() { @@ -336,22 +400,8 @@ struct controller_impl { void add_indices() { reversible_blocks.add_index(); - db.add_index(); - db.add_index(); - - db.add_index(); - db.add_index(); - db.add_index(); - db.add_index(); - db.add_index(); - db.add_index(); - db.add_index(); - - db.add_index(); - db.add_index(); - db.add_index(); - db.add_index(); - db.add_index(); + controller_index_set::add_indices(db); + contract_database_index_set::add_indices(db); authorization.add_indices(); resource_limits.add_indices(); @@ -369,6 +419,180 @@ struct controller_impl { }); } + void calculate_contract_tables_integrity_hash( 
sha256::encoder& enc ) const { + index_utils::walk(db, [this, &enc]( const table_id_object& table_row ){ + fc::raw::pack(enc, table_row); + + contract_database_index_set::walk_indices([this, &enc, &table_row]( auto utils ) { + using value_t = typename decltype(utils)::index_t::value_type; + using by_table_id = object_to_table_id_tag_t; + + auto tid_key = boost::make_tuple(table_row.id); + auto next_tid_key = boost::make_tuple(table_id_object::id_type(table_row.id._id + 1)); + decltype(utils)::template walk_range(db, tid_key, next_tid_key, [&enc](const auto& row){ + fc::raw::pack(enc, row); + }); + }); + }); + } + + void add_contract_tables_to_snapshot( const snapshot_writer_ptr& snapshot ) const { + snapshot->write_section("contract_tables", [this]( auto& section ) { + index_utils::walk(db, [this, §ion]( const table_id_object& table_row ){ + // add a row for the table + section.add_row(table_row, db); + + // followed by a size row and then N data rows for each type of table + contract_database_index_set::walk_indices([this, §ion, &table_row]( auto utils ) { + using utils_t = decltype(utils); + using value_t = typename decltype(utils)::index_t::value_type; + using by_table_id = object_to_table_id_tag_t; + + auto tid_key = boost::make_tuple(table_row.id); + auto next_tid_key = boost::make_tuple(table_id_object::id_type(table_row.id._id + 1)); + + unsigned_int size = utils_t::template size_range(db, tid_key, next_tid_key); + section.add_row(size, db); + + utils_t::template walk_range(db, tid_key, next_tid_key, [this, §ion]( const auto &row ) { + section.add_row(row, db); + }); + }); + }); + }); + } + + void read_contract_tables_from_snapshot( const snapshot_reader_ptr& snapshot ) { + snapshot->read_section("contract_tables", [this]( auto& section ) { + bool more = !section.empty(); + while (more) { + // read the row for the table + table_id_object::id_type t_id; + index_utils::create(db, [this, §ion, &t_id](auto& row) { + section.read_row(row, db); + t_id = row.id; + 
}); + + // read the size and data rows for each type of table + contract_database_index_set::walk_indices([this, §ion, &t_id, &more](auto utils) { + using utils_t = decltype(utils); + + unsigned_int size; + more = section.read_row(size, db); + + for (size_t idx = 0; idx < size.value; idx++) { + utils_t::create(db, [this, §ion, &more, &t_id](auto& row) { + row.t_id = t_id; + more = section.read_row(row, db); + }); + } + }); + } + }); + } + + sha256 calculate_integrity_hash() const { + sha256::encoder enc; + controller_index_set::walk_indices([this, &enc]( auto utils ){ + using value_t = typename decltype(utils)::index_t::value_type; + + // skip the table_id_object as its inlined with contract tables section + if (std::is_same::value) { + return; + } + + decltype(utils)::walk(db, [&enc]( const auto &row ) { + fc::raw::pack(enc, row); + }); + }); + + calculate_contract_tables_integrity_hash(enc); + + authorization.calculate_integrity_hash(enc); + resource_limits.calculate_integrity_hash(enc); + return enc.result(); + } + + void add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { + snapshot->write_section([this]( auto §ion ){ + section.add_row(chain_snapshot_header(), db); + }); + + snapshot->write_section([this]( auto §ion ){ + section.add_row(conf.genesis, db); + }); + + snapshot->write_section([this]( auto §ion ){ + section.template add_row(*fork_db.head(), db); + }); + + controller_index_set::walk_indices([this, &snapshot]( auto utils ){ + using value_t = typename decltype(utils)::index_t::value_type; + + // skip the table_id_object as its inlined with contract tables section + if (std::is_same::value) { + return; + } + + snapshot->write_section([this]( auto& section ){ + decltype(utils)::walk(db, [this, §ion]( const auto &row ) { + section.add_row(row, db); + }); + }); + }); + + add_contract_tables_to_snapshot(snapshot); + + authorization.add_to_snapshot(snapshot); + resource_limits.add_to_snapshot(snapshot); + } + + void read_from_snapshot( const 
snapshot_reader_ptr& snapshot ) { + snapshot->read_section([this]( auto §ion ){ + chain_snapshot_header header; + section.read_row(header, db); + header.validate(); + }); + + + snapshot->read_section([this]( auto §ion ){ + block_header_state head_header_state; + section.read_row(head_header_state, db); + + auto head_state = std::make_shared(head_header_state); + fork_db.set(head_state); + fork_db.set_validity(head_state, true); + fork_db.mark_in_current_chain(head_state, true); + head = head_state; + snapshot_head_block = head->block_num; + }); + + controller_index_set::walk_indices([this, &snapshot]( auto utils ){ + using value_t = typename decltype(utils)::index_t::value_type; + + // skip the table_id_object as its inlined with contract tables section + if (std::is_same::value) { + return; + } + + snapshot->read_section([this]( auto& section ) { + bool more = !section.empty(); + while(more) { + decltype(utils)::create(db, [this, §ion, &more]( auto &row ) { + more = section.read_row(row, db); + }); + } + }); + }); + + read_contract_tables_from_snapshot(snapshot); + + authorization.read_from_snapshot(snapshot); + resource_limits.read_from_snapshot(snapshot); + + db.set_revision( head->block_num ); + } + /** * Sets fork database head to the genesis state. 
*/ @@ -1392,17 +1616,19 @@ void controller::add_indices() { my->add_indices(); } -void controller::startup() { +void controller::startup( const snapshot_reader_ptr& snapshot ) { my->head = my->fork_db.head(); if( !my->head ) { elog( "No head block in fork db, perhaps we need to replay" ); } - my->init(); + my->init(snapshot); } -chainbase::database& controller::db()const { return my->db; } +const chainbase::database& controller::db()const { return my->db; } + +chainbase::database& controller::mutable_db()const { return my->db; } -fork_database& controller::fork_db()const { return my->fork_db; } +const fork_database& controller::fork_db()const { return my->fork_db; } void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count) { @@ -1545,7 +1771,7 @@ optional controller::pending_producer_block_id()const { } uint32_t controller::last_irreversible_block_num() const { - return std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum); + return std::max(std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum), my->snapshot_head_block); } block_id_type controller::last_irreversible_block_id() const { @@ -1568,7 +1794,7 @@ const global_property_object& controller::get_global_properties()const { signed_block_ptr controller::fetch_block_by_id( block_id_type id )const { auto state = my->fork_db.get_block(id); - if( state ) return state->block; + if( state && state->block ) return state->block; auto bptr = fetch_block_by_number( block_header::num_from_id(id) ); if( bptr && bptr->id() == id ) return bptr; return signed_block_ptr(); @@ -1576,7 +1802,7 @@ signed_block_ptr controller::fetch_block_by_id( block_id_type id )const { signed_block_ptr controller::fetch_block_by_number( uint32_t block_num )const { try { auto blk_state = my->fork_db.get_block_in_current_chain_by_num( block_num ); - if( blk_state ) { + if( blk_state && blk_state->block ) { return blk_state->block; } @@ -1607,6 +1833,15 @@ 
block_id_type controller::get_block_id_for_num( uint32_t block_num )const { try return signed_blk->id(); } FC_CAPTURE_AND_RETHROW( (block_num) ) } +sha256 controller::calculate_integrity_hash()const { try { + return my->calculate_integrity_hash(); +} FC_LOG_AND_RETHROW() } + +void controller::write_snapshot( const snapshot_writer_ptr& snapshot ) const { + EOS_ASSERT( !my->pending, block_validate_exception, "cannot take a consistent snapshot with a pending block" ); + return my->add_to_snapshot(snapshot); +} + void controller::pop_block() { my->pop_block(); } diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index a8e9ba3e520..221424e041e 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace eosio { namespace chain { @@ -17,8 +18,13 @@ using std::pair; using namespace fc; namespace impl { - struct abi_from_variant; - struct abi_to_variant; + struct abi_from_variant; + struct abi_to_variant; + + struct abi_traverse_context; + struct abi_traverse_context_with_path; + struct binary_to_variant_context; + struct variant_to_binary_context; } /** @@ -33,9 +39,7 @@ struct abi_serializer { type_name resolve_type(const type_name& t)const; bool is_array(const type_name& type)const; bool is_optional(const type_name& type)const; - bool is_type(const type_name& type, const fc::microseconds& max_serialization_time)const { - return _is_type(type, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); - } + bool is_type(const type_name& type, const fc::microseconds& max_serialization_time)const; bool is_builtin_type(const type_name& type)const; bool is_integer(const type_name& type) const; int get_integer_size(const type_name& type) const; @@ -49,19 +53,11 @@ struct abi_serializer { optional get_error_message( uint64_t error_code )const; - fc::variant 
binary_to_variant(const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time)const { - return _binary_to_variant(type, binary, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); - } - bytes variant_to_binary(const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time)const { - return _variant_to_binary(type, var, true, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); - } + fc::variant binary_to_variant( const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time, bool short_path = false )const; + fc::variant binary_to_variant( const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time, bool short_path = false )const; - fc::variant binary_to_variant(const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time)const { - return _binary_to_variant(type, binary, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); - } - void variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time)const { - _variant_to_binary(type, var, ds, true, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); - } + bytes variant_to_binary( const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time, bool short_path = false )const; + void variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time, bool short_path = false )const; template static void to_variant( const T& o, fc::variant& vo, Resolver resolver, const fc::microseconds& max_serialization_time ); @@ -105,29 +101,135 @@ struct abi_serializer { map> built_in_types; void configure_built_in_types(); - fc::variant _binary_to_variant(const type_name& type, const bytes& binary, - size_t recursion_depth, 
const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; - bytes _variant_to_binary(const type_name& type, const fc::variant& var, bool allow_extensions, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; - - fc::variant _binary_to_variant(const type_name& type, fc::datastream& binary, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; - void _variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, bool allow_extensions, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; + fc::variant _binary_to_variant( const type_name& type, const bytes& binary, impl::binary_to_variant_context& ctx )const; + fc::variant _binary_to_variant( const type_name& type, fc::datastream& binary, impl::binary_to_variant_context& ctx )const; + void _binary_to_variant( const type_name& type, fc::datastream& stream, + fc::mutable_variant_object& obj, impl::binary_to_variant_context& ctx )const; - void _binary_to_variant(const type_name& type, fc::datastream& stream, fc::mutable_variant_object& obj, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; + bytes _variant_to_binary( const type_name& type, const fc::variant& var, impl::variant_to_binary_context& ctx )const; + void _variant_to_binary( const type_name& type, const fc::variant& var, + fc::datastream& ds, impl::variant_to_binary_context& ctx )const; static type_name _remove_bin_extension(const type_name& type); - bool _is_type(const type_name& type, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; + bool _is_type( const type_name& type, impl::abi_traverse_context& ctx )const; - void validate(const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; + void 
validate( impl::abi_traverse_context& ctx )const; friend struct impl::abi_from_variant; friend struct impl::abi_to_variant; + friend struct impl::abi_traverse_context_with_path; }; namespace impl { + + struct abi_traverse_context { + abi_traverse_context( fc::microseconds max_serialization_time ) + : max_serialization_time( max_serialization_time ), deadline( fc::time_point::now() + max_serialization_time ), recursion_depth(0) + {} + + abi_traverse_context( fc::microseconds max_serialization_time, fc::time_point deadline ) + : max_serialization_time( max_serialization_time ), deadline( deadline ), recursion_depth(0) + {} + + void check_deadline()const; + + fc::scoped_exit> enter_scope(); + + protected: + fc::microseconds max_serialization_time; + fc::time_point deadline; + size_t recursion_depth; + }; + + struct empty_path_root {}; + + struct array_type_path_root { + }; + + struct struct_type_path_root { + map::const_iterator struct_itr; + }; + + struct variant_type_path_root { + map::const_iterator variant_itr; + }; + + using path_root = static_variant; + + struct empty_path_item {}; + + struct array_index_path_item { + path_root type_hint; + uint32_t array_index = 0; + }; + + struct field_path_item { + map::const_iterator parent_struct_itr; + uint32_t field_ordinal = 0; + }; + + struct variant_path_item { + map::const_iterator variant_itr; + uint32_t variant_ordinal = 0; + }; + + using path_item = static_variant; + + struct abi_traverse_context_with_path : public abi_traverse_context { + abi_traverse_context_with_path( const abi_serializer& abis, fc::microseconds max_serialization_time, const type_name& type ) + : abi_traverse_context( max_serialization_time ), abis(abis) + { + set_path_root(type); + } + + abi_traverse_context_with_path( const abi_serializer& abis, fc::microseconds max_serialization_time, fc::time_point deadline, const type_name& type ) + : abi_traverse_context( max_serialization_time, deadline ), abis(abis) + { + set_path_root(type); + } + + 
abi_traverse_context_with_path( const abi_serializer& abis, const abi_traverse_context& ctx, const type_name& type ) + : abi_traverse_context(ctx), abis(abis) + { + set_path_root(type); + } + + void set_path_root( const type_name& type ); + + fc::scoped_exit> push_to_path( const path_item& item ); + + void set_array_index_of_path_back( uint32_t i ); + void hint_array_type_if_in_array(); + void hint_struct_type_if_in_array( const map::const_iterator& itr ); + void hint_variant_type_if_in_array( const map::const_iterator& itr ); + + string get_path_string()const; + + string maybe_shorten( const string& str ); + + protected: + const abi_serializer& abis; + path_root root_of_path; + vector path; + public: + bool short_path = false; + }; + + struct binary_to_variant_context : public abi_traverse_context_with_path { + using abi_traverse_context_with_path::abi_traverse_context_with_path; + }; + + struct variant_to_binary_context : public abi_traverse_context_with_path { + using abi_traverse_context_with_path::abi_traverse_context_with_path; + + fc::scoped_exit> disallow_extensions_unless( bool condition ); + + bool extensions_allowed()const { return allow_extensions; } + + protected: + bool allow_extensions = true; + }; + /** * Determine if a type contains ABI related info, perhaps deeply nested * @tparam T - the type to check @@ -187,11 +289,9 @@ namespace impl { * and can be degraded to the normal ::to_variant(...) 
processing */ template = 1> - static void add( mutable_variant_object &mvo, const char* name, const M& v, Resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &mvo, const char* name, const M& v, Resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); mvo(name,v); } @@ -200,25 +300,22 @@ namespace impl { * for these types we create new ABI aware visitors */ template = 1> - static void add( mutable_variant_object &mvo, const char* name, const M& v, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ); + static void add( mutable_variant_object &mvo, const char* name, const M& v, Resolver resolver, abi_traverse_context& ctx ); /** * template which overloads add for vectors of types which contain ABI information in their trees * for these members we call ::add in order to trigger further processing */ template = 1> - static void add( mutable_variant_object &mvo, const char* name, const vector& v, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &mvo, const char* name, const vector& v, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, 
abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); vector array; array.reserve(v.size()); for (const auto& iter: v) { mutable_variant_object elem_mvo; - add(elem_mvo, "_", iter, resolver, recursion_depth, deadline, max_serialization_time); + add(elem_mvo, "_", iter, resolver, ctx); array.emplace_back(std::move(elem_mvo["_"])); } mvo(name, std::move(array)); @@ -229,14 +326,12 @@ namespace impl { * for these members we call ::add in order to trigger further processing */ template = 1> - static void add( mutable_variant_object &mvo, const char* name, const std::shared_ptr& v, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &mvo, const char* name, const std::shared_ptr& v, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); if( !v ) return; mutable_variant_object obj_mvo; - add(obj_mvo, "_", *v, resolver, recursion_depth, deadline, max_serialization_time); + add(obj_mvo, "_", *v, resolver, ctx); mvo(name, std::move(obj_mvo["_"])); } @@ -245,27 +340,24 @@ namespace impl { { mutable_variant_object& obj_mvo; Resolver& resolver; - size_t recursion_depth; - fc::time_point deadline; - fc::microseconds max_serialization_time; - add_static_variant( mutable_variant_object& o, Resolver& r, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) - :obj_mvo(o), resolver(r), recursion_depth(recursion_depth), deadline(deadline), 
max_serialization_time(max_serialization_time){} + abi_traverse_context& ctx; + + add_static_variant( mutable_variant_object& o, Resolver& r, abi_traverse_context& ctx ) + :obj_mvo(o), resolver(r), ctx(ctx) {} typedef void result_type; template void operator()( T& v )const { - add(obj_mvo, "_", v, resolver, recursion_depth, deadline, max_serialization_time); + add(obj_mvo, "_", v, resolver, ctx); } }; template - static void add( mutable_variant_object &mvo, const char* name, const fc::static_variant& v, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &mvo, const char* name, const fc::static_variant& v, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); mutable_variant_object obj_mvo; - add_static_variant adder(obj_mvo, resolver, recursion_depth, deadline, max_serialization_time); + add_static_variant adder(obj_mvo, resolver, ctx); v.visit(adder); mvo(name, std::move(obj_mvo["_"])); } @@ -278,11 +370,9 @@ namespace impl { * @return */ template - static void add( mutable_variant_object &out, const char* name, const action& act, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &out, const char* name, const action& act, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", 
abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); mutable_variant_object mvo; mvo("account", act.account); mvo("name", act.name); @@ -294,7 +384,9 @@ namespace impl { auto type = abi->get_action_type(act.name); if (!type.empty()) { try { - mvo( "data", abi->_binary_to_variant( type, act.data, recursion_depth, deadline, max_serialization_time )); + binary_to_variant_context _ctx(*abi, ctx, type); + _ctx.short_path = true; // Just to be safe while avoiding the complexity of threading an override boolean all over the place + mvo( "data", abi->_binary_to_variant( type, act.data, _ctx )); mvo("hex_data", act.data); } catch(...) { // any failure to serialize data, then leave as not serailzed @@ -320,11 +412,9 @@ namespace impl { * @return */ template - static void add( mutable_variant_object &out, const char* name, const packed_transaction& ptrx, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &out, const char* name, const packed_transaction& ptrx, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); mutable_variant_object mvo; auto trx = ptrx.get_transaction(); mvo("id", trx.id()); @@ -333,7 +423,7 @@ namespace impl { mvo("packed_context_free_data", ptrx.packed_context_free_data); mvo("context_free_data", ptrx.get_context_free_data()); mvo("packed_trx", ptrx.packed_trx); - add(mvo, 
"transaction", trx, resolver, recursion_depth, deadline, max_serialization_time); + add(mvo, "transaction", trx, resolver, ctx); out(name, std::move(mvo)); } @@ -350,14 +440,11 @@ namespace impl { class abi_to_variant_visitor { public: - abi_to_variant_visitor( mutable_variant_object& _mvo, const T& _val, Resolver _resolver, - size_t _recursion_depth, const fc::time_point& _deadline, const fc::microseconds& max_serialization_time ) + abi_to_variant_visitor( mutable_variant_object& _mvo, const T& _val, Resolver _resolver, abi_traverse_context& _ctx ) :_vo(_mvo) ,_val(_val) ,_resolver(_resolver) - ,_recursion_depth(_recursion_depth) - ,_deadline(_deadline) - ,_max_serialization_time(max_serialization_time) + ,_ctx(_ctx) {} /** @@ -370,16 +457,14 @@ namespace impl { template void operator()( const char* name )const { - abi_to_variant::add( _vo, name, (_val.*member), _resolver, _recursion_depth, _deadline, _max_serialization_time ); + abi_to_variant::add( _vo, name, (_val.*member), _resolver, _ctx ); } private: mutable_variant_object& _vo; const T& _val; Resolver _resolver; - size_t _recursion_depth; - fc::time_point _deadline; - fc::microseconds _max_serialization_time; + abi_traverse_context& _ctx; }; struct abi_from_variant { @@ -388,11 +473,9 @@ namespace impl { * and can be degraded to the normal ::from_variant(...) 
processing */ template = 1> - static void extract( const variant& v, M& o, Resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void extract( const variant& v, M& o, Resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); from_variant(v, o); } @@ -401,25 +484,22 @@ namespace impl { * for these types we create new ABI aware visitors */ template = 1> - static void extract( const variant& v, M& o, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ); + static void extract( const variant& v, M& o, Resolver resolver, abi_traverse_context& ctx ); /** * template which overloads extract for vectors of types which contain ABI information in their trees * for these members we call ::extract in order to trigger further processing */ template = 1> - static void extract( const variant& v, vector& o, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void extract( const variant& v, vector& o, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); const variants& array = v.get_array(); o.clear(); o.reserve( 
array.size() ); for( auto itr = array.begin(); itr != array.end(); ++itr ) { M o_iter; - extract(*itr, o_iter, resolver, recursion_depth, deadline, max_serialization_time); + extract(*itr, o_iter, resolver, ctx); o.emplace_back(std::move(o_iter)); } } @@ -429,14 +509,12 @@ namespace impl { * for these members we call ::extract in order to trigger further processing */ template = 1> - static void extract( const variant& v, std::shared_ptr& o, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void extract( const variant& v, std::shared_ptr& o, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); const variant_object& vo = v.get_object(); M obj; - extract(vo, obj, resolver, recursion_depth, deadline, max_serialization_time); + extract(vo, obj, resolver, ctx); o = std::make_shared(obj); } @@ -446,11 +524,9 @@ namespace impl { * exploded and processed explicitly */ template - static void extract( const variant& v, action& act, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void extract( const variant& v, action& act, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); 
+ auto h = ctx.enter_scope(); const variant_object& vo = v.get_object(); EOS_ASSERT(vo.contains("account"), packed_transaction_type_exception, "Missing account"); EOS_ASSERT(vo.contains("name"), packed_transaction_type_exception, "Missing name"); @@ -472,7 +548,9 @@ namespace impl { if (abi.valid()) { auto type = abi->get_action_type(act.name); if (!type.empty()) { - act.data = std::move( abi->_variant_to_binary( type, data, true, recursion_depth, deadline, max_serialization_time )); + variant_to_binary_context _ctx(*abi, ctx, type); + _ctx.short_path = true; // Just to be safe while avoiding the complexity of threading an override boolean all over the place + act.data = std::move( abi->_variant_to_binary( type, data, _ctx )); valid_empty_data = act.data.empty(); } } @@ -493,11 +571,9 @@ namespace impl { } template - static void extract( const variant& v, packed_transaction& ptrx, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void extract( const variant& v, packed_transaction& ptrx, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); const variant_object& vo = v.get_object(); EOS_ASSERT(vo.contains("signatures"), packed_transaction_type_exception, "Missing signatures"); EOS_ASSERT(vo.contains("compression"), packed_transaction_type_exception, "Missing compression"); @@ -519,7 +595,7 @@ namespace impl { EOS_ASSERT(vo.contains("transaction"), packed_transaction_type_exception, "Missing transaction"); transaction trx; vector context_free_data; - extract(vo["transaction"], trx, resolver, 
recursion_depth, deadline, max_serialization_time); + extract(vo["transaction"], trx, resolver, ctx); if( vo.contains("packed_context_free_data") && vo["packed_context_free_data"].is_string() && !vo["packed_context_free_data"].as_string().empty() ) { from_variant(vo["packed_context_free_data"], ptrx.packed_context_free_data ); context_free_data = ptrx.get_context_free_data(); @@ -542,14 +618,11 @@ namespace impl { class abi_from_variant_visitor : reflector_verifier_visitor { public: - abi_from_variant_visitor( const variant_object& _vo, T& v, Resolver _resolver, - size_t _recursion_depth, const fc::time_point& _deadline, const fc::microseconds& max_serialization_time ) + abi_from_variant_visitor( const variant_object& _vo, T& v, Resolver _resolver, abi_traverse_context& _ctx ) : reflector_verifier_visitor(v) ,_vo(_vo) ,_resolver(_resolver) - ,_recursion_depth(_recursion_depth) - ,_deadline(_deadline) - ,_max_serialization_time(max_serialization_time) + ,_ctx(_ctx) {} /** @@ -564,49 +637,45 @@ namespace impl { { auto itr = _vo.find(name); if( itr != _vo.end() ) - abi_from_variant::extract( itr->value(), this->obj.*member, _resolver, _recursion_depth, _deadline, _max_serialization_time ); + abi_from_variant::extract( itr->value(), this->obj.*member, _resolver, _ctx ); } private: const variant_object& _vo; Resolver _resolver; - size_t _recursion_depth; - fc::time_point _deadline; - fc::microseconds _max_serialization_time; + abi_traverse_context& _ctx; }; template> - void abi_to_variant::add( mutable_variant_object &mvo, const char* name, const M& v, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + void abi_to_variant::add( mutable_variant_object &mvo, const char* name, const M& v, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", 
abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); mutable_variant_object member_mvo; - fc::reflector::visit( impl::abi_to_variant_visitor( member_mvo, v, resolver, recursion_depth, deadline, max_serialization_time ) ); + fc::reflector::visit( impl::abi_to_variant_visitor( member_mvo, v, resolver, ctx) ); mvo(name, std::move(member_mvo)); } template> - void abi_from_variant::extract( const variant& v, M& o, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + void abi_from_variant::extract( const variant& v, M& o, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); const variant_object& vo = v.get_object(); - fc::reflector::visit( abi_from_variant_visitor( vo, o, resolver, recursion_depth, deadline, max_serialization_time ) ); + fc::reflector::visit( abi_from_variant_visitor( vo, o, resolver, ctx ) ); } -} +} /// namespace eosio::chain::impl template void abi_serializer::to_variant( const T& o, variant& vo, Resolver resolver, const fc::microseconds& max_serialization_time ) try { mutable_variant_object mvo; - impl::abi_to_variant::add(mvo, "_", o, resolver, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); + impl::abi_traverse_context ctx(max_serialization_time); + impl::abi_to_variant::add(mvo, "_", o, resolver, ctx); vo = std::move(mvo["_"]); } FC_RETHROW_EXCEPTIONS(error, "Failed to serialize type", 
("object",o)) template void abi_serializer::from_variant( const variant& v, T& o, Resolver resolver, const fc::microseconds& max_serialization_time ) try { - impl::abi_from_variant::extract(v, o, resolver, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); + impl::abi_traverse_context ctx(max_serialization_time); + impl::abi_from_variant::extract(v, o, resolver, ctx); } FC_RETHROW_EXCEPTIONS(error, "Failed to deserialize variant", ("variant",v)) diff --git a/libraries/chain/include/eosio/chain/account_object.hpp b/libraries/chain/include/eosio/chain/account_object.hpp index e9c7885aea9..8a945512b3c 100644 --- a/libraries/chain/include/eosio/chain/account_object.hpp +++ b/libraries/chain/include/eosio/chain/account_object.hpp @@ -3,7 +3,7 @@ * @copyright defined in eos/LICENSE.txt */ #pragma once -#include +#include #include #include #include @@ -25,8 +25,8 @@ namespace eosio { namespace chain { digest_type code_version; block_timestamp_type creation_date; - shared_string code; - shared_string abi; + shared_blob code; + shared_blob abi; void set_abi( const eosio::chain::abi_def& a ) { abi.resize( fc::raw::pack_size( a ) ); @@ -81,4 +81,5 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_object, eosio::chain::account_ind CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_sequence_object, eosio::chain::account_sequence_index) -FC_REFLECT(eosio::chain::account_object, (name)(vm_type)(vm_version)(code_version)(code)(creation_date)) +FC_REFLECT(eosio::chain::account_object, (name)(vm_type)(vm_version)(privileged)(last_code_update)(code_version)(creation_date)(code)(abi)) +FC_REFLECT(eosio::chain::account_sequence_object, (name)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 8a4f98a7caa..a253d950358 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ 
b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -453,7 +453,7 @@ class apply_context { public: apply_context(controller& con, transaction_context& trx_ctx, const action& a, uint32_t depth=0) :control(con) - ,db(con.db()) + ,db(con.mutable_db()) ,trx_context(trx_ctx) ,act(a) ,receiver(act.account) @@ -472,8 +472,8 @@ class apply_context { /// Execution methods: public: - action_trace exec_one(); - void exec(); + void exec_one( action_trace& trace ); + void exec( action_trace& trace ); void execute_inline( action&& a ); void execute_context_free_inline( action&& a ); void schedule_deferred_transaction( const uint128_t& sender_id, account_name payer, transaction&& trx, bool replace_existing ); @@ -573,6 +573,7 @@ class apply_context { uint64_t next_auth_sequence( account_name actor ); void add_ram_usage( account_name account, int64_t ram_delta ); + void finalize_trace( action_trace& trace, const fc::time_point& start ); private: @@ -600,8 +601,6 @@ class apply_context { generic_index idx_double; generic_index idx_long_double; - action_trace trace; - private: iterator_cache keyval_cache; diff --git a/libraries/chain/include/eosio/chain/authorization_manager.hpp b/libraries/chain/include/eosio/chain/authorization_manager.hpp index e244475b996..52f211de374 100644 --- a/libraries/chain/include/eosio/chain/authorization_manager.hpp +++ b/libraries/chain/include/eosio/chain/authorization_manager.hpp @@ -6,6 +6,7 @@ #include #include +#include #include #include @@ -27,6 +28,9 @@ namespace eosio { namespace chain { void add_indices(); void initialize_database(); + void calculate_integrity_hash( fc::sha256::encoder& enc ) const; + void add_to_snapshot( const snapshot_writer_ptr& snapshot ) const; + void read_from_snapshot( const snapshot_reader_ptr& snapshot ); const permission_object& create_permission( account_name account, permission_name name, diff --git a/libraries/chain/include/eosio/chain/block_log.hpp b/libraries/chain/include/eosio/chain/block_log.hpp index 
95560f94789..24ff0ad2835 100644 --- a/libraries/chain/include/eosio/chain/block_log.hpp +++ b/libraries/chain/include/eosio/chain/block_log.hpp @@ -11,10 +11,10 @@ namespace eosio { namespace chain { namespace detail { class block_log_impl; } - /* The block log is an external append only log of the blocks. Blocks should only be written - * to the log after they irreverisble as the log is append only. The log is a doubly linked - * list of blocks. There is a secondary index file of only block positions that enables O(1) - * random access lookup by block number. + /* The block log is an external append only log of the blocks with a header. Blocks should only + * be written to the log after they irreverisble as the log is append only. The log is a doubly + * linked list of blocks. There is a secondary index file of only block positions that enables + * O(1) random access lookup by block number. * * +---------+----------------+---------+----------------+-----+------------+-------------------+ * | Block 1 | Pos of Block 1 | Block 2 | Pos of Block 2 | ... 
| Head Block | Pos of Head Block | @@ -44,7 +44,7 @@ namespace eosio { namespace chain { uint64_t append(const signed_block_ptr& b); void flush(); - uint64_t reset_to_genesis( const genesis_state& gs, const signed_block_ptr& genesis_block ); + void reset( const genesis_state& gs, const signed_block_ptr& genesis_block, uint32_t first_block_num = 1 ); std::pair read_block(uint64_t file_pos)const; signed_block_ptr read_block_by_num(uint32_t block_num)const; @@ -58,10 +58,12 @@ namespace eosio { namespace chain { uint64_t get_block_pos(uint32_t block_num) const; signed_block_ptr read_head()const; const signed_block_ptr& head()const; + uint32_t first_block_num() const; static const uint64_t npos = std::numeric_limits::max(); - static const uint32_t supported_version; + static const uint32_t min_supported_version; + static const uint32_t max_supported_version; static fc::path repair_log( const fc::path& data_dir, uint32_t truncate_at_block = 0 ); diff --git a/libraries/chain/include/eosio/chain/chain_snapshot.hpp b/libraries/chain/include/eosio/chain/chain_snapshot.hpp new file mode 100644 index 00000000000..7174e69c5b9 --- /dev/null +++ b/libraries/chain/include/eosio/chain/chain_snapshot.hpp @@ -0,0 +1,34 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#pragma once + +#include + +namespace eosio { namespace chain { + +struct chain_snapshot_header { + /** + * Version history + * 1: initial version + */ + + static constexpr uint32_t minimum_compatible_version = 1; + static constexpr uint32_t current_version = 1; + + uint32_t version = current_version; + + void validate() const { + auto min = minimum_compatible_version; + auto max = current_version; + EOS_ASSERT(version >= min && version <= max, + snapshot_validation_exception, + "Unsupported version of chain snapshot: ${version}. 
Supported version must be between ${min} and ${max} inclusive.", + ("version",version)("min",min)("max",max)); + } +}; + +} } + +FC_REFLECT(eosio::chain::chain_snapshot_header,(version)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/config.hpp b/libraries/chain/include/eosio/chain/config.hpp index 9e1dcd0b073..c0e9806319e 100644 --- a/libraries/chain/include/eosio/chain/config.hpp +++ b/libraries/chain/include/eosio/chain/config.hpp @@ -93,7 +93,7 @@ const static uint32_t setcode_ram_bytes_multiplier = 10; ///< multip const static uint32_t hashing_checktime_block_size = 10*1024; /// call checktime from hashing intrinsic once per this number of bytes -const static eosio::chain::wasm_interface::vm_type default_wasm_runtime = eosio::chain::wasm_interface::vm_type::binaryen; +const static eosio::chain::wasm_interface::vm_type default_wasm_runtime = eosio::chain::wasm_interface::vm_type::wabt; const static uint32_t default_abi_serializer_max_time_ms = 15*1000; ///< default deadline for abi serialization methods /** diff --git a/libraries/chain/include/eosio/chain/contract_table_objects.hpp b/libraries/chain/include/eosio/chain/contract_table_objects.hpp index bc2fff140c4..b3428340823 100644 --- a/libraries/chain/include/eosio/chain/contract_table_objects.hpp +++ b/libraries/chain/include/eosio/chain/contract_table_objects.hpp @@ -4,11 +4,9 @@ */ #pragma once +#include #include #include -#include - -#include #include #include @@ -64,7 +62,7 @@ namespace eosio { namespace chain { table_id t_id; uint64_t primary_key; account_name payer = 0; - shared_string value; + shared_blob value; }; using key_value_index = chainbase::shared_multi_index_container< @@ -160,6 +158,29 @@ namespace eosio { namespace chain { typedef secondary_index::index_object index_long_double_object; typedef secondary_index::index_index index_long_double_index; + /** + * helper template to map from an index type to the best tag + * to use when traversing by table_id + */ + 
template + struct object_to_table_id_tag; + +#define DECLARE_TABLE_ID_TAG( object, tag ) \ + template<> \ + struct object_to_table_id_tag { \ + using tag_type = tag;\ + }; + + DECLARE_TABLE_ID_TAG(key_value_object, by_scope_primary) + DECLARE_TABLE_ID_TAG(index64_object, by_primary) + DECLARE_TABLE_ID_TAG(index128_object, by_primary) + DECLARE_TABLE_ID_TAG(index256_object, by_primary) + DECLARE_TABLE_ID_TAG(index_double_object, by_primary) + DECLARE_TABLE_ID_TAG(index_long_double_object, by_primary) + + template + using object_to_table_id_tag_t = typename object_to_table_id_tag::tag_type; + namespace config { template<> struct billable_size { @@ -216,5 +237,14 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::index256_object, eosio::chain::index256_i CHAINBASE_SET_INDEX_TYPE(eosio::chain::index_double_object, eosio::chain::index_double_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::index_long_double_object, eosio::chain::index_long_double_index) -FC_REFLECT(eosio::chain::table_id_object, (id)(code)(scope)(table) ) -FC_REFLECT(eosio::chain::key_value_object, (id)(t_id)(primary_key)(value)(payer) ) +FC_REFLECT(eosio::chain::table_id_object, (code)(scope)(table)(payer)(count) ) +FC_REFLECT(eosio::chain::key_value_object, (primary_key)(payer)(value) ) + +#define REFLECT_SECONDARY(type)\ + FC_REFLECT(type, (primary_key)(payer)(secondary_key) ) + +REFLECT_SECONDARY(eosio::chain::index64_object) +REFLECT_SECONDARY(eosio::chain::index128_object) +REFLECT_SECONDARY(eosio::chain::index256_object) +REFLECT_SECONDARY(eosio::chain::index_double_object) +REFLECT_SECONDARY(eosio::chain::index_long_double_object) diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 6b5cf3b613f..ec7b53fafc0 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -6,6 +6,7 @@ #include #include +#include namespace chainbase { class database; @@ -88,7 +89,7 @@ namespace 
eosio { namespace chain { ~controller(); void add_indices(); - void startup(); + void startup( const snapshot_reader_ptr& snapshot = nullptr ); /** * Starts a new pending block session upon which new transactions can @@ -146,14 +147,13 @@ namespace eosio { namespace chain { */ void push_confirmation( const header_confirmation& c ); - chainbase::database& db()const; + const chainbase::database& db()const; - fork_database& fork_db()const; + const fork_database& fork_db()const; const account_object& get_account( account_name n )const; const global_property_object& get_global_properties()const; const dynamic_global_property_object& get_dynamic_global_properties()const; - const permission_object& get_permission( const permission_level& level )const; const resource_limits_manager& get_resource_limits_manager()const; resource_limits_manager& get_mutable_resource_limits_manager(); const authorization_manager& get_authorization_manager()const; @@ -204,6 +204,9 @@ namespace eosio { namespace chain { block_id_type get_block_id_for_num( uint32_t block_num )const; + sha256 calculate_integrity_hash()const; + void write_snapshot( const snapshot_writer_ptr& snapshot )const; + void check_contract_list( account_name code )const; void check_action_list( account_name code, action_name action )const; void check_key_list( const public_key_type& key )const; @@ -286,6 +289,10 @@ namespace eosio { namespace chain { } private: + friend class apply_context; + friend class transaction_context; + + chainbase::database& mutable_db()const; std::unique_ptr my; diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp new file mode 100644 index 00000000000..e2b0c0d487f --- /dev/null +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -0,0 +1,219 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#pragma once + +#include +#include +#include + +namespace eosio { namespace chain { + + template + class 
index_set; + + template + class index_utils { + public: + using index_t = Index; + + template + static void walk( const chainbase::database& db, F function ) { + auto const& index = db.get_index().indices(); + const auto& first = index.begin(); + const auto& last = index.end(); + for (auto itr = first; itr != last; ++itr) { + function(*itr); + } + } + + template + static void walk_range( const chainbase::database& db, const Key& begin_key, const Key& end_key, F function ) { + const auto& idx = db.get_index(); + auto begin_itr = idx.lower_bound(begin_key); + auto end_itr = idx.lower_bound(end_key); + for (auto itr = begin_itr; itr != end_itr; ++itr) { + function(*itr); + } + } + + template + static size_t size_range( const chainbase::database& db, const Key& begin_key, const Key& end_key ) { + const auto& idx = db.get_index(); + auto begin_itr = idx.lower_bound(begin_key); + auto end_itr = idx.lower_bound(end_key); + size_t res = 0; + while (begin_itr != end_itr) { + res++; ++begin_itr; + } + return res; + } + + template + static void create( chainbase::database& db, F cons ) { + db.create(cons); + } + }; + + template + class index_set { + public: + static void add_indices( chainbase::database& db ) { + db.add_index(); + } + + template + static void walk_indices( F function ) { + function( index_utils() ); + } + }; + + template + class index_set { + public: + static void add_indices( chainbase::database& db ) { + index_set::add_indices(db); + index_set::add_indices(db); + } + + template + static void walk_indices( F function ) { + index_set::walk_indices(function); + index_set::walk_indices(function); + } + }; + + template + DataStream& operator << ( DataStream& ds, const shared_blob& b ) { + fc::raw::pack(ds, static_cast(b)); + return ds; + } + + template + DataStream& operator >> ( DataStream& ds, shared_blob& b ) { + fc::raw::unpack(ds, static_cast(b)); + return ds; + } +} } + +namespace fc { + + // overloads for to/from_variant + template + void to_variant( 
const chainbase::oid& oid, variant& v ) { + v = variant(oid._id); + } + + template + void from_variant( const variant& v, chainbase::oid& oid ) { + from_variant(v, oid._id); + } + + inline + void to_variant( const float64_t& f, variant& v ) { + v = variant(*reinterpret_cast(&f)); + } + + inline + void from_variant( const variant& v, float64_t& f ) { + from_variant(v, *reinterpret_cast(&f)); + } + + inline + void to_variant( const float128_t& f, variant& v ) { + v = variant(*reinterpret_cast(&f)); + } + + inline + void from_variant( const variant& v, float128_t& f ) { + from_variant(v, *reinterpret_cast(&f)); + } + + inline + void to_variant( const eosio::chain::shared_string& s, variant& v ) { + v = variant(std::string(s.begin(), s.end())); + } + + inline + void from_variant( const variant& v, eosio::chain::shared_string& s ) { + string _s; + from_variant(v, _s); + s = eosio::chain::shared_string(_s.begin(), _s.end(), s.get_allocator()); + } + + inline + void to_variant( const eosio::chain::shared_blob& b, variant& v ) { + v = variant(base64_encode(b.data(), b.size())); + } + + inline + void from_variant( const variant& v, eosio::chain::shared_blob& b ) { + string _s = base64_decode(v.as_string()); + b = eosio::chain::shared_blob(_s.begin(), _s.end(), b.get_allocator()); + } + + inline + void to_variant( const blob& b, variant& v ) { + v = variant(base64_encode(b.data.data(), b.data.size())); + } + + inline + void from_variant( const variant& v, blob& b ) { + string _s = base64_decode(v.as_string()); + b.data = std::vector(_s.begin(), _s.end()); + } + + template + void to_variant( const eosio::chain::shared_vector& sv, variant& v ) { + to_variant(std::vector(sv.begin(), sv.end()), v); + } + + template + void from_variant( const variant& v, eosio::chain::shared_vector& sv ) { + std::vector _v; + from_variant(v, _v); + sv = eosio::chain::shared_vector(_v.begin(), _v.end(), sv.get_allocator()); + } +} + +namespace chainbase { + // overloads for OID packing + template 
+ DataStream& operator << ( DataStream& ds, const oid& oid ) { + fc::raw::pack(ds, oid._id); + return ds; + } + + template + DataStream& operator >> ( DataStream& ds, oid& oid ) { + fc::raw::unpack(ds, oid._id); + return ds; + } +} + +// overloads for softfloat packing +template +DataStream& operator << ( DataStream& ds, const float64_t& v ) { + fc::raw::pack(ds, *reinterpret_cast(&v)); + return ds; +} + +template +DataStream& operator >> ( DataStream& ds, float64_t& v ) { + fc::raw::unpack(ds, *reinterpret_cast(&v)); + return ds; +} + +template +DataStream& operator << ( DataStream& ds, const float128_t& v ) { + fc::raw::pack(ds, *reinterpret_cast(&v)); + return ds; +} + +template +DataStream& operator >> ( DataStream& ds, float128_t& v ) { + fc::raw::unpack(ds, *reinterpret_cast(&v)); + return ds; +} + diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 91467e746f6..2dc5e114d03 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -230,7 +230,7 @@ namespace eosio { namespace chain { 3060004, "Contract Query Exception" ) FC_DECLARE_DERIVED_EXCEPTION( guard_exception, database_exception, - 3060100, "Database exception" ) + 3060100, "Guard Exception" ) FC_DECLARE_DERIVED_EXCEPTION( database_guard_exception, guard_exception, 3060101, "Database usage is at unsafe levels" ) @@ -456,6 +456,10 @@ namespace eosio { namespace chain { 3170004, "Producer schedule exception" ) FC_DECLARE_DERIVED_EXCEPTION( producer_not_in_schedule, producer_exception, 3170006, "The producer is not part of current schedule" ) + FC_DECLARE_DERIVED_EXCEPTION( snapshot_directory_not_found_exception, producer_exception, + 3170007, "The configured snapshot directory does not exist" ) + FC_DECLARE_DERIVED_EXCEPTION( snapshot_exists_exception, producer_exception, + 3170008, "The requested snapshot already exists" ) FC_DECLARE_DERIVED_EXCEPTION( 
reversible_blocks_exception, chain_exception, 3180000, "Reversible Blocks exception" ) @@ -510,4 +514,9 @@ namespace eosio { namespace chain { 3230002, "Database API Exception" ) FC_DECLARE_DERIVED_EXCEPTION( arithmetic_exception, contract_api_exception, 3230003, "Arithmetic Exception" ) + + FC_DECLARE_DERIVED_EXCEPTION( snapshot_exception, chain_exception, + 3240000, "Snapshot exception" ) + FC_DECLARE_DERIVED_EXCEPTION( snapshot_validation_exception, snapshot_exception, + 3240001, "Snapshot Validation Exception" ) } } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/generated_transaction_object.hpp b/libraries/chain/include/eosio/chain/generated_transaction_object.hpp index 64c16de4dc6..6d3e74dd558 100644 --- a/libraries/chain/include/eosio/chain/generated_transaction_object.hpp +++ b/libraries/chain/include/eosio/chain/generated_transaction_object.hpp @@ -3,7 +3,7 @@ * @copyright defined in eos/LICENSE.txt */ #pragma once -#include +#include #include #include @@ -34,7 +34,7 @@ namespace eosio { namespace chain { time_point delay_until; /// this generated transaction will not be applied until the specified time time_point expiration; /// this generated transaction will not be applied after this time time_point published; - shared_string packed_trx; + shared_blob packed_trx; uint32_t set( const transaction& trx ) { auto trxsize = fc::raw::pack_size( trx ); @@ -115,3 +115,5 @@ namespace eosio { namespace chain { } } // eosio::chain CHAINBASE_SET_INDEX_TYPE(eosio::chain::generated_transaction_object, eosio::chain::generated_transaction_multi_index) + +FC_REFLECT(eosio::chain::generated_transaction_object, (trx_id)(sender)(sender_id)(payer)(delay_until)(expiration)(published)(packed_trx)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/name.hpp b/libraries/chain/include/eosio/chain/name.hpp index af5af89ec23..81c13145dde 100644 --- a/libraries/chain/include/eosio/chain/name.hpp +++ 
b/libraries/chain/include/eosio/chain/name.hpp @@ -91,12 +91,6 @@ namespace eosio { namespace chain { operator unsigned __int128()const { return value; } }; - - inline std::vector sort_names( std::vector&& names ) { - fc::deduplicate(names); - return names; - } - } } // eosio::chain namespace std { diff --git a/libraries/chain/include/eosio/chain/permission_link_object.hpp b/libraries/chain/include/eosio/chain/permission_link_object.hpp index 627c1c1203b..9930b647ad8 100644 --- a/libraries/chain/include/eosio/chain/permission_link_object.hpp +++ b/libraries/chain/include/eosio/chain/permission_link_object.hpp @@ -79,4 +79,4 @@ namespace eosio { namespace chain { CHAINBASE_SET_INDEX_TYPE(eosio::chain::permission_link_object, eosio::chain::permission_link_index) -FC_REFLECT(eosio::chain::permission_link_object, (id)(account)(code)(message_type)(required_permission)) +FC_REFLECT(eosio::chain::permission_link_object, (account)(code)(message_type)(required_permission)) diff --git a/libraries/chain/include/eosio/chain/permission_object.hpp b/libraries/chain/include/eosio/chain/permission_object.hpp index 16185390475..ee43f0e52a7 100644 --- a/libraries/chain/include/eosio/chain/permission_object.hpp +++ b/libraries/chain/include/eosio/chain/permission_object.hpp @@ -4,6 +4,7 @@ */ #pragma once #include +#include #include "multi_index_includes.hpp" @@ -70,6 +71,19 @@ namespace eosio { namespace chain { } }; + /** + * special cased to abstract the foreign keys for usage and the optimization of using OID for the parent + */ + struct snapshot_permission_object { + permission_name parent; ///< parent permission + account_name owner; ///< the account this permission belongs to + permission_name name; ///< human-readable name for the permission + time_point last_updated; ///< the last time this authority was updated + time_point last_used; ///< when this permission was last used + authority auth; ///< authority required to execute this permission + }; + + struct by_parent; 
struct by_owner; struct by_name; @@ -110,8 +124,7 @@ namespace eosio { namespace chain { CHAINBASE_SET_INDEX_TYPE(eosio::chain::permission_object, eosio::chain::permission_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::permission_usage_object, eosio::chain::permission_usage_index) -FC_REFLECT(chainbase::oid, (_id)) -FC_REFLECT(eosio::chain::permission_object, (id)(usage_id)(parent)(owner)(name)(last_updated)(auth)) +FC_REFLECT(eosio::chain::permission_object, (usage_id)(parent)(owner)(name)(last_updated)(auth)) +FC_REFLECT(eosio::chain::snapshot_permission_object, (parent)(owner)(name)(last_updated)(last_used)(auth)) -FC_REFLECT(chainbase::oid, (_id)) -FC_REFLECT(eosio::chain::permission_usage_object, (id)(last_used)) +FC_REFLECT(eosio::chain::permission_usage_object, (last_used)) diff --git a/libraries/chain/include/eosio/chain/producer_schedule.hpp b/libraries/chain/include/eosio/chain/producer_schedule.hpp index 6ebf064bf20..09528d7b809 100644 --- a/libraries/chain/include/eosio/chain/producer_schedule.hpp +++ b/libraries/chain/include/eosio/chain/producer_schedule.hpp @@ -84,3 +84,4 @@ namespace eosio { namespace chain { FC_REFLECT( eosio::chain::producer_key, (producer_name)(block_signing_key) ) FC_REFLECT( eosio::chain::producer_schedule_type, (version)(producers) ) +FC_REFLECT( eosio::chain::shared_producer_schedule_type, (version)(producers) ) diff --git a/libraries/chain/include/eosio/chain/resource_limits.hpp b/libraries/chain/include/eosio/chain/resource_limits.hpp index 121c4608713..b7fb7b24c29 100644 --- a/libraries/chain/include/eosio/chain/resource_limits.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits.hpp @@ -1,6 +1,7 @@ #pragma once #include #include +#include #include #include @@ -43,6 +44,10 @@ namespace eosio { namespace chain { namespace resource_limits { void add_indices(); void initialize_database(); + void calculate_integrity_hash( fc::sha256::encoder& enc ) const; + void add_to_snapshot( const snapshot_writer_ptr& snapshot ) 
const; + void read_from_snapshot( const snapshot_reader_ptr& snapshot ); + void initialize_account( const account_name& account ); void set_block_parameters( const elastic_limit_parameters& cpu_limit_parameters, const elastic_limit_parameters& net_limit_parameters ); @@ -80,3 +85,5 @@ namespace eosio { namespace chain { namespace resource_limits { } } } /// eosio::chain FC_REFLECT( eosio::chain::resource_limits::account_resource_limit, (used)(available)(max) ) +FC_REFLECT( eosio::chain::resource_limits::ratio, (numerator)(denominator)) +FC_REFLECT( eosio::chain::resource_limits::elastic_limit_parameters, (target)(max)(periods)(max_multiplier)(contract_rate)(expand_rate)) diff --git a/libraries/chain/include/eosio/chain/resource_limits_private.hpp b/libraries/chain/include/eosio/chain/resource_limits_private.hpp index 309387114e8..687a56a4d90 100644 --- a/libraries/chain/include/eosio/chain/resource_limits_private.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits_private.hpp @@ -264,3 +264,10 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_limits_object, CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_usage_object, eosio::chain::resource_limits::resource_usage_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_limits_config_object, eosio::chain::resource_limits::resource_limits_config_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_limits_state_object, eosio::chain::resource_limits::resource_limits_state_index) + +FC_REFLECT(eosio::chain::resource_limits::usage_accumulator, (last_ordinal)(value_ex)(consumed)) + +FC_REFLECT(eosio::chain::resource_limits::resource_limits_object, (owner)(net_weight)(cpu_weight)(ram_bytes)) +FC_REFLECT(eosio::chain::resource_limits::resource_usage_object, (owner)(net_usage)(cpu_usage)(ram_usage)) +FC_REFLECT(eosio::chain::resource_limits::resource_limits_config_object, 
(cpu_limit_parameters)(net_limit_parameters)(account_cpu_usage_average_window)(account_net_usage_average_window)) +FC_REFLECT(eosio::chain::resource_limits::resource_limits_state_object, (average_block_net_usage)(average_block_cpu_usage)(pending_net_usage)(pending_cpu_usage)(total_net_weight)(total_cpu_weight)(total_ram_bytes)(virtual_net_limit)(virtual_cpu_limit)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp new file mode 100644 index 00000000000..b6c7a81bf0a --- /dev/null +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -0,0 +1,359 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#pragma once + +#include +#include +#include +#include +#include + +namespace eosio { namespace chain { + /** + * History: + * Version 1: initial version with string identified sections and rows + */ + static const uint32_t current_snapshot_version = 1; + + namespace detail { + template + struct snapshot_section_traits { + static std::string section_name() { + return boost::core::demangle(typeid(T).name()); + } + }; + + template + struct snapshot_row_traits { + using value_type = std::decay_t; + using snapshot_type = value_type; + + static const snapshot_type& to_snapshot_row( const value_type& value, const chainbase::database& ) { + return value; + }; + }; + + /** + * Due to a pattern in our code of overloading `operator << ( std::ostream&, ... )` to provide + * human-readable string forms of data, we cannot directly use ostream as those operators will + * be used instead of the expected operators. In otherwords: + * fc::raw::pack(fc::datastream...) + * will end up calling _very_ different operators than + * fc::raw::pack(std::ostream...) 
+ */ + struct ostream_wrapper { + explicit ostream_wrapper(std::ostream& s) + :inner(s) { + + } + + ostream_wrapper(ostream_wrapper &&) = default; + ostream_wrapper(const ostream_wrapper& ) = default; + + auto& write( const char* d, size_t s ) { + return inner.write(d, s); + } + + auto& put(char c) { + return inner.put(c); + } + + auto tellp() const { + return inner.tellp(); + } + + auto& seekp(std::ostream::pos_type p) { + return inner.seekp(p); + } + + std::ostream& inner; + }; + + + struct abstract_snapshot_row_writer { + virtual void write(ostream_wrapper& out) const = 0; + virtual variant to_variant() const = 0; + virtual std::string row_type_name() const = 0; + }; + + template + struct snapshot_row_writer : abstract_snapshot_row_writer { + explicit snapshot_row_writer( const T& data ) + :data(data) {} + + void write(ostream_wrapper& out) const override { + fc::raw::pack(out, data); + } + + fc::variant to_variant() const override { + variant var; + fc::to_variant(data, var); + return var; + } + + std::string row_type_name() const override { + return boost::core::demangle( typeid( T ).name() ); + } + + const T& data; + }; + + template + snapshot_row_writer make_row_writer( const T& data) { + return snapshot_row_writer(data); + } + } + + class snapshot_writer { + public: + class section_writer { + public: + template + void add_row( const T& row, const chainbase::database& db ) { + _writer.write_row(detail::make_row_writer(detail::snapshot_row_traits::to_snapshot_row(row, db))); + } + + private: + friend class snapshot_writer; + section_writer(snapshot_writer& writer) + :_writer(writer) + { + + } + snapshot_writer& _writer; + }; + + template + void write_section(const std::string section_name, F f) { + write_start_section(section_name); + auto section = section_writer(*this); + f(section); + write_end_section(); + } + + template + void write_section(F f) { + write_section(detail::snapshot_section_traits::section_name(), f); + } + + virtual ~snapshot_writer(){}; + 
+ protected: + virtual void write_start_section( const std::string& section_name ) = 0; + virtual void write_row( const detail::abstract_snapshot_row_writer& row_writer ) = 0; + virtual void write_end_section() = 0; + }; + + using snapshot_writer_ptr = std::shared_ptr; + + namespace detail { + struct abstract_snapshot_row_reader { + virtual void provide(std::istream& in) const = 0; + virtual void provide(const fc::variant&) const = 0; + virtual std::string row_type_name() const = 0; + }; + + template + struct is_chainbase_object { + static constexpr bool value = false; + }; + + template + struct is_chainbase_object> { + static constexpr bool value = true; + }; + + template + constexpr bool is_chainbase_object_v = is_chainbase_object::value; + + struct row_validation_helper { + template + static auto apply(const T& data, F f) -> std::enable_if_t> { + auto orig = data.id; + f(); + EOS_ASSERT(orig == data.id, snapshot_exception, + "Snapshot for ${type} mutates row member \"id\" which is illegal", + ("type",boost::core::demangle( typeid( T ).name() ))); + } + + template + static auto apply(const T&, F f) -> std::enable_if_t> { + f(); + } + }; + + template + struct snapshot_row_reader : abstract_snapshot_row_reader { + explicit snapshot_row_reader( T& data ) + :data(data) {} + + + void provide(std::istream& in) const override { + row_validation_helper::apply(data, [&in,this](){ + fc::raw::unpack(in, data); + }); + } + + void provide(const fc::variant& var) const override { + row_validation_helper::apply(data, [&var,this]() { + fc::from_variant(var, data); + }); + } + + std::string row_type_name() const override { + return boost::core::demangle( typeid( T ).name() ); + } + + T& data; + }; + + template + snapshot_row_reader make_row_reader( T& data ) { + return snapshot_row_reader(data); + } + } + + class snapshot_reader { + public: + class section_reader { + public: + template + auto read_row( T& out ) -> std::enable_if_t, typename 
detail::snapshot_row_traits::snapshot_type>::value,bool> { + auto reader = detail::make_row_reader(out); + return _reader.read_row(reader); + } + + template + auto read_row( T& out, chainbase::database& ) -> std::enable_if_t, typename detail::snapshot_row_traits::snapshot_type>::value,bool> { + return read_row(out); + } + + template + auto read_row( T& out, chainbase::database& db ) -> std::enable_if_t, typename detail::snapshot_row_traits::snapshot_type>::value,bool> { + auto temp = typename detail::snapshot_row_traits::snapshot_type(); + auto reader = detail::make_row_reader(temp); + bool result = _reader.read_row(reader); + detail::snapshot_row_traits::from_snapshot_row(std::move(temp), out, db); + return result; + } + + bool empty() { + return _reader.empty(); + } + + private: + friend class snapshot_reader; + section_reader(snapshot_reader& _reader) + :_reader(_reader) + {} + + snapshot_reader& _reader; + + }; + + template + void read_section(const std::string& section_name, F f) { + set_section(section_name); + auto section = section_reader(*this); + f(section); + clear_section(); + } + + template + void read_section(F f) { + read_section(detail::snapshot_section_traits::section_name(), f); + } + + template + bool has_section(const std::string& suffix = std::string()) { + return has_section(suffix + detail::snapshot_section_traits::section_name()); + } + + virtual void validate() const = 0; + + virtual ~snapshot_reader(){}; + + protected: + virtual bool has_section( const std::string& section_name ) = 0; + virtual void set_section( const std::string& section_name ) = 0; + virtual bool read_row( detail::abstract_snapshot_row_reader& row_reader ) = 0; + virtual bool empty( ) = 0; + virtual void clear_section() = 0; + }; + + using snapshot_reader_ptr = std::shared_ptr; + + class variant_snapshot_writer : public snapshot_writer { + public: + variant_snapshot_writer(fc::mutable_variant_object& snapshot); + + void write_start_section( const std::string& 
section_name ) override; + void write_row( const detail::abstract_snapshot_row_writer& row_writer ) override; + void write_end_section( ) override; + void finalize(); + + private: + fc::mutable_variant_object& snapshot; + std::string current_section_name; + fc::variants current_rows; + }; + + class variant_snapshot_reader : public snapshot_reader { + public: + explicit variant_snapshot_reader(const fc::variant& snapshot); + + void validate() const override; + bool has_section( const string& section_name ) override; + void set_section( const string& section_name ) override; + bool read_row( detail::abstract_snapshot_row_reader& row_reader ) override; + bool empty ( ) override; + void clear_section() override; + + private: + const fc::variant& snapshot; + const fc::variant_object* cur_section; + uint64_t cur_row; + }; + + class ostream_snapshot_writer : public snapshot_writer { + public: + explicit ostream_snapshot_writer(std::ostream& snapshot); + + void write_start_section( const std::string& section_name ) override; + void write_row( const detail::abstract_snapshot_row_writer& row_writer ) override; + void write_end_section( ) override; + void finalize(); + + static const uint32_t magic_number = 0x30510550; + + private: + detail::ostream_wrapper snapshot; + std::streampos header_pos; + std::streampos section_pos; + uint64_t row_count; + + }; + + class istream_snapshot_reader : public snapshot_reader { + public: + explicit istream_snapshot_reader(std::istream& snapshot); + + void validate() const override; + bool has_section( const string& section_name ) override; + void set_section( const string& section_name ) override; + bool read_row( detail::abstract_snapshot_row_reader& row_reader ) override; + bool empty ( ) override; + void clear_section() override; + + private: + bool validate_section() const; + + std::istream& snapshot; + std::streampos header_pos; + uint64_t num_rows; + uint64_t cur_row; + }; + +}} diff --git 
a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index ad02baf5bac..03750bd512e 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -28,15 +28,14 @@ namespace eosio { namespace chain { action act; bool context_free = false; fc::microseconds elapsed; - uint64_t cpu_usage = 0; string console; - uint64_t total_cpu_usage = 0; /// total of inline_traces[x].cpu_usage + cpu_usage transaction_id_type trx_id; ///< the transaction that generated this action uint32_t block_num = 0; block_timestamp_type block_time; fc::optional producer_block_id; flat_set account_ram_deltas; + fc::optional except; }; struct action_trace : public base_action_trace { @@ -70,8 +69,8 @@ FC_REFLECT( eosio::chain::account_delta, (account)(delta) ) FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(context_free)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id) - (block_num)(block_time)(producer_block_id)(account_ram_deltas) ) + (receipt)(act)(context_free)(elapsed)(console)(trx_id) + (block_num)(block_time)(producer_block_id)(account_ram_deltas)(except) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 3175994dedd..b69a00143e9 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -1,9 +1,23 @@ #pragma once #include #include +#include namespace eosio { namespace chain { + struct deadline_timer { + deadline_timer(); + ~deadline_timer(); + + void start(fc::time_point tp); + void stop(); + + static volatile sig_atomic_t expired; + private: + static void timer_expired(int); + static bool initialized; + }; + class transaction_context { private: void init( uint64_t initial_net_usage); @@ -108,6 +122,8 @@ 
namespace eosio { namespace chain { fc::time_point pseudo_start; fc::microseconds billed_time; fc::microseconds billing_timer_duration_limit; + + deadline_timer _deadline_timer; }; } } diff --git a/libraries/chain/include/eosio/chain/transaction_object.hpp b/libraries/chain/include/eosio/chain/transaction_object.hpp index 2b59f6ab813..0d049267e8e 100644 --- a/libraries/chain/include/eosio/chain/transaction_object.hpp +++ b/libraries/chain/include/eosio/chain/transaction_object.hpp @@ -51,3 +51,4 @@ namespace eosio { namespace chain { CHAINBASE_SET_INDEX_TYPE(eosio::chain::transaction_object, eosio::chain::transaction_multi_index) +FC_REFLECT(eosio::chain::transaction_object, (expiration)(trx_id)) diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index 2a9117a99fc..4610f24c891 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -96,6 +96,25 @@ namespace eosio { namespace chain { template using shared_set = boost::interprocess::set, allocator>; + /** + * For bugs in boost interprocess we moved our blob data to shared_string + * this wrapper allows us to continue that while also having a type-level distinction for + * serialization and to/from variant + */ + class shared_blob : public shared_string { + public: + shared_blob() = default; + + template + shared_blob(InputIterator f, InputIterator l, const allocator_type& a) + :shared_string(f,l,a) + {} + + shared_blob(const allocator_type& a) + :shared_string(a) + {} + }; + using action_name = name; using scope_name = name; using account_name = name; diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 17ac03fddfe..7e6991996af 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -53,7 +53,6 @@ namespace eosio { namespace chain { public: enum class 
vm_type { wavm, - binaryen, wabt }; @@ -66,6 +65,9 @@ namespace eosio { namespace chain { //Calls apply or error on a given code void apply(const digest_type& code_id, const shared_string& code, apply_context& context); + //Immediately exits currently running wasm. UB is called when no wasm running + void exit(); + private: unique_ptr my; friend class eosio::chain::webassembly::common::intrinsics_accessor; @@ -77,4 +79,4 @@ namespace eosio{ namespace chain { std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime); }} -FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wavm)(binaryen)(wabt) ) +FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wavm)(wabt) ) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index df28d79a21b..c3af34d79ea 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -27,8 +26,6 @@ namespace eosio { namespace chain { wasm_interface_impl(wasm_interface::vm_type vm) { if(vm == wasm_interface::vm_type::wavm) runtime_interface = std::make_unique(); - else if(vm == wasm_interface::vm_type::binaryen) - runtime_interface = std::make_unique(); else if(vm == wasm_interface::vm_type::wabt) runtime_interface = std::make_unique(); else @@ -98,7 +95,6 @@ namespace eosio { namespace chain { #define _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ _REGISTER_WAVM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ - _REGISTER_BINARYEN_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ _REGISTER_WABT_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) #define _REGISTER_INTRINSIC4(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ diff --git a/libraries/chain/include/eosio/chain/webassembly/binaryen.hpp b/libraries/chain/include/eosio/chain/webassembly/binaryen.hpp deleted 
file mode 100644 index 51e908edbbd..00000000000 --- a/libraries/chain/include/eosio/chain/webassembly/binaryen.hpp +++ /dev/null @@ -1,701 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include - - -namespace eosio { namespace chain { namespace webassembly { namespace binaryen { - -using namespace fc; -using namespace wasm; -using namespace eosio::chain::webassembly::common; - - -using linear_memory_type = fc::array; -using call_indirect_table_type = vector; - -struct interpreter_interface; - -struct intrinsic_registrator { - using intrinsic_fn = Literal(*)(interpreter_interface*, LiteralList&); - - static auto& get_map(){ - static map _map; - return _map; - }; - - intrinsic_registrator(const char* name, intrinsic_fn fn) - { - get_map()[string(name)] = fn; - } -}; - -using import_lut_type = unordered_map; - - -struct interpreter_interface : ModuleInstance::ExternalInterface { - interpreter_interface(linear_memory_type& memory, call_indirect_table_type& table, import_lut_type& import_lut, const unsigned& initial_memory_size, apply_context& context) - :memory(memory),table(table),import_lut(import_lut), current_memory_size(initial_memory_size), context(context) - {} - - void importGlobals(std::map& globals, Module& wasm) override - { - - } - - void init(Module& wasm, ModuleInstance& instance) override { - - } - - Literal callImport(Import *import, LiteralList &args) override - { - auto fn_iter = import_lut.find((uintptr_t)import); - EOS_ASSERT(fn_iter != import_lut.end(), wasm_execution_error, "unknown import ${m}:${n}", ("m", import->module.c_str())("n", import->module.c_str())); - return fn_iter->second(this, args); - } - - Literal callTable(Index index, LiteralList& arguments, WasmType result, ModuleInstance& instance) override - { - EOS_ASSERT(index < table.size(), wasm_execution_error, "callIndirect: bad pointer"); - auto* func = instance.wasm.getFunctionOrNull(table[index]); - EOS_ASSERT(func, wasm_execution_error, "callIndirect: 
uninitialized element"); - EOS_ASSERT(func->params.size() == arguments.size(), wasm_execution_error, "callIndirect: bad # of arguments"); - - for (size_t i = 0; i < func->params.size(); i++) { - EOS_ASSERT(func->params[i] == arguments[i].type, wasm_execution_error, "callIndirect: bad argument type"); - } - EOS_ASSERT(func->result == result, wasm_execution_error, "callIndirect: bad result type"); - return instance.callFunctionInternal(func->name, arguments); - } - - void trap(const char* why) override { - FC_THROW_EXCEPTION(wasm_execution_error, why); - } - - void assert_memory_is_accessible(uint32_t offset, uint32_t size) { - EOS_ASSERT(offset + size <= current_memory_size && offset + size >= offset, - wasm_execution_error, "access violation"); - } - - char* get_validated_pointer(uint32_t offset, uint32_t size) { - assert_memory_is_accessible(offset, size); - return memory.data + offset; - } - - template - static bool aligned_for(const void* address) { - return 0 == (reinterpret_cast(address) & (std::alignment_of::value - 1)); - } - - template - T load_memory(uint32_t offset) { - char *base = get_validated_pointer(offset, sizeof(T)); - if (aligned_for(base)) { - return *reinterpret_cast(base); - } else { - T temp; - memcpy(&temp, base, sizeof(T)); - return temp; - } - } - - template - void store_memory(uint32_t offset, T value) { - char *base = get_validated_pointer(offset, sizeof(T)); - if (aligned_for(base)) { - *reinterpret_cast(base) = value; - } else { - memcpy(base, &value, sizeof(T)); - } - } - - void growMemory(Address old_size, Address new_size) override { - memset(memory.data + old_size.addr, 0, new_size.addr - old_size.addr); - current_memory_size += new_size.addr - old_size.addr; - } - - int8_t load8s(Address addr) override { return load_memory(addr); } - uint8_t load8u(Address addr) override { return load_memory(addr); } - int16_t load16s(Address addr) override { return load_memory(addr); } - uint16_t load16u(Address addr) override { return 
load_memory(addr); } - int32_t load32s(Address addr) override { return load_memory(addr); } - uint32_t load32u(Address addr) override { return load_memory(addr); } - int64_t load64s(Address addr) override { return load_memory(addr); } - uint64_t load64u(Address addr) override { return load_memory(addr); } - - void store8(Address addr, int8_t value) override { store_memory(addr, value); } - void store16(Address addr, int16_t value) override { store_memory(addr, value); } - void store32(Address addr, int32_t value) override { store_memory(addr, value); } - void store64(Address addr, int64_t value) override { store_memory(addr, value); } - - linear_memory_type& memory; - call_indirect_table_type& table; - import_lut_type& import_lut; - unsigned current_memory_size; - apply_context& context; -}; - -class binaryen_runtime : public eosio::chain::wasm_runtime_interface { - public: - binaryen_runtime(); - std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) override; - - private: - linear_memory_type _memory __attribute__ ((aligned (4096))); -}; - -/** - * class to represent an in-wasm-memory array - * it is a hint to the transcriber that the next parameter will - * be a size (data bytes length) and that the pair are validated together - * This triggers the template specialization of intrinsic_invoker_impl - * @tparam T - */ -template -inline array_ptr array_ptr_impl (interpreter_interface* interface, uint32_t ptr, uint32_t length) -{ - EOS_ASSERT( length < INT_MAX/(uint32_t)sizeof(T), binaryen_exception, "length will overflow" ); - return array_ptr((T*)(interface->get_validated_pointer(ptr, length * (uint32_t)sizeof(T)))); -} - -/** - * class to represent an in-wasm-memory char array that must be null terminated - */ -inline null_terminated_ptr null_terminated_ptr_impl(interpreter_interface* interface, uint32_t ptr) -{ - char *value = interface->get_validated_pointer(ptr, 1); - const char* p = value; - const char* const 
top_of_memory = interface->memory.data + interface->current_memory_size; - while(p < top_of_memory) - if(*p++ == '\0') - return null_terminated_ptr(value); - - FC_THROW_EXCEPTION(wasm_execution_error, "unterminated string"); -} - - -template -struct is_reference_from_value { - static constexpr bool value = false; -}; - -template<> -struct is_reference_from_value { - static constexpr bool value = true; -}; - -template<> -struct is_reference_from_value { - static constexpr bool value = true; -}; - -template -constexpr bool is_reference_from_value_v = is_reference_from_value::value; - -template -T convert_literal_to_native(Literal& v); - -template<> -inline double convert_literal_to_native(Literal& v) { - return v.getf64(); -} - -template<> -inline float convert_literal_to_native(Literal& v) { - return v.getf32(); -} - -template<> -inline int64_t convert_literal_to_native(Literal& v) { - return v.geti64(); -} - -template<> -inline uint64_t convert_literal_to_native(Literal& v) { - return v.geti64(); -} - -template<> -inline int32_t convert_literal_to_native(Literal& v) { - return v.geti32(); -} - -template<> -inline uint32_t convert_literal_to_native(Literal& v) { - return v.geti32(); -} - -template<> -inline bool convert_literal_to_native(Literal& v) { - return v.geti32(); -} - -template<> -inline name convert_literal_to_native(Literal& v) { - int64_t val = v.geti64(); - return name(val); -} - -template -inline auto convert_native_to_literal(const interpreter_interface*, T val) { - return Literal(val); -} - -inline auto convert_native_to_literal(const interpreter_interface*, const name &val) { - return Literal(val.value); -} - -inline auto convert_native_to_literal(const interpreter_interface*, const fc::time_point_sec &val) { - return Literal(val.sec_since_epoch()); -} - -inline auto convert_native_to_literal(const interpreter_interface* interface, char* ptr) { - const char* base = interface->memory.data; - const char* top_of_memory = base + 
interface->current_memory_size; - EOS_ASSERT(ptr >= base && ptr < top_of_memory, wasm_execution_error, "returning pointer not in linear memory"); - return Literal((int)(ptr - base)); -} - -struct void_type { -}; - -/** - * Forward declaration of the invoker type which transcribes arguments to/from a native method - * and injects the appropriate checks - * - * @tparam Ret - the return type of the native function - * @tparam NativeParameters - a std::tuple of the remaining native parameters to transcribe - * @tparam WasmParameters - a std::tuple of the transribed parameters - */ -template -struct intrinsic_invoker_impl; - -/** - * Specialization for the fully transcribed signature - * @tparam Ret - the return type of the native function - */ -template -struct intrinsic_invoker_impl> { - using next_method_type = Ret (*)(interpreter_interface*, LiteralList&, int); - - template - static Literal invoke(interpreter_interface* interface, LiteralList& args) { - return convert_native_to_literal(interface, Method(interface, args, args.size() - 1)); - } - - template - static const auto fn() { - return invoke; - } -}; - -/** - * specialization of the fully transcribed signature for void return values - * @tparam Translated - the arguments to the wasm function - */ -template<> -struct intrinsic_invoker_impl> { - using next_method_type = void_type (*)(interpreter_interface*, LiteralList&, int); - - template - static Literal invoke(interpreter_interface* interface, LiteralList& args) { - Method(interface, args, args.size() - 1); - return Literal(); - } - - template - static const auto fn() { - return invoke; - } -}; - -/** - * Sepcialization for transcribing a simple type in the native method signature - * @tparam Ret - the return type of the native method - * @tparam Input - the type of the native parameter to transcribe - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; 
- using then_type = Ret (*)(interpreter_interface*, Input, Inputs..., LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, Inputs... rest, LiteralList& args, int offset) { - auto& last = args.at(offset); - auto native = convert_literal_to_native(last); - return Then(interface, native, rest..., args, (uint32_t)offset - 1); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a array_ptr type in the native method signature - * This type transcribes into 2 wasm parameters: a pointer and byte length and checks the validity of that memory - * range before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl, size_t, Inputs...>> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret(*)(interpreter_interface*, array_ptr, size_t, Inputs..., LiteralList&, int); - - template - static auto translate_one(interpreter_interface* interface, Inputs... rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - static_assert(!std::is_pointer::value, "Currently don't support array of pointers"); - uint32_t ptr = args.at((uint32_t)offset - 1).geti32(); - size_t length = args.at((uint32_t)offset).geti32(); - T* base = array_ptr_impl(interface, ptr, length); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of const values" ); - std::vector > copy(length > 0 ? length : 1); - T* copy_ptr = ©[0]; - memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); - return Then(interface, static_cast>(copy_ptr), length, rest..., args, (uint32_t)offset - 2); - } - return Then(interface, static_cast>(base), length, rest..., args, (uint32_t)offset - 2); - }; - - template - static auto translate_one(interpreter_interface* interface, Inputs... 
rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - static_assert(!std::is_pointer::value, "Currently don't support array of pointers"); - uint32_t ptr = args.at((uint32_t)offset - 1).geti32(); - size_t length = args.at((uint32_t)offset).geti32(); - T* base = array_ptr_impl(interface, ptr, length); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of values" ); - std::vector > copy(length > 0 ? length : 1); - T* copy_ptr = ©[0]; - memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); - Ret ret = Then(interface, static_cast>(copy_ptr), length, rest..., args, (uint32_t)offset - 2); - memcpy( (void*)base, (void*)copy_ptr, length * sizeof(T) ); - return ret; - } - return Then(interface, static_cast>(base), length, rest..., args, (uint32_t)offset - 2); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a null_terminated_ptr type in the native method signature - * This type transcribes 1 wasm parameters: a char pointer which is validated to contain - * a null value before the end of the allocated memory. - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret(*)(interpreter_interface*, null_terminated_ptr, Inputs..., LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, Inputs... 
rest, LiteralList& args, int offset) { - uint32_t ptr = args.at((uint32_t)offset).geti32(); - return Then(interface, null_terminated_ptr_impl(interface, ptr), rest..., args, (uint32_t)offset - 1); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a pair of array_ptr types in the native method signature that share size - * This type transcribes into 3 wasm parameters: 2 pointers and byte length and checks the validity of those memory - * ranges before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl, array_ptr, size_t, Inputs...>> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret(*)(interpreter_interface*, array_ptr, array_ptr, size_t, Inputs..., LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, Inputs... 
rest, LiteralList& args, int offset) { - uint32_t ptr_t = args.at((uint32_t)offset - 2).geti32(); - uint32_t ptr_u = args.at((uint32_t)offset - 1).geti32(); - size_t length = args.at((uint32_t)offset).geti32(); - static_assert(std::is_same, char>::value && std::is_same, char>::value, "Currently only support array of (const)chars"); - return Then(interface, array_ptr_impl(interface, ptr_t, length), array_ptr_impl(interface, ptr_u, length), length, args, (uint32_t)offset - 3); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing memset parameters - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl, int, size_t>> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret(*)(interpreter_interface*, array_ptr, int, size_t, LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, LiteralList& args, int offset) { - uint32_t ptr = args.at((uint32_t)offset - 2).geti32(); - uint32_t value = args.at((uint32_t)offset - 1).geti32(); - size_t length = args.at((uint32_t)offset).geti32(); - return Then(interface, array_ptr_impl(interface, ptr, length), value, length, args, (uint32_t)offset - 3); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a pointer type in the native method signature - * This type transcribes into an int32 pointer checks the validity of that memory - * range before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret (*)(interpreter_interface*, T *, Inputs..., LiteralList&, int); - - template - static auto 
translate_one(interpreter_interface* interface, Inputs... rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - uint32_t ptr = args.at((uint32_t)offset).geti32(); - T* base = array_ptr_impl(interface, ptr, 1); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned const pointer" ); - std::remove_const_t copy; - T* copy_ptr = © - memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); - return Then(interface, copy_ptr, rest..., args, (uint32_t)offset - 1); - } - return Then(interface, base, rest..., args, (uint32_t)offset - 1); - }; - - template - static auto translate_one(interpreter_interface* interface, Inputs... rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - uint32_t ptr = args.at((uint32_t)offset).geti32(); - T* base = array_ptr_impl(interface, ptr, 1); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned pointer" ); - T copy; - memcpy( (void*)©, (void*)base, sizeof(T) ); - Ret ret = Then(interface, ©, rest..., args, (uint32_t)offset - 1); - memcpy( (void*)base, (void*)©, sizeof(T) ); - return ret; - } - return Then(interface, base, rest..., args, (uint32_t)offset - 1); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a reference to a name which can be passed as a native value - * This type transcribes into a native type which is loaded by value into a - * variable on the stack and then passed by reference to the intrinsic. - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret (*)(interpreter_interface*, const name&, Inputs..., LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, Inputs... 
rest, LiteralList& args, int offset) { - uint64_t wasm_value = args.at((uint32_t)offset).geti64(); - auto value = name(wasm_value); - return Then(interface, value, rest..., args, (uint32_t)offset - 1); - } - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a reference to a fc::time_point_sec which can be passed as a native value - * This type transcribes into a native type which is loaded by value into a - * variable on the stack and then passed by reference to the intrinsic. - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret (*)(interpreter_interface*, const fc::time_point_sec&, Inputs..., LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, Inputs... rest, LiteralList& args, int offset) { - uint32_t wasm_value = args.at((uint32_t)offset).geti32(); - auto value = fc::time_point_sec(wasm_value); - return Then(interface, value, rest..., args, (uint32_t)offset - 1); - } - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - - -/** - * Specialization for transcribing a reference type in the native method signature - * This type transcribes into an int32 pointer checks the validity of that memory - * range before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret (*)(interpreter_interface*, T &, Inputs..., LiteralList&, int); - - template - static auto translate_one(interpreter_interface* interface, Inputs... 
rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - // references cannot be created for null pointers - uint32_t ptr = args.at((uint32_t)offset).geti32(); - EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for null pointers"); - T* base = array_ptr_impl(interface, ptr, 1); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned const reference" ); - std::remove_const_t copy; - T* copy_ptr = © - memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); - return Then(interface, *copy_ptr, rest..., args, (uint32_t)offset - 1); - } - return Then(interface, *base, rest..., args, (uint32_t)offset - 1); - } - - template - static auto translate_one(interpreter_interface* interface, Inputs... rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - // references cannot be created for null pointers - uint32_t ptr = args.at((uint32_t)offset).geti32(); - EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for null pointers"); - T* base = array_ptr_impl(interface, ptr, 1); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned reference" ); - T copy; - memcpy( (void*)©, (void*)base, sizeof(T) ); - Ret ret = Then(interface, copy, rest..., args, (uint32_t)offset - 1); - memcpy( (void*)base, (void*)©, sizeof(T) ); - return ret; - } - return Then(interface, *base, rest..., args, (uint32_t)offset - 1); - } - - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * forward declaration of a wrapper class to call methods of the class - */ -template -struct intrinsic_function_invoker { - using impl = intrinsic_invoker_impl>; - - template - static Ret wrapper(interpreter_interface* interface, Params... 
params, LiteralList&, int) { - class_from_wasm::value(interface->context).checktime(); - return (class_from_wasm::value(interface->context).*Method)(params...); - } - - template - static const intrinsic_registrator::intrinsic_fn fn() { - return impl::template fn>(); - } -}; - -template -struct intrinsic_function_invoker { - using impl = intrinsic_invoker_impl>; - - template - static void_type wrapper(interpreter_interface* interface, Params... params, LiteralList& args, int offset) { - class_from_wasm::value(interface->context).checktime(); - (class_from_wasm::value(interface->context).*Method)(params...); - return void_type(); - } - - template - static const intrinsic_registrator::intrinsic_fn fn() { - return impl::template fn>(); - } - -}; - -template -struct intrinsic_function_invoker_wrapper; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -#define _ADD_PAREN_1(...) ((__VA_ARGS__)) _ADD_PAREN_2 -#define _ADD_PAREN_2(...) ((__VA_ARGS__)) _ADD_PAREN_1 -#define _ADD_PAREN_1_END -#define _ADD_PAREN_2_END -#define _WRAPPED_SEQ(SEQ) BOOST_PP_CAT(_ADD_PAREN_1 SEQ, _END) - -#define __INTRINSIC_NAME(LABEL, SUFFIX) LABEL##SUFFIX -#define _INTRINSIC_NAME(LABEL, SUFFIX) __INTRINSIC_NAME(LABEL,SUFFIX) - -#define _REGISTER_BINARYEN_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ - static eosio::chain::webassembly::binaryen::intrinsic_registrator _INTRINSIC_NAME(__binaryen_intrinsic_fn, __COUNTER__) (\ - MOD "." 
NAME,\ - eosio::chain::webassembly::binaryen::intrinsic_function_invoker_wrapper::type::fn<&CLS::METHOD>()\ - );\ - - -} } } }// eosio::chain::webassembly::wavm diff --git a/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp b/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp index 8158c727829..2a9b8119b67 100644 --- a/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp @@ -17,6 +17,9 @@ class wasm_runtime_interface { public: virtual std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) = 0; + //immediately exit the currently running wasm_instantiated_module_interface. Yep, this assumes only one can possibly run at a time. + virtual void immediately_exit_currently_running_module() = 0; + virtual ~wasm_runtime_interface(); }; diff --git a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp index 5be568d4b01..31456dc1dda 100644 --- a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp @@ -52,6 +52,8 @@ class wabt_runtime : public eosio::chain::wasm_runtime_interface { wabt_runtime(); std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) override; + void immediately_exit_currently_running_module() override; + private: wabt::ReadBinaryOptions read_binary_options; //note default ctor will look at each option in feature.def and default to DISABLED for the feature }; @@ -371,7 +373,8 @@ struct intrinsic_invoker_impl, size_t, Inputs...>> size_t length = args.at((uint32_t)offset).get_i32(); T* base = array_ptr_impl(vars, ptr, length); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of const values" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned array of const values" 
); std::vector > copy(length > 0 ? length : 1); T* copy_ptr = ©[0]; memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); @@ -387,7 +390,8 @@ struct intrinsic_invoker_impl, size_t, Inputs...>> size_t length = args.at((uint32_t)offset).get_i32(); T* base = array_ptr_impl(vars, ptr, length); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of values" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned array of values" ); std::vector > copy(length > 0 ? length : 1); T* copy_ptr = ©[0]; memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); @@ -500,7 +504,8 @@ struct intrinsic_invoker_impl> { uint32_t ptr = args.at((uint32_t)offset).get_i32(); T* base = array_ptr_impl(vars, ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned const pointer" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned const pointer" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); @@ -514,7 +519,8 @@ struct intrinsic_invoker_impl> { uint32_t ptr = args.at((uint32_t)offset).get_i32(); T* base = array_ptr_impl(vars, ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned pointer" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned pointer" ); T copy; memcpy( (void*)©, (void*)base, sizeof(T) ); Ret ret = Then(vars, ©, rest..., args, (uint32_t)offset - 1); @@ -603,7 +609,8 @@ struct intrinsic_invoker_impl> { EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for null pointers"); T* base = array_ptr_impl(vars, ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned const reference" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned const reference" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); @@ -619,7 +626,8 @@ struct intrinsic_invoker_impl> { EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for 
null pointers"); T* base = array_ptr_impl(vars, ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned reference" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned reference" ); T copy; memcpy( (void*)©, (void*)base, sizeof(T) ); Ret ret = Then(vars, copy, rest..., args, (uint32_t)offset - 1); diff --git a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp index 4674c97e2a8..f619e318b3f 100644 --- a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include "Runtime/Runtime.h" #include "IR/Types.h" @@ -21,6 +22,8 @@ class wavm_runtime : public eosio::chain::wasm_runtime_interface { ~wavm_runtime(); std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) override; + void immediately_exit_currently_running_module() override; + struct runtime_guard { runtime_guard(); ~runtime_guard(); @@ -382,7 +385,8 @@ struct intrinsic_invoker_impl, size_t, Inputs...>, const auto length = size_t(size); T* base = array_ptr_impl(ctx, (U32)ptr, length); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of const values" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned array of const values" ); std::vector > copy(length > 0 ? length : 1); T* copy_ptr = ©[0]; memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); @@ -397,7 +401,8 @@ struct intrinsic_invoker_impl, size_t, Inputs...>, const auto length = size_t(size); T* base = array_ptr_impl(ctx, (U32)ptr, length); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of values" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned array of values" ); std::vector > copy(length > 0 ? 
length : 1); T* copy_ptr = ©[0]; memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); @@ -508,7 +513,8 @@ struct intrinsic_invoker_impl, std::tuple std::enable_if_t::value, Ret> { T* base = array_ptr_impl(ctx, (U32)ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned const pointer" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned const pointer" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); @@ -521,7 +527,8 @@ struct intrinsic_invoker_impl, std::tuple std::enable_if_t::value, Ret> { T* base = array_ptr_impl(ctx, (U32)ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned pointer" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned pointer" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); @@ -587,7 +594,8 @@ struct intrinsic_invoker_impl, std::tuple(&base) % alignof(T) != 0 ) { - wlog( "misaligned const reference" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned const reference" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)&base, sizeof(T) ); @@ -605,7 +613,8 @@ struct intrinsic_invoker_impl, std::tuple(&base) % alignof(T) != 0 ) { - wlog( "misaligned reference" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned reference" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)&base, sizeof(T) ); diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 6d3176c7fb1..d090631b9c4 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -4,10 +4,18 @@ #include #include #include +#include #include namespace eosio { namespace chain { namespace resource_limits { +using resource_index_set = index_set< + resource_limits_index, + resource_usage_index, + resource_limits_state_index, + resource_limits_config_index +>; 
+ static_assert( config::rate_limiting_precision > 0, "config::rate_limiting_precision must be positive" ); static uint64_t update_elastic_limit(uint64_t current_limit, uint64_t average_usage, const elastic_limit_parameters& params) { @@ -40,10 +48,7 @@ void resource_limits_state_object::update_virtual_net_limit( const resource_limi } void resource_limits_manager::add_indices() { - _db.add_index(); - _db.add_index(); - _db.add_index(); - _db.add_index(); + resource_index_set::add_indices(_db); } void resource_limits_manager::initialize_database() { @@ -60,6 +65,37 @@ void resource_limits_manager::initialize_database() { }); } +void resource_limits_manager::calculate_integrity_hash( fc::sha256::encoder& enc ) const { + resource_index_set::walk_indices([this, &enc]( auto utils ){ + decltype(utils)::walk(_db, [&enc]( const auto &row ) { + fc::raw::pack(enc, row); + }); + }); +} + +void resource_limits_manager::add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { + resource_index_set::walk_indices([this, &snapshot]( auto utils ){ + snapshot->write_section([this]( auto& section ){ + decltype(utils)::walk(_db, [this, §ion]( const auto &row ) { + section.add_row(row, _db); + }); + }); + }); +} + +void resource_limits_manager::read_from_snapshot( const snapshot_reader_ptr& snapshot ) { + resource_index_set::walk_indices([this, &snapshot]( auto utils ){ + snapshot->read_section([this]( auto& section ) { + bool more = !section.empty(); + while(more) { + decltype(utils)::create(_db, [this, §ion, &more]( auto &row ) { + more = section.read_row(row, _db); + }); + } + }); + }); +} + void resource_limits_manager::initialize_account(const account_name& account) { _db.create([&]( resource_limits_object& bl ) { bl.owner = account; diff --git a/libraries/chain/snapshot.cpp b/libraries/chain/snapshot.cpp new file mode 100644 index 00000000000..fb119b40a3a --- /dev/null +++ b/libraries/chain/snapshot.cpp @@ -0,0 +1,339 @@ +#include +#include +#include + +namespace eosio { 
namespace chain { + +variant_snapshot_writer::variant_snapshot_writer(fc::mutable_variant_object& snapshot) +: snapshot(snapshot) +{ + snapshot.set("sections", fc::variants()); + snapshot.set("version", current_snapshot_version ); +} + +void variant_snapshot_writer::write_start_section( const std::string& section_name ) { + current_rows.clear(); + current_section_name = section_name; +} + +void variant_snapshot_writer::write_row( const detail::abstract_snapshot_row_writer& row_writer ) { + current_rows.emplace_back(row_writer.to_variant()); +} + +void variant_snapshot_writer::write_end_section( ) { + snapshot["sections"].get_array().emplace_back(fc::mutable_variant_object()("name", std::move(current_section_name))("rows", std::move(current_rows))); +} + +void variant_snapshot_writer::finalize() { + +} + +variant_snapshot_reader::variant_snapshot_reader(const fc::variant& snapshot) +:snapshot(snapshot) +,cur_row(0) +{ +} + +void variant_snapshot_reader::validate() const { + EOS_ASSERT(snapshot.is_object(), snapshot_validation_exception, + "Variant snapshot is not an object"); + const fc::variant_object& o = snapshot.get_object(); + + EOS_ASSERT(o.contains("version"), snapshot_validation_exception, + "Variant snapshot has no version"); + + const auto& version = o["version"]; + EOS_ASSERT(version.is_integer(), snapshot_validation_exception, + "Variant snapshot version is not an integer"); + + EOS_ASSERT(version.as_uint64() == (uint64_t)current_snapshot_version, snapshot_validation_exception, + "Variant snapshot is an unsuppored version. 
Expected : ${expected}, Got: ${actual}", + ("expected", current_snapshot_version)("actual",o["version"].as_uint64())); + + EOS_ASSERT(o.contains("sections"), snapshot_validation_exception, + "Variant snapshot has no sections"); + + const auto& sections = o["sections"]; + EOS_ASSERT(sections.is_array(), snapshot_validation_exception, "Variant snapshot sections is not an array"); + + const auto& section_array = sections.get_array(); + for( const auto& section: section_array ) { + EOS_ASSERT(section.is_object(), snapshot_validation_exception, "Variant snapshot section is not an object"); + + const auto& so = section.get_object(); + EOS_ASSERT(so.contains("name"), snapshot_validation_exception, + "Variant snapshot section has no name"); + + EOS_ASSERT(so["name"].is_string(), snapshot_validation_exception, + "Variant snapshot section name is not a string"); + + EOS_ASSERT(so.contains("rows"), snapshot_validation_exception, + "Variant snapshot section has no rows"); + + EOS_ASSERT(so["rows"].is_array(), snapshot_validation_exception, + "Variant snapshot section rows is not an array"); + } +} + +bool variant_snapshot_reader::has_section( const string& section_name ) { + const auto& sections = snapshot["sections"].get_array(); + for( const auto& section: sections ) { + if (section["name"].as_string() == section_name) { + return true; + } + } + + return false; +} + +void variant_snapshot_reader::set_section( const string& section_name ) { + const auto& sections = snapshot["sections"].get_array(); + for( const auto& section: sections ) { + if (section["name"].as_string() == section_name) { + cur_section = §ion.get_object(); + return; + } + } + + EOS_THROW(snapshot_exception, "Variant snapshot has no section named ${n}", ("n", section_name)); +} + +bool variant_snapshot_reader::read_row( detail::abstract_snapshot_row_reader& row_reader ) { + const auto& rows = (*cur_section)["rows"].get_array(); + row_reader.provide(rows.at(cur_row++)); + return cur_row < rows.size(); +} + 
+bool variant_snapshot_reader::empty ( ) { + const auto& rows = (*cur_section)["rows"].get_array(); + return rows.empty(); +} + +void variant_snapshot_reader::clear_section() { + cur_section = nullptr; + cur_row = 0; +} + +ostream_snapshot_writer::ostream_snapshot_writer(std::ostream& snapshot) +:snapshot(snapshot) +,header_pos(snapshot.tellp()) +,section_pos(-1) +,row_count(0) +{ + // write magic number + auto totem = magic_number; + snapshot.write((char*)&totem, sizeof(totem)); + + // write version + auto version = current_snapshot_version; + snapshot.write((char*)&version, sizeof(version)); +} + +void ostream_snapshot_writer::write_start_section( const std::string& section_name ) +{ + EOS_ASSERT(section_pos == std::streampos(-1), snapshot_exception, "Attempting to write a new section without closing the previous section"); + section_pos = snapshot.tellp(); + row_count = 0; + + uint64_t placeholder = std::numeric_limits::max(); + + // write a placeholder for the section size + snapshot.write((char*)&placeholder, sizeof(placeholder)); + + // write placeholder for row count + snapshot.write((char*)&placeholder, sizeof(placeholder)); + + // write the section name (null terminated) + snapshot.write(section_name.data(), section_name.size()); + snapshot.put(0); +} + +void ostream_snapshot_writer::write_row( const detail::abstract_snapshot_row_writer& row_writer ) { + auto restore = snapshot.tellp(); + try { + row_writer.write(snapshot); + } catch (...) 
{ + snapshot.seekp(restore); + throw; + } + row_count++; +} + +void ostream_snapshot_writer::write_end_section( ) { + auto restore = snapshot.tellp(); + + uint64_t section_size = restore - section_pos - sizeof(uint64_t); + + snapshot.seekp(section_pos); + + // write a the section size + snapshot.write((char*)§ion_size, sizeof(section_size)); + + // write the row count + snapshot.write((char*)&row_count, sizeof(row_count)); + + snapshot.seekp(restore); + + section_pos = std::streampos(-1); + row_count = 0; +} + +void ostream_snapshot_writer::finalize() { + uint64_t end_marker = std::numeric_limits::max(); + + // write a placeholder for the section size + snapshot.write((char*)&end_marker, sizeof(end_marker)); +} + +istream_snapshot_reader::istream_snapshot_reader(std::istream& snapshot) +:snapshot(snapshot) +,header_pos(snapshot.tellg()) +,num_rows(0) +,cur_row(0) +{ + +} + +void istream_snapshot_reader::validate() const { + // make sure to restore the read pos + auto restore_pos = fc::make_scoped_exit([this,pos=snapshot.tellg(),ex=snapshot.exceptions()](){ + snapshot.seekg(pos); + snapshot.exceptions(ex); + }); + + snapshot.exceptions(std::istream::failbit|std::istream::eofbit); + + try { + // validate totem + auto expected_totem = ostream_snapshot_writer::magic_number; + decltype(expected_totem) actual_totem; + snapshot.read((char*)&actual_totem, sizeof(actual_totem)); + EOS_ASSERT(actual_totem == expected_totem, snapshot_exception, + "Binary snapshot has unexpected magic number!"); + + // validate version + auto expected_version = current_snapshot_version; + decltype(expected_version) actual_version; + snapshot.read((char*)&actual_version, sizeof(actual_version)); + EOS_ASSERT(actual_version == expected_version, snapshot_exception, + "Binary snapshot is an unsuppored version. 
Expected : ${expected}, Got: ${actual}", + ("expected", expected_version)("actual", actual_version)); + + while (validate_section()) {} + } catch( const std::exception& e ) { \ + snapshot_exception fce(FC_LOG_MESSAGE( warn, "Binary snapshot validation threw IO exception (${what})",("what",e.what()))); + throw fce; + } +} + +bool istream_snapshot_reader::validate_section() const { + uint64_t section_size = 0; + snapshot.read((char*)§ion_size,sizeof(section_size)); + + // stop when we see the end marker + if (section_size == std::numeric_limits::max()) { + return false; + } + + // seek past the section + snapshot.seekg(snapshot.tellg() + std::streamoff(section_size)); + + return true; +} + +bool istream_snapshot_reader::has_section( const string& section_name ) { + auto restore_pos = fc::make_scoped_exit([this,pos=snapshot.tellg()](){ + snapshot.seekg(pos); + }); + + const std::streamoff header_size = sizeof(ostream_snapshot_writer::magic_number) + sizeof(current_snapshot_version); + + auto next_section_pos = header_pos + header_size; + + while (true) { + snapshot.seekg(next_section_pos); + uint64_t section_size = 0; + snapshot.read((char*)§ion_size,sizeof(section_size)); + if (section_size == std::numeric_limits::max()) { + break; + } + + next_section_pos = snapshot.tellg() + std::streamoff(section_size); + + uint64_t ignore = 0; + snapshot.read((char*)&ignore,sizeof(ignore)); + + bool match = true; + for(auto c : section_name) { + if(snapshot.get() != c) { + match = false; + break; + } + } + + if (match && snapshot.get() == 0) { + return true; + } + } + + return false; +} + +void istream_snapshot_reader::set_section( const string& section_name ) { + auto restore_pos = fc::make_scoped_exit([this,pos=snapshot.tellg()](){ + snapshot.seekg(pos); + }); + + const std::streamoff header_size = sizeof(ostream_snapshot_writer::magic_number) + sizeof(current_snapshot_version); + + auto next_section_pos = header_pos + header_size; + + while (true) { + 
snapshot.seekg(next_section_pos); + uint64_t section_size = 0; + snapshot.read((char*)§ion_size,sizeof(section_size)); + if (section_size == std::numeric_limits::max()) { + break; + } + + next_section_pos = snapshot.tellg() + std::streamoff(section_size); + + uint64_t row_count = 0; + snapshot.read((char*)&row_count,sizeof(row_count)); + + bool match = true; + for(auto c : section_name) { + if(snapshot.get() != c) { + match = false; + break; + } + } + + if (match && snapshot.get() == 0) { + cur_row = 0; + num_rows = row_count; + + // leave the stream at the right point + restore_pos.cancel(); + return; + } + } + + EOS_THROW(snapshot_exception, "Binary snapshot has no section named ${n}", ("n", section_name)); +} + +bool istream_snapshot_reader::read_row( detail::abstract_snapshot_row_reader& row_reader ) { + row_reader.provide(snapshot); + return ++cur_row < num_rows; +} + +bool istream_snapshot_reader::empty ( ) { + return num_rows == 0; +} + +void istream_snapshot_reader::clear_section() { + num_rows = 0; + cur_row = 0; +} + +}} \ No newline at end of file diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index dd58f0364ec..476a78d982b 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -7,8 +7,144 @@ #include #include +#pragma push_macro("N") +#undef N +#include +#include +#include +#include +#include +#include +#pragma pop_macro("N") + +#include + namespace eosio { namespace chain { +namespace bacc = boost::accumulators; + + struct deadline_timer_verify { + deadline_timer_verify() { + //keep longest first in list. 
You're effectively going to take test_intervals[0]*sizeof(test_intervals[0]) + //time to do the the "calibration" + int test_intervals[] = {50000, 10000, 5000, 1000, 500, 100, 50, 10}; + + struct sigaction act; + sigemptyset(&act.sa_mask); + act.sa_handler = timer_hit; + act.sa_flags = 0; + if(sigaction(SIGALRM, &act, NULL)) + return; + + sigset_t alrm; + sigemptyset(&alrm); + sigaddset(&alrm, SIGALRM); + int dummy; + + for(int& interval : test_intervals) { + unsigned int loops = test_intervals[0]/interval; + + for(unsigned int i = 0; i < loops; ++i) { + struct itimerval enable = {{0, 0}, {0, interval}}; + hit = 0; + auto start = std::chrono::high_resolution_clock::now(); + if(setitimer(ITIMER_REAL, &enable, NULL)) + return; + while(!hit) {} + auto end = std::chrono::high_resolution_clock::now(); + int timer_slop = std::chrono::duration_cast(end-start).count() - interval; + + //since more samples are run for the shorter expirations, weigh the longer expirations accordingly. This + //helps to make a few results more fair. Two such examples: AWS c4&i5 xen instances being rather stable + //down to 100us but then struggling with 50us and 10us. MacOS having performance that seems to correlate + //with expiry length; that is, long expirations have high error, short expirations have low error. + //That said, for these platforms, a tighter tolerance may possibly be achieved by taking performance + //metrics in mulitple bins and appliying the slop based on which bin a deadline resides in. Not clear + //if that's worth the extra complexity at this point. 
+ samples(timer_slop, bacc::weight = interval/(float)test_intervals[0]); + } + } + timer_overhead = bacc::mean(samples) + sqrt(bacc::variance(samples))*2; //target 95% of expirations before deadline + use_deadline_timer = timer_overhead < 1000; + + act.sa_handler = SIG_DFL; + sigaction(SIGALRM, &act, NULL); + } + + static void timer_hit(int) { + hit = 1; + } + static volatile sig_atomic_t hit; + + bacc::accumulator_set, float> samples; + bool use_deadline_timer = false; + int timer_overhead; + }; + volatile sig_atomic_t deadline_timer_verify::hit; + static deadline_timer_verify deadline_timer_verification; + + deadline_timer::deadline_timer() { + if(initialized) + return; + initialized = true; + + #define TIMER_STATS_FORMAT "min:${min}us max:${max}us mean:${mean}us stddev:${stddev}us" + #define TIMER_STATS \ + ("min", bacc::min(deadline_timer_verification.samples))("max", bacc::max(deadline_timer_verification.samples)) \ + ("mean", (int)bacc::mean(deadline_timer_verification.samples))("stddev", (int)sqrt(bacc::variance(deadline_timer_verification.samples))) \ + ("t", deadline_timer_verification.timer_overhead) + + if(deadline_timer_verification.use_deadline_timer) { + struct sigaction act; + act.sa_handler = timer_expired; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + if(sigaction(SIGALRM, &act, NULL) == 0) { + ilog("Using ${t}us deadline timer for checktime: " TIMER_STATS_FORMAT, TIMER_STATS); + return; + } + } + + wlog("Using polled checktime; deadline timer too inaccurate: " TIMER_STATS_FORMAT, TIMER_STATS); + deadline_timer_verification.use_deadline_timer = false; //set in case sigaction() fails above + } + + void deadline_timer::start(fc::time_point tp) { + if(tp == fc::time_point::maximum()) { + expired = 0; + return; + } + if(!deadline_timer_verification.use_deadline_timer) { + expired = 1; + return; + } + microseconds x = tp.time_since_epoch() - fc::time_point::now().time_since_epoch(); + if(x.count() <= deadline_timer_verification.timer_overhead) + 
expired = 1; + else { + struct itimerval enable = {{0, 0}, {0, (int)x.count()-deadline_timer_verification.timer_overhead}}; + expired = 0; + expired |= !!setitimer(ITIMER_REAL, &enable, NULL); + } + } + + void deadline_timer::stop() { + if(expired) + return; + struct itimerval disable = {{0, 0}, {0, 0}}; + setitimer(ITIMER_REAL, &disable, NULL); + } + + deadline_timer::~deadline_timer() { + stop(); + } + + void deadline_timer::timer_expired(int) { + expired = 1; + } + volatile sig_atomic_t deadline_timer::expired = 0; + bool deadline_timer::initialized = false; + transaction_context::transaction_context( controller& c, const signed_transaction& t, const transaction_id_type& trx_id, @@ -23,7 +159,7 @@ namespace eosio { namespace chain { ,pseudo_start(s) { if (!c.skip_db_sessions()) { - undo_session = c.db().start_undo_session(true); + undo_session = c.mutable_db().start_undo_session(true); } trace->id = id; trace->block_num = c.pending_block_state()->block_num; @@ -132,6 +268,11 @@ namespace eosio { namespace chain { checktime(); // Fail early if deadline has already been exceeded + if(control.skip_trx_checks()) + _deadline_timer.expired = 0; + else + _deadline_timer.start(_deadline); + is_initialized = true; } @@ -292,34 +433,34 @@ namespace eosio { namespace chain { } void transaction_context::checktime()const { - if (!control.skip_trx_checks()) { - auto now = fc::time_point::now(); - if( BOOST_UNLIKELY( now > _deadline ) ) { - // edump((now-start)(now-pseudo_start)); - if( explicit_billed_cpu_time || deadline_exception_code == deadline_exception::code_value ) { - EOS_THROW( deadline_exception, "deadline exceeded", ("now", now)("deadline", _deadline)("start", start) ); - } else if( deadline_exception_code == block_cpu_usage_exceeded::code_value ) { - EOS_THROW( block_cpu_usage_exceeded, - "not enough time left in block to complete executing transaction", - ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); - } else if( 
deadline_exception_code == tx_cpu_usage_exceeded::code_value ) { - if (cpu_limit_due_to_greylist) { - EOS_THROW( greylist_cpu_usage_exceeded, - "greylisted transaction was executing for too long", - ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); - } else { - EOS_THROW( tx_cpu_usage_exceeded, - "transaction was executing for too long", - ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); - } - } else if( deadline_exception_code == leeway_deadline_exception::code_value ) { - EOS_THROW( leeway_deadline_exception, - "the transaction was unable to complete by deadline, " - "but it is possible it could have succeeded if it were allowed to run to completion", - ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); + if(BOOST_LIKELY(_deadline_timer.expired == false)) + return; + auto now = fc::time_point::now(); + if( BOOST_UNLIKELY( now > _deadline ) ) { + // edump((now-start)(now-pseudo_start)); + if( explicit_billed_cpu_time || deadline_exception_code == deadline_exception::code_value ) { + EOS_THROW( deadline_exception, "deadline exceeded", ("now", now)("deadline", _deadline)("start", start) ); + } else if( deadline_exception_code == block_cpu_usage_exceeded::code_value ) { + EOS_THROW( block_cpu_usage_exceeded, + "not enough time left in block to complete executing transaction", + ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); + } else if( deadline_exception_code == tx_cpu_usage_exceeded::code_value ) { + if (cpu_limit_due_to_greylist) { + EOS_THROW( greylist_cpu_usage_exceeded, + "greylisted transaction was executing for too long", + ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); + } else { + EOS_THROW( tx_cpu_usage_exceeded, + "transaction was executing for too long", + ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - 
pseudo_start) ); } - EOS_ASSERT( false, transaction_exception, "unexpected deadline exception code" ); + } else if( deadline_exception_code == leeway_deadline_exception::code_value ) { + EOS_THROW( leeway_deadline_exception, + "the transaction was unable to complete by deadline, " + "but it is possible it could have succeeded if it were allowed to run to completion", + ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); } + EOS_ASSERT( false, transaction_exception, "unexpected deadline exception code" ); } } @@ -330,6 +471,7 @@ namespace eosio { namespace chain { billed_time = now - pseudo_start; deadline_exception_code = deadline_exception::code_value; // Other timeout exceptions cannot be thrown while billable timer is paused. pseudo_start = fc::time_point(); + _deadline_timer.stop(); } void transaction_context::resume_billing_timer() { @@ -344,6 +486,7 @@ namespace eosio { namespace chain { _deadline = deadline; deadline_exception_code = deadline_exception::code_value; } + _deadline_timer.start(_deadline); } void transaction_context::validate_cpu_usage_to_bill( int64_t billed_us, bool check_minimum )const { @@ -429,14 +572,7 @@ namespace eosio { namespace chain { acontext.context_free = context_free; acontext.receiver = receiver; - try { - acontext.exec(); - } catch( ... 
) { - trace = move(acontext.trace); - throw; - } - - trace = move(acontext.trace); + acontext.exec( trace ); } void transaction_context::schedule_transaction() { @@ -451,7 +587,7 @@ namespace eosio { namespace chain { auto first_auth = trx.first_authorizor(); uint32_t trx_size = 0; - const auto& cgto = control.db().create( [&]( auto& gto ) { + const auto& cgto = control.mutable_db().create( [&]( auto& gto ) { gto.trx_id = id; gto.payer = first_auth; gto.sender = account_name(); /// delayed transactions have no sender @@ -467,7 +603,7 @@ namespace eosio { namespace chain { void transaction_context::record_transaction( const transaction_id_type& id, fc::time_point_sec expire ) { try { - control.db().create([&](transaction_object& transaction) { + control.mutable_db().create([&](transaction_object& transaction) { transaction.trx_id = id; transaction.expiration = expire; }); diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index ac580045277..0ea29d3c6d8 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -57,6 +57,10 @@ namespace eosio { namespace chain { my->get_instantiated_module(code_id, code, context.trx_context)->apply(context); } + void wasm_interface::exit() { + my->runtime_interface->immediately_exit_currently_running_module(); + } + wasm_instantiated_module_interface::~wasm_instantiated_module_interface() {} wasm_runtime_interface::~wasm_runtime_interface() {} @@ -948,7 +952,7 @@ class context_free_system_api : public context_aware_api { } void eosio_exit(int32_t code) { - throw wasm_exit{code}; + context.control.get_wasm_interface().exit(); } }; @@ -1911,8 +1915,6 @@ std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime) { in >> s; if (s == "wavm") runtime = eosio::chain::wasm_interface::vm_type::wavm; - else if (s == "binaryen") - runtime = eosio::chain::wasm_interface::vm_type::binaryen; else if (s == "wabt") runtime = 
eosio::chain::wasm_interface::vm_type::wabt; else diff --git a/libraries/chain/webassembly/binaryen.cpp b/libraries/chain/webassembly/binaryen.cpp deleted file mode 100644 index ca3138db806..00000000000 --- a/libraries/chain/webassembly/binaryen.cpp +++ /dev/null @@ -1,105 +0,0 @@ -#include -#include - -#include - - -namespace eosio { namespace chain { namespace webassembly { namespace binaryen { - -class binaryen_instantiated_module : public wasm_instantiated_module_interface { - public: - binaryen_instantiated_module(linear_memory_type& shared_linear_memory, - std::vector initial_memory, - call_indirect_table_type table, - import_lut_type import_lut, - unique_ptr&& module) : - _shared_linear_memory(shared_linear_memory), - _initial_memory(initial_memory), - _table(forward(table)), - _import_lut(forward(import_lut)), - _module(forward(module)) { - - } - - void apply(apply_context& context) override { - LiteralList args = {Literal(uint64_t(context.receiver)), - Literal(uint64_t(context.act.account)), - Literal(uint64_t(context.act.name))}; - call("apply", args, context); - } - - private: - linear_memory_type& _shared_linear_memory; - std::vector _initial_memory; - call_indirect_table_type _table; - import_lut_type _import_lut; - unique_ptr _module; - - void call(const string& entry_point, LiteralList& args, apply_context& context){ - const unsigned initial_memory_size = _module->memory.initial*Memory::kPageSize; - interpreter_interface local_interface(_shared_linear_memory, _table, _import_lut, initial_memory_size, context); - - //zero out the initial pages - memset(_shared_linear_memory.data, 0, initial_memory_size); - //copy back in the initial data - memcpy(_shared_linear_memory.data, _initial_memory.data(), _initial_memory.size()); - - //be aware that construction of the ModuleInstance implictly fires the start function - ModuleInstance instance(*_module.get(), &local_interface); - instance.callExport(Name(entry_point), args); - } -}; - 
-binaryen_runtime::binaryen_runtime() { - -} - -std::unique_ptr binaryen_runtime::instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) { - try { - vector code(code_bytes, code_bytes + code_size); - unique_ptr module(new Module()); - WasmBinaryBuilder builder(*module, code, false); - builder.read(); - - EOS_ASSERT(module->memory.initial * Memory::kPageSize <= wasm_constraints::maximum_linear_memory, binaryen_exception, "exceeds maximum linear memory"); - - // create a temporary globals to use - TrivialGlobalManager globals; - for (auto& global : module->globals) { - globals[global->name] = ConstantExpressionRunner(globals).visit(global->init).value; - } - - call_indirect_table_type table; - table.resize(module->table.initial); - for (auto& segment : module->table.segments) { - Address offset = ConstantExpressionRunner(globals).visit(segment.offset).value.geti32(); - EOS_ASSERT( uint64_t(offset) + segment.data.size() <= module->table.initial, binaryen_exception, ""); - for (size_t i = 0; i != segment.data.size(); ++i) { - table[offset + i] = segment.data[i]; - } - } - - // initialize the import lut - import_lut_type import_lut; - import_lut.reserve(module->imports.size()); - for (auto& import : module->imports) { - std::string full_name = string(import->module.c_str()) + "." 
+ string(import->base.c_str()); - if (import->kind == ExternalKind::Function) { - auto& intrinsic_map = intrinsic_registrator::get_map(); - auto intrinsic_itr = intrinsic_map.find(full_name); - if (intrinsic_itr != intrinsic_map.end()) { - import_lut.emplace(make_pair((uintptr_t)import.get(), intrinsic_itr->second)); - continue; - } - } - - EOS_ASSERT( !"unresolvable", wasm_exception, "${module}.${export} unresolveable", ("module",import->module.c_str())("export",import->base.c_str()) ); - } - - return std::make_unique(_memory, initial_memory, move(table), move(import_lut), move(module)); - } catch (const ParseException &e) { - FC_THROW_EXCEPTION(wasm_execution_error, "Error building interpreter: ${s}", ("s", e.text)); - } -} - -}}}} diff --git a/libraries/chain/webassembly/wabt.cpp b/libraries/chain/webassembly/wabt.cpp index bf5e1c9d6c8..2d45fa4ee01 100644 --- a/libraries/chain/webassembly/wabt.cpp +++ b/libraries/chain/webassembly/wabt.cpp @@ -96,4 +96,8 @@ std::unique_ptr wabt_runtime::instantiate_mo return std::make_unique(std::move(env), initial_memory, instantiated_module); } +void wabt_runtime::immediately_exit_currently_running_module() { + throw wasm_exit(); +} + }}}} diff --git a/libraries/chain/webassembly/wavm.cpp b/libraries/chain/webassembly/wavm.cpp index 9844cb2373f..e614398c74e 100644 --- a/libraries/chain/webassembly/wavm.cpp +++ b/libraries/chain/webassembly/wavm.cpp @@ -127,4 +127,12 @@ std::unique_ptr wavm_runtime::instantiate_mo return std::make_unique(instance, std::move(module), initial_memory); } +void wavm_runtime::immediately_exit_currently_running_module() { +#ifdef _WIN32 + throw wasm_exit(); +#else + Platform::immediately_exit(); +#endif +} + }}}} diff --git a/libraries/chainbase b/libraries/chainbase index 4724baf2095..8ca96ad6b18 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 4724baf2095cdc1bb1722254874b51070adf0e74 +Subproject commit 8ca96ad6b18709d65a7d1f67f8893978f25babcf diff --git 
a/libraries/fc b/libraries/fc index f085773d29e..3e5ce84852f 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit f085773d29ecc457894170fae740b726465f1382 +Subproject commit 3e5ce84852f32dce576f2b8d30365326b71c91e2 diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 2e609616615..9a4f4094330 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -83,10 +83,10 @@ namespace eosio { namespace testing { virtual ~base_tester() {}; void init(bool push_genesis = true, db_read_mode read_mode = db_read_mode::SPECULATIVE); - void init(controller::config config); + void init(controller::config config, const snapshot_reader_ptr& snapshot = nullptr); void close(); - void open(); + void open( const snapshot_reader_ptr& snapshot ); bool is_same_chain( base_tester& other ); virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ ) = 0; @@ -264,6 +264,10 @@ namespace eosio { namespace testing { return true; } + const controller::config& get_config() const { + return cfg; + } + protected: signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false, uint32_t skip_flag = 0 ); void _start_block(fc::time_point block_time); @@ -332,9 +336,7 @@ namespace eosio { namespace testing { vcfg.genesis.initial_key = get_public_key( config::system_account_name, "active" ); for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { - if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--binaryen")) - vcfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; - else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) + if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) 
vcfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) vcfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 4e947083763..b116d1a4969 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -96,26 +96,22 @@ namespace eosio { namespace testing { cfg.genesis.initial_key = get_public_key( config::system_account_name, "active" ); for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { - if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--binaryen")) - cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; - else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) + if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) cfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; - else - cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; } - open(); + open(nullptr); if (push_genesis) push_genesis_block(); } - void base_tester::init(controller::config config) { + void base_tester::init(controller::config config, const snapshot_reader_ptr& snapshot) { cfg = config; - open(); + open(snapshot); } @@ -125,10 +121,10 @@ namespace eosio { namespace testing { } - void base_tester::open() { + void base_tester::open( const snapshot_reader_ptr& snapshot) { control.reset( new controller(cfg) ); control->add_indices(); - control->startup(); + control->startup(snapshot); chain_transactions.clear(); control->accepted_block.connect([this]( const block_state_ptr& block_state ){ FC_ASSERT( block_state->block ); diff --git a/libraries/wasm-jit/Include/Platform/Platform.h b/libraries/wasm-jit/Include/Platform/Platform.h 
index 5f33133a5fb..8d8769d4834 100644 --- a/libraries/wasm-jit/Include/Platform/Platform.h +++ b/libraries/wasm-jit/Include/Platform/Platform.h @@ -134,6 +134,7 @@ namespace Platform Uptr& outTrapOperand, const std::function& thunk ); + PLATFORM_API void immediately_exit(); // // Threading diff --git a/libraries/wasm-jit/Source/Platform/POSIX.cpp b/libraries/wasm-jit/Source/Platform/POSIX.cpp index 8dac984bb2c..4305381b39f 100644 --- a/libraries/wasm-jit/Source/Platform/POSIX.cpp +++ b/libraries/wasm-jit/Source/Platform/POSIX.cpp @@ -276,6 +276,10 @@ namespace Platform return signalType; } + void immediately_exit() { + siglongjmp(signalReturnEnv,1); + } + CallStack captureCallStack(Uptr numOmittedFramesFromTop) { #if 0 diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md index e69102c66b8..db7d2139d83 100644 --- a/plugins/COMMUNITY.md +++ b/plugins/COMMUNITY.md @@ -6,10 +6,12 @@ Third parties are encouraged to make pull requests to this file (`develop` branc | Description | URL | | ----------- | --- | -| Watch for specific actions and send them to an HTTP URL | https://github.com/eosauthority/eosio-watcher-plugin | +| BP Heartbeat | https://github.com/bancorprotocol/eos-producer-heartbeat-plugin | +| ElasticSearch | https://github.com/EOSLaoMao/elasticsearch_plugin | | Kafka | https://github.com/TP-Lab/kafka_plugin | +| MySQL | https://github.com/eosBLACK/eosio_mysqldb_plugin | | SQL | https://github.com/asiniscalchi/eosio_sql_plugin | -| ElasticSearch | https://github.com/EOSLaoMao/elasticsearch_plugin | +| Watch for specific actions and send them to an HTTP URL | https://github.com/eosauthority/eosio-watcher-plugin | | ZMQ / history | https://github.com/cc32d9/eos_zmq_plugin | ## DISCLAIMER: diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 31e576ae4d8..f07e48a7f04 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ 
b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -37,7 +37,7 @@ struct async_result_visitor : public fc::visitor { #define CALL(api_name, api_handle, api_namespace, call_name, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ - [this, api_handle](string, string body, url_response_callback cb) mutable { \ + [api_handle](string, string body, url_response_callback cb) mutable { \ api_handle.validate(); \ try { \ if (body.empty()) body = "{}"; \ @@ -50,7 +50,7 @@ struct async_result_visitor : public fc::visitor { #define CALL_ASYNC(api_name, api_handle, api_namespace, call_name, call_result, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ - [this, api_handle](string, string body, url_response_callback cb) mutable { \ + [api_handle](string, string body, url_response_callback cb) mutable { \ if (body.empty()) body = "{}"; \ api_handle.validate(); \ api_handle.call_name(fc::json::from_string(body).as(),\ @@ -79,7 +79,10 @@ void chain_api_plugin::plugin_startup() { auto ro_api = app().get_plugin().get_read_only_api(); auto rw_api = app().get_plugin().get_read_write_api(); - app().get_plugin().add_api({ + auto& _http_plugin = app().get_plugin(); + ro_api.set_shorten_abi_errors( !_http_plugin.verbose_errors() ); + + _http_plugin.add_api({ CHAIN_RO_CALL(get_info, 200l), CHAIN_RO_CALL(get_block, 200), CHAIN_RO_CALL(get_block_header_state, 200), diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 12c5f3137b7..92b297ab727 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include @@ -167,6 +168,7 @@ class chain_plugin_impl { //txn_msg_rate_limits rate_limits; fc::optional wasm_runtime; fc::microseconds abi_serializer_max_time_ms; + fc::optional snapshot_path; // retained references to channels for easy publication @@ -213,7 +215,7 @@ void 
chain_plugin::set_program_options(options_description& cli, options_descrip ("blocks-dir", bpo::value()->default_value("blocks"), "the location of the blocks directory (absolute path or relative to application data dir)") ("checkpoint", bpo::value>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.") - ("wasm-runtime", bpo::value()->value_name("wavm/binaryen/wabt"), "Override default WASM runtime") + ("wasm-runtime", bpo::value()->value_name("wavm/wabt"), "Override default WASM runtime") ("abi-serializer-max-time-ms", bpo::value()->default_value(config::default_abi_serializer_max_time_ms), "Override default maximum ABI serialization time allowed in ms") ("chain-state-db-size-mb", bpo::value()->default_value(config::default_state_size / (1024 * 1024)), "Maximum size (in MiB) of the chain state database") @@ -285,6 +287,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip ("export-reversible-blocks", bpo::value(), "export reversible block database in portable format into specified file and then exit") ("trusted-producer", bpo::value>()->composing(), "Indicate a producer whose blocks headers signed by it will be fully validated, but transactions in those validated blocks will be trusted.") + ("snapshot", bpo::value(), "File to read Snapshot State from") ; } @@ -531,44 +534,76 @@ void chain_plugin::plugin_initialize(const variables_map& options) { wlog("The --import-reversible-blocks option should be used by itself."); } - if( options.count( "genesis-json" )) { - EOS_ASSERT( !fc::exists( my->blocks_dir / "blocks.log" ), - plugin_config_exception, - "Genesis state can only be set on a fresh blockchain." 
); - - auto genesis_file = options.at( "genesis-json" ).as(); - if( genesis_file.is_relative()) { - genesis_file = bfs::current_path() / genesis_file; + if (options.count( "snapshot" )) { + my->snapshot_path = options.at( "snapshot" ).as(); + EOS_ASSERT( fc::exists(*my->snapshot_path), plugin_config_exception, + "Cannot load snapshot, ${name} does not exist", ("name", my->snapshot_path->generic_string()) ); + + // recover genesis information from the snapshot + auto infile = std::ifstream(my->snapshot_path->generic_string(), (std::ios::in | std::ios::binary)); + auto reader = std::make_shared(infile); + reader->validate(); + reader->read_section([this]( auto §ion ){ + section.read_row(my->chain_config->genesis); + }); + infile.close(); + + EOS_ASSERT( options.count( "genesis-json" ) == 0 && options.count( "genesis-timestamp" ) == 0, + plugin_config_exception, + "--snapshot is incompatible with --genesis-json and --genesis-timestamp as the snapshot contains genesis information"); + + auto shared_mem_path = my->chain_config->state_dir / "shared_memory.bin"; + EOS_ASSERT( !fc::exists(shared_mem_path), + plugin_config_exception, + "Snapshot can only be used to initialize an empty database." ); + + if( fc::is_regular_file( my->blocks_dir / "blocks.log" )) { + auto log_genesis = block_log::extract_genesis_state(my->blocks_dir); + EOS_ASSERT( log_genesis.compute_chain_id() == my->chain_config->genesis.compute_chain_id(), + plugin_config_exception, + "Genesis information in blocks.log does not match genesis information in the snapshot"); } - EOS_ASSERT( fc::is_regular_file( genesis_file ), - plugin_config_exception, - "Specified genesis file '${genesis}' does not exist.", - ("genesis", genesis_file.generic_string())); + } else { + if( options.count( "genesis-json" )) { + EOS_ASSERT( !fc::exists( my->blocks_dir / "blocks.log" ), + plugin_config_exception, + "Genesis state can only be set on a fresh blockchain." 
); + + auto genesis_file = options.at( "genesis-json" ).as(); + if( genesis_file.is_relative()) { + genesis_file = bfs::current_path() / genesis_file; + } - my->chain_config->genesis = fc::json::from_file( genesis_file ).as(); + EOS_ASSERT( fc::is_regular_file( genesis_file ), + plugin_config_exception, + "Specified genesis file '${genesis}' does not exist.", + ("genesis", genesis_file.generic_string())); - ilog( "Using genesis state provided in '${genesis}'", ("genesis", genesis_file.generic_string())); + my->chain_config->genesis = fc::json::from_file( genesis_file ).as(); - if( options.count( "genesis-timestamp" )) { - my->chain_config->genesis.initial_timestamp = calculate_genesis_timestamp( - options.at( "genesis-timestamp" ).as()); - } + ilog( "Using genesis state provided in '${genesis}'", ("genesis", genesis_file.generic_string())); - wlog( "Starting up fresh blockchain with provided genesis state." ); - } else if( options.count( "genesis-timestamp" )) { - EOS_ASSERT( !fc::exists( my->blocks_dir / "blocks.log" ), - plugin_config_exception, - "Genesis state can only be set on a fresh blockchain." ); + if( options.count( "genesis-timestamp" )) { + my->chain_config->genesis.initial_timestamp = calculate_genesis_timestamp( + options.at( "genesis-timestamp" ).as()); + } - my->chain_config->genesis.initial_timestamp = calculate_genesis_timestamp( - options.at( "genesis-timestamp" ).as()); + wlog( "Starting up fresh blockchain with provided genesis state." ); + } else if( options.count( "genesis-timestamp" )) { + EOS_ASSERT( !fc::exists( my->blocks_dir / "blocks.log" ), + plugin_config_exception, + "Genesis state can only be set on a fresh blockchain." ); - wlog( "Starting up fresh blockchain with default genesis state but with adjusted genesis timestamp." 
); - } else if( fc::is_regular_file( my->blocks_dir / "blocks.log" )) { - my->chain_config->genesis = block_log::extract_genesis_state( my->blocks_dir ); - } else { - wlog( "Starting up fresh blockchain with default genesis state." ); + my->chain_config->genesis.initial_timestamp = calculate_genesis_timestamp( + options.at( "genesis-timestamp" ).as()); + + wlog( "Starting up fresh blockchain with default genesis state but with adjusted genesis timestamp." ); + } else if( fc::is_regular_file( my->blocks_dir / "blocks.log" )) { + my->chain_config->genesis = block_log::extract_genesis_state( my->blocks_dir ); + } else { + wlog( "Starting up fresh blockchain with default genesis state." ); + } } if ( options.count("read-mode") ) { @@ -653,7 +688,14 @@ void chain_plugin::plugin_initialize(const variables_map& options) { void chain_plugin::plugin_startup() { try { try { - my->chain->startup(); + if (my->snapshot_path) { + auto infile = std::ifstream(my->snapshot_path->generic_string(), (std::ios::in | std::ios::binary)); + auto reader = std::make_shared(infile); + my->chain->startup(reader); + infile.close(); + } else { + my->chain->startup(); + } } catch (const database_guard_exception& e) { log_guard_exception(e); // make sure to properly close the db @@ -1221,7 +1263,7 @@ static float64_t to_softfloat64( double d ) { return *reinterpret_cast(&d); } -static fc::variant get_global_row( const database& db, const abi_def& abi, const abi_serializer& abis, const fc::microseconds& abi_serializer_max_time_ms ) { +fc::variant get_global_row( const database& db, const abi_def& abi, const abi_serializer& abis, const fc::microseconds& abi_serializer_max_time_ms, bool shorten_abi_errors ) { const auto table_type = get_table_type(abi, N(global)); EOS_ASSERT(table_type == read_only::KEYi64, chain::contract_table_query_exception, "Invalid table type ${type} for table global", ("type",table_type)); @@ -1234,7 +1276,7 @@ static fc::variant get_global_row( const database& db, const 
abi_def& abi, const vector data; read_only::copy_inline_row(*it, data); - return abis.binary_to_variant(abis.get_table_type(N(global)), data, abi_serializer_max_time_ms); + return abis.binary_to_variant(abis.get_table_type(N(global)), data, abi_serializer_max_time_ms, shorten_abi_errors ); } read_only::get_producers_result read_only::get_producers( const read_only::get_producers_params& p ) const { @@ -1279,12 +1321,12 @@ read_only::get_producers_result read_only::get_producers( const read_only::get_p } copy_inline_row(*kv_index.find(boost::make_tuple(table_id->id, it->primary_key)), data); if (p.json) - result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(N(producers)), data, abi_serializer_max_time)); + result.rows.emplace_back( abis.binary_to_variant( abis.get_table_type(N(producers)), data, abi_serializer_max_time, shorten_abi_errors ) ); else result.rows.emplace_back(fc::variant(data)); } - result.total_producer_vote_weight = get_global_row(d, abi, abis, abi_serializer_max_time)["total_producer_vote_weight"].as_double(); + result.total_producer_vote_weight = get_global_row(d, abi, abis, abi_serializer_max_time, shorten_abi_errors)["total_producer_vote_weight"].as_double(); return result; } @@ -1650,6 +1692,9 @@ read_only::get_account_results read_only::get_account( const get_account_params& auto core_symbol = extract_core_symbol(); + if (params.expected_core_symbol.valid()) + core_symbol = *(params.expected_core_symbol); + const auto* t_id = d.find(boost::make_tuple( token_code, params.account_name, N(accounts) )); if( t_id != nullptr ) { const auto &idx = d.get_index(); @@ -1672,7 +1717,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); - result.total_resources = abis.binary_to_variant( "user_resources", data, abi_serializer_max_time ); + result.total_resources = abis.binary_to_variant( "user_resources", data, abi_serializer_max_time, 
shorten_abi_errors ); } } @@ -1683,7 +1728,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); - result.self_delegated_bandwidth = abis.binary_to_variant( "delegated_bandwidth", data, abi_serializer_max_time ); + result.self_delegated_bandwidth = abis.binary_to_variant( "delegated_bandwidth", data, abi_serializer_max_time, shorten_abi_errors ); } } @@ -1694,7 +1739,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); - result.refund_request = abis.binary_to_variant( "refund_request", data, abi_serializer_max_time ); + result.refund_request = abis.binary_to_variant( "refund_request", data, abi_serializer_max_time, shorten_abi_errors ); } } @@ -1705,7 +1750,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); - result.voter_info = abis.binary_to_variant( "voter_info", data, abi_serializer_max_time ); + result.voter_info = abis.binary_to_variant( "voter_info", data, abi_serializer_max_time, shorten_abi_errors ); } } } @@ -1731,7 +1776,7 @@ read_only::abi_json_to_bin_result read_only::abi_json_to_bin( const read_only::a auto action_type = abis.get_action_type(params.action); EOS_ASSERT(!action_type.empty(), action_validate_exception, "Unknown action ${action} in contract ${contract}", ("action", params.action)("contract", params.code)); try { - result.binargs = abis.variant_to_binary(action_type, params.args, abi_serializer_max_time); + result.binargs = abis.variant_to_binary( action_type, params.args, abi_serializer_max_time, shorten_abi_errors ); } EOS_RETHROW_EXCEPTIONS(chain::invalid_action_args_exception, "'${args}' is invalid args for action '${action}' code '${code}'. 
expected '${proto}'", ("args", params.args)("action", params.action)("code", params.code)("proto", action_abi_to_variant(abi, action_type))) @@ -1748,7 +1793,7 @@ read_only::abi_bin_to_json_result read_only::abi_bin_to_json( const read_only::a abi_def abi; if( abi_serializer::to_abi(code_account.abi, abi) ) { abi_serializer abis( abi, abi_serializer_max_time ); - result.args = abis.binary_to_variant( abis.get_action_type( params.action ), params.binargs, abi_serializer_max_time ); + result.args = abis.binary_to_variant( abis.get_action_type( params.action ), params.binargs, abi_serializer_max_time, shorten_abi_errors ); } else { EOS_ASSERT(false, abi_not_found_exception, "No ABI found for ${contract}", ("contract", params.code)); } @@ -1783,7 +1828,7 @@ namespace detail { } chain::symbol read_only::extract_core_symbol()const { - symbol core_symbol; // Default to CORE_SYMBOL if the appropriate data structure cannot be found in the system contract table data + symbol core_symbol(0); // The following code makes assumptions about the contract deployed on eosio account (i.e. the system contract) and how it stores its data. 
const auto& d = db.db(); diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index e01813aa039..0b69a6af89a 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -36,6 +36,7 @@ namespace eosio { using fc::optional; using boost::container::flat_set; using chain::asset; + using chain::symbol; using chain::authority; using chain::account_name; using chain::action_name; @@ -68,6 +69,7 @@ uint64_t convert_to_type(const string& str, const string& desc); class read_only { const controller& db; const fc::microseconds abi_serializer_max_time; + bool shorten_abi_errors = true; public: static const string KEYi64; @@ -77,6 +79,8 @@ class read_only { void validate() const {} + void set_shorten_abi_errors( bool f ) { shorten_abi_errors = f; } + using get_info_params = empty; struct get_info_results { @@ -134,7 +138,8 @@ class read_only { }; struct get_account_params { - name account_name; + name account_name; + optional expected_core_symbol; }; get_account_results get_account( const get_account_params& params )const; @@ -434,7 +439,7 @@ class read_only { copy_inline_row(*itr2, data); if (p.json) { - result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(p.table), data, abi_serializer_max_time)); + result.rows.emplace_back( abis.binary_to_variant( abis.get_table_type(p.table), data, abi_serializer_max_time, shorten_abi_errors ) ); } else { result.rows.emplace_back(fc::variant(data)); } @@ -495,7 +500,7 @@ class read_only { copy_inline_row(*itr, data); if (p.json) { - result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(p.table), data, abi_serializer_max_time)); + result.rows.emplace_back( abis.binary_to_variant( abis.get_table_type(p.table), data, abi_serializer_max_time, shorten_abi_errors ) ); } else { result.rows.emplace_back(fc::variant(data)); } @@ -694,7 
+699,7 @@ FC_REFLECT( eosio::chain_apis::read_only::get_account_results, FC_REFLECT( eosio::chain_apis::read_only::get_code_results, (account_name)(code_hash)(wast)(wasm)(abi) ) FC_REFLECT( eosio::chain_apis::read_only::get_code_hash_results, (account_name)(code_hash) ) FC_REFLECT( eosio::chain_apis::read_only::get_abi_results, (account_name)(abi) ) -FC_REFLECT( eosio::chain_apis::read_only::get_account_params, (account_name) ) +FC_REFLECT( eosio::chain_apis::read_only::get_account_params, (account_name)(expected_core_symbol) ) FC_REFLECT( eosio::chain_apis::read_only::get_code_params, (account_name)(code_as_wasm) ) FC_REFLECT( eosio::chain_apis::read_only::get_code_hash_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_abi_params, (account_name) ) diff --git a/plugins/db_size_api_plugin/db_size_api_plugin.cpp b/plugins/db_size_api_plugin/db_size_api_plugin.cpp index 8dd5b50f48e..13b717c0789 100644 --- a/plugins/db_size_api_plugin/db_size_api_plugin.cpp +++ b/plugins/db_size_api_plugin/db_size_api_plugin.cpp @@ -36,7 +36,7 @@ void db_size_api_plugin::plugin_startup() { } db_size_stats db_size_api_plugin::get() { - chainbase::database& db = app().get_plugin().chain().db(); + const chainbase::database& db = app().get_plugin().chain().db(); db_size_stats ret; ret.free_bytes = db.get_segment_manager()->get_free_memory(); diff --git a/plugins/history_api_plugin/history_api_plugin.cpp b/plugins/history_api_plugin/history_api_plugin.cpp index 71f43701728..bd78dede086 100644 --- a/plugins/history_api_plugin/history_api_plugin.cpp +++ b/plugins/history_api_plugin/history_api_plugin.cpp @@ -21,7 +21,7 @@ void history_api_plugin::plugin_initialize(const variables_map&) {} #define CALL(api_name, api_handle, api_namespace, call_name) \ {std::string("/v1/" #api_name "/" #call_name), \ - [this, api_handle](string, string body, url_response_callback cb) mutable { \ + [api_handle](string, string body, url_response_callback cb) mutable { \ try { \ if 
(body.empty()) body = "{}"; \ auto result = api_handle.call_name(fc::json::from_string(body).as()); \ diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp index 3c3650b9e59..e3292672e44 100644 --- a/plugins/history_plugin/history_plugin.cpp +++ b/plugins/history_plugin/history_plugin.cpp @@ -206,7 +206,7 @@ namespace eosio { void record_account_action( account_name n, const base_action_trace& act ) { auto& chain = chain_plug->chain(); - auto& db = chain.db(); + chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) const auto& idx = db.get_index(); auto itr = idx.lower_bound( boost::make_tuple( name(n.value+1), 0 ) ); @@ -227,7 +227,7 @@ namespace eosio { void on_system_action( const action_trace& at ) { auto& chain = chain_plug->chain(); - auto& db = chain.db(); + chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) if( at.act.name == N(newaccount) ) { const auto create = at.act.data_as(); @@ -256,7 +256,7 @@ namespace eosio { if( filter( at ) ) { //idump((fc::json::to_pretty_string(at))); auto& chain = chain_plug->chain(); - auto& db = chain.db(); + chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) db.create( [&]( auto& aho ) { auto ps = fc::raw::pack_size( at ); @@ -344,10 +344,12 @@ namespace eosio { EOS_ASSERT( my->chain_plug, chain::missing_chain_plugin_exception, "" ); auto& chain = my->chain_plug->chain(); - chain.db().add_index(); - chain.db().add_index(); - chain.db().add_index(); - chain.db().add_index(); + chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) 
+ // TODO: Use separate chainbase database for managing the state of the history_plugin (or remove deprecated history_plugin entirely) + db.add_index(); + db.add_index(); + db.add_index(); + db.add_index(); my->applied_transaction_connection.emplace( chain.applied_transaction.connect( [&]( const transaction_trace_ptr& p ) { diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 6a380da0d03..d9be006bb45 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -606,4 +606,8 @@ namespace eosio { return (!my->listen_endpoint || my->listen_endpoint->address().is_loopback()); } + bool http_plugin::verbose_errors()const { + return verbose_http_errors; + } + } diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp index e78300c6240..7f9aedb01e4 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp @@ -60,12 +60,12 @@ namespace eosio { * called with the response code and body. * * The handler will be called from the appbase application io_service - * thread. The callback can be called from any thread and will + * thread. The callback can be called from any thread and will * automatically propagate the call to the http thread. * * The HTTP service will run in its own thread with its own io_service to * make sure that HTTP request processing does not interfer with other - * plugins. + * plugins. 
*/ class http_plugin : public appbase::plugin { @@ -85,7 +85,7 @@ namespace eosio { void add_handler(const string& url, const url_handler&); void add_api(const api_description& api) { - for (const auto& call : api) + for (const auto& call : api) add_handler(call.first, call.second); } @@ -95,6 +95,8 @@ namespace eosio { bool is_on_loopback() const; bool is_secure() const; + bool verbose_errors()const; + private: std::unique_ptr my; }; diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index f0e27401dfe..d66cdfebf66 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -111,7 +111,9 @@ class mongo_db_plugin_impl { void remove_account_control( const account_name& name, const permission_name& permission ); /// @return true if act should be added to mongodb, false to skip it - bool filter_include( const chain::action_trace& action_trace ) const; + bool filter_include( const account_name& receiver, const action_name& act_name, + const vector& authorization ) const; + bool filter_include( const transaction& trx ) const; void init(); void wipe_database(); @@ -127,6 +129,7 @@ class mongo_db_plugin_impl { bool filter_on_star = true; std::set filter_on; std::set filter_out; + bool update_blocks_via_block_num = false; bool store_blocks = true; bool store_block_states = true; bool store_transactions = true; @@ -217,20 +220,22 @@ const std::string mongo_db_plugin_impl::accounts_col = "accounts"; const std::string mongo_db_plugin_impl::pub_keys_col = "pub_keys"; const std::string mongo_db_plugin_impl::account_controls_col = "account_controls"; -bool mongo_db_plugin_impl::filter_include( const chain::action_trace& action_trace ) const { +bool mongo_db_plugin_impl::filter_include( const account_name& receiver, const action_name& act_name, + const vector& authorization ) const +{ bool include = false; if( filter_on_star ) { include = true; } else { - auto itr = std::find_if( 
filter_on.cbegin(), filter_on.cend(), [&action_trace]( const auto& filter ) { - return filter.match( action_trace.receipt.receiver, action_trace.act.name, 0 ); + auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&receiver, &act_name]( const auto& filter ) { + return filter.match( receiver, act_name, 0 ); } ); if( itr != filter_on.cend() ) { include = true; } else { - for( const auto& a : action_trace.act.authorization ) { - auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&action_trace, &a]( const auto& filter ) { - return filter.match( action_trace.receipt.receiver, action_trace.act.name, a.actor ); + for( const auto& a : authorization ) { + auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&receiver, &act_name, &a]( const auto& filter ) { + return filter.match( receiver, act_name, a.actor ); } ); if( itr != filter_on.cend() ) { include = true; @@ -241,15 +246,16 @@ bool mongo_db_plugin_impl::filter_include( const chain::action_trace& action_tra } if( !include ) { return false; } + if( filter_out.empty() ) { return true; } - auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&action_trace]( const auto& filter ) { - return filter.match( action_trace.receipt.receiver, action_trace.act.name, 0 ); + auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&receiver, &act_name]( const auto& filter ) { + return filter.match( receiver, act_name, 0 ); } ); if( itr != filter_out.cend() ) { return false; } - for( const auto& a : action_trace.act.authorization ) { - auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&action_trace, &a]( const auto& filter ) { - return filter.match( action_trace.receipt.receiver, action_trace.act.name, a.actor ); + for( const auto& a : authorization ) { + auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&receiver, &act_name, &a]( const auto& filter ) { + return filter.match( receiver, act_name, a.actor ); } ); if( itr != filter_out.cend() ) { 
return false; } } @@ -257,6 +263,30 @@ bool mongo_db_plugin_impl::filter_include( const chain::action_trace& action_tra return true; } +bool mongo_db_plugin_impl::filter_include( const transaction& trx ) const +{ + if( !filter_on_star || !filter_out.empty() ) { + bool include = false; + for( const auto& a : trx.actions ) { + if( filter_include( a.account, a.name, a.authorization ) ) { + include = true; + break; + } + } + if( !include ) { + for( const auto& a : trx.context_free_actions ) { + if( filter_include( a.account, a.name, a.authorization ) ) { + include = true; + break; + } + } + } + return include; + } + return true; +} + + template void mongo_db_plugin_impl::queue( Queue& queue, const Entry& e ) { boost::mutex::scoped_lock lock( mtx ); @@ -694,6 +724,10 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti using bsoncxx::builder::basic::make_array; namespace bbb = bsoncxx::builder::basic; + const auto& trx = t->trx; + + if( !filter_include( trx ) ) return; + auto trans_doc = bsoncxx::builder::basic::document{}; auto now = std::chrono::duration_cast( @@ -701,7 +735,6 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti const auto& trx_id = t->id; const auto trx_id_str = trx_id.str(); - const auto& trx = t->trx; trans_doc.append( kvp( "trx_id", trx_id_str ) ); @@ -776,7 +809,8 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces } bool added = false; - if( start_block_reached && store_action_traces && filter_include( atrace ) ) { + if( start_block_reached && store_action_traces && + filter_include( atrace.receipt.receiver, atrace.act.name, atrace.act.authorization ) ) { auto action_traces_doc = bsoncxx::builder::basic::document{}; const chain::base_action_trace& base = atrace; // without inline action traces @@ -930,9 +964,16 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr block_state_doc.append( kvp( "createdAt", b_date{now} ) ); 
try { - if( !_block_states.update_one( make_document( kvp( "block_id", block_id_str ) ), - make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) { - EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block_state ${bid}", ("bid", block_id) ); + if( update_blocks_via_block_num ) { + if( !_block_states.update_one( make_document( kvp( "block_num", b_int32{static_cast(block_num)} ) ), + make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) { + EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block_state ${num}", ("num", block_num) ); + } + } else { + if( !_block_states.update_one( make_document( kvp( "block_id", block_id_str ) ), + make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) { + EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block_state ${bid}", ("bid", block_id) ); + } } } catch( ... ) { handle_mongo_exception( "block_states insert: " + json, __LINE__ ); @@ -963,9 +1004,16 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr block_doc.append( kvp( "createdAt", b_date{now} ) ); try { - if( !_blocks.update_one( make_document( kvp( "block_id", block_id_str ) ), - make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) { - EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block ${bid}", ("bid", block_id) ); + if( update_blocks_via_block_num ) { + if( !_blocks.update_one( make_document( kvp( "block_num", b_int32{static_cast(block_num)} ) ), + make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) { + EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block ${num}", ("num", block_num) ); + } + } else { + if( !_blocks.update_one( make_document( kvp( "block_id", block_id_str ) ), + make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) { + EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block ${bid}", ("bid", block_id) ); + } } } catch( ... 
) { handle_mongo_exception( "blocks insert: " + json, __LINE__ ); @@ -1030,7 +1078,9 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ const auto& pt = receipt.trx.get(); // get id via get_raw_transaction() as packed_transaction.id() mutates internal transaction state const auto& raw = pt.get_raw_transaction(); - const auto& id = fc::raw::unpack( raw ).id(); + const auto& trx = fc::raw::unpack( raw ); + if( !filter_include( trx ) ) continue; + const auto& id = trx.id(); trx_id_str = id.str(); } else { const auto& id = receipt.trx.get(); @@ -1043,7 +1093,7 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ kvp( "updatedAt", b_date{now} ) ) ) ); mongocxx::model::update_one update_op{make_document( kvp( "trx_id", trx_id_str ) ), update_doc.view()}; - update_op.upsert( true ); + update_op.upsert( false ); bulk.append( update_op ); transactions_in_block = true; } @@ -1427,6 +1477,8 @@ void mongo_db_plugin::set_program_options(options_description& cli, options_desc "MongoDB URI connection string, see: https://docs.mongodb.com/master/reference/connection-string/." " If not specified then plugin is disabled. Default database 'EOS' is used if not specified in URI." 
" Example: mongodb://127.0.0.1:27017/EOS") + ("mongodb-update-via-block-num", bpo::value()->default_value(false), + "Update blocks/block_state with latest via block number so that duplicates are overwritten.") ("mongodb-store-blocks", bpo::value()->default_value(true), "Enables storing blocks in mongodb.") ("mongodb-store-block-states", bpo::value()->default_value(true), @@ -1476,6 +1528,9 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) if( options.count( "mongodb-block-start" )) { my->start_block_num = options.at( "mongodb-block-start" ).as(); } + if( options.count( "mongodb-update-via-block-num" )) { + my->update_blocks_via_block_num = options.at( "mongodb-update-via-block-num" ).as(); + } if( options.count( "mongodb-store-blocks" )) { my->store_blocks = options.at( "mongodb-store-blocks" ).as(); } diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp index 586c4ae4aa6..a736a9ff464 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp @@ -79,17 +79,12 @@ namespace eosio { fc::sha256 node_id; ///< for duplicate notification }; - typedef std::chrono::system_clock::duration::rep tstamp; - typedef int32_t tdist; - - static_assert(sizeof(std::chrono::system_clock::duration::rep) >= 8, "system_clock is expected to be at least 64 bits"); - - struct time_message { - tstamp org; //!< origin timestamp - tstamp rec; //!< receive timestamp - tstamp xmt; //!< transmit timestamp - mutable tstamp dst; //!< destination timestamp - }; + struct time_message { + tstamp org; //!< origin timestamp + tstamp rec; //!< receive timestamp + tstamp xmt; //!< transmit timestamp + mutable tstamp dst; //!< destination timestamp + }; enum id_list_modes { none, diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 28ee6f47d83..9ee380b666f 100644 --- a/plugins/net_plugin/net_plugin.cpp 
+++ b/plugins/net_plugin/net_plugin.cpp @@ -63,15 +63,6 @@ namespace eosio { using net_message_ptr = shared_ptr; - template - std::string itoh(I n, size_t hlen = sizeof(I)<<1) { - static const char* digits = "0123456789abcdef"; - std::string r(hlen, '0'); - for(size_t i = 0, j = (hlen - 1) * 4 ; i < hlen; ++i, j -= 4) - r[i] = digits[(n>>j) & 0x0f]; - return r; - } - struct node_transaction_state { transaction_id_type id; time_point_sec expires; /// time after which this may be purged. @@ -1556,7 +1547,7 @@ namespace eosio { else { c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; reset_lib_num (c); - start_sync(c, msg.known_blocks.pending); + start_sync(c, msg.known_trx.pending); } } diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index f414d2c3924..13599d5f834 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -86,6 +86,10 @@ void producer_api_plugin::plugin_startup() { INVOKE_R_V(producer, get_whitelist_blacklist), 201), CALL(producer, producer, set_whitelist_blacklist, INVOKE_V_R(producer, set_whitelist_blacklist, producer_plugin::whitelist_blacklist), 201), + CALL(producer, producer, get_integrity_hash, + INVOKE_R_V(producer, get_integrity_hash), 201), + CALL(producer, producer, create_snapshot, + INVOKE_R_V(producer, create_snapshot), 201), }); } diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index 5697823d4f7..f2e50e92849 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -40,6 +40,16 @@ class producer_plugin : public appbase::plugin { std::vector accounts; }; + struct integrity_hash_information { + chain::block_id_type head_block_id; + chain::digest_type 
integrity_hash; + }; + + struct snapshot_information { + chain::block_id_type head_block_id; + std::string snapshot_name; + }; + producer_plugin(); virtual ~producer_plugin(); @@ -67,7 +77,9 @@ class producer_plugin : public appbase::plugin { whitelist_blacklist get_whitelist_blacklist() const; void set_whitelist_blacklist(const whitelist_blacklist& params); - + + integrity_hash_information get_integrity_hash() const; + snapshot_information create_snapshot() const; signal confirmed_block; private: @@ -79,5 +91,6 @@ class producer_plugin : public appbase::plugin { FC_REFLECT(eosio::producer_plugin::runtime_options, (max_transaction_time)(max_irreversible_block_age)(produce_time_offset_us)(last_block_time_offset_us)(subjective_cpu_leeway_us)(incoming_defer_ratio)); FC_REFLECT(eosio::producer_plugin::greylist_params, (accounts)); FC_REFLECT(eosio::producer_plugin::whitelist_blacklist, (actor_whitelist)(actor_blacklist)(contract_whitelist)(contract_blacklist)(action_blacklist)(key_blacklist) ) - +FC_REFLECT(eosio::producer_plugin::integrity_hash_information, (head_block_id)(integrity_hash)) +FC_REFLECT(eosio::producer_plugin::snapshot_information, (head_block_id)(snapshot_name)) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index a265b6fd9d0..c46452b5fb4 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -173,6 +174,10 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader.timestamp <= _last_signed_block_time ) return; if( bsp->header.timestamp <= _start_time ) return; @@ -514,11 +519,13 @@ void producer_plugin::set_program_options( ("greylist-account", boost::program_options::value>()->composing()->multitoken(), "account that can not access to extended CPU/NET virtual resources") ("produce-time-offset-us", boost::program_options::value()->default_value(0), - "offset 
of non last block producing time in micro second. Negative number results in blocks to go out sooner, and positive number results in blocks to go out later") + "offset of non last block producing time in microseconds. Negative number results in blocks to go out sooner, and positive number results in blocks to go out later") ("last-block-time-offset-us", boost::program_options::value()->default_value(0), - "offset of last block producing time in micro second. Negative number results in blocks to go out sooner, and positive number results in blocks to go out later") + "offset of last block producing time in microseconds. Negative number results in blocks to go out sooner, and positive number results in blocks to go out later") ("incoming-defer-ratio", bpo::value()->default_value(1.0), "ratio between incoming transations and deferred transactions when both are exhausted") + ("snapshots-dir", bpo::value()->default_value("snapshots"), + "the location of the snapshots directory (absolute path or relative to application data dir)") ; config_file_options.add(producer_options); } @@ -647,6 +654,21 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ my->_incoming_defer_ratio = options.at("incoming-defer-ratio").as(); + if( options.count( "snapshots-dir" )) { + auto sd = options.at( "snapshots-dir" ).as(); + if( sd.is_relative()) { + my->_snapshots_dir = app().data_dir() / sd; + if (!fc::exists(my->_snapshots_dir)) { + fc::create_directories(my->_snapshots_dir); + } + } else { + my->_snapshots_dir = sd; + } + + EOS_ASSERT( fc::is_directory(my->_snapshots_dir), snapshot_directory_not_found_exception, + "No such directory '${dir}'", ("dir", my->_snapshots_dir.generic_string()) ); + } + my->_incoming_block_subscription = app().get_channel().subscribe([this](const signed_block_ptr& block){ try { my->on_incoming_block(block); @@ -849,6 +871,53 @@ void producer_plugin::set_whitelist_blacklist(const producer_plugin::whitelist_b 
if(params.key_blacklist.valid()) chain.set_key_blacklist(*params.key_blacklist); } +producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash() const { + chain::controller& chain = app().get_plugin().chain(); + + auto reschedule = fc::make_scoped_exit([this](){ + my->schedule_production_loop(); + }); + + if (chain.pending_block_state()) { + // abort the pending block + chain.abort_block(); + } else { + reschedule.cancel(); + } + + return {chain.head_block_id(), chain.calculate_integrity_hash()}; +} + +producer_plugin::snapshot_information producer_plugin::create_snapshot() const { + chain::controller& chain = app().get_plugin().chain(); + + auto reschedule = fc::make_scoped_exit([this](){ + my->schedule_production_loop(); + }); + + if (chain.pending_block_state()) { + // abort the pending block + chain.abort_block(); + } else { + reschedule.cancel(); + } + + auto head_id = chain.head_block_id(); + std::string snapshot_path = (my->_snapshots_dir / fc::format_string("snapshot-${id}.bin", fc::mutable_variant_object()("id", head_id))).generic_string(); + + EOS_ASSERT( !fc::is_regular_file(snapshot_path), snapshot_exists_exception, + "snapshot named ${name} already exists", ("name", snapshot_path)); + + + auto snap_out = std::ofstream(snapshot_path, (std::ios::out | std::ios::binary)); + auto writer = std::make_shared(snap_out); + chain.write_snapshot(writer); + writer->finalize(); + snap_out.flush(); + snap_out.close(); + + return {head_id, snapshot_path}; +} optional producer_plugin_impl::calculate_next_block_time(const account_name& producer_name, const block_timestamp_type& current_block_time) const { chain::controller& chain = app().get_plugin().chain(); @@ -1357,15 +1426,17 @@ bool producer_plugin_impl::maybe_produce_block() { }); try { - produce_block(); - return true; - } catch ( const guard_exception& e ) { - app().get_plugin().handle_guard_exception(e); - return false; - } catch ( boost::interprocess::bad_alloc& ) { - 
chain_plugin::handle_db_exhaustion(); + try { + produce_block(); + return true; + } catch ( const guard_exception& e ) { + app().get_plugin().handle_guard_exception(e); + return false; + } FC_LOG_AND_DROP(); + } catch ( boost::interprocess::bad_alloc&) { + raise(SIGUSR1); return false; - } FC_LOG_AND_DROP(); + } fc_dlog(_log, "Aborting block due to produce_block error"); chain::controller& chain = app().get_plugin().chain(); diff --git a/plugins/test_control_api_plugin/test_control_api_plugin.cpp b/plugins/test_control_api_plugin/test_control_api_plugin.cpp index 91d5535c796..16510b06460 100644 --- a/plugins/test_control_api_plugin/test_control_api_plugin.cpp +++ b/plugins/test_control_api_plugin/test_control_api_plugin.cpp @@ -37,7 +37,7 @@ struct async_result_visitor : public fc::visitor { #define CALL(api_name, api_handle, api_namespace, call_name, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ - [this, api_handle](string, string body, url_response_callback cb) mutable { \ + [api_handle](string, string body, url_response_callback cb) mutable { \ try { \ if (body.empty()) body = "{}"; \ auto result = api_handle.call_name(fc::json::from_string(body).as()); \ diff --git a/plugins/wallet_plugin/se_wallet.cpp b/plugins/wallet_plugin/se_wallet.cpp index 6f612a17013..6e1a4fe0e17 100644 --- a/plugins/wallet_plugin/se_wallet.cpp +++ b/plugins/wallet_plugin/se_wallet.cpp @@ -192,7 +192,6 @@ struct se_wallet_impl { return optional{}; fc::ecdsa_sig sig = ECDSA_SIG_new(); - BIGNUM *r = BN_new(), *s = BN_new(); CFErrorRef error = nullptr; CFDataRef digestData = CFDataCreateWithBytesNoCopy(nullptr, (UInt8*)d.data(), d.data_size(), kCFAllocatorNull); @@ -205,10 +204,8 @@ struct se_wallet_impl { } const UInt8* der_bytes = CFDataGetBytePtr(signature); - - BN_bin2bn(der_bytes+4, der_bytes[3], r); - BN_bin2bn(der_bytes+6+der_bytes[3], der_bytes[4+der_bytes[3]+1], s); - ECDSA_SIG_set0(sig, r, s); + long derSize = CFDataGetLength(signature); + 
d2i_ECDSA_SIG(&sig.obj, &der_bytes, derSize); public_key_data kd; compact_signature compact_sig; @@ -303,7 +300,7 @@ se_wallet::se_wallet() : my(new detail::se_wallet_impl()) { } unsigned int major, minor; if(sscanf(model, "MacBookPro%u,%u", &major, &minor) == 2) { - if(major >= 13 && minor >= 2) { + if((major >= 15) || (major >= 13 && minor >= 2)) { my->populate_existing_keys(); return; } diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index 3457b5679b2..b3a656c3b10 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -3,3 +3,4 @@ add_subdirectory( cleos ) add_subdirectory( keosd ) add_subdirectory( eosio-launcher ) add_subdirectory( eosio-abigen ) +add_subdirectory( eosio-blocklog ) diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index 2581a15bd4f..e748581d849 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -36,7 +36,9 @@ target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_D target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE appbase chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ${Intl_LIBRARIES} ) + +copy_bin( ${CLI_CLIENT_EXECUTABLE_NAME} ) install( TARGETS ${CLI_CLIENT_EXECUTABLE_NAME} diff --git a/programs/cleos/httpc.cpp b/programs/cleos/httpc.cpp index 26b260b1a95..b0b8acd574e 100644 --- a/programs/cleos/httpc.cpp +++ b/programs/cleos/httpc.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -230,14 +231,7 @@ namespace eosio { namespace client { namespace http { } else { //https boost::asio::ssl::context ssl_context(boost::asio::ssl::context::sslv23_client); -#if defined( __APPLE__ ) - //TODO: this is undocumented/not supported; fix with keychain based approach - ssl_context.load_verify_file("/private/etc/ssl/cert.pem"); -#elif defined( _WIN32 ) - EOS_THROW(http_exception, "HTTPS on Windows not supported"); -#else - 
ssl_context.set_default_verify_paths(); -#endif + fc::add_platform_root_cas_to_context(ssl_context); boost::asio::ssl::stream socket(cp.context->ios, ssl_context); SSL_set_tlsext_host_name(socket.native_handle(), url.server.c_str()); diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 474f160f4ef..30334246fd0 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -51,10 +51,10 @@ Usage: ./cleos create SUBCOMMAND Subcommands: key Create a new keypair and print the public and private keys - account Create a new account on the blockchain + account Create a new account on the blockchain (assumes system contract does not restrict RAM usage) $ ./cleos create account -Create a new account on the blockchain +Create a new account on the blockchain (assumes system contract does not restrict RAM usage) Usage: ./cleos create account [OPTIONS] creator name OwnerKey ActiveKey Positionals: @@ -166,7 +166,8 @@ bfs::path determine_home_directory() } string url = "http://127.0.0.1:8888/"; -string wallet_url = "http://127.0.0.1:8900/"; +string default_wallet_url = "unix://" + (determine_home_directory() / "eosio-wallet" / (string(key_store_executable_name) + ".sock")).string(); +string wallet_url; //to be set to default_wallet_url in main bool no_verify = false; vector headers; @@ -768,25 +769,22 @@ struct set_action_permission_subcommand { }; -bool local_port_used(const string& lo_address, uint16_t port) { +bool local_port_used() { using namespace boost::asio; io_service ios; - boost::asio::ip::tcp::endpoint endpoint(boost::asio::ip::address::from_string(lo_address), port); - boost::asio::ip::tcp::socket socket(ios); - boost::system::error_code ec = error::would_block; - //connecting/failing to connect to localhost should be always fast - don't care about timeouts - socket.async_connect(endpoint, [&](const boost::system::error_code& error) { ec = error; } ); - do { - ios.run_one(); - } while (ec == error::would_block); + 
local::stream_protocol::endpoint endpoint(wallet_url.substr(strlen("unix://"))); + local::stream_protocol::socket socket(ios); + boost::system::error_code ec; + socket.connect(endpoint, ec); + return !ec; } -void try_local_port( const string& lo_address, uint16_t port, uint32_t duration ) { +void try_local_port(uint32_t duration) { using namespace std::chrono; auto start_time = duration_cast( system_clock::now().time_since_epoch() ).count(); - while ( !local_port_used(lo_address, port)) { + while ( !local_port_used()) { if (duration_cast( system_clock::now().time_since_epoch()).count() - start_time > duration ) { std::cerr << "Unable to connect to keosd, if keosd is running please kill the process and try again.\n"; throw connection_exception(fc::log_messages{FC_LOG_MESSAGE(error, "Unable to connect to keosd")}); @@ -806,16 +804,11 @@ void ensure_keosd_running(CLI::App* app) { if (subapp->got_subcommand("listproducers") || subapp->got_subcommand("listbw") || subapp->got_subcommand("bidnameinfo")) // system list* do not require wallet return; } + if (wallet_url != default_wallet_url) + return; - auto parsed_url = parse_url(wallet_url); - auto resolved_url = resolve_url(context, parsed_url); - - if (!resolved_url.is_loopback) - return; - - for (const auto& addr: resolved_url.resolved_addresses) - if (local_port_used(addr, resolved_url.resolved_port)) // Hopefully taken by keosd - return; + if (local_port_used()) + return; boost::filesystem::path binPath = boost::dll::program_location(); binPath.remove_filename(); @@ -827,13 +820,17 @@ void ensure_keosd_running(CLI::App* app) { binPath.remove_filename().remove_filename().append("keosd").append(key_store_executable_name); } - const auto& lo_address = resolved_url.resolved_addresses.front(); if (boost::filesystem::exists(binPath)) { namespace bp = boost::process; binPath = boost::filesystem::canonical(binPath); vector pargs; - pargs.push_back("--http-server-address=" + lo_address + ":" + 
std::to_string(resolved_url.resolved_port)); + pargs.push_back("--http-server-address"); + pargs.push_back(""); + pargs.push_back("--https-server-address"); + pargs.push_back(""); + pargs.push_back("--unix-socket-path"); + pargs.push_back(string(key_store_executable_name) + ".sock"); ::boost::process::child keos(binPath, pargs, bp::std_in.close(), @@ -842,13 +839,12 @@ void ensure_keosd_running(CLI::App* app) { if (keos.running()) { std::cerr << binPath << " launched" << std::endl; keos.detach(); - try_local_port(lo_address, resolved_url.resolved_port, 2000); + try_local_port(2000); } else { - std::cerr << "No wallet service listening on " << lo_address << ":" - << std::to_string(resolved_url.resolved_port) << ". Failed to launch " << binPath << std::endl; + std::cerr << "No wallet service listening on " << wallet_url << ". Failed to launch " << binPath << std::endl; } } else { - std::cerr << "No wallet service listening on " << lo_address << ":" << std::to_string(resolved_url.resolved_port) + std::cerr << "No wallet service listening on " << ". Cannot automatically start keosd because keosd was not found." << std::endl; } } @@ -896,12 +892,17 @@ struct create_account_subcommand { string stake_net; string stake_cpu; uint32_t buy_ram_bytes_in_kbytes = 0; + uint32_t buy_ram_bytes = 0; string buy_ram_eos; bool transfer; bool simple; create_account_subcommand(CLI::App* actionRoot, bool s) : simple(s) { - auto createAccount = actionRoot->add_subcommand( (simple ? "account" : "newaccount"), localized("Create an account, buy ram, stake for bandwidth for the account")); + auto createAccount = actionRoot->add_subcommand( + (simple ? "account" : "newaccount"), + (simple ? 
localized("Create a new account on the blockchain (assumes system contract does not restrict RAM usage)") + : localized("Create a new account on the blockchain with initial resources") ) + ); createAccount->add_option("creator", creator, localized("The name of the account creating the new account"))->required(); createAccount->add_option("name", account_name, localized("The name of the new account"))->required(); createAccount->add_option("OwnerKey", owner_key_str, localized("The owner public key for the new account"))->required(); @@ -913,7 +914,9 @@ struct create_account_subcommand { createAccount->add_option("--stake-cpu", stake_cpu, (localized("The amount of EOS delegated for CPU bandwidth")))->required(); createAccount->add_option("--buy-ram-kbytes", buy_ram_bytes_in_kbytes, - (localized("The amount of RAM bytes to purchase for the new account in kibibytes (KiB), default is 8 KiB"))); + (localized("The amount of RAM bytes to purchase for the new account in kibibytes (KiB)"))); + createAccount->add_option("--buy-ram-bytes", buy_ram_bytes, + (localized("The amount of RAM bytes to purchase for the new account in bytes"))); createAccount->add_option("--buy-ram", buy_ram_eos, (localized("The amount of RAM bytes to purchase for the new account in EOS"))); createAccount->add_flag("--transfer", transfer, @@ -934,12 +937,10 @@ struct create_account_subcommand { } EOS_RETHROW_EXCEPTIONS(public_key_type_exception, "Invalid active public key: ${public_key}", ("public_key", active_key_str)); auto create = create_newaccount(creator, account_name, owner_key, active_key); if (!simple) { - if ( buy_ram_eos.empty() && buy_ram_bytes_in_kbytes == 0) { - std::cerr << "ERROR: Either --buy-ram or --buy-ram-kbytes with non-zero value is required" << std::endl; - return; - } + EOSC_ASSERT( buy_ram_eos.size() || buy_ram_bytes_in_kbytes || buy_ram_bytes, "ERROR: One of --buy-ram, --buy-ram-kbytes or --buy-ram-bytes should have non-zero value" ); + EOSC_ASSERT( !buy_ram_bytes_in_kbytes 
|| !buy_ram_bytes, "ERROR: --buy-ram-kbytes and --buy-ram-bytes cannot be set at the same time" ); action buyram = !buy_ram_eos.empty() ? create_buyram(creator, account_name, to_asset(buy_ram_eos)) - : create_buyrambytes(creator, account_name, buy_ram_bytes_in_kbytes * 1024); + : create_buyrambytes(creator, account_name, (buy_ram_bytes_in_kbytes) ? (buy_ram_bytes_in_kbytes * 1024) : buy_ram_bytes); auto net = to_asset(stake_net); auto cpu = to_asset(stake_cpu); if ( net.get_amount() != 0 || cpu.get_amount() != 0 ) { @@ -1200,6 +1201,7 @@ struct delegate_bandwidth_subcommand { string stake_cpu_amount; string stake_storage_amount; string buy_ram_amount; + uint32_t buy_ram_bytes = 0; bool transfer = false; delegate_bandwidth_subcommand(CLI::App* actionRoot) { @@ -1209,6 +1211,7 @@ struct delegate_bandwidth_subcommand { delegate_bandwidth->add_option("stake_net_quantity", stake_net_amount, localized("The amount of EOS to stake for network bandwidth"))->required(); delegate_bandwidth->add_option("stake_cpu_quantity", stake_cpu_amount, localized("The amount of EOS to stake for CPU bandwidth"))->required(); delegate_bandwidth->add_option("--buyram", buy_ram_amount, localized("The amount of EOS to buyram")); + delegate_bandwidth->add_option("--buy-ram-bytes", buy_ram_bytes, localized("The amount of RAM to buy in number of bytes")); delegate_bandwidth->add_flag("--transfer", transfer, localized("Transfer voting power and right to unstake EOS to receiver")); add_standard_transaction_options(delegate_bandwidth); @@ -1220,12 +1223,11 @@ struct delegate_bandwidth_subcommand { ("stake_cpu_quantity", to_asset(stake_cpu_amount)) ("transfer", transfer); std::vector acts{create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(delegatebw), act_payload)}; - if (buy_ram_amount.length()) { - fc::variant act_payload2 = fc::mutable_variant_object() - ("payer", from_str) - ("receiver", receiver_str) - ("quant", to_asset(buy_ram_amount)); - 
acts.push_back(create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyram), act_payload2)); + EOSC_ASSERT( !(buy_ram_amount.size()) || !buy_ram_bytes, "ERROR: --buyram and --buy-ram-bytes cannot be set at the same time" ); + if (buy_ram_amount.size()) { + acts.push_back( create_buyram(from_str, receiver_str, to_asset(buy_ram_amount)) ); + } else if (buy_ram_bytes) { + acts.push_back( create_buyrambytes(from_str, receiver_str, buy_ram_bytes) ); } send_actions(std::move(acts)); }); @@ -1353,27 +1355,22 @@ struct buyram_subcommand { string receiver_str; string amount; bool kbytes = false; + bool bytes = false; buyram_subcommand(CLI::App* actionRoot) { auto buyram = actionRoot->add_subcommand("buyram", localized("Buy RAM")); buyram->add_option("payer", from_str, localized("The account paying for RAM"))->required(); buyram->add_option("receiver", receiver_str, localized("The account receiving bought RAM"))->required(); - buyram->add_option("amount", amount, localized("The amount of EOS to pay for RAM, or number of kbytes of RAM if --kbytes is set"))->required(); - buyram->add_flag("--kbytes,-k", kbytes, localized("buyram in number of kbytes")); + buyram->add_option("amount", amount, localized("The amount of EOS to pay for RAM, or number of bytes/kibibytes of RAM if --bytes/--kbytes is set"))->required(); + buyram->add_flag("--kbytes,-k", kbytes, localized("buyram in number of kibibytes (KiB)")); + buyram->add_flag("--bytes,-b", bytes, localized("buyram in number of bytes")); add_standard_transaction_options(buyram); buyram->set_callback([this] { - if (kbytes) { - fc::variant act_payload = fc::mutable_variant_object() - ("payer", from_str) - ("receiver", receiver_str) - ("bytes", fc::to_uint64(amount) * 1024ull); - send_actions({create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyrambytes), act_payload)}); + EOSC_ASSERT( !kbytes || !bytes, "ERROR: --kbytes and --bytes cannot be set 
at the same time" ); + if (kbytes || bytes) { + send_actions( { create_buyrambytes(from_str, receiver_str, fc::to_uint64(amount) * ((kbytes) ? 1024ull : 1ull)) } ); } else { - fc::variant act_payload = fc::mutable_variant_object() - ("payer", from_str) - ("receiver", receiver_str) - ("quant", to_asset(amount)); - send_actions({create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyram), act_payload)}); + send_actions( { create_buyram(from_str, receiver_str, to_asset(amount)) } ); } }); } @@ -1471,10 +1468,16 @@ struct canceldelay_subcommand { } }; -void get_account( const string& accountName, bool json_format ) { - auto json = call(get_account_func, fc::mutable_variant_object("account_name", accountName)); - auto res = json.as(); +void get_account( const string& accountName, const string& coresym, bool json_format ) { + fc::variant json; + if (coresym.empty()) { + json = call(get_account_func, fc::mutable_variant_object("account_name", accountName)); + } + else { + json = call(get_account_func, fc::mutable_variant_object("account_name", accountName)("expected_core_symbol", symbol::from_string(coresym))); + } + auto res = json.as(); if (!json_format) { asset staked; asset unstaking; @@ -1745,6 +1748,7 @@ int main( int argc, char** argv ) { bindtextdomain(locale_domain, locale_path); textdomain(locale_domain); context = eosio::client::http::create_http_context(); + wallet_url = default_wallet_url; CLI::App app{"Command Line Interface to EOSIO Client"}; app.require_subcommand(); @@ -1916,11 +1920,13 @@ int main( int argc, char** argv ) { // get account string accountName; + string coresym; bool print_json; auto getAccount = get->add_subcommand("account", localized("Retrieve an account from the blockchain"), false); getAccount->add_option("name", accountName, localized("The name of the account to retrieve"))->required(); + getAccount->add_option("core-symbol", coresym, localized("The expected core symbol of the chain you are 
querying")); getAccount->add_flag("--json,-j", print_json, localized("Output in JSON format") ); - getAccount->set_callback([&]() { get_account(accountName, print_json); }); + getAccount->set_callback([&]() { get_account(accountName, coresym, print_json); }); // get code string codeFilename; @@ -3061,19 +3067,21 @@ int main( int argc, char** argv ) { } ); - // sudo subcommand - auto sudo = app.add_subcommand("sudo", localized("Sudo contract commands"), false); - sudo->require_subcommand(); + // wrap subcommand + auto wrap = app.add_subcommand("wrap", localized("Wrap contract commands"), false); + wrap->require_subcommand(); - // sudo exec + // wrap exec + string wrap_con = "eosio.wrap"; executer = ""; string trx_to_exec; - auto sudo_exec = sudo->add_subcommand("exec", localized("Execute a transaction while bypassing authorization checks")); - add_standard_transaction_options(sudo_exec); - sudo_exec->add_option("executer", executer, localized("Account executing the transaction and paying for the deferred transaction RAM"))->required(); - sudo_exec->add_option("transaction", trx_to_exec, localized("The JSON string or filename defining the transaction to execute"))->required(); + auto wrap_exec = wrap->add_subcommand("exec", localized("Execute a transaction while bypassing authorization checks")); + add_standard_transaction_options(wrap_exec); + wrap_exec->add_option("executer", executer, localized("Account executing the transaction and paying for the deferred transaction RAM"))->required(); + wrap_exec->add_option("transaction", trx_to_exec, localized("The JSON string or filename defining the transaction to execute"))->required(); + wrap_exec->add_option("--contract,-c", wrap_con, localized("The account which controls the wrap contract")); - sudo_exec->set_callback([&] { + wrap_exec->set_callback([&] { fc::variant trx_var; try { trx_var = json_from_file_or_string(trx_to_exec); @@ -3081,14 +3089,14 @@ int main( int argc, char** argv ) { auto accountPermissions = 
get_account_permissions(tx_permission); if( accountPermissions.empty() ) { - accountPermissions = vector{{executer, config::active_name}, {"eosio.sudo", config::active_name}}; + accountPermissions = vector{{executer, config::active_name}, {wrap_con, config::active_name}}; } auto args = fc::mutable_variant_object() ("executer", executer ) ("trx", trx_var); - send_actions({chain::action{accountPermissions, "eosio.sudo", "exec", variant_to_bin( N(eosio.sudo), N(exec), args ) }}); + send_actions({chain::action{accountPermissions, wrap_con, "exec", variant_to_bin( wrap_con, N(exec), args ) }}); }); // system subcommand diff --git a/programs/eosio-blocklog/CMakeLists.txt b/programs/eosio-blocklog/CMakeLists.txt new file mode 100644 index 00000000000..b883e493f85 --- /dev/null +++ b/programs/eosio-blocklog/CMakeLists.txt @@ -0,0 +1,25 @@ +add_executable( eosio-blocklog main.cpp ) + +if( UNIX AND NOT APPLE ) + set(rt_library rt ) +endif() + +find_package( Gperftools QUIET ) +if( GPERFTOOLS_FOUND ) + message( STATUS "Found gperftools; compiling eosio-blocklog with TCMalloc") + list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) +endif() + +target_include_directories(eosio-blocklog PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) + +target_link_libraries( eosio-blocklog + PRIVATE appbase + PRIVATE eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + +install( TARGETS + eosio-blocklog + + RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR} +) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp new file mode 100644 index 00000000000..31db5b25c70 --- /dev/null +++ b/programs/eosio-blocklog/main.cpp @@ -0,0 +1,204 @@ +/** + * @file + * @copyright defined in eosio/LICENSE.txt + */ +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +using namespace eosio::chain; +namespace bfs = boost::filesystem; 
+namespace bpo = boost::program_options; +using bpo::options_description; +using bpo::variables_map; + +struct blocklog { + blocklog() + {} + + void read_log(); + void set_program_options(options_description& cli); + void initialize(const variables_map& options); + + bfs::path blocks_dir; + bfs::path output_file; + uint32_t first_block; + uint32_t last_block; + bool no_pretty_print; + bool as_json_array; +}; + +void blocklog::read_log() { + block_log block_logger(blocks_dir); + const auto end = block_logger.read_head(); + EOS_ASSERT( end, block_log_exception, "No blocks found in block log" ); + EOS_ASSERT( end->block_num() > 1, block_log_exception, "Only one block found in block log" ); + + ilog( "existing block log contains block num 1 through block num ${n}", ("n",end->block_num()) ); + + optional reversible_blocks; + try { + reversible_blocks.emplace(blocks_dir / config::reversible_blocks_dir_name, chainbase::database::read_only, config::default_reversible_cache_size); + reversible_blocks->add_index(); + const auto& idx = reversible_blocks->get_index(); + auto first = idx.lower_bound(end->block_num()); + auto last = idx.rbegin(); + if (first != idx.end() && last != idx.rend()) + ilog( "existing reversible block num ${first} through block num ${last} ", ("first",first->get_block()->block_num())("last",last->get_block()->block_num()) ); + else { + elog( "no blocks available in reversible block database: only block_log blocks are available" ); + reversible_blocks.reset(); + } + } catch( const std::runtime_error& e ) { + if( std::string(e.what()) == "database dirty flag set" ) { + elog( "database dirty flag set (likely due to unclean shutdown): only block_log blocks are available" ); + } else if( std::string(e.what()) == "database metadata dirty flag set" ) { + elog( "database metadata dirty flag set (likely due to unclean shutdown): only block_log blocks are available" ); + } else { + throw; + } + } + + std::ofstream output_blocks; + std::ostream* out; + if 
(!output_file.empty()) { + output_blocks.open(output_file.generic_string().c_str()); + if (output_blocks.fail()) { + std::ostringstream ss; + ss << "Unable to open file '" << output_file.string() << "'"; + throw std::runtime_error(ss.str()); + } + out = &output_blocks; + } + else + out = &std::cout; + + if (as_json_array) + *out << "["; + uint32_t block_num = (first_block < 1) ? 1 : first_block; + signed_block_ptr next; + fc::variant pretty_output; + const fc::microseconds deadline = fc::seconds(10); + auto print_block = [&](signed_block_ptr& next) { + abi_serializer::to_variant(*next, + pretty_output, + []( account_name n ) { return optional(); }, + deadline); + const auto block_id = next->id(); + const uint32_t ref_block_prefix = block_id._hash[1]; + const auto enhanced_object = fc::mutable_variant_object + ("block_num",next->block_num()) + ("id", block_id) + ("ref_block_prefix", ref_block_prefix) + (pretty_output.get_object()); + fc::variant v(std::move(enhanced_object)); + if (no_pretty_print) + fc::json::to_stream(*out, v, fc::json::stringify_large_ints_and_doubles); + else + *out << fc::json::to_pretty_string(v) << "\n"; + }; + bool contains_obj = false; + while((block_num <= last_block) && (next = block_logger.read_block_by_num( block_num ))) { + if (as_json_array && contains_obj) + *out << ","; + print_block(next); + ++block_num; + contains_obj = true; + } + if (reversible_blocks) { + const reversible_block_object* obj = nullptr; + while( (block_num <= last_block) && (obj = reversible_blocks->find(block_num)) ) { + if (as_json_array && contains_obj) + *out << ","; + auto next = obj->get_block(); + print_block(next); + ++block_num; + contains_obj = true; + } + } + if (as_json_array) + *out << "]"; +} + +void blocklog::set_program_options(options_description& cli) +{ + cli.add_options() + ("blocks-dir", bpo::value()->default_value("blocks"), + "the location of the blocks directory (absolute path or relative to the current directory)") + ("output-file,o", 
bpo::value(), + "the file to write the block log output to (absolute or relative path). If not specified then output is to stdout.") + ("first", bpo::value(&first_block)->default_value(1), + "the first block number to log") + ("last", bpo::value(&last_block)->default_value(std::numeric_limits::max()), + "the last block number (inclusive) to log") + ("no-pretty-print", bpo::bool_switch(&no_pretty_print)->default_value(false), + "Do not pretty print the output. Useful if piping to jq to improve performance.") + ("as-json-array", bpo::bool_switch(&as_json_array)->default_value(false), + "Print out json blocks wrapped in json array (otherwise the output is free-standing json objects).") + ("help", "Print this help message and exit.") + ; + +} + +void blocklog::initialize(const variables_map& options) { + try { + auto bld = options.at( "blocks-dir" ).as(); + if( bld.is_relative()) + blocks_dir = bfs::current_path() / bld; + else + blocks_dir = bld; + + if (options.count( "output-file" )) { + bld = options.at( "output-file" ).as(); + if( bld.is_relative()) + output_file = bfs::current_path() / bld; + else + output_file = bld; + } + } FC_LOG_AND_RETHROW() + +} + + +int main(int argc, char** argv) +{ + std::ios::sync_with_stdio(false); // for potential performance boost for large block log files + options_description cli ("eosio-blocklog command line options"); + try { + blocklog blog; + blog.set_program_options(cli); + variables_map vmap; + bpo::store(bpo::parse_command_line(argc, argv, cli), vmap); + bpo::notify(vmap); + if (vmap.count("help") > 0) { + cli.print(std::cerr); + return 0; + } + blog.initialize(vmap); + blog.read_log(); + } catch( const fc::exception& e ) { + elog( "${e}", ("e", e.to_detail_string())); + return -1; + } catch( const boost::exception& e ) { + elog("${e}", ("e",boost::diagnostic_information(e))); + return -1; + } catch( const std::exception& e ) { + elog("${e}", ("e",e.what())); + return -1; + } catch( ... 
) { + elog("unknown exception"); + return -1; + } + + return 0; +} diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 73cba43f337..597e473b677 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -387,63 +388,65 @@ string producer_names::producer_name(unsigned int producer_number) { } struct launcher_def { - bool force_overwrite; - size_t total_nodes; - size_t prod_nodes; - size_t producers; - size_t next_node; - string shape; - p2p_plugin p2p; - allowed_connection allowed_connections = PC_NONE; - bfs::path genesis; - bfs::path output; - bfs::path host_map_file; - bfs::path server_ident_file; - bfs::path stage; - - string erd; - bfs::path config_dir_base; - bfs::path data_dir_base; - bool skip_transaction_signatures = false; - string eosd_extra_args; - std::map specific_nodeos_args; - testnet_def network; - string gelf_endpoint; - vector aliases; - vector bindings; - int per_host = 0; - last_run_def last_run; - int start_delay = 0; - bool gelf_enabled; - bool nogen; - bool boot; - bool add_enable_stale_production = false; - string launch_name; - string launch_time; - server_identities servers; - producer_set_def producer_set; - vector genesis_block; + bool force_overwrite; + size_t total_nodes; + size_t prod_nodes; + size_t producers; + size_t next_node; + string shape; + p2p_plugin p2p; + allowed_connection allowed_connections = PC_NONE; + bfs::path genesis; + bfs::path output; + bfs::path host_map_file; + bfs::path server_ident_file; + bfs::path stage; + + string erd; + bfs::path config_dir_base; + bfs::path data_dir_base; + bool skip_transaction_signatures = false; + string eosd_extra_args; + std::map specific_nodeos_args; + testnet_def network; + string gelf_endpoint; + vector aliases; + vector bindings; + int per_host = 0; + last_run_def last_run; + int start_delay = 0; + bool gelf_enabled; + bool nogen; + 
bool boot; + bool add_enable_stale_production = false; + string launch_name; + string launch_time; + server_identities servers; + producer_set_def producer_set; string start_temp; string start_script; + fc::optional max_block_cpu_usage; + fc::optional max_transaction_cpu_usage; + eosio::chain::genesis_state genesis_from_file; void assign_name (eosd_def &node, bool is_bios); - void set_options (bpo::options_description &cli); - void initialize (const variables_map &vmap); + void set_options (bpo::options_description &cli); + void initialize (const variables_map &vmap); void init_genesis (); - void load_servers (); - bool generate (); - void define_network (); - void bind_nodes (); - host_def *find_host (const string &name); - host_def *find_host_by_name_or_address (const string &name); - host_def *deploy_config_files (tn_node_def &node); - string compose_scp_command (const host_def &host, const bfs::path &source, - const bfs::path &destination); - void write_config_file (tn_node_def &node); - void write_logging_config_file (tn_node_def &node); - void write_genesis_file (tn_node_def &node); - void write_setprods_file (); + void load_servers (); + bool generate (); + void define_network (); + void bind_nodes (); + host_def *find_host (const string &name); + host_def *find_host_by_name_or_address (const string &name); + host_def *deploy_config_files (tn_node_def &node); + string compose_scp_command (const host_def &host, const bfs::path &source, + const bfs::path &destination); + void write_config_file (tn_node_def &node); + void write_logging_config_file (tn_node_def &node); + void write_genesis_file (tn_node_def &node); + void write_setprods_file (); void write_bios_boot (); bool is_bios_ndx (size_t ndx); @@ -451,25 +454,25 @@ struct launcher_def { bool next_ndx(size_t &ndx); size_t skip_ndx (size_t from, size_t offset); - void make_ring (); - void make_star (); - void make_mesh (); - void make_custom (); - void write_dot_file (); - void format_ssh (const string 
&cmd, const string &host_name, string &ssh_cmd_line); - void do_command(const host_def& host, const string& name, vector> env_pairs, const string& cmd); - bool do_ssh (const string &cmd, const string &host_name); - void prep_remote_config_dir (eosd_def &node, host_def *host); - void launch (eosd_def &node, string >s); - void kill (launch_modes mode, string sig_opt); - static string get_node_num(uint16_t node_num); - pair find_node(uint16_t node_num); - vector> get_nodes(const string& node_number_list); - void bounce (const string& node_numbers); - void down (const string& node_numbers); - void roll (const string& host_names); - void start_all (string >s, launch_modes mode); - void ignite (); + void make_ring (); + void make_star (); + void make_mesh (); + void make_custom (); + void write_dot_file (); + void format_ssh (const string &cmd, const string &host_name, string &ssh_cmd_line); + void do_command(const host_def& host, const string& name, vector> env_pairs, const string& cmd); + bool do_ssh (const string &cmd, const string &host_name); + void prep_remote_config_dir (eosd_def &node, host_def *host); + void launch (eosd_def &node, string >s); + void kill (launch_modes mode, string sig_opt); + static string get_node_num(uint16_t node_num); + pair find_node(uint16_t node_num); + vector> get_nodes(const string& node_number_list); + void bounce (const string& node_numbers); + void down (const string& node_numbers); + void roll (const string& host_names); + void start_all (string >s, launch_modes mode); + void ignite (); }; void @@ -482,7 +485,7 @@ launcher_def::set_options (bpo::options_description &cfg) { ("mode,m",bpo::value>()->multitoken()->default_value({"any"}, "any"),"connection mode, combination of \"any\", \"producers\", \"specified\", \"none\"") ("shape,s",bpo::value(&shape)->default_value("star"),"network topology, use \"star\" \"mesh\" or give a filename for custom") ("p2p-plugin", bpo::value()->default_value("net"),"select a p2p plugin to use (either 
net or bnet). Defaults to net.") - ("genesis,g",bpo::value(&genesis)->default_value("./genesis.json"),"set the path to genesis.json") + ("genesis,g",bpo::value()->default_value("./genesis.json"),"set the path to genesis.json") ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), "nodeos does not require transaction signatures.") ("nodeos", bpo::value(&eosd_extra_args), "forward nodeos command line argument(s) to each instance of nodeos, enclose arg(s) in quotes") ("specific-num", bpo::value>()->composing(), "forward nodeos command line argument(s) (using \"--specific-nodeos\" flag) to this specific instance of nodeos. This parameter can be entered multiple times and requires a paired \"--specific-nodeos\" flag") @@ -490,14 +493,16 @@ launcher_def::set_options (bpo::options_description &cfg) { ("delay,d",bpo::value(&start_delay)->default_value(0),"seconds delay before starting each node after the first") ("boot",bpo::bool_switch(&boot)->default_value(false),"After deploying the nodes and generating a boot script, invoke it.") ("nogen",bpo::bool_switch(&nogen)->default_value(false),"launch nodes without writing new config files") - ("host-map",bpo::value(&host_map_file)->default_value(""),"a file containing mapping specific nodes to hosts. Used to enhance the custom shape argument") - ("servers",bpo::value(&server_ident_file)->default_value(""),"a file containing ip addresses and names of individual servers to deploy as producers or non-producers ") + ("host-map",bpo::value(),"a file containing mapping specific nodes to hosts. Used to enhance the custom shape argument") + ("servers",bpo::value(),"a file containing ip addresses and names of individual servers to deploy as producers or non-producers ") ("per-host",bpo::value(&per_host)->default_value(0),"specifies how many nodeos instances will run on a single host. 
Use 0 to indicate all on one.") ("network-name",bpo::value(&network.name)->default_value("testnet_"),"network name prefix used in GELF logging source") ("enable-gelf-logging",bpo::value(&gelf_enabled)->default_value(true),"enable gelf logging appender in logging configuration file") ("gelf-endpoint",bpo::value(&gelf_endpoint)->default_value("10.160.11.21:12201"),"hostname:port or ip:port of GELF endpoint") ("template",bpo::value(&start_temp)->default_value("testnet.template"),"the startup script template") ("script",bpo::value(&start_script)->default_value("bios_boot.sh"),"the generated startup script name") + ("max-block-cpu-usage",bpo::value(),"Provide the \"max-block-cpu-usage\" value to use in the genesis.json file") + ("max-transaction-cpu-usage",bpo::value(),"Provide the \"max-transaction-cpu-usage\" value to use in the genesis.json file") ; } @@ -529,6 +534,22 @@ launcher_def::initialize (const variables_map &vmap) { } } + if (vmap.count("max-block-cpu-usage")) { + max_block_cpu_usage = vmap["max-block-cpu-usage"].as(); + } + + if (vmap.count("max-transaction-cpu-usage")) { + max_transaction_cpu_usage = vmap["max-transaction-cpu-usage"].as(); + } + + genesis = vmap["genesis"].as(); + if (vmap.count("host-map")) { + host_map_file = vmap["host-map"].as(); + } + if (vmap.count("servers")) { + server_ident_file = vmap["servers"].as(); + } + if (vmap.count("specific-num")) { const auto specific_nums = vmap["specific-num"].as>(); const auto specific_args = vmap["specific-nodeos"].as>(); @@ -1157,27 +1178,20 @@ launcher_def::write_logging_config_file(tn_node_def &node) { void launcher_def::init_genesis () { - bfs::path genesis_path = bfs::current_path() / "genesis.json"; - bfs::ifstream src(genesis_path); - if (!src.good()) { + const bfs::path genesis_path = genesis.is_complete() ? 
genesis : bfs::current_path() / genesis; + if (!bfs::exists(genesis_path)) { cout << "generating default genesis file " << genesis_path << endl; eosio::chain::genesis_state default_genesis; fc::json::save_to_file( default_genesis, genesis_path, true ); - src.open(genesis_path); } string bioskey = string(network.nodes["bios"].keys[0].get_public_key()); - string str; - string prefix("initial_key"); - while(getline(src,str)) { - size_t pos = str.find(prefix); - if (pos != string::npos) { - size_t cut = str.find("EOS",pos); - genesis_block.push_back(str.substr(0,cut) + bioskey + "\","); - } - else { - genesis_block.push_back(str); - } - } + + fc::json::from_file(genesis_path).as(genesis_from_file); + genesis_from_file.initial_key = public_key_type(bioskey); + if (max_block_cpu_usage) + genesis_from_file.initial_configuration.max_block_cpu_usage = *max_block_cpu_usage; + if (max_transaction_cpu_usage) + genesis_from_file.initial_configuration.max_transaction_cpu_usage = *max_transaction_cpu_usage; } void @@ -1191,10 +1205,7 @@ launcher_def::write_genesis_file(tn_node_def &node) { } filename = dd / "genesis.json"; - bfs::ofstream gf ( dd / "genesis.json"); - for (auto &line : genesis_block) { - gf << line << "\n"; - } + fc::json::save_to_file( genesis_from_file, dd / "genesis.json", true ); } void @@ -1707,6 +1718,13 @@ launcher_def::bounce (const string& node_numbers) { const string node_num = node.get_node_num(); cout << "Bouncing " << node.name << endl; string cmd = "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; + if (node_num != "bios" && !specific_nodeos_args.empty()) { + const auto node_num_i = boost::lexical_cast(node_num); + if (specific_nodeos_args.count(node_num_i)) { + cmd += " " + specific_nodeos_args[node_num_i]; + } + } + do_command(host, node.name, { { "EOSIO_HOME", host.eosio_home }, { "EOSIO_NODE", node_num } }, cmd); } } diff --git a/programs/keosd/CMakeLists.txt b/programs/keosd/CMakeLists.txt index a332f8e26b1..1c294329387 100644 --- 
a/programs/keosd/CMakeLists.txt +++ b/programs/keosd/CMakeLists.txt @@ -20,6 +20,7 @@ target_include_directories(${KEY_STORE_EXECUTABLE_NAME} PUBLIC ${CMAKE_CURRENT_B mas_sign(${KEY_STORE_EXECUTABLE_NAME}) +copy_bin( ${KEY_STORE_EXECUTABLE_NAME} ) install( TARGETS ${KEY_STORE_EXECUTABLE_NAME} diff --git a/programs/keosd/main.cpp b/programs/keosd/main.cpp index 58a42d96b30..c457af3c0e3 100644 --- a/programs/keosd/main.cpp +++ b/programs/keosd/main.cpp @@ -43,7 +43,7 @@ int main(int argc, char** argv) http_plugin::set_defaults({ .address_config_prefix = "", .default_unix_socket_path = keosd::config::key_store_executable_name + ".sock", - .default_http_port = 8900 + .default_http_port = 0 }); app().register_plugin(); if(!app().initialize(argc, argv)) diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 82ce6470789..9e1481c23c3 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -72,6 +72,7 @@ endif() include(additionalPlugins) +copy_bin( ${NODE_EXECUTABLE_NAME} ) install( TARGETS ${NODE_EXECUTABLE_NAME} diff --git a/scripts/generate_bottle.sh b/scripts/generate_bottle.sh new file mode 100644 index 00000000000..fbec0a7a340 --- /dev/null +++ b/scripts/generate_bottle.sh @@ -0,0 +1,62 @@ +#! 
/bin/bash + +VERS=`sw_vers -productVersion | awk '/10\.13\..*/{print $0}'` +if [[ -z "$VERS" ]]; +then + VERS=`sw_vers -productVersion | awk '/10\.14\..*/{print $0}'` + if [[ -z "$VERS" ]]; + then + echo "Error, unsupported OS X version" + exit -1 + fi + MAC_VERSION="mojave" +else + MAC_VERSION="high_sierra" +fi + +NAME="${PROJECT}-${VERSION}.${MAC_VERSION}.bottle.tar.gz" + +mkdir -p ${PROJECT}/${VERSION}/opt/eosio/lib/cmake + +PREFIX="${PROJECT}/${VERSION}" +SPREFIX="\/usr\/local" +SUBPREFIX="opt/${PROJECT}" +SSUBPREFIX="opt\/${PROJECT}" + +export PREFIX +export SPREFIX +export SUBPREFIX +export SSUBPREFIX + +bash generate_tarball.sh ${NAME} + +hash=`openssl dgst -sha256 ${NAME} | awk 'NF>1{print $NF}'` + +echo "class Eosio < Formula + + homepage \"${URL}\" + revision 0 + url \"https://github.com/eosio/eos/archive/v${VERSION}.tar.gz\" + version \"${VERSION}\" + + option :universal + + depends_on \"gmp\" + depends_on \"gettext\" + depends_on \"openssl\" + depends_on \"gmp\" + depends_on :xcode + depends_on :macos => :high_sierra + depends_on :arch => :intel + + bottle do + root_url \"https://github.com/eosio/eos/releases/download/v${VERSION}\" + sha256 \"${hash}\" => :${MAC_VERSION} + end + def install + raise \"Error, only supporting binary packages at this time\" + end +end +__END__" &> eosio.rb + +rm -r ${PROJECT} diff --git a/scripts/generate_deb.sh b/scripts/generate_deb.sh new file mode 100644 index 00000000000..e52d4527316 --- /dev/null +++ b/scripts/generate_deb.sh @@ -0,0 +1,34 @@ +#! 
/bin/bash + +NAME="${PROJECT}-${VERSION}.x86_64" +PREFIX="usr" +SPREFIX=${PREFIX} +SUBPREFIX="opt/${PROJECT}/${VERSION}" +SSUBPREFIX="opt\/${PROJECT}\/${VERSION}" + +DEPS_STR="" +for dep in "${DEPS[@]}"; do + DEPS_STR="${DEPS_STR} Depends: ${dep}" +done +mkdir -p ${PROJECT}/DEBIAN +echo "Package: ${PROJECT} +Version: ${VERSION} +Section: devel +Priority: optional +Depends: libbz2-dev (>= 1.0), libssl-dev (>= 1.0), libgmp3-dev, build-essential, libicu-dev, zlib1g-dev +Architecture: amd64 +Homepage: ${URL} +Maintainer: ${EMAIL} +Description: ${DESC}" &> ${PROJECT}/DEBIAN/control + +export PREFIX +export SUBPREFIX +export SPREFIX +export SSUBPREFIX + +bash generate_tarball.sh ${NAME}.tar.gz + +tar -xvzf ${NAME}.tar.gz -C ${PROJECT} +dpkg-deb --build ${PROJECT} +mv ${PROJECT}.deb ${NAME}.deb +rm -r ${PROJECT} diff --git a/scripts/generate_package.sh.in b/scripts/generate_package.sh.in new file mode 100644 index 00000000000..909598cf5d0 --- /dev/null +++ b/scripts/generate_package.sh.in @@ -0,0 +1,35 @@ +#! /bin/bash + +VARIANT=$1 + +VERSION="@VERSION_MAJOR@.@VERSION_MINOR@.@VERSION_PATCH@" + +BUILD_DIR="@CMAKE_BINARY_DIR@" + +VENDOR="@VENDOR@" +PROJECT="@PROJECT_NAME@" +DESC="@DESC@" +URL="@URL@" +EMAIL="@EMAIL@" + +export BUILD_DIR +export VERSION +export VENDOR +export PROJECT +export DESC +export URL +export EMAIL + +mkdir tmp + +if [[ ${VARIANT} == "brew" ]]; then + bash generate_bottle.sh +elif [[ ${VARIANT} == "deb" ]]; then + bash generate_deb.sh +elif [[ ${VARIANT} == "rpm" ]]; then + bash generate_rpm.sh +else + echo "Error, unknown package type. Use either ['brew', 'deb', 'rpm']." + exit -1 +fi +rm -r tmp diff --git a/scripts/generate_rpm.sh b/scripts/generate_rpm.sh new file mode 100644 index 00000000000..5e9be4f5149 --- /dev/null +++ b/scripts/generate_rpm.sh @@ -0,0 +1,44 @@ +#! 
/bin/bash + +NAME="${PROJECT}-${VERSION}.x86_64" +PREFIX="usr" +SPREFIX=${PREFIX} +SUBPREFIX="opt/${PROJECT}/${VERSION}" +SSUBPREFIX="opt\/${PROJECT}\/${VERSION}" + +export PREFIX +export SUBPREFIX +export SPREFIX +export SSUBPREFIX + +bash generate_tarball.sh ${NAME}.tar.gz + +RPMBUILD=`realpath ~/rpmbuild/BUILDROOT/${NAME}-0.x86_64` +mkdir -p ${RPMBUILD} +FILES=$(tar -xvzf ${NAME}.tar.gz -C ${RPMBUILD}) +PFILES="" +for f in ${FILES[@]}; do + if [ -f ${RPMBUILD}/${f} ]; then + PFILES="${PFILES}/${f}\n" + fi +done +echo -e ${PFILES} &> ~/rpmbuild/BUILD/filenames.txt + +mkdir -p ${PROJECT} +echo -e "Name: ${PROJECT} +Version: ${VERSION}.x86_64 +License: MIT +Vendor: ${VENDOR} +Source: ${URL} +Requires: openssl-devel.x86_64, gmp-devel.x86_64, libstdc++-devel.x86_64, bzip2.x86_64, bzip2-devel.x86_64, mongodb.x86_64, mongodb-server.x86_64 +URL: ${URL} +Packager: ${VENDOR} <${EMAIL}> +Summary: ${DESC} +Release: 0 +%description +${DESC} +%files -f filenames.txt" &> ${PROJECT}.spec + +rpmbuild -bb ${PROJECT}.spec +mv ~/rpmbuild/RPMS/x86_64 ./ +rm -r ${PROJECT} ~/rpmbuild/BUILD/filenames.txt ${PROJECT}.spec diff --git a/scripts/generate_tarball.sh b/scripts/generate_tarball.sh new file mode 100644 index 00000000000..675f30b4af7 --- /dev/null +++ b/scripts/generate_tarball.sh @@ -0,0 +1,44 @@ +#! 
/bin/bash + +NAME=$1 +EOS_PREFIX=${PREFIX}/${SUBPREFIX} +mkdir -p ${PREFIX}/bin/ +#mkdir -p ${PREFIX}/lib/cmake/${PROJECT} +mkdir -p ${EOS_PREFIX}/bin +mkdir -p ${EOS_PREFIX}/licenses/eosio +#mkdir -p ${EOS_PREFIX}/include +#mkdir -p ${EOS_PREFIX}/lib/cmake/${PROJECT} +#mkdir -p ${EOS_PREFIX}/cmake +#mkdir -p ${EOS_PREFIX}/scripts + +# install binaries +cp -R ${BUILD_DIR}/bin/* ${EOS_PREFIX}/bin + +# install licenses +cp -R ${BUILD_DIR}/licenses/eosio/* ${EOS_PREFIX}/licenses + +# install libraries +#cp -R ${BUILD_DIR}/lib/* ${EOS_PREFIX}/lib + +# install cmake modules +#sed "s/_PREFIX_/\/${SPREFIX}/g" ${BUILD_DIR}/modules/EosioTesterPackage.cmake &> ${EOS_PREFIX}/lib/cmake/${PROJECT}/EosioTester.cmake +#sed "s/_PREFIX_/\/${SPREFIX}\/${SSUBPREFIX}/g" ${BUILD_DIR}/modules/${PROJECT}-config.cmake.package &> ${EOS_PREFIX}/lib/cmake/${PROJECT}/${PROJECT}-config.cmake + +# install includes +#cp -R ${BUILD_DIR}/include/* ${EOS_PREFIX}/include + +# make symlinks +#pushd ${PREFIX}/lib/cmake/${PROJECT} &> /dev/null +#ln -sf ../../../${SUBPREFIX}/lib/cmake/${PROJECT}/${PROJECT}-config.cmake ${PROJECT}-config.cmake +#ln -sf ../../../${SUBPREFIX}/lib/cmake/${PROJECT}/EosioTester.cmake EosioTester.cmake +#popd &> /dev/null + +pushd ${PREFIX}/bin &> /dev/null +for f in `ls ${BUILD_DIR}/bin/`; do + bn=$(basename $f) + ln -sf ../${SUBPREFIX}/bin/$bn $bn +done +popd &> /dev/null + +tar -cvzf $NAME ./${PREFIX}/* +rm -r ${PREFIX} diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 417eb35f07c..ad77d850285 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -21,7 +21,7 @@ target_link_libraries( plugin_test eosio_testing eosio_chain chainbase eos_utili target_include_directories( plugin_test PUBLIC ${CMAKE_SOURCE_DIR}/plugins/net_plugin/include ${CMAKE_SOURCE_DIR}/plugins/chain_plugin/include ) -add_dependencies(plugin_test asserter test_api test_api_mem test_api_db test_api_multi_index proxy identity identity_test stltest infinite eosio.system eosio.token 
eosio.bios test.inline multi_index_test noop dice eosio.msig) +add_dependencies(plugin_test asserter test_api test_api_mem test_api_db test_api_multi_index proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop eosio.msig) # configure_file(${CMAKE_CURRENT_SOURCE_DIR}/core_symbol.py.in ${CMAKE_CURRENT_BINARY_DIR}/core_symbol.py) @@ -50,20 +50,24 @@ add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_ou add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME nodeos_sanity_bnet_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_sanity_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME bnet_nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST bnet_nodeos_run_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME nodeos_run_bnet_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_run_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) if(BUILD_MONGO_DB_PLUGIN) add_test(NAME nodeos_run_test-mongodb COMMAND tests/nodeos_run_test.py --mongodb -v --clean-run 
--dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) - set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) + set_property(TEST nodeos_run_test-mongodb PROPERTY LABELS nonparallelizable_tests) endif() -add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME distributed-transactions-bnet-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 --p2p-plugin bnet -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST distributed-transactions-bnet-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -79,22 +83,24 @@ set_property(TEST launcher_test PROPERTY LABELS nonparallelizable_tests) # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME bnet_nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST bnet_nodeos_sanity_lr_test 
PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_sanity_bnet_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_sanity_bnet_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_run_check_lr_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_run_check_lr_test PROPERTY LABELS long_running_tests) #add_test(NAME distributed_transactions_lr_test COMMAND tests/distributed-transactions-test.py -d 2 -p 21 -n 21 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) #set_property(TEST distributed_transactions_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_forked_chain_lr_test COMMAND tests/nodeos_forked_chain_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_forked_chain_lr_test COMMAND tests/nodeos_forked_chain_test.py -v --wallet-port 9901 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME bnet_nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST bnet_nodeos_voting_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_voting_bnet_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9903 --p2p-plugin bnet --clean-run 
--dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) @@ -126,7 +132,7 @@ if(ENABLE_COVERAGE_TESTING) COMMAND ctest -R ${ctest_tests} -E ${ctest_exclude_tests} COMMENT "Capturing lcov counters and generating report" - COMMAND ${LCOV_PATH} --directory . --capture --gcov-tool ./tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info + COMMAND ${LCOV_PATH} --directory . --capture --gcov-tool ${CMAKE_SOURCE_DIR}/tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info COMMENT "Processing code coverage counters and generating report." COMMAND ${GENHTML_PATH} -o ${Coverage_NAME} ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info diff --git a/tests/Cluster.py b/tests/Cluster.py index 8a9a1cb4b8f..070edd9b3ba 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -4,7 +4,6 @@ import glob import shutil import os -import platform import re import string import signal @@ -12,8 +11,6 @@ import sys import random import json -import socket -import errno from core_symbol import CORE_SYMBOL from testUtils import Utils @@ -32,6 +29,10 @@ class Cluster(object): __BiosHost="localhost" __BiosPort=8788 __LauncherCmdArr=[] + __bootlog="eosio-ignition-wd/bootlog.txt" + __configDir="etc/eosio/" + __dataDir="var/lib/" + __fileDivider="=================================================================" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. 
If not load the wallet plugin @@ -64,9 +65,6 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 self.port=port self.walletHost=walletHost self.walletPort=walletPort - self.walletEndpointArgs="" - if self.walletd: - self.walletEndpointArgs += " --wallet-url http://%s:%d" % (self.walletHost, self.walletPort) self.mongoEndpointArgs="" self.mongoUri="" if self.enableMongo: @@ -84,6 +82,9 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 self.defproducerbAccount.ownerPrivateKey=defproducerbPrvtKey self.defproducerbAccount.activePrivateKey=defproducerbPrvtKey + self.useBiosBootFile=False + self.filesToCleanup=[] + def setChainStrategy(self, chainSyncStrategy=Utils.SyncReplayTag): self.__chainSyncStrategy=self.__chainSyncStrategies.get(chainSyncStrategy) @@ -98,8 +99,8 @@ def setWalletMgr(self, walletMgr): # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontKill=False - , dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None): + def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, + totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None): """Launch cluster. 
pnodes: producer nodes count totalNodes: producer + non-producer nodes count @@ -125,13 +126,16 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne if len(self.nodes) > 0: raise RuntimeError("Cluster already running.") + if self.walletMgr is None: + self.walletMgr=WalletMgr(True) + producerFlag="" if totalProducers: assert(isinstance(totalProducers, (str,int))) producerFlag="--producers %s" % (totalProducers) tries = 30 - while not Cluster.arePortsAvailable(set(range(self.port, self.port+totalNodes+1))): + while not Utils.arePortsAvailable(set(range(self.port, self.port+totalNodes+1))): Utils.Print("ERROR: Another process is listening on nodeos default port. wait...") if tries == 0: return False @@ -145,7 +149,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne if self.staging: cmdArr.append("--nogen") - nodeosArgs="--max-transaction-time 990000 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) + nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) if not self.walletd: nodeosArgs += " --plugin eosio::wallet_api_plugin" if self.enableMongo: @@ -170,6 +174,11 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append("--specific-nodeos") cmdArr.append(arg) + cmdArr.append("--max-block-cpu-usage") + cmdArr.append(str(160000000)) + cmdArr.append("--max-transaction-cpu-usage") + cmdArr.append(str(150000000)) + # must be last cmdArr.append before subprocess.call, so that everything is on the command line # before constructing the shape.json file for "bridge" if topo=="bridge": @@ -310,8 +319,7 @@ def connectGroup(group, producerNodes, bridgeNodes) : self.nodes=nodes if onlyBios: - biosNode=Node(Cluster.__BiosHost, Cluster.__BiosPort) - biosNode.setWalletEndpointArgs(self.walletEndpointArgs) + biosNode=Node(Cluster.__BiosHost, Cluster.__BiosPort, 
walletMgr=self.walletMgr) if not biosNode.checkPulse(): Utils.Print("ERROR: Bios node doesn't appear to be running...") return False @@ -330,12 +338,13 @@ def connectGroup(group, producerNodes, bridgeNodes) : Utils.Print("Bootstrap cluster.") if onlyBios or not useBiosBootFile: - self.biosNode=Cluster.bootstrap(totalNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, dontKill, onlyBios) + self.biosNode=Cluster.bootstrap(totalNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False else: - self.biosNode=Cluster.bios_bootstrap(totalNodes, Cluster.__BiosHost, Cluster.__BiosPort, dontKill) + self.useBiosBootFile=True + self.biosNode=Cluster.bios_bootstrap(totalNodes, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False @@ -366,40 +375,11 @@ def initAccountKeys(account, keys): return True - @staticmethod - def arePortsAvailable(ports): - """Check if specified ports are available for listening on.""" - assert(ports) - assert(isinstance(ports, set)) - - for port in ports: - if Utils.Debug: Utils.Print("Checking if port %d is available." 
% (port)) - assert(isinstance(port, int)) - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - - try: - s.bind(("127.0.0.1", port)) - except socket.error as e: - if e.errno == errno.EADDRINUSE: - Utils.Print("ERROR: Port %d is already in use" % (port)) - else: - # something else raised the socket.error exception - Utils.Print("ERROR: Unknown exception while trying to listen on port %d" % (port)) - Utils.Print(e) - return False - finally: - s.close() - - return True - - # Initialize the default nodes (at present just the root node) def initializeNodes(self, defproduceraPrvtKey=None, defproducerbPrvtKey=None, onlyBios=False): port=Cluster.__BiosPort if onlyBios else self.port host=Cluster.__BiosHost if onlyBios else self.host - node=Node(host, port, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) - node.setWalletEndpointArgs(self.walletEndpointArgs) + node=Node(host, port, walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) if Utils.Debug: Utils.Print("Node: %s", str(node)) node.checkPulse(exitOnError=True) @@ -438,8 +418,7 @@ def initializeNodesFromJson(self, nodesJsonStr): for n in nArr: port=n["port"] host=n["host"] - node=Node(host, port) - node.setWalletEndpointArgs(self.walletEndpointArgs) + node=Node(host, port, walletMgr=self.walletMgr) if Utils.Debug: Utils.Print("Node:", node) node.checkPulse(exitOnError=True) @@ -769,6 +748,14 @@ def nodeNameToId(name): m=re.search(r"node_([\d]+)", name) return int(m.group(1)) + @staticmethod + def nodeExtensionToName(ext): + r"""Convert node extension (bios, 0, 1, etc) to node name. 
""" + prefix="node_" + if ext == "bios": + return prefix + ext + + return "node_%02d" % (ext) @staticmethod def parseProducerKeys(configFile, nodeName): @@ -807,8 +794,7 @@ def parseProducerKeys(configFile, nodeName): def parseProducers(nodeNum): """Parse node config file for producers.""" - node="node_%02d" % (nodeNum) - configFile="etc/eosio/%s/config.ini" % (node) + configFile=Cluster.__configDir + Cluster.nodeExtensionToName(nodeNum) + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) configStr=None with open(configFile, 'r') as f: @@ -826,20 +812,20 @@ def parseProducers(nodeNum): def parseClusterKeys(totalNodes): """Parse cluster config file. Updates producer keys data members.""" - node="node_bios" - configFile="etc/eosio/%s/config.ini" % (node) + nodeName=Cluster.nodeExtensionToName("bios") + configFile=Cluster.__configDir + nodeName + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) - producerKeys=Cluster.parseProducerKeys(configFile, node) + producerKeys=Cluster.parseProducerKeys(configFile, nodeName) if producerKeys is None: Utils.Print("ERROR: Failed to parse eosio private keys from cluster config files.") return None for i in range(0, totalNodes): - node="node_%02d" % (i) - configFile="etc/eosio/%s/config.ini" % (node) + nodeName=Cluster.nodeExtensionToName(i) + configFile=Cluster.__configDir + nodeName + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) - keys=Cluster.parseProducerKeys(configFile, node) + keys=Cluster.parseProducerKeys(configFile, nodeName) if keys is not None: producerKeys.update(keys) keyMsg="None" if keys is None else len(keys) @@ -847,11 +833,11 @@ def parseClusterKeys(totalNodes): return producerKeys @staticmethod - def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): + def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False): """Bootstrap cluster using the bios_boot.sh script generated by 
eosio-launcher.""" Utils.Print("Starting cluster bootstrap.") - biosNode=Node(biosHost, biosPort) + biosNode=Node(biosHost, biosPort, walletMgr=walletMgr) if not biosNode.checkPulse(): Utils.Print("ERROR: Bios node doesn't appear to be running...") return None @@ -863,11 +849,10 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): return None p = re.compile('error', re.IGNORECASE) - bootlog="eosio-ignition-wd/bootlog.txt" - with open(bootlog) as bootFile: + with open(Cluster.__bootlog) as bootFile: for line in bootFile: if p.search(line): - Utils.Print("ERROR: bios_boot.sh script resulted in errors. See %s" % (bootlog)) + Utils.Print("ERROR: bios_boot.sh script resulted in errors. See %s" % (Cluster.__bootlog)) Utils.Print(line) return None @@ -877,66 +862,59 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): Utils.Print("ERROR: Failed to parse private keys from cluster config files.") return None - walletMgr=WalletMgr(True) walletMgr.killall() walletMgr.cleanup() if not walletMgr.launch(): Utils.Print("ERROR: Failed to launch bootstrap wallet.") return None - biosNode.setWalletEndpointArgs(walletMgr.walletEndpointArgs) - try: - ignWallet=walletMgr.create("ignition") - if ignWallet is None: - Utils.Print("ERROR: Failed to create ignition wallet.") - return None + ignWallet=walletMgr.create("ignition") + if ignWallet is None: + Utils.Print("ERROR: Failed to create ignition wallet.") + return None - eosioName="eosio" - eosioKeys=producerKeys[eosioName] - eosioAccount=Account(eosioName) - eosioAccount.ownerPrivateKey=eosioKeys["private"] - eosioAccount.ownerPublicKey=eosioKeys["public"] - eosioAccount.activePrivateKey=eosioKeys["private"] - eosioAccount.activePublicKey=eosioKeys["public"] - producerKeys.pop(eosioName) - - if not walletMgr.importKey(eosioAccount, ignWallet): - Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." 
% (eosioName)) - return None + eosioName="eosio" + eosioKeys=producerKeys[eosioName] + eosioAccount=Account(eosioName) + eosioAccount.ownerPrivateKey=eosioKeys["private"] + eosioAccount.ownerPublicKey=eosioKeys["public"] + eosioAccount.activePrivateKey=eosioKeys["private"] + eosioAccount.activePublicKey=eosioKeys["public"] + producerKeys.pop(eosioName) + + if not walletMgr.importKey(eosioAccount, ignWallet): + Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) + return None - initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) - Utils.Print("Transfer initial fund %s to individual accounts." % (initialFunds)) - trans=None - contract="eosio.token" - action="transfer" - for name, keys in producerKeys.items(): - data="{\"from\":\"eosio\",\"to\":\"%s\",\"quantity\":\"%s\",\"memo\":\"%s\"}" % (name, initialFunds, "init transfer") - opts="--permission eosio@active" - if name != "eosio": - trans=biosNode.pushMessage(contract, action, data, opts) - if trans is None or not trans[0]: - Utils.Print("ERROR: Failed to transfer funds from eosio.token to %s." % (name)) - return None + initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) + Utils.Print("Transfer initial fund %s to individual accounts." % (initialFunds)) + trans=None + contract="eosio.token" + action="transfer" + for name, keys in producerKeys.items(): + data="{\"from\":\"eosio\",\"to\":\"%s\",\"quantity\":\"%s\",\"memo\":\"%s\"}" % (name, initialFunds, "init transfer") + opts="--permission eosio@active" + if name != "eosio": + trans=biosNode.pushMessage(contract, action, data, opts) + if trans is None or not trans[0]: + Utils.Print("ERROR: Failed to transfer funds from eosio.token to %s." 
% (name)) + return None - Node.validateTransaction(trans[1]) + Node.validateTransaction(trans[1]) - Utils.Print("Wait for last transfer transaction to become finalized.") - transId=Node.getTransId(trans[1]) - if not biosNode.waitForTransInBlock(transId): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) - return None + Utils.Print("Wait for last transfer transaction to become finalized.") + transId=Node.getTransId(trans[1]) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) + return None - Utils.Print("Cluster bootstrap done.") - finally: - if not dontKill: - walletMgr.killall() - walletMgr.cleanup() + Utils.Print("Cluster bootstrap done.") return biosNode @staticmethod - def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, dontKill=False, onlyBios=False): + def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, onlyBios=False): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" @@ -944,7 +922,7 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, dontKil if totalProducers is None: totalProducers=totalNodes - biosNode=Node(biosHost, biosPort) + biosNode=Node(biosHost, biosPort, walletMgr=walletMgr) if not biosNode.checkPulse(): Utils.Print("ERROR: Bios node doesn't appear to be running...") return None @@ -958,258 +936,246 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, dontKil Utils.Print("ERROR: Failed to parse %d producer keys from cluster config files, only found %d." 
% (totalProducers+1,len(producerKeys))) return None - walletMgr=WalletMgr(True) walletMgr.killall() walletMgr.cleanup() if not walletMgr.launch(): Utils.Print("ERROR: Failed to launch bootstrap wallet.") return None - biosNode.setWalletEndpointArgs(walletMgr.walletEndpointArgs) - try: - ignWallet=walletMgr.create("ignition") - - eosioName="eosio" - eosioKeys=producerKeys[eosioName] - eosioAccount=Account(eosioName) - eosioAccount.ownerPrivateKey=eosioKeys["private"] - eosioAccount.ownerPublicKey=eosioKeys["public"] - eosioAccount.activePrivateKey=eosioKeys["private"] - eosioAccount.activePublicKey=eosioKeys["public"] - - if not walletMgr.importKey(eosioAccount, ignWallet): - Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) - return None + ignWallet=walletMgr.create("ignition") + + eosioName="eosio" + eosioKeys=producerKeys[eosioName] + eosioAccount=Account(eosioName) + eosioAccount.ownerPrivateKey=eosioKeys["private"] + eosioAccount.ownerPublicKey=eosioKeys["public"] + eosioAccount.activePrivateKey=eosioKeys["private"] + eosioAccount.activePublicKey=eosioKeys["public"] + + if not walletMgr.importKey(eosioAccount, ignWallet): + Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) + return None - contract="eosio.bios" - contractDir="contracts/%s" % (contract) - wasmFile="%s.wasm" % (contract) - abiFile="%s.abi" % (contract) - Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + contract="eosio.bios" + contractDir="contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + Utils.Print("Publish %s contract" % (contract)) + trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + Utils.Print("ERROR: Failed to publish contract %s." 
% (contract)) + return None + + Node.validateTransaction(trans) + + Utils.Print("Creating accounts: %s " % ", ".join(producerKeys.keys())) + producerKeys.pop(eosioName) + accounts=[] + for name, keys in producerKeys.items(): + initx = None + initx = Account(name) + initx.ownerPrivateKey=keys["private"] + initx.ownerPublicKey=keys["public"] + initx.activePrivateKey=keys["private"] + initx.activePublicKey=keys["public"] + trans=biosNode.createAccount(initx, eosioAccount, 0) if trans is None: - Utils.Print("ERROR: Failed to publish contract %s." % (contract)) + Utils.Print("ERROR: Failed to create account %s" % (name)) return None - Node.validateTransaction(trans) + accounts.append(initx) + + transId=Node.getTransId(trans) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) + return None + + Utils.Print("Validating system accounts within bootstrap") + biosNode.validateAccounts(accounts) - Utils.Print("Creating accounts: %s " % ", ".join(producerKeys.keys())) - producerKeys.pop(eosioName) - accounts=[] - for name, keys in producerKeys.items(): - initx = None - initx = Account(name) - initx.ownerPrivateKey=keys["private"] - initx.ownerPublicKey=keys["public"] - initx.activePrivateKey=keys["private"] - initx.activePublicKey=keys["public"] - trans=biosNode.createAccount(initx, eosioAccount, 0) - if trans is None: - Utils.Print("ERROR: Failed to create account %s" % (name)) + if not onlyBios: + if prodCount == -1: + setProdsFile="setprods.json" + if Utils.Debug: Utils.Print("Reading in setprods file %s." 
% (setProdsFile)) + with open(setProdsFile, "r") as f: + setProdsStr=f.read() + + Utils.Print("Setting producers.") + opts="--permission eosio@active" + myTrans=biosNode.pushMessage("eosio", "setprods", setProdsStr, opts) + if myTrans is None or not myTrans[0]: + Utils.Print("ERROR: Failed to set producers.") + return None + else: + counts=dict.fromkeys(range(totalNodes), 0) #initialize node prods count to 0 + setProdsStr='{"schedule": [' + firstTime=True + prodNames=[] + for name, keys in producerKeys.items(): + if counts[keys["node"]] >= prodCount: + continue + if firstTime: + firstTime = False + else: + setProdsStr += ',' + + setProdsStr += ' { "producer_name": "%s", "block_signing_key": "%s" }' % (keys["name"], keys["public"]) + prodNames.append(keys["name"]) + counts[keys["node"]] += 1 + + setProdsStr += ' ] }' + if Utils.Debug: Utils.Print("setprods: %s" % (setProdsStr)) + Utils.Print("Setting producers: %s." % (", ".join(prodNames))) + opts="--permission eosio@active" + # pylint: disable=redefined-variable-type + trans=biosNode.pushMessage("eosio", "setprods", setProdsStr, opts) + if trans is None or not trans[0]: + Utils.Print("ERROR: Failed to set producer %s." % (keys["name"])) return None - Node.validateTransaction(trans) - accounts.append(initx) + trans=trans[1] transId=Node.getTransId(trans) if not biosNode.waitForTransInBlock(transId): Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) return None - Utils.Print("Validating system accounts within bootstrap") - biosNode.validateAccounts(accounts) - - if not onlyBios: - if prodCount == -1: - setProdsFile="setprods.json" - if Utils.Debug: Utils.Print("Reading in setprods file %s." 
% (setProdsFile)) - with open(setProdsFile, "r") as f: - setProdsStr=f.read() - - Utils.Print("Setting producers.") - opts="--permission eosio@active" - myTrans=biosNode.pushMessage("eosio", "setprods", setProdsStr, opts) - if myTrans is None or not myTrans[0]: - Utils.Print("ERROR: Failed to set producers.") - return None - else: - counts=dict.fromkeys(range(totalNodes), 0) #initialize node prods count to 0 - setProdsStr='{"schedule": [' - firstTime=True - prodNames=[] - for name, keys in producerKeys.items(): - if counts[keys["node"]] >= prodCount: - continue - if firstTime: - firstTime = False - else: - setProdsStr += ',' + # wait for block production handover (essentially a block produced by anyone but eosio). + lam = lambda: biosNode.getInfo(exitOnError=True)["head_block_producer"] != "eosio" + ret=Utils.waitForBool(lam) + if not ret: + Utils.Print("ERROR: Block production handover failed.") + return None - setProdsStr += ' { "producer_name": "%s", "block_signing_key": "%s" }' % (keys["name"], keys["public"]) - prodNames.append(keys["name"]) - counts[keys["node"]] += 1 + eosioTokenAccount=copy.deepcopy(eosioAccount) + eosioTokenAccount.name="eosio.token" + trans=biosNode.createAccount(eosioTokenAccount, eosioAccount, 0) + if trans is None: + Utils.Print("ERROR: Failed to create account %s" % (eosioTokenAccount.name)) + return None - setProdsStr += ' ] }' - if Utils.Debug: Utils.Print("setprods: %s" % (setProdsStr)) - Utils.Print("Setting producers: %s." % (", ".join(prodNames))) - opts="--permission eosio@active" - # pylint: disable=redefined-variable-type - trans=biosNode.pushMessage("eosio", "setprods", setProdsStr, opts) - if trans is None or not trans[0]: - Utils.Print("ERROR: Failed to set producer %s." 
% (keys["name"])) - return None + eosioRamAccount=copy.deepcopy(eosioAccount) + eosioRamAccount.name="eosio.ram" + trans=biosNode.createAccount(eosioRamAccount, eosioAccount, 0) + if trans is None: + Utils.Print("ERROR: Failed to create account %s" % (eosioRamAccount.name)) + return None - trans=trans[1] - transId=Node.getTransId(trans) - if not biosNode.waitForTransInBlock(transId): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) - return None + eosioRamfeeAccount=copy.deepcopy(eosioAccount) + eosioRamfeeAccount.name="eosio.ramfee" + trans=biosNode.createAccount(eosioRamfeeAccount, eosioAccount, 0) + if trans is None: + Utils.Print("ERROR: Failed to create account %s" % (eosioRamfeeAccount.name)) + return None - # wait for block production handover (essentially a block produced by anyone but eosio). - lam = lambda: biosNode.getInfo(exitOnError=True)["head_block_producer"] != "eosio" - ret=Utils.waitForBool(lam) - if not ret: - Utils.Print("ERROR: Block production handover failed.") - return None + eosioStakeAccount=copy.deepcopy(eosioAccount) + eosioStakeAccount.name="eosio.stake" + trans=biosNode.createAccount(eosioStakeAccount, eosioAccount, 0) + if trans is None: + Utils.Print("ERROR: Failed to create account %s" % (eosioStakeAccount.name)) + return None - eosioTokenAccount=copy.deepcopy(eosioAccount) - eosioTokenAccount.name="eosio.token" - trans=biosNode.createAccount(eosioTokenAccount, eosioAccount, 0) - if trans is None: - Utils.Print("ERROR: Failed to create account %s" % (eosioTokenAccount.name)) - return None + Node.validateTransaction(trans) + transId=Node.getTransId(trans) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." 
% (transId, biosNode.port)) + return None - eosioRamAccount=copy.deepcopy(eosioAccount) - eosioRamAccount.name="eosio.ram" - trans=biosNode.createAccount(eosioRamAccount, eosioAccount, 0) - if trans is None: - Utils.Print("ERROR: Failed to create account %s" % (eosioRamAccount.name)) - return None + contract="eosio.token" + contractDir="contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + Utils.Print("Publish %s contract" % (contract)) + trans=biosNode.publishContract(eosioTokenAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + Utils.Print("ERROR: Failed to publish contract %s." % (contract)) + return None - eosioRamfeeAccount=copy.deepcopy(eosioAccount) - eosioRamfeeAccount.name="eosio.ramfee" - trans=biosNode.createAccount(eosioRamfeeAccount, eosioAccount, 0) - if trans is None: - Utils.Print("ERROR: Failed to create account %s" % (eosioRamfeeAccount.name)) - return None + # Create currency0000, followed by issue currency0000 + contract=eosioTokenAccount.name + Utils.Print("push create action to %s contract" % (contract)) + action="create" + data="{\"issuer\":\"%s\",\"maximum_supply\":\"1000000000.0000 %s\",\"can_freeze\":\"0\",\"can_recall\":\"0\",\"can_whitelist\":\"0\"}" % (eosioTokenAccount.name, CORE_SYMBOL) + opts="--permission %s@active" % (contract) + trans=biosNode.pushMessage(contract, action, data, opts) + if trans is None or not trans[0]: + Utils.Print("ERROR: Failed to push create action to eosio contract.") + return None - eosioStakeAccount=copy.deepcopy(eosioAccount) - eosioStakeAccount.name="eosio.stake" - trans=biosNode.createAccount(eosioStakeAccount, eosioAccount, 0) - if trans is None: - Utils.Print("ERROR: Failed to create account %s" % (eosioStakeAccount.name)) - return None + Node.validateTransaction(trans[1]) + transId=Node.getTransId(trans[1]) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled 
into a block on server port %d." % (transId, biosNode.port)) + return None - Node.validateTransaction(trans) - transId=Node.getTransId(trans) - if not biosNode.waitForTransInBlock(transId): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) - return None + contract=eosioTokenAccount.name + Utils.Print("push issue action to %s contract" % (contract)) + action="issue" + data="{\"to\":\"%s\",\"quantity\":\"1000000000.0000 %s\",\"memo\":\"initial issue\"}" % (eosioAccount.name, CORE_SYMBOL) + opts="--permission %s@active" % (contract) + trans=biosNode.pushMessage(contract, action, data, opts) + if trans is None or not trans[0]: + Utils.Print("ERROR: Failed to push issue action to eosio contract.") + return None - contract="eosio.token" - contractDir="contracts/%s" % (contract) - wasmFile="%s.wasm" % (contract) - abiFile="%s.abi" % (contract) - Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioTokenAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - Utils.Print("ERROR: Failed to publish contract %s." % (contract)) - return None + Node.validateTransaction(trans[1]) + Utils.Print("Wait for issue action transaction to become finalized.") + transId=Node.getTransId(trans[1]) + # biosNode.waitForTransInBlock(transId) + # guesstimating block finalization timeout. Two production rounds of 12 blocks per node, plus 60 seconds buffer + timeout = .5 * 12 * 2 * len(producerKeys) + 60 + if not biosNode.waitForTransFinalization(transId, timeout=timeout): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a finalized block on server port %d." 
% (transId, biosNode.port)) + return None - # Create currency0000, followed by issue currency0000 - contract=eosioTokenAccount.name - Utils.Print("push create action to %s contract" % (contract)) - action="create" - data="{\"issuer\":\"%s\",\"maximum_supply\":\"1000000000.0000 %s\",\"can_freeze\":\"0\",\"can_recall\":\"0\",\"can_whitelist\":\"0\"}" % (eosioTokenAccount.name, CORE_SYMBOL) - opts="--permission %s@active" % (contract) - trans=biosNode.pushMessage(contract, action, data, opts) - if trans is None or not trans[0]: - Utils.Print("ERROR: Failed to push create action to eosio contract.") - return None + expectedAmount="1000000000.0000 {0}".format(CORE_SYMBOL) + Utils.Print("Verify eosio issue, Expected: %s" % (expectedAmount)) + actualAmount=biosNode.getAccountEosBalanceStr(eosioAccount.name) + if expectedAmount != actualAmount: + Utils.Print("ERROR: Issue verification failed. Excepted %s, actual: %s" % + (expectedAmount, actualAmount)) + return None - Node.validateTransaction(trans[1]) - transId=Node.getTransId(trans[1]) - if not biosNode.waitForTransInBlock(transId): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) - return None + contract="eosio.system" + contractDir="contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + Utils.Print("Publish %s contract" % (contract)) + trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + Utils.Print("ERROR: Failed to publish contract %s." 
% (contract)) + return None - contract=eosioTokenAccount.name - Utils.Print("push issue action to %s contract" % (contract)) - action="issue" - data="{\"to\":\"%s\",\"quantity\":\"1000000000.0000 %s\",\"memo\":\"initial issue\"}" % (eosioAccount.name, CORE_SYMBOL) - opts="--permission %s@active" % (contract) + Node.validateTransaction(trans) + + initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) + Utils.Print("Transfer initial fund %s to individual accounts." % (initialFunds)) + trans=None + contract=eosioTokenAccount.name + action="transfer" + for name, keys in producerKeys.items(): + data="{\"from\":\"%s\",\"to\":\"%s\",\"quantity\":\"%s\",\"memo\":\"%s\"}" % (eosioAccount.name, name, initialFunds, "init transfer") + opts="--permission %s@active" % (eosioAccount.name) trans=biosNode.pushMessage(contract, action, data, opts) if trans is None or not trans[0]: - Utils.Print("ERROR: Failed to push issue action to eosio contract.") + Utils.Print("ERROR: Failed to transfer funds from %s to %s." % (eosioTokenAccount.name, name)) return None Node.validateTransaction(trans[1]) - Utils.Print("Wait for issue action transaction to become finalized.") - transId=Node.getTransId(trans[1]) - # biosNode.waitForTransInBlock(transId) - # guesstimating block finalization timeout. Two production rounds of 12 blocks per node, plus 60 seconds buffer - timeout = .5 * 12 * 2 * len(producerKeys) + 60 - if not biosNode.waitForTransFinalization(transId, timeout=timeout): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a finalized block on server port %d." % (transId, biosNode.port)) - return None - - expectedAmount="1000000000.0000 {0}".format(CORE_SYMBOL) - Utils.Print("Verify eosio issue, Expected: %s" % (expectedAmount)) - actualAmount=biosNode.getAccountEosBalanceStr(eosioAccount.name) - if expectedAmount != actualAmount: - Utils.Print("ERROR: Issue verification failed. 
Excepted %s, actual: %s" % - (expectedAmount, actualAmount)) - return None - - contract="eosio.system" - contractDir="contracts/%s" % (contract) - wasmFile="%s.wasm" % (contract) - abiFile="%s.abi" % (contract) - Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - Utils.Print("ERROR: Failed to publish contract %s." % (contract)) - return None - - Node.validateTransaction(trans) - initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) - Utils.Print("Transfer initial fund %s to individual accounts." % (initialFunds)) - trans=None - contract=eosioTokenAccount.name - action="transfer" - for name, keys in producerKeys.items(): - data="{\"from\":\"%s\",\"to\":\"%s\",\"quantity\":\"%s\",\"memo\":\"%s\"}" % (eosioAccount.name, name, initialFunds, "init transfer") - opts="--permission %s@active" % (eosioAccount.name) - trans=biosNode.pushMessage(contract, action, data, opts) - if trans is None or not trans[0]: - Utils.Print("ERROR: Failed to transfer funds from %s to %s." % (eosioTokenAccount.name, name)) - return None - - Node.validateTransaction(trans[1]) - - Utils.Print("Wait for last transfer transaction to become finalized.") - transId=Node.getTransId(trans[1]) - if not biosNode.waitForTransInBlock(transId): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) - return None + Utils.Print("Wait for last transfer transaction to become finalized.") + transId=Node.getTransId(trans[1]) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." 
% (transId, biosNode.port)) + return None - Utils.Print("Cluster bootstrap done.") - finally: - if not dontKill: - walletMgr.killall() - walletMgr.cleanup() + Utils.Print("Cluster bootstrap done.") return biosNode @staticmethod def pgrepEosServers(timeout=None): - pgrepOpts="-fl" - # pylint: disable=deprecated-method - if platform.linux_distribution()[0] in ["Ubuntu", "LinuxMint", "Fedora","CentOS Linux","arch"]: - pgrepOpts="-a" - - cmd="pgrep %s %s" % (pgrepOpts, Utils.EosServerName) + cmd=Utils.pgrepCmd(Utils.EosServerName) def myFunc(): psOut=None @@ -1227,11 +1193,8 @@ def myFunc(): @staticmethod def pgrepEosServerPattern(nodeInstance): - if isinstance(nodeInstance, str): - return r"[\n]?(\d+) (.* --data-dir var/lib/node_%s .*)\n" % nodeInstance - else: - nodeInstanceStr="%02d" % nodeInstance - return Cluster.pgrepEosServerPattern(nodeInstanceStr) + dataLocation=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeInstance) + return r"[\n]?(\d+) (.* --data-dir %s .*)\n" % (dataLocation) # Populates list of EosInstanceInfo objects, matched to actual running instances def discoverLocalNodes(self, totalNodes, timeout=None): @@ -1253,8 +1216,7 @@ def discoverLocalNodes(self, totalNodes, timeout=None): if m is None: Utils.Print("ERROR: Failed to find %s pid. 
Pattern %s" % (Utils.EosServerName, pattern)) break - instance=Node(self.host, self.port + i, pid=int(m.group(1)), cmd=m.group(2), enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) - instance.setWalletEndpointArgs(self.walletEndpointArgs) + instance=Node(self.host, self.port + i, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) if Utils.Debug: Utils.Print("Node>", instance) nodes.append(instance) @@ -1304,7 +1266,7 @@ def relaunchEosInstances(self): @staticmethod def dumpErrorDetailImpl(fileName): - Utils.Print("=================================================================") + Utils.Print(Cluster.__fileDivider) Utils.Print("Contents of %s:" % (fileName)) if os.path.exists(fileName): with open(fileName, "r") as f: @@ -1313,16 +1275,22 @@ def dumpErrorDetailImpl(fileName): Utils.Print("File %s not found." % (fileName)) def dumpErrorDetails(self): - fileName="etc/eosio/node_bios/config.ini" + fileName=Cluster.__configDir + Cluster.nodeExtensionToName("bios") + "/config.ini" Cluster.dumpErrorDetailImpl(fileName) - fileName="var/lib/node_bios/stderr.txt" + fileName=Cluster.__dataDir + Cluster.nodeExtensionToName("bios") + "/stderr.txt" Cluster.dumpErrorDetailImpl(fileName) for i in range(0, len(self.nodes)): - fileName="etc/eosio/node_%02d/config.ini" % (i) + configLocation=Cluster.__configDir + Cluster.nodeExtensionToName(i) + "/" + fileName=configLocation + "config.ini" Cluster.dumpErrorDetailImpl(fileName) - fileName="var/lib/node_%02d/stderr.txt" % (i) + fileName=configLocation + "genesis.json" Cluster.dumpErrorDetailImpl(fileName) + fileName=Cluster.__dataDir + Cluster.nodeExtensionToName(i) + "/stderr.txt" + Cluster.dumpErrorDetailImpl(fileName) + + if self.useBiosBootFile: + Cluster.dumpErrorDetailImpl(Cluster.__bootlog) def killall(self, silent=True, allInstances=False): """Kill cluster 
nodeos instances. allInstances will kill all nodeos instances running on the system.""" @@ -1390,11 +1358,14 @@ def waitForNextBlock(self, timeout=None): return node.waitForNextBlock(timeout) def cleanup(self): - for f in glob.glob("var/lib/node_*"): + for f in glob.glob(Cluster.__dataDir + "node_*"): shutil.rmtree(f) - for f in glob.glob("etc/eosio/node_*"): + for f in glob.glob(Cluster.__configDir + "node_*"): shutil.rmtree(f) + for f in self.filesToCleanup: + os.remove(f) + if self.enableMongo: cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs) subcommand="db.dropDatabase()" @@ -1444,3 +1415,134 @@ def reportStatus(self): node.reportStatus() except: Utils.Print("No reportStatus") + + def printBlockLogIfNeeded(self): + printBlockLog=False + if hasattr(self, "nodes") and self.nodes is not None: + for node in self.nodes: + if node.missingTransaction: + printBlockLog=True + break + + if hasattr(self, "biosNode") and self.biosNode is not None and self.biosNode.missingTransaction: + printBlockLog=True + + if not printBlockLog: + return + + self.printBlockLog() + + def getBlockLog(self, nodeExtension): + blockLogDir=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeExtension) + "/blocks/" + return Utils.getBlockLog(blockLogDir, exitOnError=False) + + def printBlockLog(self): + blockLogBios=self.getBlockLog("bios") + Utils.Print(Cluster.__fileDivider) + Utils.Print("Block log from %s:\n%s" % (blockLogDir, json.dumps(blockLogBios, indent=1))) + + if not hasattr(self, "nodes"): + return + + numNodes=len(self.nodes) + for i in range(numNodes): + node=self.nodes[i] + blockLog=self.getBlockLog(i) + Utils.Print(Cluster.__fileDivider) + Utils.Print("Block log from %s:\n%s" % (blockLogDir, json.dumps(blockLog, indent=1))) + + + def compareBlockLogs(self): + blockLogs=[] + blockNameExtensions=[] + lowestMaxes=[] + + def back(arr): + return arr[len(arr)-1] + + def sortLowest(maxes,max): + for i in range(len(maxes)): + if max < maxes[i]: + maxes.insert(i, max) + return + + 
maxes.append(max) + + i="bios" + blockLog=self.getBlockLog(i) + if blockLog is None: + Utils.errorExit("Node %s does not have a block log, all nodes must have a block log" % (i)) + blockLogs.append(blockLog) + blockNameExtensions.append(i) + sortLowest(lowestMaxes,back(blockLog)["block_num"]) + + if not hasattr(self, "nodes"): + Utils.errorExit("There are not multiple nodes to compare, this method assumes that two nodes or more are expected") + + numNodes=len(self.nodes) + for i in range(numNodes): + node=self.nodes[i] + blockLog=self.getBlockLog(i) + if blockLog is None: + Utils.errorExit("Node %s does not have a block log, all nodes must have a block log" % (i)) + blockLogs.append(blockLog) + blockNameExtensions.append(i) + sortLowest(lowestMaxes,back(blockLog)["block_num"]) + + numNodes=len(blockLogs) + + if numNodes < 2: + Utils.errorExit("There are not multiple nodes to compare, this method assumes that two nodes or more are expected") + + if lowestMaxes[0] < 2: + Utils.errorExit("One or more nodes only has %d blocks, if that is a valid scenario, then compareBlockLogs shouldn't be called" % (lowestMaxes[0])) + + # create a list of block logs and name extensions for the given common block number span + def identifyCommon(blockLogs, blockNameExtensions, first, last): + commonBlockLogs=[] + commonBlockNameExtensions=[] + for i in range(numNodes): + if (len(blockLogs[i]) >= last): + commonBlockLogs.append(blockLogs[i][first:last]) + commonBlockNameExtensions.append(blockNameExtensions[i]) + return (commonBlockLogs,commonBlockNameExtensions) + + # compare the contents of the blockLogs for the given common block number span + def compareCommon(blockLogs, blockNameExtensions, first, last): + if Utils.Debug: Utils.Print("comparing block num %s through %s" % (first, last)) + commonBlockLogs=None + commonBlockNameExtensions=None + (commonBlockLogs,commonBlockNameExtensions) = identifyCommon(blockLogs, blockNameExtensions, first, last) + numBlockLogs=len(commonBlockLogs) 
+ if numBlockLogs < 2: + return False + + ret=None + for i in range(1,numBlockLogs): + context="" % (commonBlockNameExtensions[0], commonBlockNameExtensions[i]) + if Utils.Debug: Utils.Print("context=%s" % (context)) + ret=Utils.compare(commonBlockLogs[0], commonBlockLogs[i], context) + if ret is not None: + blockLogDir1=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" + blockLogDir2=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[i]) + "/blocks/" + Utils.Print(Cluster.__fileDivider) + Utils.Print("Block log from %s:\n%s" % (blockLogDir1, json.dumps(commonBlockLogs[0], indent=1))) + Utils.Print(Cluster.__fileDivider) + Utils.Print("Block log from %s:\n%s" % (blockLogDir2, json.dumps(commonBlockLogs[i], indent=1))) + Utils.Print(Cluster.__fileDivider) + Utils.errorExit("Block logs do not match, difference description -> %s" % (ret)) + + return True + + def stripValues(lowestMaxes,greaterThan): + newLowest=[] + for low in lowestMaxes: + if low > greaterThan: + newLowest.append(low) + return newLowest + + first=0 + while len(lowestMaxes)>0 and compareCommon(blockLogs, blockNameExtensions, first, lowestMaxes[0]): + first=lowestMaxes[0]+1 + lowestMaxes=stripValues(lowestMaxes,lowestMaxes[0]) + diff --git a/tests/Node.py b/tests/Node.py index 2795ba63c5e..82d6581bb02 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -31,7 +31,7 @@ class Node(object): # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-arguments - def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost="localhost", mongoPort=27017, mongoDb="EOStest"): + def __init__(self, host, port, pid=None, cmd=None, walletMgr=None, enableMongo=False, mongoHost="localhost", mongoPort=27017, mongoDb="EOStest"): self.host=host self.port=port self.pid=pid @@ -44,16 +44,19 @@ def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost= self.mongoDb=mongoDb self.endpointHttp="http://%s:%d" % 
(self.host, self.port) self.endpointArgs="--url %s" % (self.endpointHttp) - self.miscEosClientArgs="--no-auto-keosd" self.mongoEndpointArgs="" self.infoValid=None self.lastRetrievedHeadBlockNum=None self.lastRetrievedLIB=None + self.transCache={} + self.walletMgr=walletMgr + self.missingTransaction=False if self.enableMongo: self.mongoEndpointArgs += "--host %s --port %d %s" % (mongoHost, mongoPort, mongoDb) def eosClientArgs(self): - return self.endpointArgs + " " + self.miscEosClientArgs + walletArgs=" " + self.walletMgr.getWalletEndpointArgs() if self.walletMgr is not None else "" + return self.endpointArgs + walletArgs + " " + Utils.MiscEosClientArgs def __str__(self): #return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd) @@ -64,11 +67,80 @@ def validateTransaction(trans): assert trans assert isinstance(trans, dict), print("Input type is %s" % type(trans)) - def printTrans(trans): - Utils.Print("ERROR: Failure in transaction validation.") + executed="executed" + def printTrans(trans, status): + Utils.Print("ERROR: Valid transaction should be \"%s\" but it was \"%s\"." % (executed, status)) Utils.Print("Transaction: %s" % (json.dumps(trans, indent=1))) - assert trans["processed"]["receipt"]["status"] == "executed", printTrans(trans) + transStatus=Node.getTransStatus(trans) + assert transStatus == executed, printTrans(trans, transStatus) + + @staticmethod + def __printTransStructureError(trans, context): + Utils.Print("ERROR: Failure in expected transaction structure. Missing trans%s." 
% (context)) + Utils.Print("Transaction: %s" % (json.dumps(trans, indent=1))) + + class Context: + def __init__(self, obj, desc): + self.obj=obj + self.sections=[obj] + self.keyContext=[] + self.desc=desc + + def __json(self): + return "%s=\n%s" % (self.desc, json.dumps(self.obj, indent=1)) + + def __keyContext(self): + msg="" + for key in self.keyContext: + if msg=="": + msg="[" + else: + msg+="][" + msg+=key + if msg!="": + msg+="]" + return msg + + def __contextDesc(self): + return "%s%s" % (self.desc, self.__keyContext()) + + def add(self, newKey): + assert isinstance(newKey, str), print("ERROR: Trying to use %s as a key" % (newKey)) + subSection=self.sections[-1] + assert isinstance(subSection, dict), print("ERROR: Calling \"add\" method when context is not a dictionary. %s in %s" % (self.__contextDesc(), self.__json())) + assert newKey in subSection, print("ERROR: %s%s does not contain key \"%s\". %s" % (self.__contextDesc(), key, self.__json())) + current=subSection[newKey] + self.sections.append(current) + self.keyContext.append(newKey) + return current + + def index(self, i): + assert isinstance(i, int), print("ERROR: Trying to use \"%s\" as a list index" % (i)) + cur=self.getCurrent() + assert isinstance(cur, list), print("ERROR: Calling \"index\" method when context is not a list. %s in %s" % (self.__contextDesc(), self.__json())) + listLen=len(cur) + assert i < listLen, print("ERROR: Index %s is beyond the size of the current list (%s). 
%s in %s" % (i, listLen, self.__contextDesc(), self.__json())) + return self.sections.append(cur[i]) + + def getCurrent(self): + return self.sections[-1] + + @staticmethod + def getTransStatus(trans): + cntxt=Node.Context(trans, "trans") + cntxt.add("processed") + cntxt.add("receipt") + return cntxt.add("status") + + @staticmethod + def getTransBlockNum(trans): + cntxt=Node.Context(trans, "trans") + cntxt.add("processed") + cntxt.add("action_traces") + cntxt.index(0) + return cntxt.add("block_num") + @staticmethod def stdinAndCheckOutput(cmd, subcommand): @@ -117,7 +189,7 @@ def runMongoCmdReturnJson(cmd, subcommand, trace=False, exitOnError=False): outStr=Node.byteArrToStr(outs) if not outStr: return None - extJStr=Utils.filterJsonObject(outStr) + extJStr=Utils.filterJsonObjectOrArray(outStr) if not extJStr: return None jStr=Node.normalizeJsonObject(extJStr) @@ -140,17 +212,22 @@ def getTransId(trans): assert trans assert isinstance(trans, dict), print("Input type is %s" % type(trans)) - #Utils.Print("%s" % trans) + assert "transaction_id" in trans, print("trans does not contain key %s. 
trans={%s}" % ("transaction_id", json.dumps(trans, indent=2, sort_keys=True))) transId=trans["transaction_id"] return transId + @staticmethod + def isTrans(obj): + """Identify if this is a transaction dictionary.""" + if obj is None or not isinstance(obj, dict): + return False + + return True if "transaction_id" in obj else False + @staticmethod def byteArrToStr(arr): return arr.decode("utf-8") - def setWalletEndpointArgs(self, args): - self.endpointArgs="--url http://%s:%d %s" % (self.host, self.port, args) - def validateAccounts(self, accounts): assert(accounts) assert(isinstance(accounts, list)) @@ -248,45 +325,11 @@ def isBlockFinalized(self, blockNum): """Is blockNum finalized""" return self.isBlockPresent(blockNum, blockType=BlockType.lib) - class BlockWalker: - def __init__(self, node, trans, startBlockNum=None, endBlockNum=None): - self.trans=trans - self.node=node - self.startBlockNum=startBlockNum - self.endBlockNum=endBlockNum - - def walkBlocks(self): - start=None - end=None - blockNum=self.trans["processed"]["action_traces"][0]["block_num"] - # it should be blockNum or later, but just in case the block leading up have any clues... 
- if self.startBlockNum is not None: - start=self.startBlockNum - else: - start=blockNum-5 - if self.endBlockNum is not None: - end=self.endBlockNum - else: - info=self.node.getInfo() - end=info["head_block_num"] - msg="Original transaction=\n%s\nExpected block_num=%s\n" % (json.dumps(trans, indent=2, sort_keys=True), blockNum) - for blockNum in range(start, end+1): - block=self.node.getBlock(blockNum) - msg+=json.dumps(block, indent=2, sort_keys=True)+"\n" - # pylint: disable=too-many-branches - def getTransaction(self, transOrTransId, silentErrors=False, exitOnError=False, delayedRetry=True): - transId=None - trans=None - assert(isinstance(transOrTransId, (str,dict))) - if isinstance(transOrTransId, str): - transId=transOrTransId - else: - trans=transOrTransId - transId=Node.getTransId(trans) + def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayedRetry=True): + assert(isinstance(transId, str)) exitOnErrorForDelayed=not delayedRetry and exitOnError timeout=3 - blockWalker=None if not self.enableMongo: cmdDesc="get transaction" cmd="%s %s" % (cmdDesc, transId) @@ -295,12 +338,10 @@ def getTransaction(self, transOrTransId, silentErrors=False, exitOnError=False, trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnErrorForDelayed, exitMsg=msg) if trans is not None or not delayedRetry: return trans - if blockWalker is None: - blockWalker=Node.BlockWalker(self, trans) if Utils.Debug: Utils.Print("Could not find transaction with id %s, delay and retry" % (transId)) time.sleep(timeout) - msg+="\nBlock printout -->>\n%s" % blockWalker.walkBlocks(); + self.missingTransaction=True # either it is there or the transaction has timed out return self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) else: @@ -367,16 +408,11 @@ def isTransInBlock(self, transId, blockId): return False - def getBlockIdByTransId(self, transOrTransId, delayedRetry=True): - """Given a transaction 
(dictionary) or transaction Id (string), will return the actual block id (int) containing the transaction""" - assert(transOrTransId) - transId=None - assert(isinstance(transOrTransId, (str,dict))) - if isinstance(transOrTransId, str): - transId=transOrTransId - else: - transId=Node.getTransId(transOrTransId) - trans=self.getTransaction(transOrTransId, exitOnError=True, delayedRetry=delayedRetry) + def getBlockIdByTransId(self, transId, delayedRetry=True): + """Given a transaction Id (string), will return the actual block id (int) containing the transaction""" + assert(transId) + assert(isinstance(transId, str)) + trans=self.getTransaction(transId, exitOnError=True, delayedRetry=delayedRetry) refBlockNum=None key="" @@ -466,6 +502,7 @@ def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, w account.activePublicKey, stakeNet, CORE_SYMBOL, stakeCPU, CORE_SYMBOL, buyRAM, CORE_SYMBOL) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + self.trackCmdTransaction(trans) transId=Node.getTransId(trans) if stakedDeposit > 0: @@ -483,11 +520,13 @@ def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTran cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + self.trackCmdTransaction(trans) transId=Node.getTransId(trans) if stakedDeposit > 0: self.waitForTransInBlock(transId) # seems like account creation needs to be finlized before transfer can happen trans = self.transferFunds(creatorAccount, account, "%0.04f %s" % (stakedDeposit/10000, CORE_SYMBOL), "init") + self.trackCmdTransaction(trans) transId=Node.getTransId(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, 
exitOnError=exitOnError) @@ -638,6 +677,7 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False trans=None try: trans=Utils.runCmdArrReturnJson(cmdArr) + self.trackCmdTransaction(trans) except subprocess.CalledProcessError as ex: msg=ex.output.decode("utf-8") Utils.Print("ERROR: Exception during funds transfer. %s" % (msg)) @@ -819,6 +859,7 @@ def publishContract(self, account, contractDir, wasmFile, abiFile, waitForTransB trans=None try: trans=Utils.runCmdReturnJson(cmd, trace=False) + self.trackCmdTransaction(trans) except subprocess.CalledProcessError as ex: if not shouldFail: msg=ex.output.decode("utf-8") @@ -876,6 +917,7 @@ def pushMessage(self, account, action, data, opts, silentErrors=False): if Utils.Debug: Utils.Print("cmd: %s" % (cmdArr)) try: trans=Utils.runCmdArrReturnJson(cmdArr) + self.trackCmdTransaction(trans, ignoreNonTrans=True) return (True, trans) except subprocess.CalledProcessError as ex: msg=ex.output.decode("utf-8") @@ -887,6 +929,7 @@ def setPermission(self, account, code, pType, requirement, waitForTransBlock=Fal cmdDesc="set action permission" cmd="%s -j %s %s %s %s" % (cmdDesc, account, code, pType, requirement) trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError) + self.trackCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -895,11 +938,12 @@ def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, tran toAccount=fromAccount cmdDesc="system delegatebw" - transferStr="--transfer" if transferTo else "" + transferStr="--transfer" if transferTo else "" cmd="%s -j %s %s \"%s %s\" \"%s %s\" %s" % ( cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr) msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + self.trackCmdTransaction(trans) return 
self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -909,6 +953,7 @@ def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnEr cmdDesc, producer.name, producer.activePublicKey, url, location) msg="producer=%s" % (producer.name); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + self.trackCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -918,6 +963,7 @@ def vote(self, account, producers, waitForTransBlock=False, exitOnError=False): cmdDesc, account.name, " ".join(producers)) msg="account=%s, producers=[ %s ]" % (account.name, ", ".join(producers)); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + self.trackCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -949,7 +995,7 @@ def processCleosCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, ex return None if exitOnError and trans is None: - Utils.cmdError("could not \"%s\" - %s" % (cmdDesc,exitMsg)) + Utils.cmdError("could not \"%s\". 
%s" % (cmdDesc,exitMsg)) errorExit("Failed to \"%s\"" % (cmdDesc)) return trans @@ -963,12 +1009,12 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head cmd="curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % \ (self.endpointHttp, producer, whereInSequence, basedOnLib) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - trans=None + rtn=None try: if returnType==ReturnType.json: - trans=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) + rtn=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) elif returnType==ReturnType.raw: - trans=Utils.runCmdReturnStr(cmd) + rtn=Utils.runCmdReturnStr(cmd) else: unhandledEnumType(returnType) except subprocess.CalledProcessError as ex: @@ -986,11 +1032,11 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head exitMsg=": " + exitMsg else: exitMsg="" - if exitOnError and trans is None: + if exitOnError and rtn is None: Utils.cmdError("could not \"%s\" - %s" % (cmd,exitMsg)) Utils.errorExit("Failed to \"%s\"" % (cmd)) - return trans + return rtn def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False): if not waitForTransBlock: @@ -1166,7 +1212,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim cmdArr=[] myCmd=self.cmd - toAddOrSwap=copy.deepcopy(addOrSwapFlags) if addOrSwapFlags is not None else {} + toAddOrSwap=copy.deepcopy(addOrSwapFlags) if addOrSwapFlags is not None else {} if not newChain: skip=False swapValue=None @@ -1227,6 +1273,26 @@ def isNodeAlive(): self.killed=False return True + def trackCmdTransaction(self, trans, ignoreNonTrans=False): + if trans is None: + if Utils.Debug: Utils.Print(" cmd returned transaction: %s" % (trans)) + return + + if ignoreNonTrans and not Node.isTrans(trans): + if Utils.Debug: Utils.Print(" cmd returned a non-transaction") + return + + 
transId=Node.getTransId(trans) + if Utils.Debug: + status=Node.getTransStatus(trans) + blockNum=Node.getTransBlockNum(trans) + if transId in self.transCache.keys(): + replaceMsg="replacing previous trans=\n%s" % json.dumps(self.transCache[transId], indent=2, sort_keys=True) + else: + replaceMsg="" + Utils.Print(" cmd returned transaction id: %s, status: %s, (possible) block num: %s %s" % (transId, status, blockNum, replaceMsg)) + self.transCache[transId]=trans + def reportStatus(self): Utils.Print("Node State:") Utils.Print(" cmd : %s" % (self.cmd)) diff --git a/tests/TestHelper.py b/tests/TestHelper.py index 6e00645e9dc..1650597dee5 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -26,6 +26,7 @@ def add(self, flag, type, help, default, choices=None): class TestHelper(object): LOCAL_HOST="localhost" DEFAULT_PORT=8888 + DEFAULT_WALLET_PORT=9899 @staticmethod # pylint: disable=too-many-branches @@ -70,6 +71,12 @@ def parse_args(includeArgs, applicationSpecificArgs=AppArgs()): if "--port" in includeArgs: parser.add_argument("-p", "--port", type=int, help="%s host port" % Utils.EosServerName, default=TestHelper.DEFAULT_PORT) + if "--wallet-host" in includeArgs: + parser.add_argument("--wallet-host", type=str, help="%s host" % Utils.EosWalletName, + default=TestHelper.LOCAL_HOST) + if "--wallet-port" in includeArgs: + parser.add_argument("--wallet-port", type=int, help="%s port" % Utils.EosWalletName, + default=TestHelper.DEFAULT_WALLET_PORT) if "--prod-count" in includeArgs: parser.add_argument("-c", "--prod-count", type=int, help="Per node producer count", default=1) if "--defproducera_prvt_key" in includeArgs: @@ -141,6 +148,7 @@ def shutdown(cluster, walletMgr, testSuccessful=True, killEosInstances=True, kil cluster.dumpErrorDetails() if walletMgr: walletMgr.dumpErrorDetails() + cluster.printBlockLogIfNeeded() Utils.Print("== Errors see above ==") if len(Utils.CheckOutputDeque)>0: Utils.Print("== cout/cerr pairs from last %d calls to Utils. 
==" % len(Utils.CheckOutputDeque)) diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index c46dd78d6fd..8b7e4957277 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -12,8 +12,10 @@ Wallet=namedtuple("Wallet", "name password host port") # pylint: disable=too-many-instance-attributes class WalletMgr(object): - __walletLogFile="test_keosd_output.log" + __walletLogOutFile="test_keosd_out.log" + __walletLogErrFile="test_keosd_err.log" __walletDataDir="test_wallet_0" + __MaxPort=9999 # pylint: disable=too-many-arguments # walletd [True|False] True=Launch wallet(keosd) process; False=Manage launch process externally. @@ -25,26 +27,76 @@ def __init__(self, walletd, nodeosPort=8888, nodeosHost="localhost", port=9899, self.host=host self.wallets={} self.__walletPid=None - self.endpointArgs="--url http://%s:%d" % (self.nodeosHost, self.nodeosPort) - self.walletEndpointArgs="" - if self.walletd: - self.walletEndpointArgs += " --wallet-url http://%s:%d" % (self.host, self.port) - self.endpointArgs += self.walletEndpointArgs + + def getWalletEndpointArgs(self): + if not self.walletd or not self.isLaunched(): + return "" + + return " --wallet-url http://%s:%d" % (self.host, self.port) + + def getArgs(self): + return " --url http://%s:%d%s %s" % (self.nodeosHost, self.nodeosPort, self.getWalletEndpointArgs(), Utils.MiscEosClientArgs) + + def isLaunched(self): + return self.__walletPid is not None + + def isLocal(self): + return self.host=="localhost" or self.host=="127.0.0.1" + + def findAvailablePort(self): + for i in range(WalletMgr.__MaxPort): + port=self.port+i + if port > WalletMgr.__MaxPort: + port-=WalletMgr.__MaxPort + if Utils.arePortsAvailable(port): + return port + if Utils.Debug: Utils.Print("Port %d not available for %s" % (port, Utils.EosWalletPath)) + + Utils.errorExit("Failed to find free port to use for %s" % (Utils.EosWalletPath)) def launch(self): if not self.walletd: Utils.Print("ERROR: Wallet Manager wasn't configured to launch keosd") return 
False + if self.isLaunched(): + return True + + if self.isLocal(): + self.port=self.findAvailablePort() + + pgrepCmd=Utils.pgrepCmd(Utils.EosWalletName) + if Utils.Debug: + portTaken=False + if self.isLocal(): + if not Utils.arePortsAvailable(self.port): + portTaken=True + psOut=Utils.checkOutput(pgrepCmd.split(), ignoreError=True) + if psOut or portTaken: + statusMsg="" + if psOut: + statusMsg+=" %s - {%s}." % (pgrepCmd, psOut) + if portTaken: + statusMsg+=" port %d is NOT available." % (self.port) + Utils.Print("Launching %s, note similar processes running. %s" % (Utils.EosWalletName, statusMsg)) + cmd="%s --data-dir %s --config-dir %s --http-server-address=%s:%d --verbose-http-errors" % ( Utils.EosWalletPath, WalletMgr.__walletDataDir, WalletMgr.__walletDataDir, self.host, self.port) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - with open(WalletMgr.__walletLogFile, 'w') as sout, open(WalletMgr.__walletLogFile, 'w') as serr: + with open(WalletMgr.__walletLogOutFile, 'w') as sout, open(WalletMgr.__walletLogErrFile, 'w') as serr: popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) self.__walletPid=popen.pid # Give keosd time to warm up time.sleep(2) + + try: + if Utils.Debug: Utils.Print("Checking if %s launched. %s" % (Utils.EosWalletName, pgrepCmd)) + psOut=Utils.checkOutput(pgrepCmd.split()) + if Utils.Debug: Utils.Print("Launched %s. 
{%s}" % (Utils.EosWalletName, psOut)) + except subprocess.CalledProcessError as ex: + Utils.errorExit("Failed to launch the wallet manager") + return True def create(self, name, accounts=None, exitOnError=True): @@ -54,7 +106,7 @@ def create(self, name, accounts=None, exitOnError=True): return wallet p = re.compile(r'\n\"(\w+)\"\n', re.MULTILINE) cmdDesc="wallet create" - cmd="%s %s %s --name %s --to-console" % (Utils.EosClientPath, self.endpointArgs, cmdDesc, name) + cmd="%s %s %s --name %s --to-console" % (Utils.EosClientPath, self.getArgs(), cmdDesc, name) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) retStr=None maxRetryCount=4 @@ -67,7 +119,15 @@ def create(self, name, accounts=None, exitOnError=True): retryCount+=1 if retryCount"+prod0 if index is None: - return + if expectDivergence: + errorInDivergence=True + break + return None bpsStr0=None bpsStr2=None @@ -84,13 +87,17 @@ def analyzeBPs(bps0, bps1, expectDivergence): bpsStr0+=str(blockNum0)+numDiff+"->"+prod0+prodDiff bpsStr1+=str(blockNum1)+numDiff+"->"+prod1+prodDiff if errorInDivergence: - msg="Failed analyzing block producers - " - if expectDivergence: - msg+="nodes indicate different block producers for the same blocks, but did not expect them to diverge." - else: - msg+="did not expect nodes to indicate different block producers for the same blocks." - msg+="\n Matching Blocks= %s \n Diverging branch node0= %s \n Diverging branch node1= %s" % (bpsStr,bpsStr0,bpsStr1) - Utils.errorExit(msg) + break + + if errorInDivergence: + msg="Failed analyzing block producers - " + if expectDivergence: + msg+="nodes indicate different block producers for the same blocks, but did not expect them to diverge." + else: + msg+="did not expect nodes to indicate different block producers for the same blocks." 
+ msg+="\n Matching Blocks= %s \n Diverging branch node0= %s \n Diverging branch node1= %s" % (bpsStr,bpsStr0,bpsStr1) + Utils.errorExit(msg) + return firstDivergence def getMinHeadAndLib(prodNodes): @@ -102,7 +109,8 @@ def getMinHeadAndLib(prodNodes): -args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--p2p-plugin"}) +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", + "--p2p-plugin","--wallet-port"}) Utils.Debug=args.v totalProducerNodes=2 totalNonProducerNodes=1 @@ -116,18 +124,20 @@ def getMinHeadAndLib(prodNodes): prodCount=args.prod_count killAll=args.clean_run p2pPlugin=args.p2p_plugin +walletPort=args.wallet_port -walletMgr=WalletMgr(True) +walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False killEosInstances=not dontKill killWallet=not dontKill -WalletdName="keosd" +WalletdName=Utils.EosWalletName ClientName="cleos" try: TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") @@ -141,7 +151,7 @@ def getMinHeadAndLib(prodNodes): # "bridge" shape connects defprocera through defproducerk (in node0) to each other and defproducerl through defproduceru (in node01) # and the only connection between those 2 groups is through the bridge node - if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, topo="bridge", pnodes=totalProducerNodes, + if cluster.launch(prodCount=prodCount, onlyBios=False, topo="bridge", pnodes=totalProducerNodes, totalNodes=totalNodes, totalProducers=totalProducers, p2pPlugin=p2pPlugin, useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: Utils.cmdError("launcher") @@ -164,12 +174,6 @@ def getMinHeadAndLib(prodNodes): testWalletName="test" Print("Creating wallet \"%s\"." 
% (testWalletName)) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - Utils.cmdError("%s" % (WalletdName)) - Utils.errorExit("Failed to stand up eos walletd.") - testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]]) for _, account in cluster.defProducerAccounts.items(): @@ -208,11 +212,11 @@ def getMinHeadAndLib(prodNodes): # create accounts via eosio as otherwise a bid is needed for account in accounts: Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name)) - trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True) + trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=True, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True) transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) - node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer") - trans=node.delegatebw(account, 20000000.0000, 20000000.0000, exitOnError=True) + node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True) + trans=node.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=True, exitOnError=True) # *** vote using accounts *** @@ -222,7 +226,7 @@ def getMinHeadAndLib(prodNodes): index=0 for account in accounts: Print("Vote for producers=%s" % (producers)) - trans=prodNodes[index % len(prodNodes)].vote(account, producers) + trans=prodNodes[index % len(prodNodes)].vote(account, producers, waitForTransBlock=True) index+=1 diff --git a/tests/nodeos_run_remote_test.py b/tests/nodeos_run_remote_test.py index 5b3459e780c..6c918f71c64 100755 --- a/tests/nodeos_run_remote_test.py +++ 
b/tests/nodeos_run_remote_test.py @@ -33,7 +33,7 @@ actualTest="tests/nodeos_run_test.py" testSuccessful=False -cluster=Cluster() +cluster=Cluster(walletd=True) try: Print("BEGIN") cluster.killall(allInstances=killAll) @@ -42,7 +42,8 @@ Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, prodCount, topo, delay, onlyBios=onlyBios, dontKill=dontKill) is False: + + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, prodCount=prodCount, topo=topo, delay=delay, onlyBios=onlyBios, dontKill=dontKill) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") @@ -54,7 +55,7 @@ defproduceraPrvtKey=producerKeys["defproducera"]["private"] defproducerbPrvtKey=producerKeys["defproducerb"]["private"] - cmd="%s --dont-launch --defproducera_prvt_key %s --defproducerb_prvt_key %s %s %s %s" % (actualTest, defproduceraPrvtKey, defproducerbPrvtKey, "-v" if debug else "", "--dont-kill" if dontKill else "", "--only-bios" if onlyBios else "") + cmd="%s --dont-launch --defproducera_prvt_key %s --defproducerb_prvt_key %s %s %s %s" % (actualTest, defproduceraPrvtKey, defproducerbPrvtKey, "-v" if debug else "", "--leave-running" if dontKill else "", "--only-bios" if onlyBios else "") Print("Starting up %s test: %s" % ("nodeos", actualTest)) Print("cmd: %s\n" % (cmd)) if 0 != subprocess.call(cmd, shell=True): diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index f28f62a730a..37b46613aaf 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -22,7 +22,7 @@ args = TestHelper.parse_args({"--host","--port","--prod-count","--defproducera_prvt_key","--defproducerb_prvt_key","--mongodb" ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios","--clean-run" - ,"--sanity-test","--p2p-plugin"}) + 
,"--sanity-test","--p2p-plugin","--wallet-port"}) server=args.host port=args.port debug=args.v @@ -38,42 +38,49 @@ killAll=args.clean_run sanityTest=args.sanity_test p2pPlugin=args.p2p_plugin +walletPort=args.wallet_port Utils.Debug=debug localTest=True if server == TestHelper.LOCAL_HOST else False cluster=Cluster(walletd=True, enableMongo=enableMongo, defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey) -walletMgr=WalletMgr(True) +walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False killEosInstances=not dontKill killWallet=not dontKill -dontBootstrap=sanityTest +dontBootstrap=sanityTest # intent is to limit the scope of the sanity test to just verifying that nodes can be started -WalletdName="keosd" +WalletdName=Utils.EosWalletName ClientName="cleos" timeout = .5 * 12 * 2 + 60 # time for finalization with 1 producer + 60 seconds padding Utils.setIrreversibleTimeout(timeout) try: TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) Print("SERVER: %s" % (server)) Print("PORT: %d" % (port)) if enableMongo and not cluster.isMongodDbRunning(): errorExit("MongoDb doesn't seem to be running.") - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if localTest and not dontLaunch: cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=onlyBios, dontKill=dontKill, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin) is False: + if cluster.launch(prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin) is False: cmdError("launcher") errorExit("Failed to stand up eos cluster.") else: + Print("Collecting cluster info.") cluster.initializeNodes(defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey) killEosInstances=False + Print("Stand up %s" % (WalletdName)) + walletMgr.killall(allInstances=killAll) + walletMgr.cleanup() + print("Stand up walletd") + if walletMgr.launch() is 
False: + cmdError("%s" % (WalletdName)) + errorExit("Failed to stand up eos walletd.") if sanityTest: testSuccessful=True @@ -105,16 +112,12 @@ exchangeAccount.ownerPrivateKey=PRV_KEY2 exchangeAccount.ownerPublicKey=PUB_KEY2 - Print("Stand up %s" % (WalletdName)) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - cmdError("%s" % (WalletdName)) - errorExit("Failed to stand up eos walletd.") - testWalletName="test" Print("Creating wallet \"%s\"." % (testWalletName)) - testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,cluster.defproduceraAccount,cluster.defproducerbAccount]) + walletAccounts=[cluster.defproduceraAccount,cluster.defproducerbAccount] + if not dontLaunch: + walletAccounts.append(cluster.eosioAccount) + testWallet=walletMgr.create(testWalletName, walletAccounts) Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8"))) @@ -201,15 +204,14 @@ Print("Validating accounts before user accounts creation") cluster.validateAccounts(None) - # create accounts via eosio as otherwise a bid is needed - Print("Create new account %s via %s" % (testeraAccount.name, cluster.eosioAccount.name)) - transId=node.createInitializeAccount(testeraAccount, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) + Print("Create new account %s via %s" % (testeraAccount.name, cluster.defproduceraAccount.name)) + transId=node.createInitializeAccount(testeraAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) - Print("Create new account %s via %s" % (currencyAccount.name, cluster.eosioAccount.name)) - transId=node.createInitializeAccount(currencyAccount, cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000, exitOnError=True) + Print("Create new account %s via %s" % (currencyAccount.name, cluster.defproduceraAccount.name)) + transId=node.createInitializeAccount(currencyAccount, cluster.defproduceraAccount, 
buyRAM=200000, stakedDeposit=5000, exitOnError=True) - Print("Create new account %s via %s" % (exchangeAccount.name, cluster.eosioAccount.name)) - transId=node.createInitializeAccount(exchangeAccount, cluster.eosioAccount, buyRAM=1000000, waitForTransBlock=True, exitOnError=True) + Print("Create new account %s via %s" % (exchangeAccount.name, cluster.defproduceraAccount.name)) + transId=node.createInitializeAccount(exchangeAccount, cluster.defproduceraAccount, buyRAM=200000, waitForTransBlock=True, exitOnError=True) Print("Validating accounts after user accounts creation") accounts=[testeraAccount, currencyAccount, exchangeAccount] @@ -282,7 +284,7 @@ node.waitForTransInBlock(transId) - transaction=node.getTransaction(trans, exitOnError=True, delayedRetry=False) + transaction=node.getTransaction(transId, exitOnError=True, delayedRetry=False) typeVal=None amountVal=None @@ -467,7 +469,7 @@ raise Print("Test for block decoded packed transaction (issue 2932)") - blockId=node.getBlockIdByTransId(trans[1]) + blockId=node.getBlockIdByTransId(transId) assert(blockId) block=node.getBlock(blockId, exitOnError=True) diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index aae717a8cea..040be402ca3 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -55,7 +55,7 @@ def setName(self, num): # --keep-logs ############################################################### -args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"}) +args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--wallet-port"}) Utils.Debug=args.v totalNodes=4 cluster=Cluster(walletd=True) @@ -63,17 +63,19 @@ def setName(self, num): keepLogs=args.keep_logs dontKill=args.leave_running killAll=args.clean_run +walletPort=args.wallet_port -walletMgr=WalletMgr(True) +walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False 
killEosInstances=not dontKill killWallet=not dontKill -WalletdName="keosd" +WalletdName=Utils.EosWalletName ClientName="cleos" try: TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) cluster.killall(allInstances=killAll) cluster.cleanup() @@ -83,7 +85,7 @@ def setName(self, num): maxRAMFlag="--chain-state-db-size-mb" maxRAMValue=1010 extraNodeosArgs=" %s %d %s %d " % (minRAMFlag, minRAMValue, maxRAMFlag, maxRAMValue) - if cluster.launch(onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs, useBiosBootFile=False) is False: + if cluster.launch(onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs, useBiosBootFile=False) is False: Utils.cmdError("launcher") errorExit("Failed to stand up eos cluster.") @@ -97,12 +99,6 @@ def setName(self, num): testWalletName="test" Print("Creating wallet \"%s\"." % (testWalletName)) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - Utils.cmdError("%s" % (WalletdName)) - errorExit("Failed to stand up eos walletd.") - testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount]) for _, account in cluster.defProducerAccounts.items(): @@ -127,7 +123,7 @@ def setName(self, num): transferAmount="70000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) nodes[0].transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer") - trans=nodes[0].delegatebw(account, 1000000.0000, 68000000.0000, exitOnError=True) + trans=nodes[0].delegatebw(account, 1000000.0000, 68000000.0000, waitForTransBlock=True, exitOnError=True) contractAccount=cluster.createAccountKeys(1)[0] contractAccount.name="contracttest" @@ -137,7 +133,7 @@ def setName(self, num): transferAmount="90000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from 
account %s to %s" % (transferAmount, cluster.eosioAccount.name, contractAccount.name)) nodes[0].transferFunds(cluster.eosioAccount, contractAccount, transferAmount, "test transfer") - trans=nodes[0].delegatebw(contractAccount, 1000000.0000, 88000000.0000, exitOnError=True) + trans=nodes[0].delegatebw(contractAccount, 1000000.0000, 88000000.0000, waitForTransBlock=True, exitOnError=True) contractDir="contracts/integration_test" wasmFile="integration_test.wasm" diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index b6f176af8c9..d4781d0eefe 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -140,7 +140,8 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): from core_symbol import CORE_SYMBOL -args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--p2p-plugin"}) +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", + "--p2p-plugin","--wallet-port"}) Utils.Debug=args.v totalNodes=4 cluster=Cluster(walletd=True) @@ -150,22 +151,24 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): prodCount=args.prod_count killAll=args.clean_run p2pPlugin=args.p2p_plugin +walletPort=args.wallet_port -walletMgr=WalletMgr(True) +walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False killEosInstances=not dontKill killWallet=not dontKill -WalletdName="keosd" +WalletdName=Utils.EosWalletName ClientName="cleos" try: TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin, useBiosBootFile=False) is False: + if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, 
totalProducers=totalNodes*21, p2pPlugin=p2pPlugin, useBiosBootFile=False) is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") @@ -184,12 +187,6 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): testWalletName="test" Print("Creating wallet \"%s\"." % (testWalletName)) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - Utils.cmdError("%s" % (WalletdName)) - Utils.errorExit("Failed to stand up eos walletd.") - testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]]) for _, account in cluster.defProducerAccounts.items(): @@ -216,7 +213,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer") - trans=node.delegatebw(account, 20000000.0000, 20000000.0000, exitOnError=True) + trans=node.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=True, exitOnError=True) # containers for tracking producers prodsActive={} @@ -229,7 +226,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): #first account will vote for node0 producers, all others will vote for node1 producers node=node0 for account in accounts: - trans=node.vote(account, node.producers) + trans=node.vote(account, node.producers, waitForTransBlock=True) node=node1 setActiveProducers(prodsActive, node1.producers) @@ -240,7 +237,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): # first account will vote for node2 producers, all others will vote for node3 producers node1 for account in accounts: - trans=node.vote(account, node.producers) + trans=node.vote(account, node.producers, waitForTransBlock=True) node=node2 setActiveProducers(prodsActive, 
node2.producers) diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py index d3329c8b65f..a8ae17d78b7 100755 --- a/tests/restart-scenarios-test.py +++ b/tests/restart-scenarios-test.py @@ -52,6 +52,7 @@ try: TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) cluster.setChainStrategy(chainSyncStrategyStr) cluster.setWalletMgr(walletMgr) @@ -74,11 +75,6 @@ errorExit("Cluster never stabilized") Print("Stand up EOS wallet keosd") - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - errorExit("Failed to stand up keosd.") - accountsCount=total_nodes walletName="MyWallet" Print("Creating wallet %s if one doesn't already exist." % walletName) diff --git a/tests/testUtils.py b/tests/testUtils.py index d2a69231513..a8dbe0fd4d2 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -1,11 +1,14 @@ +import errno import subprocess import time import os +import platform from collections import deque from collections import namedtuple import inspect import json import shlex +import socket from sys import stdout from sys import exit import traceback @@ -16,6 +19,7 @@ class Utils: FNull = open(os.devnull, 'w') EosClientPath="programs/cleos/cleos" + MiscEosClientArgs="--no-auto-keosd" EosWalletName="keosd" EosWalletPath="programs/keosd/"+ EosWalletName @@ -28,6 +32,8 @@ class Utils: ShuttingDown=False CheckOutputDeque=deque(maxlen=10) + EosBlockLogPath="programs/eosio-blocklog/eosio-blocklog" + @staticmethod def Print(*args, **kwargs): stackDepth=len(inspect.stack())-2 @@ -75,12 +81,12 @@ def getChainStrategies(): return chainSyncStrategies @staticmethod - def checkOutput(cmd): + def checkOutput(cmd, ignoreError=False): assert(isinstance(cmd, list)) popen=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output,error)=popen.communicate() Utils.CheckOutputDeque.append((output,error,cmd)) - if popen.returncode != 0: + if popen.returncode != 0 and not ignoreError: raise 
subprocess.CalledProcessError(returncode=popen.returncode, cmd=cmd, output=error) return output.decode("utf-8") @@ -132,16 +138,25 @@ def waitForBool(lam, timeout=None): return False if ret is None else ret @staticmethod - def filterJsonObject(data): - firstIdx=data.find('{') - lastIdx=data.rfind('}') - retStr=data[firstIdx:lastIdx+1] + def filterJsonObjectOrArray(data): + firstObjIdx=data.find('{') + lastObjIdx=data.rfind('}') + firstArrayIdx=data.find('[') + lastArrayIdx=data.rfind(']') + if firstArrayIdx==-1 or lastArrayIdx==-1: + retStr=data[firstObjIdx:lastObjIdx+1] + elif firstObjIdx==-1 or lastObjIdx==-1: + retStr=data[firstArrayIdx:lastArrayIdx+1] + elif lastArrayIdx < lastObjIdx: + retStr=data[firstObjIdx:lastObjIdx+1] + else: + retStr=data[firstArrayIdx:lastArrayIdx+1] return retStr @staticmethod def runCmdArrReturnJson(cmdArr, trace=False, silentErrors=True): retStr=Utils.checkOutput(cmdArr) - jStr=Utils.filterJsonObject(retStr) + jStr=Utils.filterJsonObjectOrArray(retStr) if trace: Utils.Print ("RAW > %s" % (retStr)) if trace: Utils.Print ("JSON> %s" % (jStr)) if not jStr: @@ -171,6 +186,130 @@ def runCmdReturnJson(cmd, trace=False, silentErrors=False): cmdArr=shlex.split(cmd) return Utils.runCmdArrReturnJson(cmdArr, trace=trace, silentErrors=silentErrors) + @staticmethod + def arePortsAvailable(ports): + """Check if specified port (as int) or ports (as set) is/are available for listening on.""" + assert(ports) + if isinstance(ports, int): + ports={ports} + assert(isinstance(ports, set)) + + for port in ports: + if Utils.Debug: Utils.Print("Checking if port %d is available." 
% (port)) + assert(isinstance(port, int)) + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + try: + s.bind(("127.0.0.1", port)) + except socket.error as e: + if e.errno == errno.EADDRINUSE: + Utils.Print("ERROR: Port %d is already in use" % (port)) + else: + # something else raised the socket.error exception + Utils.Print("ERROR: Unknown exception while trying to listen on port %d" % (port)) + Utils.Print(e) + return False + finally: + s.close() + + return True + + @staticmethod + def pgrepCmd(serverName): + pgrepOpts="-fl" + # pylint: disable=deprecated-method + if platform.linux_distribution()[0] in ["Ubuntu", "LinuxMint", "Fedora","CentOS Linux","arch"]: + pgrepOpts="-a" + + return "pgrep %s %s" % (pgrepOpts, serverName) + + @staticmethod + def getBlockLog(blockLogLocation, silentErrors=False, exitOnError=False): + assert(isinstance(blockLogLocation, str)) + cmd="%s --blocks-dir %s --as-json-array" % (Utils.EosBlockLogPath, blockLogLocation) + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + rtn=None + try: + rtn=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) + except subprocess.CalledProcessError as ex: + if not silentErrors: + msg=ex.output.decode("utf-8") + errorMsg="Exception during \"%s\". 
%s" % (cmd, msg) + if exitOnError: + Utils.cmdError(errorMsg) + Utils.errorExit(errorMsg) + else: + Utils.Print("ERROR: %s" % (errorMsg)) + return None + + if exitOnError and rtn is None: + Utils.cmdError("could not \"%s\"" % (cmd)) + Utils.errorExit("Failed to \"%s\"" % (cmd)) + + return rtn + + @staticmethod + def compare(obj1,obj2,context): + type1=type(obj1) + type2=type(obj2) + if type1!=type2: + return "obj1(%s) and obj2(%s) are different types, so cannot be compared, context=%s" % (type1,type2,context) + + if obj1 is None and obj2 is None: + return None + + typeName=type1.__name__ + if type1 == str or type1 == int or type1 == float or type1 == bool: + if obj1!=obj2: + return "obj1=%s and obj2=%s are different (type=%s), context=%s" % (obj1,obj2,typeName,context) + return None + + if type1 == list: + len1=len(obj1) + len2=len(obj2) + diffSizes=False + minLen=len1 + if len1!=len2: + diffSizes=True + minLen=min([len1,len2]) + + for i in range(minLen): + nextContext=context + "[%d]" % (i) + ret=Utils.compare(obj1[i],obj2[i], nextContext) + if ret is not None: + return ret + + if diffSizes: + return "left and right side %s comparison have different sizes %d != %d, context=%s" % (typeName,len1,len2,context) + return None + + if type1 == dict: + keys1=sorted(obj1.keys()) + keys2=sorted(obj2.keys()) + len1=len(keys1) + len2=len(keys2) + diffSizes=False + minLen=len1 + if len1!=len2: + diffSizes=True + minLen=min([len1,len2]) + + for i in range(minLen): + key=keys1[i] + nextContext=context + "[\"%s\"]" % (key) + if key not in obj2: + return "right side does not contain key=%s (has %s) that left side does, context=%s" % (key,keys2,context) + ret=Utils.compare(obj1[key],obj2[key], nextContext) + if ret is not None: + return ret + + if diffSizes: + return "left and right side %s comparison have different number of keys %d != %d, context=%s" % (typeName,len1,len2,context) + + return None + + return "comparison of %s type is not supported, context=%s" % (typeName,context) 
########################################################################################### class Account(object): diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index 328b66a4462..138c9c6afaf 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -28,13 +28,10 @@ target_include_directories( unit_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/contracts ${CMAKE_CURRENT_BINARY_DIR}/contracts ${CMAKE_CURRENT_BINARY_DIR}/include ) -add_dependencies(unit_test asserter test_api test_api_mem test_api_db test_ram_limit test_api_multi_index eosio.token proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig payloadless tic_tac_toe deferred_test) +add_dependencies(unit_test asserter test_api test_api_mem test_api_db test_ram_limit test_api_multi_index eosio.token proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop eosio.msig payloadless tic_tac_toe deferred_test snapshot_test) #Manually run unit_test for all supported runtimes #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. unit_test -- --verbose -add_test(NAME unit_test_binaryen COMMAND unit_test - -t \!wasm_tests/weighted_cpu_limit_tests - --report_level=detailed --color_output -- --binaryen) add_test(NAME unit_test_wavm COMMAND unit_test -t \!wasm_tests/weighted_cpu_limit_tests --report_level=detailed --color_output --catch_system_errors=no -- --wavm) @@ -59,7 +56,7 @@ if(ENABLE_COVERAGE_TESTING) endif() # NOT GENHTML_PATH # no spaces allowed within tests list - set(ctest_tests 'unit_test_binaryen|unit_test_wavm') + set(ctest_tests 'unit_test_wabt|unit_test_wavm') set(ctest_exclude_tests '') # Setup target @@ -71,7 +68,7 @@ if(ENABLE_COVERAGE_TESTING) # Run tests COMMAND ./tools/ctestwrapper.sh -R ${ctest_tests} -E ${ctest_exclude_tests} - COMMAND ${LCOV_PATH} --directory . 
--capture --gcov-tool ./tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info + COMMAND ${LCOV_PATH} --directory . --capture --gcov-tool ${CMAKE_SOURCE_DIR}/tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info COMMAND ${LCOV_PATH} -remove ${Coverage_NAME}.info '*/boost/*' '/usr/lib/*' '/usr/include/*' '*/externals/*' '*/fc/*' '*/wasm-jit/*' --output-file ${Coverage_NAME}_filtered.info diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index 44677b261c7..c3b46d3f847 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -3528,6 +3528,8 @@ BOOST_AUTO_TEST_CASE(abi_deep_structs_validate) BOOST_AUTO_TEST_CASE(variants) { + using eosio::testing::fc_exception_message_starts_with; + auto duplicate_variant_abi = R"({ "version": "eosio::abi/1.1", "variants": [ @@ -3575,13 +3577,18 @@ BOOST_AUTO_TEST_CASE(variants) BOOST_CHECK_THROW( abi_serializer( fc::json::from_string(variant_abi_invalid_type).as(), max_serialization_time ), invalid_type_inside_abi ); // expected array containing variant - BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"(9)"), max_serialization_time), abi_exception ); - BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"([4])"), max_serialization_time), abi_exception ); - BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"([4, 5])"), max_serialization_time), abi_exception ); - BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"(["4", 5, 6])"), max_serialization_time), abi_exception ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(9)"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Expected input to be an array of two items while processing variant 'v1'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"([4])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Expected input to be an array of two items while processing 
variant 'v1") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"([4, 5])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Encountered non-string as first item of input array while processing variant 'v1") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["4", 5, 6])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Expected input to be an array of two items while processing variant 'v1'") ); // type is not valid within this variant - BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"(["int9", 21])"), max_serialization_time), abi_exception ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["int9", 21])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Specified type 'int9' in input array is not valid within the variant 'v1'") ); verify_round_trip_conversion(abis, "v1", R"(["int8",21])", "0015"); verify_round_trip_conversion(abis, "v1", R"(["string","abcd"])", "010461626364"); @@ -3594,6 +3601,8 @@ BOOST_AUTO_TEST_CASE(variants) BOOST_AUTO_TEST_CASE(extend) { + using eosio::testing::fc_exception_message_starts_with; + auto abi = R"({ "version": "eosio::abi/1.1", "structs": [ @@ -3603,18 +3612,27 @@ BOOST_AUTO_TEST_CASE(extend) {"name": "i2", "type": "int8$"}, {"name": "a", "type": "int8[]$"}, {"name": "o", "type": "int8?$"}, + ]}, + {"name": "s2", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", "type": "int8$"}, + {"name": "i2", "type": "int8"}, ]} ], })"; + // NOTE: Ideally this ABI would be rejected during validation for an improper definition for struct "s2". + // Such a check is not yet implemented during validation, but it can check during serialization. 
try { abi_serializer abis(fc::json::from_string(abi).as(), max_serialization_time ); // missing i1 - BOOST_CHECK_THROW( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5})"), max_serialization_time), abi_exception ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5})"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Missing field 'i1' in input object while processing struct") ); // Unexpected 'a' - BOOST_CHECK_THROW( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5,"i1":6,"a":[8,9,10]})"), max_serialization_time), pack_exception ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5,"i1":6,"a":[8,9,10]})"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Unexpected field 'a' found in input object while processing struct") ); verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6})", "0506"); verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6,"i2":7})", "050607"); @@ -3627,6 +3645,11 @@ BOOST_AUTO_TEST_CASE(extend) verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10]])", "0506070308090a", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10]})"); verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10],null])", "0506070308090a00", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":null})"); verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10],31])", "0506070308090a011f", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":31})"); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"i0":1})"), max_serialization_time), + abi_exception, fc_exception_message_starts_with("Encountered field 'i2' without binary extension designation while processing struct") ); + + } FC_LOG_AND_RETHROW() } @@ -3660,10 +3683,10 @@ BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_array) abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", 
fc::json::from_string(R"([])"), max_serialization_time), - pack_exception, fc_exception_message_starts_with("Early end to array specifying the fields of struct") ); + pack_exception, fc_exception_message_starts_with("Early end to input array specifying the fields of struct") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"([1,2])"), max_serialization_time), - pack_exception, fc_exception_message_starts_with("Early end to array specifying the fields of struct") ); + pack_exception, fc_exception_message_starts_with("Early end to input array specifying the fields of struct") ); verify_round_trip_conversion(abis, "s", R"([1,2,3])", "010203", R"({"i0":1,"i1":2,"i2":3})"); @@ -3672,7 +3695,7 @@ BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_array) BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_object) { - using eosio::testing::fc_exception_message_is; + using eosio::testing::fc_exception_message_starts_with; auto abi = R"({ "version": "eosio::abi/1.0", @@ -3692,10 +3715,10 @@ BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_object) abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({})"), max_serialization_time), - pack_exception, fc_exception_message_is("Missing 'f0' in variant object") ); + pack_exception, fc_exception_message_starts_with("Missing field 'f0' in input object") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":{"i0":1}})"), max_serialization_time), - pack_exception, fc_exception_message_is("Missing 'i1' in variant object") ); + pack_exception, fc_exception_message_starts_with("Missing field 'i1' in input object") ); verify_round_trip_conversion(abis, "s2", R"({"f0":{"i0":1,"i1":2},"i2":3})", "010203"); @@ -3723,12 +3746,249 @@ BOOST_AUTO_TEST_CASE(abi_serialize_json_mismatching_type) abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); 
BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":1,"i1":2})"), max_serialization_time), - pack_exception, fc_exception_message_is("Failed to serialize struct 's1' in variant object") ); + pack_exception, fc_exception_message_is("Unexpected input encountered while processing struct 's2.f0'") ); verify_round_trip_conversion(abis, "s2", R"({"f0":{"i0":1},"i1":2})", "0102"); } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(abi_serialize_detailed_error_messages) +{ + using eosio::testing::fc_exception_message_is; + + auto abi = R"({ + "version": "eosio::abi/1.1", + "types": [ + {"new_type_name": "foo", "type": "s2"}, + {"new_type_name": "bar", "type": "foo"}, + {"new_type_name": "s1array", "type": "s1[]"}, + {"new_type_name": "s1arrayarray", "type": "s1array[]"} + ], + "structs": [ + {"name": "s1", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", "type": "int8"} + ]}, + {"name": "s2", "base": "", "fields": [ + {"name": "f0", "type": "s1"}, + {"name": "i2", "type": "int8"} + ]}, + {"name": "s3", "base": "s1", "fields": [ + {"name": "i2", "type": "int8"}, + {"name": "f3", "type": "v2"}, + {"name": "f4", "type": "foo$"}, + {"name": "f5", "type": "s1$"} + ]}, + {"name": "s4", "base": "", "fields": [ + {"name": "f0", "type": "int8[]"}, + {"name": "f1", "type": "s1[]"} + ]}, + {"name": "s5", "base": "", "fields": [ + {"name": "f0", "type": "v2[]"}, + ]}, + ], + "variants": [ + {"name": "v1", "types": ["s3", "int8", "s4"]}, + {"name": "v2", "types": ["foo", "bar"]}, + ], + })"; + + try { + abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("bar", fc::json::from_string(R"({"f0":{"i0":1},"i2":3})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's2.f0'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i2":3})"), 
max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's3'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["s2",{}]})"), max_serialization_time), + pack_exception, fc_exception_message_is("Specified type 's2' in input array is not valid within the variant 's3.f3'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11},"i2":13}]})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's3.f3..f0'") ); + + verify_round_trip_conversion(abis, "s3", R"({"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11,"i1":12},"i2":13}]})", "010203010b0c0d"); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["s3",{"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11,"i1":12},"i2":13}],"f5":0}])"), max_serialization_time), + pack_exception, fc_exception_message_is("Unexpected field 'f5' found in input object while processing struct 'v1.'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["s4",{"f0":[0,1],"f1":[{"i0":2,"i1":3},{"i1":5}]}])"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 'v1..f1[1]'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2[]", fc::json::from_string(R"([{"f0":{"i0":1,"i1":2},"i2":3},{"f0":{"i0":4},"i2":6}])"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 'ARRAY[1].f0'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s5", fc::json::from_string(R"({"f0":[["bar",{"f0":{"i0":1,"i1":2},"i2":3}],["foo",{"f0":{"i0":4},"i2":6}]]})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while 
processing struct 's5.f0[1]..f0'") ); + + verify_round_trip_conversion( abis, "s1arrayarray", R"([[{"i0":1,"i1":2},{"i0":3,"i1":4}],[{"i0":5,"i1":6},{"i0":7,"i1":8},{"i0":9,"i1":10}]])", "0202010203040305060708090a"); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s1arrayarray", fc::json::from_string(R"([[{"i0":1,"i1":2},{"i0":3,"i1":4}],[{"i0":6,"i1":6},{"i0":7,"i1":8},{"i1":10}]])"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 'ARRAY[1][2]'") ); + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(abi_serialize_short_error_messages) +{ + using eosio::testing::fc_exception_message_is; + + auto abi = R"({ + "version": "eosio::abi/1.1", + "types": [ + {"new_type_name": "foo", "type": "s2"}, + {"new_type_name": "bar", "type": "foo"}, + {"new_type_name": "s1array", "type": "s1[]"}, + {"new_type_name": "s1arrayarray", "type": "s1array[]"} + ], + "structs": [ + {"name": "s1", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", "type": "int8"} + ]}, + {"name": "s2", "base": "", "fields": [ + {"name": "f0", "type": "s1"}, + {"name": "i2", "type": "int8"} + ]}, + {"name": "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", "base": "s1", "fields": [ + {"name": "i2", "type": "int8"}, + {"name": "f3", "type": "v2"}, + {"name": "f4", "type": "foo$"}, + {"name": "very_very_very_very_very_very_very_very_very_very_long_field_name_f5", "type": "s1$"} + ]}, + {"name": "s4", "base": "", "fields": [ + {"name": "f0", "type": "int8[]"}, + {"name": "f1", "type": "s1[]"} + ]}, + {"name": "s5", "base": "", "fields": [ + {"name": "f0", "type": "v2[]"}, + ]}, + ], + "variants": [ + {"name": "v1", "types": ["very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", "int8", "s4"]}, + {"name": "v2", "types": ["foo", "bar"]}, + ], + })"; + + try { + abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); + + 
BOOST_CHECK_EXCEPTION( abis.variant_to_binary("bar", fc::json::from_string(R"({"f0":{"i0":1},"i2":3})"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", + fc::json::from_string(R"({"i0":1,"i2":3})"), max_serialization_time, true ), + pack_exception, + fc_exception_message_is("Missing field 'i1' in input object while processing struct 'very_very_very_very_very_very_very_very_very_very_long_...ame_s3'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", + fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["s2",{}]})"), max_serialization_time, true ), + pack_exception, fc_exception_message_is("Specified type 's2' in input array is not valid within the variant 'v2'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", + fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11},"i2":13}]})"), max_serialization_time, true ), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "v1", + fc::json::from_string(R"(["very_very_very_very_very_very_very_very_very_very_long_struct_name_s3",{"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11,"i1":12},"i2":13}],"very_very_very_very_very_very_very_very_very_very_long_field_name_f5":0}])"), + max_serialization_time, true ), + pack_exception, + fc_exception_message_is("Unexpected field 'very_very_very_very_very_very_very_very_very_very_long_...ame_f5' found in input object while processing struct 'very_very_very_very_very_very_very_very_very_very_long_...ame_s3'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", 
fc::json::from_string(R"(["s4",{"f0":[0,1],"f1":[{"i0":2,"i1":3},{"i1":5}]}])"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 's1'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2[]", fc::json::from_string(R"([{"f0":{"i0":1,"i1":2},"i2":3},{"f0":{"i0":4},"i2":6}])"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s5", fc::json::from_string(R"({"f0":[["bar",{"f0":{"i0":1,"i1":2},"i2":3}],["foo",{"f0":{"i0":4},"i2":6}]]})"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s1arrayarray", fc::json::from_string(R"([[{"i0":1,"i1":2},{"i0":3,"i1":4}],[{"i0":6,"i1":6},{"i0":7,"i1":8},{"i1":10}]])"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 's1'") ); + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(abi_deserialize_detailed_error_messages) +{ + using eosio::testing::fc_exception_message_is; + + auto abi = R"({ + "version": "eosio::abi/1.1", + "types": [ + {"new_type_name": "oint", "type": "int8?"}, + {"new_type_name": "os1", "type": "s1?"} + ], + "structs": [ + {"name": "s1", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", "type": "int8"} + ]}, + {"name": "s2", "base": "", "fields": [ + {"name": "f0", "type": "int8[]"}, + {"name": "f1", "type": "s1[]"} + ]}, + {"name": "s3", "base": "s1", "fields": [ + {"name": "i3", "type": "int8"}, + {"name": "i4", "type": "int8$"}, + {"name": "i5", "type": "int8"} + ]}, + {"name": "s4", "base": "", "fields": [ + {"name": "f0", "type": "oint[]"} + ]}, + {"name": "s5", "base": "", "fields": [ + {"name": "f0", "type": "os1[]"}, + {"name": 
"f1", "type": "v1[]"}, + ]}, + {"name": "s6", "base": "", "fields": [ + ]}, + ], + "variants": [ + {"name": "v1", "types": ["int8", "s1"]}, + ], + })"; + + try { + abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("020102").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'f1' of struct 's2'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("0201020103").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'i1' of struct 's2.f1[0]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("020102ff").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack size of array 's2.f1'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s3", fc::variant("010203").as(), max_serialization_time), + abi_exception, fc_exception_message_is("Encountered field 'i5' without binary extension designation while processing struct 's3'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s3", fc::variant("02010304").as(), max_serialization_time), + abi_exception, fc_exception_message_is("Encountered field 'i5' without binary extension designation while processing struct 's3'") ); + + // This check actually points to a problem with the current abi_serializer. + // An array of optionals (which is unfortunately not rejected in validation) leads to an unpack_exception here because one of the optional elements is not present. + // However, abis.binary_to_variant("s4", fc::variant("03010101020103").as(), max_serialization_time) would work just fine! 
+ BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s4", fc::variant("030101000103").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Invalid packed array 's4.f0[1]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s4", fc::variant("020101").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack optional of built-in type 'int8' while processing 's4.f0[1]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("02010102").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack presence flag of optional 's5.f0[1]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("0001").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack tag of variant 's5.f1[0]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("00010501").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unpacked invalid tag (5) for variant 's5.f1[0]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("00010101").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'i1' of struct 's5.f1[0].'") ); + + } FC_LOG_AND_RETHROW() +} BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index c9811adf071..60bde1edd99 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1964,23 +1964,9 @@ BOOST_FIXTURE_TEST_CASE(new_api_feature_tests, TESTER) { try { }); // change privilege - { - chainbase::database &db = control->db(); - const account_object &account = db.get(N(testapi)); - db.modify(account, [&](account_object &v) { - v.privileged = true; - }); - } - -#ifndef NON_VALIDATING_TEST - { - chainbase::database &db = validating_node->db(); - const account_object &account = db.get(N(testapi)); - db.modify(account, [&](account_object &v) { - 
v.privileged = true; - }); - } -#endif + push_action(config::system_account_name, N(setpriv), config::system_account_name, mutable_variant_object() + ("account", "testapi") + ("is_priv", 1)); CALL_TEST_FUNCTION( *this, "test_transaction", "new_feature", {} ); diff --git a/unittests/auth_tests.cpp b/unittests/auth_tests.cpp index bec8ab67b18..e54964b87b4 100644 --- a/unittests/auth_tests.cpp +++ b/unittests/auth_tests.cpp @@ -383,7 +383,7 @@ try { chain.create_account(acc1a); chain.produce_block(); - chainbase::database &db = chain.control->db(); + const chainbase::database &db = chain.control->db(); using resource_usage_object = eosio::chain::resource_limits::resource_usage_object; using by_owner = eosio::chain::resource_limits::by_owner; diff --git a/unittests/database_tests.cpp b/unittests/database_tests.cpp index d2192d980f2..ac97f6c21a6 100644 --- a/unittests/database_tests.cpp +++ b/unittests/database_tests.cpp @@ -26,7 +26,9 @@ BOOST_AUTO_TEST_SUITE(database_tests) BOOST_AUTO_TEST_CASE(undo_test) { try { TESTER test; - auto &db = test.control->db(); + + // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test. 
+ eosio::chain::database& db = const_cast( test.control->db() ); auto ses = db.start_undo_session(true); diff --git a/unittests/delay_tests.cpp b/unittests/delay_tests.cpp index 6d2d0ea63cc..095ba93dfbe 100644 --- a/unittests/delay_tests.cpp +++ b/unittests/delay_tests.cpp @@ -2317,16 +2317,10 @@ BOOST_AUTO_TEST_CASE( max_transaction_delay_execute ) { try { chain.produce_blocks(); //change max_transaction_delay to 60 sec - chain.control->db().modify( chain.control->get_global_properties(), - [&]( auto& gprops ) { - gprops.configuration.max_transaction_delay = 60; - }); -#ifndef NON_VALIDATING_TEST - chain.validating_node->db().modify( chain.validating_node->get_global_properties(), - [&]( auto& gprops ) { - gprops.configuration.max_transaction_delay = 60; - }); -#endif + auto params = chain.control->get_global_properties().configuration; + params.max_transaction_delay = 60; + chain.push_action( config::system_account_name, N(setparams), config::system_account_name, mutable_variant_object() + ("params", params) ); chain.produce_blocks(); //should be able to create transaction with delay 60 sec, despite permission delay being 30 days, because max_transaction_delay is 60 sec diff --git a/unittests/dice_tests.cpp b/unittests/dice_tests.cpp deleted file mode 100644 index cb63511d39c..00000000000 --- a/unittests/dice_tests.cpp +++ /dev/null @@ -1,425 +0,0 @@ -#include -#include -#include -#include - -#include -#include - -#include -#include - -#include - -#include - -#ifdef NON_VALIDATING_TEST -#define TESTER tester -#else -#define TESTER validating_tester -#endif - -using namespace eosio; -using namespace eosio::chain; -using namespace eosio::testing; -using namespace fc; -using namespace std; -using mvo = fc::mutable_variant_object; - -struct offer_bet_t { - asset bet; - account_name player; - checksum256_type commitment; - - static account_name get_account() { return N(dice); } - static action_name get_name() {return N(offerbet); } -}; -FC_REFLECT(offer_bet_t, 
(bet)(player)(commitment)); - -struct cancel_offer_t { - checksum256_type commitment; - - static account_name get_account() { return N(dice); } - static action_name get_name() {return N(canceloffer); } -}; -FC_REFLECT(cancel_offer_t, (commitment)); - -struct reveal_t { - checksum256_type commitment; - checksum256_type source; - - static account_name get_account() { return N(dice); } - static action_name get_name() {return N(reveal); } -}; -FC_REFLECT(reveal_t, (commitment)(source)); - -struct deposit_t { - account_name from; - asset amount; - - static account_name get_account() { return N(dice); } - static action_name get_name() {return N(deposit); } -}; -FC_REFLECT( deposit_t, (from)(amount) ); - -struct withdraw_t { - account_name to; - asset amount; - - static account_name get_account() { return N(dice); } - static action_name get_name() {return N(withdraw); } -}; -FC_REFLECT( withdraw_t, (to)(amount) ); - -struct __attribute((packed)) account_t { - account_name owner; - asset eos_balance; - uint32_t open_offers; - uint32_t open_games; -}; -FC_REFLECT(account_t, (owner)(eos_balance)(open_offers)(open_games)); - -struct player_t { - checksum_type commitment; - checksum_type reveal; -}; -FC_REFLECT(player_t, (commitment)(reveal)); - -struct __attribute((packed)) game_t { - uint64_t gameid; - asset bet; - fc::time_point_sec deadline; - player_t player1; - player_t player2; -}; -FC_REFLECT(game_t, (gameid)(bet)(deadline)(player1)(player2)); - -struct dice_tester : TESTER { - - template - const auto& get_index() { - return control->db().get_index(); - } - - void offer_bet(account_name account, asset amount, const checksum_type& commitment) { - signed_transaction trx; - action act( {{account, config::active_name}}, - offer_bet_t{amount, account, commitment} ); - trx.actions.push_back(act); - set_transaction_headers(trx); - trx.sign(get_private_key( account, "active" ), control->get_chain_id()); - auto ptrx = packed_transaction(trx,packed_transaction::none); - 
push_transaction(ptrx); - } - - void cancel_offer(account_name account, const checksum_type& commitment) { - signed_transaction trx; - action act( {{account, config::active_name}}, - cancel_offer_t{commitment} ); - trx.actions.push_back(act); - set_transaction_headers(trx); - trx.sign(get_private_key( account, "active" ), control->get_chain_id()); - auto ptrx = packed_transaction(trx,packed_transaction::none); - push_transaction(ptrx); - } - - void deposit(account_name account, asset amount) { - signed_transaction trx; - action act( {{account, config::active_name}}, - deposit_t{account, amount} ); - trx.actions.push_back(act); - set_transaction_headers(trx); - trx.sign(get_private_key( account, "active" ), control->get_chain_id()); - auto ptrx = packed_transaction(trx,packed_transaction::none); - push_transaction(ptrx); - } - - void withdraw(account_name account, asset amount) { - signed_transaction trx; - action act( {{account, config::active_name}}, - withdraw_t{account, amount} ); - trx.actions.push_back(act); - set_transaction_headers(trx); - trx.sign(get_private_key( account, "active" ), control->get_chain_id()); - auto ptrx = packed_transaction(trx,packed_transaction::none); - push_transaction(ptrx); - } - - void reveal(account_name account, const checksum_type& commitment, const checksum_type& source ) { - signed_transaction trx; - action act( {{account, config::active_name}}, - reveal_t{commitment, source} ); - trx.actions.push_back(act); - set_transaction_headers(trx); - trx.sign(get_private_key( account, "active" ), control->get_chain_id()); - auto ptrx = packed_transaction(trx,packed_transaction::none); - push_transaction(ptrx); - } - - bool dice_account(account_name account, account_t& acnt) { - auto* maybe_tid = find_table(N(dice), N(dice), N(account)); - if(maybe_tid == nullptr) return false; - - auto* o = control->db().find(boost::make_tuple(maybe_tid->id, account)); - if(o == nullptr) { - return false; - } - - fc::raw::unpack(o->value.data(), 
o->value.size(), acnt); - return true; - } - - bool dice_game(uint64_t game_id, game_t& game) { - const bool not_required = false; - return get_table_entry(game, N(dice), N(dice), N(game), game_id, not_required); - } - - uint32_t open_games(account_name account) { - account_t acnt; - if(!dice_account(account, acnt)) return 0; - return acnt.open_games; - } - - asset game_bet(uint64_t game_id) { - game_t game; - if(!dice_game(game_id, game)) return asset(); - return game.bet; - } - - uint32_t open_offers(account_name account) { - account_t acnt; - if(!dice_account(account, acnt)) return 0; - return acnt.open_offers; - } - - asset balance_of(account_name account) { - account_t acnt; - if(!dice_account(account, acnt)) return asset(); - return acnt.eos_balance; - } - - checksum_type commitment_for( const char* secret ) { - return commitment_for(checksum_type(secret)); - } - - checksum_type commitment_for( const checksum_type& secret ) { - return fc::sha256::hash( secret.data(), sizeof(secret) ); - } - - void add_dice_authority(account_name account) { - auto auth = authority{ - 1, - { - {.key = get_public_key(account,"active"), .weight = 1} - }, - { - {.permission = {N(dice),N(active)}, .weight = 1} - } - }; - set_authority(account, N(active), auth, N(owner) ); - } -}; - -BOOST_AUTO_TEST_SUITE(dice_tests) - -BOOST_FIXTURE_TEST_CASE( dice_test, dice_tester ) try { - - create_accounts( {N(eosio.token), N(dice),N(alice),N(bob),N(carol),N(david)}, false); - - set_code(N(eosio.token), eosio_token_wast); - set_abi(N(eosio.token), eosio_token_abi); - - produce_block(); - - add_dice_authority(N(alice)); - add_dice_authority(N(bob)); - add_dice_authority(N(carol)); - - push_action(N(eosio.token), N(create), N(eosio.token), mvo() - ("issuer", "eosio.token") - ("maximum_supply", core_from_string("1000000000.0000")) - ); - - push_action(N(eosio.token), N(issue), N(eosio.token), mvo() - ("to", "eosio") - ("quantity", core_from_string("1000000000.0000")) - ("memo", "") - ); - - 
transfer( config::system_account_name, N(alice), core_from_string("10000.0000"), "", N(eosio.token) ); - transfer( config::system_account_name, N(bob), core_from_string("10000.0000"), "", N(eosio.token) ); - transfer( config::system_account_name, N(carol), core_from_string("10000.0000"), "", N(eosio.token) ); - - produce_block(); - - set_code(N(dice), dice_wast); - set_abi(N(dice), dice_abi); - - produce_block(); - - // Alice deposits 1000 - deposit( N(alice), core_from_string("1000.0000")); - produce_block(); - - BOOST_REQUIRE_EQUAL( balance_of(N(alice)), core_from_string("1000.0000")); - BOOST_REQUIRE_EQUAL( open_games(N(alice)), 0); - - // Alice tries to bet 0 (fail) - // secret : 9b886346e1351d4144d0b8392a975612eb0f8b6de7eae1cc9bcc55eb52be343c - BOOST_CHECK_THROW( offer_bet( N(alice), core_from_string("0.0000"), - commitment_for("9b886346e1351d4144d0b8392a975612eb0f8b6de7eae1cc9bcc55eb52be343c") - ), fc::exception); - - // Alice bets 10 (success) - // secret : 0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46 - offer_bet( N(alice), core_from_string("10.0000"), - commitment_for("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ); - produce_block(); - - // Bob tries to bet using a secret previously used by Alice (fail) - // secret : 00000000000000000000000000000002c334abe6ce13219a4cf3b862abb03c46 - BOOST_CHECK_THROW( offer_bet( N(bob), core_from_string("10.0000"), - commitment_for("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ), fc::exception); - produce_block(); - - // Alice tries to bet 1000 (fail) - // secret : a512f6b1b589a8906d574e9de74a529e504a5c53a760f0991a3e00256c027971 - BOOST_CHECK_THROW( offer_bet( N(alice), core_from_string("1000.0000"), - commitment_for("a512f6b1b589a8906d574e9de74a529e504a5c53a760f0991a3e00256c027971") - ), fc::exception); - produce_block(); - - // Bob tries to bet 90 without deposit - // secret : 4facfc98932dde46fdc4403125a16337f6879a842a7ff8b0dc8e1ecddd59f3c8 - 
BOOST_CHECK_THROW( offer_bet( N(bob), core_from_string("90.0000"), - commitment_for("4facfc98932dde46fdc4403125a16337f6879a842a7ff8b0dc8e1ecddd59f3c8") - ), fc::exception); - produce_block(); - - // Bob deposits 500 - deposit( N(bob), core_from_string("500.0000")); - BOOST_REQUIRE_EQUAL( balance_of(N(bob)), core_from_string("500.0000")); - - // Bob bets 11 (success) - // secret : eec3272712d974c474a3e7b4028b53081344a5f50008e9ccf918ba0725a8d784 - offer_bet( N(bob), core_from_string("11.0000"), - commitment_for("eec3272712d974c474a3e7b4028b53081344a5f50008e9ccf918ba0725a8d784") - ); - produce_block(); - - // Bob cancels (success) - BOOST_REQUIRE_EQUAL( open_offers(N(bob)), 1); - cancel_offer( N(bob), commitment_for("eec3272712d974c474a3e7b4028b53081344a5f50008e9ccf918ba0725a8d784") ); - BOOST_REQUIRE_EQUAL( open_offers(N(bob)), 0); - - // Carol deposits 300 - deposit( N(carol), core_from_string("300.0000")); - - // Carol bets 10 (success) - // secret : 3efb4bd5e19b780f4980c919330c0306f8157f93db1fc72c7cefec63e0e7f37a - offer_bet( N(carol), core_from_string("10.0000"), - commitment_for("3efb4bd5e19b780f4980c919330c0306f8157f93db1fc72c7cefec63e0e7f37a") - ); - produce_block(); - - BOOST_REQUIRE_EQUAL( open_games(N(alice)), 1); - BOOST_REQUIRE_EQUAL( open_offers(N(alice)), 0); - - BOOST_REQUIRE_EQUAL( open_games(N(carol)), 1); - BOOST_REQUIRE_EQUAL( open_offers(N(carol)), 0); - - BOOST_REQUIRE_EQUAL( game_bet(1), core_from_string("10.0000")); - - - // Alice tries to cancel a nonexistent bet (fail) - BOOST_CHECK_THROW( cancel_offer( N(alice), - commitment_for("00000000000000000000000000000000000000000000000000000000abb03c46") - ), fc::exception); - - // Alice tries to cancel an in-game bet (fail) - BOOST_CHECK_THROW( cancel_offer( N(alice), - commitment_for("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ), fc::exception); - - // Alice reveals secret (success) - reveal( N(alice), - 
commitment_for("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46"), - checksum_type("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ); - produce_block(); - - // Alice tries to reveal again (fail) - BOOST_CHECK_THROW( reveal( N(alice), - commitment_for("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46"), - checksum_type("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ), fc::exception); - - // Bob tries to reveal an invalid (secret,commitment) pair (fail) - BOOST_CHECK_THROW( reveal( N(bob), - commitment_for("121344d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46"), - checksum_type("141544d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ), fc::exception); - - // Bob tries to reveal a valid (secret,commitment) pair that has no offer/game (fail) - BOOST_CHECK_THROW( reveal( N(bob), - commitment_for("e48c6884bb97ac5f5951df6012ce79f63bb8549ad0111315ad9ecbaf4c9b1eb8"), - checksum_type("e48c6884bb97ac5f5951df6012ce79f63bb8549ad0111315ad9ecbaf4c9b1eb8") - ), fc::exception); - - // Bob reveals Carol's secret (success) - reveal( N(bob), - commitment_for("3efb4bd5e19b780f4980c919330c0306f8157f93db1fc72c7cefec63e0e7f37a"), - checksum_type("3efb4bd5e19b780f4980c919330c0306f8157f93db1fc72c7cefec63e0e7f37a") - ); - - BOOST_REQUIRE_EQUAL( open_games(N(alice)), 0); - BOOST_REQUIRE_EQUAL( open_offers(N(alice)), 0); - BOOST_REQUIRE_EQUAL( balance_of(N(alice)), core_from_string("1010.0000")); - - BOOST_REQUIRE_EQUAL( open_games(N(carol)), 0); - BOOST_REQUIRE_EQUAL( open_offers(N(carol)), 0); - BOOST_REQUIRE_EQUAL( balance_of(N(carol)), core_from_string("290.0000")); - - // Alice withdraw 1009 (success) - withdraw( N(alice), core_from_string("1009.0000")); - BOOST_REQUIRE_EQUAL( balance_of(N(alice)), core_from_string("1.0000")); - - BOOST_REQUIRE_EQUAL( - get_currency_balance(N(eosio.token), symbol(CORE_SYMBOL), N(alice)), - core_from_string("10009.0000") - ); - - // Alice withdraw 2 
(fail) - BOOST_CHECK_THROW( withdraw( N(alice), core_from_string("2.0000")), - fc::exception); - - // Alice withdraw 1 (success) - withdraw( N(alice), core_from_string("1.0000")); - - BOOST_REQUIRE_EQUAL( - get_currency_balance(N(eosio.token), symbol(CORE_SYMBOL), N(alice)), - core_from_string("10010.0000") - ); - - // Verify alice account was deleted - account_t alice_account; - BOOST_CHECK(dice_account(N(alice), alice_account) == false); - - // No games in table - auto* game_tid = find_table(N(dice), N(dice), N(game)); - BOOST_CHECK(game_tid == nullptr); - - // No offers in table - auto* offer_tid = find_table(N(dice), N(dice), N(offer)); - BOOST_CHECK(offer_tid == nullptr); - - // 2 records in account table (Bob & Carol) - auto* account_tid = find_table(N(dice), N(dice), N(account)); - BOOST_CHECK(account_tid != nullptr); - BOOST_CHECK(account_tid->count == 2); - -} FC_LOG_AND_RETHROW() /// basic_test - -BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp new file mode 100644 index 00000000000..c6100219d21 --- /dev/null +++ b/unittests/snapshot_tests.cpp @@ -0,0 +1,253 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ + +#include +#include +#include + +#include + +#include +#include + +#include + +using namespace eosio; +using namespace testing; +using namespace chain; + +class snapshotted_tester : public base_tester { +public: + snapshotted_tester(controller::config config, const snapshot_reader_ptr& snapshot, int ordinal) { + FC_ASSERT(config.blocks_dir.filename().generic_string() != "." 
+ && config.state_dir.filename().generic_string() != ".", "invalid path names in controller::config"); + + controller::config copied_config = config; + copied_config.blocks_dir = + config.blocks_dir.parent_path() / std::to_string(ordinal).append(config.blocks_dir.filename().generic_string()); + copied_config.state_dir = + config.state_dir.parent_path() / std::to_string(ordinal).append(config.state_dir.filename().generic_string()); + + init(copied_config, snapshot); + } + + snapshotted_tester(controller::config config, const snapshot_reader_ptr& snapshot, int ordinal, int copy_block_log_from_ordinal) { + FC_ASSERT(config.blocks_dir.filename().generic_string() != "." + && config.state_dir.filename().generic_string() != ".", "invalid path names in controller::config"); + + controller::config copied_config = config; + copied_config.blocks_dir = + config.blocks_dir.parent_path() / std::to_string(ordinal).append(config.blocks_dir.filename().generic_string()); + copied_config.state_dir = + config.state_dir.parent_path() / std::to_string(ordinal).append(config.state_dir.filename().generic_string()); + + // create a copy of the desired block log and reversible + auto block_log_path = config.blocks_dir.parent_path() / std::to_string(copy_block_log_from_ordinal).append(config.blocks_dir.filename().generic_string()); + fc::create_directories(copied_config.blocks_dir); + fc::copy(block_log_path / "blocks.log", copied_config.blocks_dir / "blocks.log"); + fc::copy(block_log_path / config::reversible_blocks_dir_name, copied_config.blocks_dir / config::reversible_blocks_dir_name ); + + init(copied_config, snapshot); + } + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { + return _produce_block(skip_time, false, skip_flag); + } + + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 
0/*skip_missed_block_penalty*/ )override { + control->abort_block(); + return _produce_block(skip_time, true, skip_flag); + } + + bool validate() { return true; } +}; + +struct variant_snapshot_suite { + using writer_t = variant_snapshot_writer; + using reader_t = variant_snapshot_reader; + using write_storage_t = fc::mutable_variant_object; + using snapshot_t = fc::variant; + + struct writer : public writer_t { + writer( const std::shared_ptr& storage ) + :writer_t(*storage) + ,storage(storage) + { + + } + + std::shared_ptr storage; + }; + + struct reader : public reader_t { + explicit reader(const snapshot_t& storage) + :reader_t(storage) + {} + }; + + + static auto get_writer() { + return std::make_shared(std::make_shared()); + } + + static auto finalize(const std::shared_ptr& w) { + w->finalize(); + return snapshot_t(*w->storage); + } + + static auto get_reader( const snapshot_t& buffer) { + return std::make_shared(buffer); + } + +}; + +struct buffered_snapshot_suite { + using writer_t = ostream_snapshot_writer; + using reader_t = istream_snapshot_reader; + using write_storage_t = std::ostringstream; + using snapshot_t = std::string; + using read_storage_t = std::istringstream; + + struct writer : public writer_t { + writer( const std::shared_ptr& storage ) + :writer_t(*storage) + ,storage(storage) + { + + } + + std::shared_ptr storage; + }; + + struct reader : public reader_t { + explicit reader(const std::shared_ptr& storage) + :reader_t(*storage) + ,storage(storage) + {} + + std::shared_ptr storage; + }; + + + static auto get_writer() { + return std::make_shared(std::make_shared()); + } + + static auto finalize(const std::shared_ptr& w) { + w->finalize(); + return w->storage->str(); + } + + static auto get_reader( const snapshot_t& buffer) { + return std::make_shared(std::make_shared(buffer)); + } + +}; + +BOOST_AUTO_TEST_SUITE(snapshot_tests) + +using snapshot_suites = boost::mpl::list; + +BOOST_AUTO_TEST_CASE_TEMPLATE(test_exhaustive_snapshot, 
SNAPSHOT_SUITE, snapshot_suites) +{ + tester chain; + + chain.create_account(N(snapshot)); + chain.produce_blocks(1); + chain.set_code(N(snapshot), snapshot_test_wast); + chain.set_abi(N(snapshot), snapshot_test_abi); + chain.produce_blocks(1); + chain.control->abort_block(); + + static const int generation_count = 8; + std::list<snapshotted_tester> sub_testers; + + for (int generation = 0; generation < generation_count; generation++) { + // create a new snapshot child + auto writer = SNAPSHOT_SUITE::get_writer(); + chain.control->write_snapshot(writer); + auto snapshot = SNAPSHOT_SUITE::finalize(writer); + + // create a new child at this snapshot + sub_testers.emplace_back(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), generation); + + // increment the test contract + chain.push_action(N(snapshot), N(increment), N(snapshot), mutable_variant_object() + ( "value", 1 ) + ); + + // produce block + auto new_block = chain.produce_block(); + + // undo the auto-pending from tester + chain.control->abort_block(); + + auto integrity_value = chain.control->calculate_integrity_hash(); + + // push that block to all sub testers and validate the integrity of the database after it. 
+ for (auto& other: sub_testers) { + other.push_block(new_block); + BOOST_REQUIRE_EQUAL(integrity_value.str(), other.control->calculate_integrity_hash().str()); + } + } +} + +BOOST_AUTO_TEST_CASE_TEMPLATE(test_replay_over_snapshot, SNAPSHOT_SUITE, snapshot_suites) +{ + tester chain; + + chain.create_account(N(snapshot)); + chain.produce_blocks(1); + chain.set_code(N(snapshot), snapshot_test_wast); + chain.set_abi(N(snapshot), snapshot_test_abi); + chain.produce_blocks(1); + chain.control->abort_block(); + + static const int pre_snapshot_block_count = 12; + static const int post_snapshot_block_count = 12; + + for (int itr = 0; itr < pre_snapshot_block_count; itr++) { + // increment the contract + chain.push_action(N(snapshot), N(increment), N(snapshot), mutable_variant_object() + ( "value", 1 ) + ); + + // produce block + chain.produce_block(); + } + + chain.control->abort_block(); + auto expected_pre_integrity_hash = chain.control->calculate_integrity_hash(); + + // create a new snapshot child + auto writer = SNAPSHOT_SUITE::get_writer(); + chain.control->write_snapshot(writer); + auto snapshot = SNAPSHOT_SUITE::finalize(writer); + + // create a new child at this snapshot + snapshotted_tester snap_chain(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), 1); + BOOST_REQUIRE_EQUAL(expected_pre_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); + + // push more blocks to build up a block log + for (int itr = 0; itr < post_snapshot_block_count; itr++) { + // increment the contract + chain.push_action(N(snapshot), N(increment), N(snapshot), mutable_variant_object() + ( "value", 1 ) + ); + + // produce & push block + snap_chain.push_block(chain.produce_block()); + } + + // verify the hash at the end + chain.control->abort_block(); + auto expected_post_integrity_hash = chain.control->calculate_integrity_hash(); + BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); + + // replay the 
block log from the snapshot child, from the snapshot + snapshotted_tester replay_chain(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), 2, 1); + BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), replay_chain.control->calculate_integrity_hash().str()); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/special_accounts_tests.cpp b/unittests/special_accounts_tests.cpp index 9bdbc588e3a..5f5ebe198e8 100644 --- a/unittests/special_accounts_tests.cpp +++ b/unittests/special_accounts_tests.cpp @@ -35,7 +35,7 @@ BOOST_FIXTURE_TEST_CASE(accounts_exists, tester) tester test; chain::controller *control = test.control.get(); - chain::database &chain1_db = control->db(); + const chain::database& chain1_db = control->db(); auto nobody = chain1_db.find(config::null_account_name); BOOST_CHECK(nobody != nullptr); diff --git a/unittests/whitelist_blacklist_tests.cpp b/unittests/whitelist_blacklist_tests.cpp index 1621e2ef916..9957b4ccccc 100644 --- a/unittests/whitelist_blacklist_tests.cpp +++ b/unittests/whitelist_blacklist_tests.cpp @@ -41,14 +41,10 @@ class whitelist_blacklist_tester { cfg.genesis.initial_key = base_tester::get_public_key( config::system_account_name, "active" ); for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { - if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--binaryen")) - cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; - else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) + if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) cfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; - else - cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; } return cfg;