From 9d8896b072cfec52691153fa3a35e0f1dbd88bb8 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Fri, 24 Aug 2018 09:37:26 -0400 Subject: [PATCH 001/161] add some basic utilities to help consistently work with the database --- libraries/chain/controller.cpp | 54 ++++++++++----- .../include/eosio/chain/account_object.hpp | 1 + .../eosio/chain/contract_table_objects.hpp | 27 +++++++- .../include/eosio/chain/database_utils.hpp | 65 +++++++++++++++++++ .../chain/generated_transaction_object.hpp | 2 + .../include/eosio/chain/producer_schedule.hpp | 1 + .../eosio/chain/transaction_object.hpp | 1 + libraries/fc | 2 +- 8 files changed, 134 insertions(+), 19 deletions(-) create mode 100644 libraries/chain/include/eosio/chain/database_utils.hpp diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 0c582871a11..f9c542b938a 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -22,10 +22,29 @@ #include +#include + namespace eosio { namespace chain { using resource_limits::resource_limits_manager; +using controller_index_set = index_set< + account_index, + account_sequence_index, + table_id_multi_index, + key_value_index, + index64_index, + index128_index, + index256_index, + index_double_index, + index_long_double_index, + global_property_multi_index, + dynamic_global_property_multi_index, + block_summary_multi_index, + transaction_multi_index, + generated_transaction_multi_index +>; + class maybe_session { public: maybe_session() = default; @@ -320,6 +339,8 @@ struct controller_impl { db.undo(); } + ilog( "database initialized with hash: ${hash}", ("hash", calculate_db_hash())); + } ~controller_impl() { @@ -332,23 +353,8 @@ struct controller_impl { void add_indices() { reversible_blocks.add_index(); - db.add_index(); - db.add_index(); - - db.add_index(); - db.add_index(); - db.add_index(); - db.add_index(); - db.add_index(); - db.add_index(); - db.add_index(); - - db.add_index(); - db.add_index(); - db.add_index(); - 
db.add_index(); - db.add_index(); - + controller_index_set::add_indices(db); + authorization.add_indices(); resource_limits.add_indices(); } @@ -365,6 +371,20 @@ struct controller_impl { }); } + + template + DigestType calculate_db_hash() { + + typename DigestType::encoder enc; + controller_index_set::walk_indices([this, &enc]( auto utils ){ + decltype(utils)::walk(db, [&enc]( const auto &row ) { + fc::raw::pack(enc, row); + }); + }); + + return enc.result(); + }; + /** * Sets fork database head to the genesis state. */ diff --git a/libraries/chain/include/eosio/chain/account_object.hpp b/libraries/chain/include/eosio/chain/account_object.hpp index e9c7885aea9..6fd13bf000e 100644 --- a/libraries/chain/include/eosio/chain/account_object.hpp +++ b/libraries/chain/include/eosio/chain/account_object.hpp @@ -82,3 +82,4 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_sequence_object, eosio::chain::ac FC_REFLECT(eosio::chain::account_object, (name)(vm_type)(vm_version)(code_version)(code)(creation_date)) +FC_REFLECT(eosio::chain::account_sequence_object, (name)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/contract_table_objects.hpp b/libraries/chain/include/eosio/chain/contract_table_objects.hpp index bc2fff140c4..a38947fe59a 100644 --- a/libraries/chain/include/eosio/chain/contract_table_objects.hpp +++ b/libraries/chain/include/eosio/chain/contract_table_objects.hpp @@ -10,6 +10,8 @@ #include +#include + #include #include @@ -217,4 +219,27 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::index_double_object, eosio::chain::index_ CHAINBASE_SET_INDEX_TYPE(eosio::chain::index_long_double_object, eosio::chain::index_long_double_index) FC_REFLECT(eosio::chain::table_id_object, (id)(code)(scope)(table) ) -FC_REFLECT(eosio::chain::key_value_object, (id)(t_id)(primary_key)(value)(payer) ) +FC_REFLECT(eosio::chain::key_value_object, (t_id)(primary_key)(value)(payer) ) + +template +DataStream& 
operator << ( DataStream& ds, const float64_t& v ) { + fc::raw::pack(ds, *reinterpret_cast(&v)); + return ds; +} + +template +DataStream& operator << ( DataStream& ds, const float128_t& v ) { + fc::raw::pack(ds, *reinterpret_cast(&v)); + return ds; +} + +#define REFLECT_SECONDARY(type)\ + FC_REFLECT(type, (t_id)(primary_key)(payer)(secondary_key) ) + +REFLECT_SECONDARY(eosio::chain::index64_object) +REFLECT_SECONDARY(eosio::chain::index128_object) +REFLECT_SECONDARY(eosio::chain::index256_object) +REFLECT_SECONDARY(eosio::chain::index_double_object) +REFLECT_SECONDARY(eosio::chain::index_long_double_object) + + diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp new file mode 100644 index 00000000000..096f2c86b85 --- /dev/null +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -0,0 +1,65 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#pragma once + +#include +#include + + +namespace eosio { namespace chain { + + template + class index_set; + + template + class index_utils { + public: + using index_t = Index; + + template + static void walk( const chainbase::database& db, F function ) { + auto const& index = db.get_index().indices(); + const auto& first = index.begin(); + const auto& last = index.end(); + for (auto itr = first; itr != last; ++itr) { + function(*itr); + } + } + }; + + template + class index_set { + public: + static void add_indices( chainbase::database& db ) { + db.add_index(); + } + + template + static void walk_indices( F function ) { + function( index_utils() ); + } + }; + + template + class index_set { + public: + static void add_indices( chainbase::database& db ) { + index_set::add_indices(db); + index_set::add_indices(db); + } + + template + static void walk_indices( F function ) { + index_set::walk_indices(function); + index_set::walk_indices(function); + } + }; + + template + DataStream& operator << ( DataStream& ds, const chainbase::oid& 
oid ) { + fc::raw::pack(ds, oid._id); + return ds; + } +} } \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/generated_transaction_object.hpp b/libraries/chain/include/eosio/chain/generated_transaction_object.hpp index 64c16de4dc6..061e76a4700 100644 --- a/libraries/chain/include/eosio/chain/generated_transaction_object.hpp +++ b/libraries/chain/include/eosio/chain/generated_transaction_object.hpp @@ -115,3 +115,5 @@ namespace eosio { namespace chain { } } // eosio::chain CHAINBASE_SET_INDEX_TYPE(eosio::chain::generated_transaction_object, eosio::chain::generated_transaction_multi_index) + +FC_REFLECT(eosio::chain::generated_transaction_object, (trx_id)(sender)(sender_id)(payer)(delay_until)(expiration)(published)(packed_trx)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/producer_schedule.hpp b/libraries/chain/include/eosio/chain/producer_schedule.hpp index 6ebf064bf20..09528d7b809 100644 --- a/libraries/chain/include/eosio/chain/producer_schedule.hpp +++ b/libraries/chain/include/eosio/chain/producer_schedule.hpp @@ -84,3 +84,4 @@ namespace eosio { namespace chain { FC_REFLECT( eosio::chain::producer_key, (producer_name)(block_signing_key) ) FC_REFLECT( eosio::chain::producer_schedule_type, (version)(producers) ) +FC_REFLECT( eosio::chain::shared_producer_schedule_type, (version)(producers) ) diff --git a/libraries/chain/include/eosio/chain/transaction_object.hpp b/libraries/chain/include/eosio/chain/transaction_object.hpp index 2b59f6ab813..0d049267e8e 100644 --- a/libraries/chain/include/eosio/chain/transaction_object.hpp +++ b/libraries/chain/include/eosio/chain/transaction_object.hpp @@ -51,3 +51,4 @@ namespace eosio { namespace chain { CHAINBASE_SET_INDEX_TYPE(eosio::chain::transaction_object, eosio::chain::transaction_multi_index) +FC_REFLECT(eosio::chain::transaction_object, (expiration)(trx_id)) diff --git a/libraries/fc b/libraries/fc index 62a19a75868..adbf57a6e26 160000 --- a/libraries/fc +++ 
b/libraries/fc @@ -1 +1 @@ -Subproject commit 62a19a758682679e3de27d956986eaf8b016465d +Subproject commit adbf57a6e26eee7bb4e2a5442d60e92bd15bf046 From ed320c06e8c2014a8a66d73129d4a9e192b4629e Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Thu, 30 Aug 2018 10:37:31 -0400 Subject: [PATCH 002/161] add authorization and resource management to integrity hash --- libraries/chain/authorization_manager.cpp | 21 ++++++++++++++++--- libraries/chain/controller.cpp | 16 +++++++------- .../eosio/chain/authorization_manager.hpp | 1 + .../eosio/chain/permission_link_object.hpp | 2 +- .../include/eosio/chain/permission_object.hpp | 3 +-- .../include/eosio/chain/resource_limits.hpp | 3 +++ .../eosio/chain/resource_limits_private.hpp | 7 +++++++ libraries/chain/resource_limits.cpp | 21 +++++++++++++++---- 8 files changed, 57 insertions(+), 17 deletions(-) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index c5b29397bc4..7c1788666e0 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -12,22 +12,37 @@ #include #include #include +#include + namespace eosio { namespace chain { + using authorization_index_set = index_set< + permission_index, + permission_usage_index, + permission_link_index + >; + authorization_manager::authorization_manager(controller& c, database& d) :_control(c),_db(d){} void authorization_manager::add_indices() { - _db.add_index(); - _db.add_index(); - _db.add_index(); + authorization_index_set::add_indices(_db); } void authorization_manager::initialize_database() { _db.create([](auto&){}); /// reserve perm 0 (used else where) } + void authorization_manager::calculate_integrity_hash( fc::sha256::encoder& enc ) const { + authorization_index_set::walk_indices([this, &enc]( auto utils ){ + decltype(utils)::walk(_db, [&enc]( const auto &row ) { + fc::raw::pack(enc, row); + }); + }); + } + + const permission_object& authorization_manager::create_permission( 
account_name account, permission_name name, permission_id_type parent, diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index f9c542b938a..c0d12b0fb3a 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -339,7 +339,7 @@ struct controller_impl { db.undo(); } - ilog( "database initialized with hash: ${hash}", ("hash", calculate_db_hash())); + ilog( "database initialized with hash: ${hash}", ("hash", calculate_integrity_hash())); } @@ -371,19 +371,21 @@ struct controller_impl { }); } - - template - DigestType calculate_db_hash() { - - typename DigestType::encoder enc; + void calculate_integrity_hash( sha256::encoder& enc ) const { controller_index_set::walk_indices([this, &enc]( auto utils ){ decltype(utils)::walk(db, [&enc]( const auto &row ) { fc::raw::pack(enc, row); }); }); + }; + sha256 calculate_integrity_hash() const { + sha256::encoder enc; + calculate_integrity_hash(enc); + authorization.calculate_integrity_hash(enc); + resource_limits.calculate_integrity_hash(enc); return enc.result(); - }; + } /** * Sets fork database head to the genesis state. 
diff --git a/libraries/chain/include/eosio/chain/authorization_manager.hpp b/libraries/chain/include/eosio/chain/authorization_manager.hpp index e244475b996..798f3327dd7 100644 --- a/libraries/chain/include/eosio/chain/authorization_manager.hpp +++ b/libraries/chain/include/eosio/chain/authorization_manager.hpp @@ -27,6 +27,7 @@ namespace eosio { namespace chain { void add_indices(); void initialize_database(); + void calculate_integrity_hash( fc::sha256::encoder& enc ) const; const permission_object& create_permission( account_name account, permission_name name, diff --git a/libraries/chain/include/eosio/chain/permission_link_object.hpp b/libraries/chain/include/eosio/chain/permission_link_object.hpp index 627c1c1203b..9930b647ad8 100644 --- a/libraries/chain/include/eosio/chain/permission_link_object.hpp +++ b/libraries/chain/include/eosio/chain/permission_link_object.hpp @@ -79,4 +79,4 @@ namespace eosio { namespace chain { CHAINBASE_SET_INDEX_TYPE(eosio::chain::permission_link_object, eosio::chain::permission_link_index) -FC_REFLECT(eosio::chain::permission_link_object, (id)(account)(code)(message_type)(required_permission)) +FC_REFLECT(eosio::chain::permission_link_object, (account)(code)(message_type)(required_permission)) diff --git a/libraries/chain/include/eosio/chain/permission_object.hpp b/libraries/chain/include/eosio/chain/permission_object.hpp index 16185390475..a9e572e404c 100644 --- a/libraries/chain/include/eosio/chain/permission_object.hpp +++ b/libraries/chain/include/eosio/chain/permission_object.hpp @@ -110,8 +110,7 @@ namespace eosio { namespace chain { CHAINBASE_SET_INDEX_TYPE(eosio::chain::permission_object, eosio::chain::permission_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::permission_usage_object, eosio::chain::permission_usage_index) -FC_REFLECT(chainbase::oid, (_id)) -FC_REFLECT(eosio::chain::permission_object, (id)(usage_id)(parent)(owner)(name)(last_updated)(auth)) +FC_REFLECT(eosio::chain::permission_object, 
(usage_id)(parent)(owner)(name)(last_updated)(auth)) FC_REFLECT(chainbase::oid, (_id)) FC_REFLECT(eosio::chain::permission_usage_object, (id)(last_used)) diff --git a/libraries/chain/include/eosio/chain/resource_limits.hpp b/libraries/chain/include/eosio/chain/resource_limits.hpp index 121c4608713..b5900e66ce5 100644 --- a/libraries/chain/include/eosio/chain/resource_limits.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits.hpp @@ -43,6 +43,7 @@ namespace eosio { namespace chain { namespace resource_limits { void add_indices(); void initialize_database(); + void calculate_integrity_hash( fc::sha256::encoder& enc ) const; void initialize_account( const account_name& account ); void set_block_parameters( const elastic_limit_parameters& cpu_limit_parameters, const elastic_limit_parameters& net_limit_parameters ); @@ -80,3 +81,5 @@ namespace eosio { namespace chain { namespace resource_limits { } } } /// eosio::chain FC_REFLECT( eosio::chain::resource_limits::account_resource_limit, (used)(available)(max) ) +FC_REFLECT( eosio::chain::resource_limits::ratio, (numerator)(denominator)) +FC_REFLECT( eosio::chain::resource_limits::elastic_limit_parameters, (target)(max)(periods)(max_multiplier)(contract_rate)(expand_rate)) diff --git a/libraries/chain/include/eosio/chain/resource_limits_private.hpp b/libraries/chain/include/eosio/chain/resource_limits_private.hpp index 309387114e8..687a56a4d90 100644 --- a/libraries/chain/include/eosio/chain/resource_limits_private.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits_private.hpp @@ -264,3 +264,10 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_limits_object, CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_usage_object, eosio::chain::resource_limits::resource_usage_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_limits_config_object, eosio::chain::resource_limits::resource_limits_config_index) 
CHAINBASE_SET_INDEX_TYPE(eosio::chain::resource_limits::resource_limits_state_object, eosio::chain::resource_limits::resource_limits_state_index) + +FC_REFLECT(eosio::chain::resource_limits::usage_accumulator, (last_ordinal)(value_ex)(consumed)) + +FC_REFLECT(eosio::chain::resource_limits::resource_limits_object, (owner)(net_weight)(cpu_weight)(ram_bytes)) +FC_REFLECT(eosio::chain::resource_limits::resource_usage_object, (owner)(net_usage)(cpu_usage)(ram_usage)) +FC_REFLECT(eosio::chain::resource_limits::resource_limits_config_object, (cpu_limit_parameters)(net_limit_parameters)(account_cpu_usage_average_window)(account_net_usage_average_window)) +FC_REFLECT(eosio::chain::resource_limits::resource_limits_state_object, (average_block_net_usage)(average_block_cpu_usage)(pending_net_usage)(pending_cpu_usage)(total_net_weight)(total_cpu_weight)(total_ram_bytes)(virtual_net_limit)(virtual_cpu_limit)) \ No newline at end of file diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 4049252f8f8..1e498f3d3de 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -3,10 +3,18 @@ #include #include #include +#include #include namespace eosio { namespace chain { namespace resource_limits { +using resource_index_set = index_set< + resource_limits_index, + resource_usage_index, + resource_limits_state_index, + resource_limits_config_index +>; + static_assert( config::rate_limiting_precision > 0, "config::rate_limiting_precision must be positive" ); static uint64_t update_elastic_limit(uint64_t current_limit, uint64_t average_usage, const elastic_limit_parameters& params) { @@ -39,10 +47,7 @@ void resource_limits_state_object::update_virtual_net_limit( const resource_limi } void resource_limits_manager::add_indices() { - _db.add_index(); - _db.add_index(); - _db.add_index(); - _db.add_index(); + resource_index_set::add_indices(_db); } void resource_limits_manager::initialize_database() { @@ -59,6 +64,14 
@@ void resource_limits_manager::initialize_database() { }); } +void resource_limits_manager::calculate_integrity_hash( fc::sha256::encoder& enc ) const { + resource_index_set::walk_indices([this, &enc]( auto utils ){ + decltype(utils)::walk(_db, [&enc]( const auto &row ) { + fc::raw::pack(enc, row); + }); + }); +} + void resource_limits_manager::initialize_account(const account_name& account) { _db.create([&]( resource_limits_object& bl ) { bl.owner = account; From a0681e676e1bdb256401ea0a831655507d5303c2 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Thu, 30 Aug 2018 10:46:30 -0400 Subject: [PATCH 003/161] add accessor for integrity hash --- libraries/chain/controller.cpp | 4 ++++ libraries/chain/include/eosio/chain/controller.hpp | 2 ++ 2 files changed, 6 insertions(+) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index c0d12b0fb3a..bea18a221ad 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1605,6 +1605,10 @@ block_id_type controller::get_block_id_for_num( uint32_t block_num )const { try return signed_blk->id(); } FC_CAPTURE_AND_RETHROW( (block_num) ) } +sha256 controller::calculate_integrity_hash()const { try { + return my->calculate_integrity_hash(); +} FC_LOG_AND_RETHROW() } + void controller::pop_block() { my->pop_block(); } diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index c457eb68cc9..f0486cf6000 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -199,6 +199,8 @@ namespace eosio { namespace chain { block_id_type get_block_id_for_num( uint32_t block_num )const; + sha256 calculate_integrity_hash()const; + void check_contract_list( account_name code )const; void check_action_list( account_name code, action_name action )const; void check_key_list( const public_key_type& key )const; From 3a066842033d9def01d87b3a7dc50c69a1468745 Mon Sep 17 00:00:00 
2001 From: Bart Wyatt Date: Fri, 31 Aug 2018 13:46:45 -0400 Subject: [PATCH 004/161] initial plumbing for snapshot --- libraries/chain/controller.cpp | 41 ++++++++ .../eosio/chain/contract_table_objects.hpp | 18 +--- .../include/eosio/chain/database_utils.hpp | 97 ++++++++++++++++++- libraries/fc | 2 +- 4 files changed, 137 insertions(+), 21 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index bea18a221ad..4fbc15ee9bb 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -20,6 +20,9 @@ #include #include +#include +#include + #include #include @@ -387,6 +390,44 @@ struct controller_impl { return enc.result(); } + struct json_snapshot { + json_snapshot() + : snapshot(fc::mutable_variant_object()("sections", fc::variants())) + { + + } + + void start_section( const string& section_name ) { + snapshot["sections"].get_array().emplace_back(mutable_variant_object()("name", section_name)("rows", variants())); + + } + + void add_row( variant&& row ) { + fc::mutable_variant_object(snapshot["sections"].get_array().back())["rows"].get_array().emplace_back(row); + } + + void end_section( ) { + + } + + fc::mutable_variant_object snapshot; + }; + + void print_json_snapshot() const { + json_snapshot snapshot; + controller_index_set::walk_indices([this, &snapshot]( auto utils ){ + snapshot.start_section(boost::core::demangle(typeid(typename decltype(utils)::index_t).name())); + decltype(utils)::walk(db, [&snapshot]( const auto &row ) { + fc::variant vrow; + fc::to_variant(row, vrow); + snapshot.add_row(std::move(vrow)); + }); + snapshot.end_section(); + }); + + std::cerr << fc::json::to_pretty_string(snapshot.snapshot) << std::endl; + } + /** * Sets fork database head to the genesis state. 
*/ diff --git a/libraries/chain/include/eosio/chain/contract_table_objects.hpp b/libraries/chain/include/eosio/chain/contract_table_objects.hpp index a38947fe59a..cbed0ceb46a 100644 --- a/libraries/chain/include/eosio/chain/contract_table_objects.hpp +++ b/libraries/chain/include/eosio/chain/contract_table_objects.hpp @@ -4,13 +4,9 @@ */ #pragma once +#include #include #include -#include - -#include - -#include #include #include @@ -221,18 +217,6 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::index_long_double_object, eosio::chain::i FC_REFLECT(eosio::chain::table_id_object, (id)(code)(scope)(table) ) FC_REFLECT(eosio::chain::key_value_object, (t_id)(primary_key)(value)(payer) ) -template -DataStream& operator << ( DataStream& ds, const float64_t& v ) { - fc::raw::pack(ds, *reinterpret_cast(&v)); - return ds; -} - -template -DataStream& operator << ( DataStream& ds, const float128_t& v ) { - fc::raw::pack(ds, *reinterpret_cast(&v)); - return ds; -} - #define REFLECT_SECONDARY(type)\ FC_REFLECT(type, (t_id)(primary_key)(payer)(secondary_key) ) diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp index 096f2c86b85..c6253bdaa57 100644 --- a/libraries/chain/include/eosio/chain/database_utils.hpp +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -4,9 +4,9 @@ */ #pragma once -#include +#include #include - +#include namespace eosio { namespace chain { @@ -57,9 +57,100 @@ namespace eosio { namespace chain { } }; +} } + +namespace fc { + // overloads for to/from_variant + template + void to_variant( const chainbase::oid& oid, variant& v ) { + v = variant(oid._id); + } + + template + void from_variant( const variant& v, chainbase::oid& oid ) { + from_variant(v, oid._id); + } + + inline + void to_variant( const float64_t& f, variant& v ) { + v = variant(*reinterpret_cast(&f)); + } + + inline + void from_variant( const variant& v, float64_t& f ) { + from_variant(v, *reinterpret_cast(&f)); + } + + 
inline + void to_variant( const float128_t& f, variant& v ) { + v = variant(*reinterpret_cast(&f)); + } + + inline + void from_variant( const variant& v, float128_t& f ) { + from_variant(v, *reinterpret_cast(&f)); + } + + inline + void to_variant( const eosio::chain::shared_string& s, variant& v ) { + v = variant(s.c_str()); + } + + inline + void from_variant( const variant& v, eosio::chain::shared_string& s ) { + string _s; + from_variant(v, _s); + s = eosio::chain::shared_string(_s.begin(), _s.end(), s.get_allocator()); + } + + template + void to_variant( const eosio::chain::shared_vector& sv, variant& v ) { + to_variant(std::vector(sv.begin(), sv.end()), v); + } + + template + void from_variant( const variant& v, eosio::chain::shared_vector& sv ) { + std::vector _v; + from_variant(v, _v); + sv = eosio::chain::shared_vector(_v.begin(), _v.end(), sv.get_allocator()); + } + + // overloads for OID packing template DataStream& operator << ( DataStream& ds, const chainbase::oid& oid ) { fc::raw::pack(ds, oid._id); return ds; } -} } \ No newline at end of file + + template + DataStream& operator >> ( DataStream& ds, chainbase::oid& oid ) { + fc::raw::unpack(ds, oid._id); + return ds; + } + +// overloads for softfloat packing + template + DataStream& operator << ( DataStream& ds, const float64_t& v ) { + fc::raw::pack(ds, *reinterpret_cast(&v)); + return ds; + } + + template + DataStream& operator >> ( DataStream& ds, float64_t& v ) { + fc::raw::unpack(ds, *reinterpret_cast(&v)); + return ds; + } + + template + DataStream& operator << ( DataStream& ds, const float128_t& v ) { + fc::raw::pack(ds, *reinterpret_cast(&v)); + return ds; + } + + template + DataStream& operator >> ( DataStream& ds, float128_t& v ) { + fc::raw::unpack(ds, *reinterpret_cast(&v)); + return ds; + } + +} diff --git a/libraries/fc b/libraries/fc index adbf57a6e26..bc6e6b75de1 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit adbf57a6e26eee7bb4e2a5442d60e92bd15bf046 
+Subproject commit bc6e6b75de1862c01c88e3d5c0e46db9122468ee From c7835789a1746f862266e53132e63ae736d15c7d Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Fri, 31 Aug 2018 14:18:51 -0400 Subject: [PATCH 005/161] fix some issues --- libraries/chain/controller.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 4fbc15ee9bb..70fc97011d4 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -398,25 +398,27 @@ struct controller_impl { } void start_section( const string& section_name ) { - snapshot["sections"].get_array().emplace_back(mutable_variant_object()("name", section_name)("rows", variants())); - + current_rows.clear(); + current_section_name = section_name; } void add_row( variant&& row ) { - fc::mutable_variant_object(snapshot["sections"].get_array().back())["rows"].get_array().emplace_back(row); + current_rows.emplace_back(row); } void end_section( ) { - + snapshot["sections"].get_array().emplace_back(mutable_variant_object()("name", std::move(current_section_name))("rows", std::move(current_rows))); } fc::mutable_variant_object snapshot; + string current_section_name; + fc::variants current_rows; }; void print_json_snapshot() const { json_snapshot snapshot; controller_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot.start_section(boost::core::demangle(typeid(typename decltype(utils)::index_t).name())); + snapshot.start_section(boost::core::demangle(typeid(typename decltype(utils)::index_t::value_type).name())); decltype(utils)::walk(db, [&snapshot]( const auto &row ) { fc::variant vrow; fc::to_variant(row, vrow); From edbcd13f2592d98c10cd255fa9deeef5be11d679 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Fri, 31 Aug 2018 16:02:15 -0400 Subject: [PATCH 006/161] move code, abi and packed trx to shared_blob which jsons to base 64 --- .../include/eosio/chain/account_object.hpp | 8 +++---- 
.../include/eosio/chain/database_utils.hpp | 24 ++++++++++++++++++- .../chain/generated_transaction_object.hpp | 4 ++-- libraries/chain/include/eosio/chain/types.hpp | 19 +++++++++++++++ 4 files changed, 48 insertions(+), 7 deletions(-) diff --git a/libraries/chain/include/eosio/chain/account_object.hpp b/libraries/chain/include/eosio/chain/account_object.hpp index 6fd13bf000e..8a945512b3c 100644 --- a/libraries/chain/include/eosio/chain/account_object.hpp +++ b/libraries/chain/include/eosio/chain/account_object.hpp @@ -3,7 +3,7 @@ * @copyright defined in eos/LICENSE.txt */ #pragma once -#include +#include #include #include #include @@ -25,8 +25,8 @@ namespace eosio { namespace chain { digest_type code_version; block_timestamp_type creation_date; - shared_string code; - shared_string abi; + shared_blob code; + shared_blob abi; void set_abi( const eosio::chain::abi_def& a ) { abi.resize( fc::raw::pack_size( a ) ); @@ -81,5 +81,5 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_object, eosio::chain::account_ind CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_sequence_object, eosio::chain::account_sequence_index) -FC_REFLECT(eosio::chain::account_object, (name)(vm_type)(vm_version)(code_version)(code)(creation_date)) +FC_REFLECT(eosio::chain::account_object, (name)(vm_type)(vm_version)(privileged)(last_code_update)(code_version)(creation_date)(code)(abi)) FC_REFLECT(eosio::chain::account_sequence_object, (name)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp index c6253bdaa57..9a855b9c73b 100644 --- a/libraries/chain/include/eosio/chain/database_utils.hpp +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -93,7 +93,7 @@ namespace fc { inline void to_variant( const eosio::chain::shared_string& s, variant& v ) { - v = variant(s.c_str()); + v = variant(std::string(s.begin(), s.end())); } inline 
@@ -103,6 +103,17 @@ namespace fc { s = eosio::chain::shared_string(_s.begin(), _s.end(), s.get_allocator()); } + inline + void to_variant( const eosio::chain::shared_blob& b, variant& v ) { + v = variant(base64_encode(b.data(), b.size())); + } + + inline + void from_variant( const variant& v, eosio::chain::shared_blob& b ) { + string _s = base64_decode(v.as_string()); + b = eosio::chain::shared_blob(_s.begin(), _s.end(), b.get_allocator()); + } + template void to_variant( const eosio::chain::shared_vector& sv, variant& v ) { to_variant(std::vector(sv.begin(), sv.end()), v); @@ -153,4 +164,15 @@ namespace fc { return ds; } + template + DataStream& operator << ( DataStream& ds, const eosio::chain::shared_blob& b ) { + fc::raw::pack(ds, static_cast(b)); + return ds; + } + + template + DataStream& operator >> ( DataStream& ds, eosio::chain::shared_blob& b ) { + fc::raw::unpack(ds, static_cast(b)); + return ds; + } } diff --git a/libraries/chain/include/eosio/chain/generated_transaction_object.hpp b/libraries/chain/include/eosio/chain/generated_transaction_object.hpp index 061e76a4700..6d3e74dd558 100644 --- a/libraries/chain/include/eosio/chain/generated_transaction_object.hpp +++ b/libraries/chain/include/eosio/chain/generated_transaction_object.hpp @@ -3,7 +3,7 @@ * @copyright defined in eos/LICENSE.txt */ #pragma once -#include +#include #include #include @@ -34,7 +34,7 @@ namespace eosio { namespace chain { time_point delay_until; /// this generated transaction will not be applied until the specified time time_point expiration; /// this generated transaction will not be applied after this time time_point published; - shared_string packed_trx; + shared_blob packed_trx; uint32_t set( const transaction& trx ) { auto trxsize = fc::raw::pack_size( trx ); diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index 2a9117a99fc..4610f24c891 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ 
b/libraries/chain/include/eosio/chain/types.hpp @@ -96,6 +96,25 @@ namespace eosio { namespace chain { template using shared_set = boost::interprocess::set, allocator>; + /** + * For bugs in boost interprocess we moved our blob data to shared_string + * this wrapper allows us to continue that while also having a type-level distinction for + * serialization and to/from variant + */ + class shared_blob : public shared_string { + public: + shared_blob() = default; + + template + shared_blob(InputIterator f, InputIterator l, const allocator_type& a) + :shared_string(f,l,a) + {} + + shared_blob(const allocator_type& a) + :shared_string(a) + {} + }; + using action_name = name; using scope_name = name; using account_name = name; From 9e83384e2189e3a99e5331091908adb339958e79 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Fri, 31 Aug 2018 16:39:43 -0400 Subject: [PATCH 007/161] move kv values to shared_blob --- libraries/chain/include/eosio/chain/contract_table_objects.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/contract_table_objects.hpp b/libraries/chain/include/eosio/chain/contract_table_objects.hpp index cbed0ceb46a..0555c28d3ac 100644 --- a/libraries/chain/include/eosio/chain/contract_table_objects.hpp +++ b/libraries/chain/include/eosio/chain/contract_table_objects.hpp @@ -62,7 +62,7 @@ namespace eosio { namespace chain { table_id t_id; uint64_t primary_key; account_name payer = 0; - shared_string value; + shared_blob value; }; using key_value_index = chainbase::shared_multi_index_container< From b145fa39a564f7592de3f9e4148c4a923a4c6e71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9sar=20Rodr=C3=ADguez?= Date: Wed, 5 Sep 2018 10:45:11 +0200 Subject: [PATCH 008/161] Update eosio_build.sh --- eosio_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eosio_build.sh b/eosio_build.sh index 59c76b0b54e..9290ea35e6c 100755 --- a/eosio_build.sh +++ b/eosio_build.sh @@ -258,7 +258,7 @@ 
-DCMAKE_C_COMPILER="${C_COMPILER}" -DWASM_ROOT="${WASM_ROOT}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ - -DCMAKE_INSTALL_PREFIX="/usr/local/eosio" "${SOURCE_DIR}" + -DCMAKE_INSTALL_PREFIX="/usr/local/eosio" ${LOCAL_CMAKE_FLAGS} "${SOURCE_DIR}" then printf "\\n\\t>>>>>>>>>>>>>>>>>>>> CMAKE building EOSIO has exited with the above error.\\n\\n" exit -1 From a8c7da070ae75af977d93caeebd3c069f262be6b Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Wed, 5 Sep 2018 16:20:10 -0400 Subject: [PATCH 009/161] add resource and authorization managers to snapshot --- libraries/chain/authorization_manager.cpp | 9 +++ libraries/chain/controller.cpp | 33 +++++------ .../eosio/chain/authorization_manager.hpp | 2 + .../include/eosio/chain/resource_limits.hpp | 3 + .../chain/include/eosio/chain/snapshot.hpp | 56 +++++++++++++++++++ libraries/chain/resource_limits.cpp | 10 ++++ 6 files changed, 97 insertions(+), 16 deletions(-) create mode 100644 libraries/chain/include/eosio/chain/snapshot.hpp diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index 7c1788666e0..cbf9adf8c57 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -42,6 +42,15 @@ namespace eosio { namespace chain { }); } + void authorization_manager::add_to_snapshot( abstract_snapshot_writer& snapshot ) const { + authorization_index_set::walk_indices([this, &snapshot]( auto utils ){ + snapshot.start_section(); + decltype(utils)::walk(_db, [&snapshot]( const auto &row ) { + snapshot.add_row(row); + }); + snapshot.end_section(); + }); + } const permission_object& authorization_manager::create_permission( account_name account, permission_name name, diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 70fc97011d4..2530d184235 100644 --- 
a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -21,11 +21,11 @@ #include #include -#include #include #include +#include namespace eosio { namespace chain { @@ -374,39 +374,36 @@ struct controller_impl { }); } - void calculate_integrity_hash( sha256::encoder& enc ) const { + sha256 calculate_integrity_hash() const { + sha256::encoder enc; controller_index_set::walk_indices([this, &enc]( auto utils ){ decltype(utils)::walk(db, [&enc]( const auto &row ) { fc::raw::pack(enc, row); }); }); - }; - sha256 calculate_integrity_hash() const { - sha256::encoder enc; - calculate_integrity_hash(enc); authorization.calculate_integrity_hash(enc); resource_limits.calculate_integrity_hash(enc); return enc.result(); } - struct json_snapshot { + struct json_snapshot : public abstract_snapshot_writer { json_snapshot() : snapshot(fc::mutable_variant_object()("sections", fc::variants())) { } - void start_section( const string& section_name ) { + void start_named_section( const string& section_name ) { current_rows.clear(); current_section_name = section_name; } - void add_row( variant&& row ) { + void add_variant_row( variant&& row ) { current_rows.emplace_back(row); } - void end_section( ) { + void end_named_section( ) { snapshot["sections"].get_array().emplace_back(mutable_variant_object()("name", std::move(current_section_name))("rows", std::move(current_rows))); } @@ -415,18 +412,22 @@ struct controller_impl { fc::variants current_rows; }; - void print_json_snapshot() const { - json_snapshot snapshot; + void add_to_snapshot( abstract_snapshot_writer& snapshot ) const { controller_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot.start_section(boost::core::demangle(typeid(typename decltype(utils)::index_t::value_type).name())); + snapshot.start_section(); decltype(utils)::walk(db, [&snapshot]( const auto &row ) { - fc::variant vrow; - fc::to_variant(row, vrow); - snapshot.add_row(std::move(vrow)); + snapshot.add_row(row); }); 
snapshot.end_section(); }); + authorization.add_to_snapshot(snapshot); + resource_limits.add_to_snapshot(snapshot); + } + + void print_json_snapshot() const { + json_snapshot snapshot; + add_to_snapshot(snapshot); std::cerr << fc::json::to_pretty_string(snapshot.snapshot) << std::endl; } diff --git a/libraries/chain/include/eosio/chain/authorization_manager.hpp b/libraries/chain/include/eosio/chain/authorization_manager.hpp index 798f3327dd7..7a31eeacc35 100644 --- a/libraries/chain/include/eosio/chain/authorization_manager.hpp +++ b/libraries/chain/include/eosio/chain/authorization_manager.hpp @@ -6,6 +6,7 @@ #include #include +#include #include #include @@ -28,6 +29,7 @@ namespace eosio { namespace chain { void add_indices(); void initialize_database(); void calculate_integrity_hash( fc::sha256::encoder& enc ) const; + void add_to_snapshot( abstract_snapshot_writer& snapshot ) const; const permission_object& create_permission( account_name account, permission_name name, diff --git a/libraries/chain/include/eosio/chain/resource_limits.hpp b/libraries/chain/include/eosio/chain/resource_limits.hpp index b5900e66ce5..c16fb0f5d82 100644 --- a/libraries/chain/include/eosio/chain/resource_limits.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits.hpp @@ -1,6 +1,7 @@ #pragma once #include #include +#include #include #include @@ -44,6 +45,8 @@ namespace eosio { namespace chain { namespace resource_limits { void add_indices(); void initialize_database(); void calculate_integrity_hash( fc::sha256::encoder& enc ) const; + void add_to_snapshot( abstract_snapshot_writer& snapshot ) const; + void initialize_account( const account_name& account ); void set_block_parameters( const elastic_limit_parameters& cpu_limit_parameters, const elastic_limit_parameters& net_limit_parameters ); diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp new file mode 100644 index 00000000000..ffa59490e5e --- /dev/null +++ 
b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -0,0 +1,56 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#pragma once + +#include +#include + + +namespace eosio { namespace chain { + + template + struct snapshot_section_traits { + static std::string section_name() { + return boost::core::demangle(typeid(T).name()); + } + }; + + + template + struct snapshot_row_traits { + using row_type = T; + using value_type = const T&; + }; + + template + auto to_snapshot_row( const T& value ) -> typename snapshot_row_traits::value_type { + return value; + }; + + class abstract_snapshot_writer { + public: + template + void start_section() { + start_named_section(snapshot_section_traits::section_name()); + } + + template + void add_row( const T& row ) { + fc::variant vrow; + fc::to_variant(to_snapshot_row(row), vrow); + add_variant_row(std::move(vrow)); + } + + void end_section( ) { + end_named_section(); + } + + protected: + virtual void start_named_section( const std::string& section_name ) = 0; + virtual void add_variant_row( fc::variant&& row ) = 0; + virtual void end_named_section() = 0; + }; + +}} diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 1e498f3d3de..b643295b064 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -72,6 +72,16 @@ void resource_limits_manager::calculate_integrity_hash( fc::sha256::encoder& enc }); } +void resource_limits_manager::add_to_snapshot( abstract_snapshot_writer& snapshot ) const { + resource_index_set::walk_indices([this, &snapshot]( auto utils ){ + snapshot.start_section(); + decltype(utils)::walk(_db, [&snapshot]( const auto &row ) { + snapshot.add_row(row); + }); + snapshot.end_section(); + }); +} + void resource_limits_manager::initialize_account(const account_name& account) { _db.create([&]( resource_limits_object& bl ) { bl.owner = account; From 2800c2e19dd1f22f08cddfab251b4e6f8da9bbd9 Mon Sep 17 00:00:00 2001 From: Bart Wyatt 
Date: Mon, 10 Sep 2018 14:16:50 -0400 Subject: [PATCH 010/161] allow sink to decide between variant or fc::raw::packed forms --- libraries/chain/authorization_manager.cpp | 2 +- libraries/chain/controller.cpp | 12 ++--- .../eosio/chain/authorization_manager.hpp | 2 +- .../include/eosio/chain/database_utils.hpp | 52 ++++++++++--------- .../include/eosio/chain/resource_limits.hpp | 2 +- .../chain/include/eosio/chain/snapshot.hpp | 48 +++++++++++++---- libraries/chain/resource_limits.cpp | 2 +- 7 files changed, 76 insertions(+), 44 deletions(-) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index cbf9adf8c57..79442fa7bb1 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -42,7 +42,7 @@ namespace eosio { namespace chain { }); } - void authorization_manager::add_to_snapshot( abstract_snapshot_writer& snapshot ) const { + void authorization_manager::add_to_snapshot( snapshot_writer& snapshot ) const { authorization_index_set::walk_indices([this, &snapshot]( auto utils ){ snapshot.start_section(); decltype(utils)::walk(_db, [&snapshot]( const auto &row ) { diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2530d184235..0ba2affffbe 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -387,23 +387,23 @@ struct controller_impl { return enc.result(); } - struct json_snapshot : public abstract_snapshot_writer { + struct json_snapshot : public snapshot_writer { json_snapshot() : snapshot(fc::mutable_variant_object()("sections", fc::variants())) { } - void start_named_section( const string& section_name ) { + void write_section( const string& section_name ) override { current_rows.clear(); current_section_name = section_name; } - void add_variant_row( variant&& row ) { - current_rows.emplace_back(row); + void write_row( const detail::abstract_snapshot_row_writer& row_writer ) override { + 
current_rows.emplace_back(row_writer.to_variant()); } - void end_named_section( ) { + void write_end_section( ) override { snapshot["sections"].get_array().emplace_back(mutable_variant_object()("name", std::move(current_section_name))("rows", std::move(current_rows))); } @@ -412,7 +412,7 @@ struct controller_impl { fc::variants current_rows; }; - void add_to_snapshot( abstract_snapshot_writer& snapshot ) const { + void add_to_snapshot( snapshot_writer& snapshot ) const { controller_index_set::walk_indices([this, &snapshot]( auto utils ){ snapshot.start_section(); decltype(utils)::walk(db, [&snapshot]( const auto &row ) { diff --git a/libraries/chain/include/eosio/chain/authorization_manager.hpp b/libraries/chain/include/eosio/chain/authorization_manager.hpp index 7a31eeacc35..c5c5d669015 100644 --- a/libraries/chain/include/eosio/chain/authorization_manager.hpp +++ b/libraries/chain/include/eosio/chain/authorization_manager.hpp @@ -29,7 +29,7 @@ namespace eosio { namespace chain { void add_indices(); void initialize_database(); void calculate_integrity_hash( fc::sha256::encoder& enc ) const; - void add_to_snapshot( abstract_snapshot_writer& snapshot ) const; + void add_to_snapshot( snapshot_writer& snapshot ) const; const permission_object& create_permission( account_name account, permission_name name, diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp index 9a855b9c73b..262a1ad3b9f 100644 --- a/libraries/chain/include/eosio/chain/database_utils.hpp +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -60,6 +60,7 @@ namespace eosio { namespace chain { } } namespace fc { + // overloads for to/from_variant template void to_variant( const chainbase::oid& oid, variant& v ) { @@ -139,31 +140,6 @@ namespace fc { return ds; } -// overloads for softfloat packing - template - DataStream& operator << ( DataStream& ds, const float64_t& v ) { - fc::raw::pack(ds, *reinterpret_cast(&v)); - 
return ds; - } - - template - DataStream& operator >> ( DataStream& ds, float64_t& v ) { - fc::raw::unpack(ds, *reinterpret_cast(&v)); - return ds; - } - - template - DataStream& operator << ( DataStream& ds, const float128_t& v ) { - fc::raw::pack(ds, *reinterpret_cast(&v)); - return ds; - } - - template - DataStream& operator >> ( DataStream& ds, float128_t& v ) { - fc::raw::unpack(ds, *reinterpret_cast(&v)); - return ds; - } - template DataStream& operator << ( DataStream& ds, const eosio::chain::shared_blob& b ) { fc::raw::pack(ds, static_cast(b)); @@ -176,3 +152,29 @@ namespace fc { return ds; } } + +// overloads for softfloat packing +template +DataStream& operator << ( DataStream& ds, const float64_t& v ) { + fc::raw::pack(ds, *reinterpret_cast(&v)); + return ds; +} + +template +DataStream& operator >> ( DataStream& ds, float64_t& v ) { + fc::raw::unpack(ds, *reinterpret_cast(&v)); + return ds; +} + +template +DataStream& operator << ( DataStream& ds, const float128_t& v ) { + fc::raw::pack(ds, *reinterpret_cast(&v)); + return ds; +} + +template +DataStream& operator >> ( DataStream& ds, float128_t& v ) { + fc::raw::unpack(ds, *reinterpret_cast(&v)); + return ds; +} + diff --git a/libraries/chain/include/eosio/chain/resource_limits.hpp b/libraries/chain/include/eosio/chain/resource_limits.hpp index c16fb0f5d82..0631711fc64 100644 --- a/libraries/chain/include/eosio/chain/resource_limits.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits.hpp @@ -45,7 +45,7 @@ namespace eosio { namespace chain { namespace resource_limits { void add_indices(); void initialize_database(); void calculate_integrity_hash( fc::sha256::encoder& enc ) const; - void add_to_snapshot( abstract_snapshot_writer& snapshot ) const; + void add_to_snapshot( snapshot_writer& snapshot ) const; void initialize_account( const account_name& account ); void set_block_parameters( const elastic_limit_parameters& cpu_limit_parameters, const elastic_limit_parameters& net_limit_parameters ); 
diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index ffa59490e5e..36b24a4dfc7 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -4,8 +4,10 @@ */ #pragma once +#include #include #include +#include namespace eosio { namespace chain { @@ -29,28 +31,56 @@ namespace eosio { namespace chain { return value; }; - class abstract_snapshot_writer { + namespace detail { + struct abstract_snapshot_row_writer { + virtual void write(std::ostream& out) const = 0; + virtual variant to_variant() const = 0; + + }; + template + struct snapshot_row_writer : abstract_snapshot_row_writer { + explicit snapshot_row_writer( const T& data) + :data(data) {} + + void write(std::ostream& out) const override { + fc::raw::pack(out, data); + } + + fc::variant to_variant() const override { + variant var; + fc::to_variant(data, var); + return var; + } + + const T& data; + }; + + template + snapshot_row_writer make_row_writer( const T& data) { + return snapshot_row_writer(data); + } + } + + class snapshot_writer { public: template void start_section() { - start_named_section(snapshot_section_traits::section_name()); + write_section(snapshot_section_traits::section_name()); } template void add_row( const T& row ) { - fc::variant vrow; - fc::to_variant(to_snapshot_row(row), vrow); - add_variant_row(std::move(vrow)); + write_row(detail::make_row_writer(to_snapshot_row(row))); } void end_section( ) { - end_named_section(); + write_end_section(); } protected: - virtual void start_named_section( const std::string& section_name ) = 0; - virtual void add_variant_row( fc::variant&& row ) = 0; - virtual void end_named_section() = 0; + virtual void write_section( const std::string& section_name ) = 0; + virtual void write_row( const detail::abstract_snapshot_row_writer& row_writer ) = 0; + virtual void write_end_section() = 0; }; }} diff --git 
a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index b643295b064..e67bea44a6c 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -72,7 +72,7 @@ void resource_limits_manager::calculate_integrity_hash( fc::sha256::encoder& enc }); } -void resource_limits_manager::add_to_snapshot( abstract_snapshot_writer& snapshot ) const { +void resource_limits_manager::add_to_snapshot( snapshot_writer& snapshot ) const { resource_index_set::walk_indices([this, &snapshot]( auto utils ){ snapshot.start_section(); decltype(utils)::walk(_db, [&snapshot]( const auto &row ) { From 53180417dca81492dfb08b8fc9e80e60132b7c4d Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Mon, 10 Sep 2018 16:53:51 -0400 Subject: [PATCH 011/161] add writer interface and prototype to JSON --- libraries/chain/authorization_manager.cpp | 21 ++- libraries/chain/controller.cpp | 71 +++++++++-- .../eosio/chain/authorization_manager.hpp | 1 + .../include/eosio/chain/database_utils.hpp | 31 +++-- .../include/eosio/chain/resource_limits.hpp | 1 + .../chain/include/eosio/chain/snapshot.hpp | 120 ++++++++++++++++-- libraries/chain/resource_limits.cpp | 21 ++- 7 files changed, 224 insertions(+), 42 deletions(-) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index 79442fa7bb1..7cbf3ff07da 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -44,11 +44,24 @@ namespace eosio { namespace chain { void authorization_manager::add_to_snapshot( snapshot_writer& snapshot ) const { authorization_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot.start_section(); - decltype(utils)::walk(_db, [&snapshot]( const auto &row ) { - snapshot.add_row(row); + snapshot.write_section([this]( auto& section ){ + decltype(utils)::walk(_db, [§ion]( const auto &row ) { + section.add_row(row); + }); + }); + }); + } + + void 
authorization_manager::read_from_snapshot( snapshot_reader& snapshot ) { + authorization_index_set::walk_indices([this, &snapshot]( auto utils ){ + snapshot.read_section([this]( auto& section ) { + bool done = section.empty(); + while(!done) { + decltype(utils)::create(_db, [§ion]( auto &row ) { + section.read_row(row); + }); + } }); - snapshot.end_section(); }); } diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 0ba2affffbe..52ac5f7f09e 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -387,14 +387,14 @@ struct controller_impl { return enc.result(); } - struct json_snapshot : public snapshot_writer { - json_snapshot() + struct json_snapshot_writer : public snapshot_writer { + json_snapshot_writer() : snapshot(fc::mutable_variant_object()("sections", fc::variants())) { } - void write_section( const string& section_name ) override { + void write_start_section( const string& section_name ) override { current_rows.clear(); current_section_name = section_name; } @@ -412,13 +412,52 @@ struct controller_impl { fc::variants current_rows; }; + struct json_snapshot_reader : public snapshot_reader { + json_snapshot_reader(const fc::variant& snapshot) + :snapshot(snapshot) + ,cur_row(0) + { + + } + + void set_section( const string& section_name ) override { + const auto& sections = snapshot["sections"].get_array(); + for( const auto& section: sections ) { + if (section["name"].as_string() == section_name) { + cur_section = §ion.get_object(); + break; + } + } + } + + bool read_row( detail::abstract_snapshot_row_reader& row_reader ) override { + const auto& rows = (*cur_section)["rows"].get_array(); + row_reader.provide(rows.at(cur_row++)); + return cur_row < rows.size(); + } + + bool empty ( ) override { + const auto& rows = (*cur_section)["rows"].get_array(); + return rows.empty(); + } + + void clear_section() override { + cur_section = nullptr; + cur_row = 0; + } + + const fc::variant& snapshot; + const 
fc::variant_object* cur_section; + int cur_row; + }; + void add_to_snapshot( snapshot_writer& snapshot ) const { controller_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot.start_section(); - decltype(utils)::walk(db, [&snapshot]( const auto &row ) { - snapshot.add_row(row); + snapshot.write_section([this]( auto& section ){ + decltype(utils)::walk(db, [§ion]( const auto &row ) { + section.add_row(row); + }); }); - snapshot.end_section(); }); authorization.add_to_snapshot(snapshot); @@ -426,11 +465,27 @@ struct controller_impl { } void print_json_snapshot() const { - json_snapshot snapshot; + json_snapshot_writer snapshot; add_to_snapshot(snapshot); std::cerr << fc::json::to_pretty_string(snapshot.snapshot) << std::endl; } + void read_from_snapshot( snapshot_reader& snapshot ) { + controller_index_set::walk_indices([this, &snapshot]( auto utils ){ + snapshot.read_section([this]( auto& section ) { + bool done = section.empty(); + while(!done) { + decltype(utils)::create(db, [§ion]( auto &row ) { + section.read_row(row); + }); + } + }); + }); + + authorization.read_from_snapshot(snapshot); + resource_limits.read_from_snapshot(snapshot); + } + /** * Sets fork database head to the genesis state. 
*/ diff --git a/libraries/chain/include/eosio/chain/authorization_manager.hpp b/libraries/chain/include/eosio/chain/authorization_manager.hpp index c5c5d669015..6230db2fc19 100644 --- a/libraries/chain/include/eosio/chain/authorization_manager.hpp +++ b/libraries/chain/include/eosio/chain/authorization_manager.hpp @@ -30,6 +30,7 @@ namespace eosio { namespace chain { void initialize_database(); void calculate_integrity_hash( fc::sha256::encoder& enc ) const; void add_to_snapshot( snapshot_writer& snapshot ) const; + void read_from_snapshot( snapshot_reader& snapshot ); const permission_object& create_permission( account_name account, permission_name name, diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp index 262a1ad3b9f..de4c1a96af3 100644 --- a/libraries/chain/include/eosio/chain/database_utils.hpp +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -27,6 +27,11 @@ namespace eosio { namespace chain { function(*itr); } } + + template + static void create( chainbase::database& db, F cons ) { + db.create(cons); + } }; template @@ -127,6 +132,20 @@ namespace fc { sv = eosio::chain::shared_vector(_v.begin(), _v.end(), sv.get_allocator()); } + template + DataStream& operator << ( DataStream& ds, const eosio::chain::shared_blob& b ) { + fc::raw::pack(ds, static_cast(b)); + return ds; + } + + template + DataStream& operator >> ( DataStream& ds, eosio::chain::shared_blob& b ) { + fc::raw::unpack(ds, static_cast(b)); + return ds; + } +} + +namespace chainbase { // overloads for OID packing template DataStream& operator << ( DataStream& ds, const chainbase::oid& oid ) { @@ -139,18 +158,6 @@ namespace fc { fc::raw::unpack(ds, oid._id); return ds; } - - template - DataStream& operator << ( DataStream& ds, const eosio::chain::shared_blob& b ) { - fc::raw::pack(ds, static_cast(b)); - return ds; - } - - template - DataStream& operator >> ( DataStream& ds, eosio::chain::shared_blob& b ) { - 
fc::raw::unpack(ds, static_cast(b)); - return ds; - } } // overloads for softfloat packing diff --git a/libraries/chain/include/eosio/chain/resource_limits.hpp b/libraries/chain/include/eosio/chain/resource_limits.hpp index 0631711fc64..7dc2deb07d2 100644 --- a/libraries/chain/include/eosio/chain/resource_limits.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits.hpp @@ -46,6 +46,7 @@ namespace eosio { namespace chain { namespace resource_limits { void initialize_database(); void calculate_integrity_hash( fc::sha256::encoder& enc ) const; void add_to_snapshot( snapshot_writer& snapshot ) const; + void read_from_snapshot( snapshot_reader& snapshot ); void initialize_account( const account_name& account ); void set_block_parameters( const elastic_limit_parameters& cpu_limit_parameters, const elastic_limit_parameters& net_limit_parameters ); diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 36b24a4dfc7..47d0f31b734 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -22,8 +22,8 @@ namespace eosio { namespace chain { template struct snapshot_row_traits { - using row_type = T; - using value_type = const T&; + using row_type = std::decay_t; + using value_type = const row_type&; }; template @@ -31,12 +31,17 @@ namespace eosio { namespace chain { return value; }; + template + auto from_snapshot_row( typename snapshot_row_traits::value_type&& row, T& value ) { + value = row; + } + namespace detail { struct abstract_snapshot_row_writer { virtual void write(std::ostream& out) const = 0; virtual variant to_variant() const = 0; - }; + template struct snapshot_row_writer : abstract_snapshot_row_writer { explicit snapshot_row_writer( const T& data) @@ -63,24 +68,111 @@ namespace eosio { namespace chain { class snapshot_writer { public: - template - void start_section() { - write_section(snapshot_section_traits::section_name()); + class 
section_writer { + public: + template + void add_row( const T& row ) { + _writer.write_row(detail::make_row_writer(to_snapshot_row(row))); + } + + private: + friend class snapshot_writer; + section_writer(snapshot_writer& writer) + :_writer(writer) + { + + } + snapshot_writer& _writer; + }; + + template + void write_section(F f) { + write_start_section(snapshot_section_traits::section_name()); + auto section = section_writer(*this); + f(section); + write_end_section(); } - template - void add_row( const T& row ) { - write_row(detail::make_row_writer(to_snapshot_row(row))); + protected: + virtual void write_start_section( const std::string& section_name ) = 0; + virtual void write_row( const detail::abstract_snapshot_row_writer& row_writer ) = 0; + virtual void write_end_section() = 0; + }; + + namespace detail { + struct abstract_snapshot_row_reader { + virtual void provide(std::istream& in) const = 0; + virtual void provide(const fc::variant&) const = 0; + }; + + template + struct snapshot_row_reader : abstract_snapshot_row_reader { + explicit snapshot_row_reader( T& data ) + :data(data) {} + + void provide(std::istream& in) const override { + fc::raw::unpack(in, data); } - void end_section( ) { - write_end_section(); + void provide(const fc::variant& var) const override { + fc::from_variant(var, data); } + T& data; + }; + + template + snapshot_row_reader make_row_reader( T& data ) { + return snapshot_row_reader(data); + } + } + + class snapshot_reader { + public: + class section_reader { + public: + template + auto read_row( T& out ) -> std::enable_if_t, typename snapshot_row_traits::row_type>::value,bool> { + auto reader = detail::make_row_reader(out); + return _reader.read_row(reader); + } + + template + auto read_row( T& out ) -> std::enable_if_t, typename snapshot_row_traits::row_type>::value,bool> { + auto temp = typename snapshot_row_traits::row_type(); + auto reader = detail::make_row_reader(temp); + bool result = _reader.read_row(reader); + 
from_snapshot_row(std::move(temp), out); + return result; + } + + bool empty() { + return _reader.empty(); + } + + private: + friend class snapshot_reader; + section_reader(snapshot_reader& _reader) + :_reader(_reader) + {} + + snapshot_reader& _reader; + + }; + + template + void read_section(F f) { + set_section(snapshot_section_traits::section_name()); + auto section = section_reader(*this); + f(section); + clear_section(); + } + protected: - virtual void write_section( const std::string& section_name ) = 0; - virtual void write_row( const detail::abstract_snapshot_row_writer& row_writer ) = 0; - virtual void write_end_section() = 0; + virtual void set_section( const std::string& section_name ) = 0; + virtual bool read_row( detail::abstract_snapshot_row_reader& row_reader ) = 0; + virtual bool empty( ) = 0; + virtual void clear_section() = 0; }; }} diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index e67bea44a6c..363f23c1150 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -74,11 +74,24 @@ void resource_limits_manager::calculate_integrity_hash( fc::sha256::encoder& enc void resource_limits_manager::add_to_snapshot( snapshot_writer& snapshot ) const { resource_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot.start_section(); - decltype(utils)::walk(_db, [&snapshot]( const auto &row ) { - snapshot.add_row(row); + snapshot.write_section([this]( auto& section ){ + decltype(utils)::walk(_db, [§ion]( const auto &row ) { + section.add_row(row); + }); + }); + }); +} + +void resource_limits_manager::read_from_snapshot( snapshot_reader& snapshot ) { + resource_index_set::walk_indices([this, &snapshot]( auto utils ){ + snapshot.read_section([this]( auto& section ) { + bool done = section.empty(); + while(!done) { + decltype(utils)::create(_db, [§ion]( auto &row ) { + section.read_row(row); + }); + } }); - snapshot.end_section(); }); } From 
e2ec9f9dc48e738b2e07de0e55a7f2ca411da561 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Mon, 10 Sep 2018 18:07:10 -0400 Subject: [PATCH 012/161] add signals for plugins --- libraries/chain/controller.cpp | 6 +++++- libraries/chain/include/eosio/chain/controller.hpp | 5 +++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 52ac5f7f09e..3718604d026 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -214,7 +214,7 @@ struct controller_impl { * */ template - void emit( const Signal& s, Arg&& a ) { + void emit( const Signal& s, Arg&& a ) const { try { s(std::forward(a)); } catch (boost::interprocess::bad_alloc& e) { @@ -462,6 +462,8 @@ struct controller_impl { authorization.add_to_snapshot(snapshot); resource_limits.add_to_snapshot(snapshot); + + emit(self.write_snapshot, snapshot); } void print_json_snapshot() const { @@ -484,6 +486,8 @@ struct controller_impl { authorization.read_from_snapshot(snapshot); resource_limits.read_from_snapshot(snapshot); + + emit(self.read_snapshot, snapshot); } /** diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index f0486cf6000..8e05f5e60d7 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -15,6 +15,8 @@ namespace chainbase { namespace eosio { namespace chain { class authorization_manager; + class snapshot_reader; + class snapshot_writer; namespace resource_limits { class resource_limits_manager; @@ -254,6 +256,9 @@ namespace eosio { namespace chain { signal post_apply_action; */ + signal write_snapshot; + signal read_snapshot; + const apply_handler* find_apply_handler( account_name contract, scope_name scope, action_name act )const; wasm_interface& get_wasm_interface(); From 89cdf6217ae6200bf80ae8d05411cbcd035386e9 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Wed, 12 Sep 2018 
13:47:34 -0400 Subject: [PATCH 013/161] Revert "add signals for plugins" This reverts commit e2ec9f9dc48e738b2e07de0e55a7f2ca411da561. --- libraries/chain/controller.cpp | 6 +----- libraries/chain/include/eosio/chain/controller.hpp | 5 ----- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 3718604d026..52ac5f7f09e 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -214,7 +214,7 @@ struct controller_impl { * */ template - void emit( const Signal& s, Arg&& a ) const { + void emit( const Signal& s, Arg&& a ) { try { s(std::forward(a)); } catch (boost::interprocess::bad_alloc& e) { @@ -462,8 +462,6 @@ struct controller_impl { authorization.add_to_snapshot(snapshot); resource_limits.add_to_snapshot(snapshot); - - emit(self.write_snapshot, snapshot); } void print_json_snapshot() const { @@ -486,8 +484,6 @@ struct controller_impl { authorization.read_from_snapshot(snapshot); resource_limits.read_from_snapshot(snapshot); - - emit(self.read_snapshot, snapshot); } /** diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 8e05f5e60d7..f0486cf6000 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -15,8 +15,6 @@ namespace chainbase { namespace eosio { namespace chain { class authorization_manager; - class snapshot_reader; - class snapshot_writer; namespace resource_limits { class resource_limits_manager; @@ -256,9 +254,6 @@ namespace eosio { namespace chain { signal post_apply_action; */ - signal write_snapshot; - signal read_snapshot; - const apply_handler* find_apply_handler( account_name contract, scope_name scope, action_name act )const; wasm_interface& get_wasm_interface(); From 94510b699bb507dcfccffb9393cc7312939c08a7 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Wed, 12 Sep 2018 16:44:06 -0400 Subject: [PATCH 
014/161] plumbing for loading/restoring from snapshot --- libraries/chain/authorization_manager.cpp | 8 +-- libraries/chain/controller.cpp | 57 ++++++++++++------- .../eosio/chain/authorization_manager.hpp | 4 +- .../chain/include/eosio/chain/controller.hpp | 3 +- .../include/eosio/chain/resource_limits.hpp | 4 +- .../chain/include/eosio/chain/snapshot.hpp | 9 +-- libraries/chain/resource_limits.cpp | 8 +-- 7 files changed, 55 insertions(+), 38 deletions(-) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index 7cbf3ff07da..206b074cc43 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -42,9 +42,9 @@ namespace eosio { namespace chain { }); } - void authorization_manager::add_to_snapshot( snapshot_writer& snapshot ) const { + void authorization_manager::add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { authorization_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot.write_section([this]( auto& section ){ + snapshot->write_section([this]( auto& section ){ decltype(utils)::walk(_db, [§ion]( const auto &row ) { section.add_row(row); }); @@ -52,9 +52,9 @@ namespace eosio { namespace chain { }); } - void authorization_manager::read_from_snapshot( snapshot_reader& snapshot ) { + void authorization_manager::read_from_snapshot( const snapshot_reader_ptr& snapshot ) { authorization_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot.read_section([this]( auto& section ) { + snapshot->read_section([this]( auto& section ) { bool done = section.empty(); while(!done) { decltype(utils)::create(_db, [§ion]( auto &row ) { diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 52ac5f7f09e..1f4f448350a 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -24,9 +24,6 @@ #include -#include -#include - namespace eosio { namespace chain { using 
resource_limits::resource_limits_manager; @@ -149,6 +146,7 @@ struct controller_impl { } if ( read_mode == db_read_mode::SPECULATIVE ) { + EOS_ASSERT( head->block && head->block->transactions.size() == head->trxs.size(), block_validate_exception, "attempting to pop a block that was sparsely loaded from a snapshot"); for( const auto& t : head->trxs ) unapplied_transactions[t->signed_id] = t; } @@ -266,15 +264,14 @@ struct controller_impl { emit( self.irreversible_block, s ); } - void init() { + void init(const snapshot_reader_ptr& snapshot) { - /** - * The fork database needs an initial block_state to be set before - * it can accept any new blocks. This initial block state can be found - * in the database (whose head block state should be irreversible) or - * it would be the genesis state. - */ - if( !head ) { + if (snapshot) { + EOS_ASSERT(!head, fork_database_exception, ""); + + read_from_snapshot(snapshot); + + } else if( !head ) { initialize_fork_db(); // set head to genesis state auto end = blog.read_head(); @@ -325,7 +322,7 @@ struct controller_impl { ("head",head->block_num)("unconfimed", objitr->blocknum) ); } else { auto end = blog.read_head(); - EOS_ASSERT( end && end->block_num() == head->block_num, fork_database_exception, + EOS_ASSERT( !end || end->block_num() == head->block_num, fork_database_exception, "fork database exists but reversible block database does not, replay blockchain", ("blog_head",end->block_num())("head",head->block_num) ); } @@ -451,9 +448,13 @@ struct controller_impl { int cur_row; }; - void add_to_snapshot( snapshot_writer& snapshot ) const { + void add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { + snapshot->write_section([this]( auto §ion ){ + section.template add_row(*fork_db.head()); + }); + controller_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot.write_section([this]( auto& section ){ + snapshot->write_section([this]( auto& section ){ decltype(utils)::walk(db, [§ion]( const auto &row ) { 
section.add_row(row); }); @@ -466,13 +467,25 @@ struct controller_impl { void print_json_snapshot() const { json_snapshot_writer snapshot; - add_to_snapshot(snapshot); + auto snapshot_ptr = shared_ptr(&snapshot, [](snapshot_writer *) {}); + add_to_snapshot(snapshot_ptr); std::cerr << fc::json::to_pretty_string(snapshot.snapshot) << std::endl; } - void read_from_snapshot( snapshot_reader& snapshot ) { + void read_from_snapshot( const snapshot_reader_ptr& snapshot ) { + snapshot->read_section([this]( auto §ion ){ + block_header_state head_header_state; + section.read_row(head_header_state); + + auto head_state = std::make_shared(head_header_state); + fork_db.set(head_state); + fork_db.set_validity(head_state, true); + fork_db.mark_in_current_chain(head_state, true); + head = head_state; + }); + controller_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot.read_section([this]( auto& section ) { + snapshot->read_section([this]( auto& section ) { bool done = section.empty(); while(!done) { decltype(utils)::create(db, [§ion]( auto &row ) { @@ -484,6 +497,8 @@ struct controller_impl { authorization.read_from_snapshot(snapshot); resource_limits.read_from_snapshot(snapshot); + + db.set_revision( head->block_num ); } /** @@ -1490,7 +1505,7 @@ controller::~controller() { } -void controller::startup() { +void controller::startup( const snapshot_reader_ptr& snapshot ) { // ilog( "${c}", ("c",fc::json::to_pretty_string(cfg)) ); my->add_indices(); @@ -1499,7 +1514,7 @@ void controller::startup() { if( !my->head ) { elog( "No head block in fork db, perhaps we need to replay" ); } - my->init(); + my->init(snapshot); } chainbase::database& controller::db()const { return my->db; } @@ -1665,7 +1680,7 @@ const global_property_object& controller::get_global_properties()const { signed_block_ptr controller::fetch_block_by_id( block_id_type id )const { auto state = my->fork_db.get_block(id); - if( state ) return state->block; + if( state && state->block ) return 
state->block; auto bptr = fetch_block_by_number( block_header::num_from_id(id) ); if( bptr && bptr->id() == id ) return bptr; return signed_block_ptr(); @@ -1673,7 +1688,7 @@ signed_block_ptr controller::fetch_block_by_id( block_id_type id )const { signed_block_ptr controller::fetch_block_by_number( uint32_t block_num )const { try { auto blk_state = my->fork_db.get_block_in_current_chain_by_num( block_num ); - if( blk_state ) { + if( blk_state && blk_state->block ) { return blk_state->block; } diff --git a/libraries/chain/include/eosio/chain/authorization_manager.hpp b/libraries/chain/include/eosio/chain/authorization_manager.hpp index 6230db2fc19..52f211de374 100644 --- a/libraries/chain/include/eosio/chain/authorization_manager.hpp +++ b/libraries/chain/include/eosio/chain/authorization_manager.hpp @@ -29,8 +29,8 @@ namespace eosio { namespace chain { void add_indices(); void initialize_database(); void calculate_integrity_hash( fc::sha256::encoder& enc ) const; - void add_to_snapshot( snapshot_writer& snapshot ) const; - void read_from_snapshot( snapshot_reader& snapshot ); + void add_to_snapshot( const snapshot_writer_ptr& snapshot ) const; + void read_from_snapshot( const snapshot_reader_ptr& snapshot ); const permission_object& create_permission( account_name account, permission_name name, diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index f0486cf6000..7146c2e9a14 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -6,6 +6,7 @@ #include #include +#include namespace chainbase { class database; @@ -85,7 +86,7 @@ namespace eosio { namespace chain { controller( const config& cfg ); ~controller(); - void startup(); + void startup( const snapshot_reader_ptr& snapshot = nullptr ); /** * Starts a new pending block session upon which new transactions can diff --git a/libraries/chain/include/eosio/chain/resource_limits.hpp 
b/libraries/chain/include/eosio/chain/resource_limits.hpp index 7dc2deb07d2..b7fb7b24c29 100644 --- a/libraries/chain/include/eosio/chain/resource_limits.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits.hpp @@ -45,8 +45,8 @@ namespace eosio { namespace chain { namespace resource_limits { void add_indices(); void initialize_database(); void calculate_integrity_hash( fc::sha256::encoder& enc ) const; - void add_to_snapshot( snapshot_writer& snapshot ) const; - void read_from_snapshot( snapshot_reader& snapshot ); + void add_to_snapshot( const snapshot_writer_ptr& snapshot ) const; + void read_from_snapshot( const snapshot_reader_ptr& snapshot ); void initialize_account( const account_name& account ); void set_block_parameters( const elastic_limit_parameters& cpu_limit_parameters, const elastic_limit_parameters& net_limit_parameters ); diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 47d0f31b734..2904dae50be 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -9,9 +9,7 @@ #include #include - namespace eosio { namespace chain { - template struct snapshot_section_traits { static std::string section_name() { @@ -19,7 +17,6 @@ namespace eosio { namespace chain { } }; - template struct snapshot_row_traits { using row_type = std::decay_t; @@ -44,7 +41,7 @@ namespace eosio { namespace chain { template struct snapshot_row_writer : abstract_snapshot_row_writer { - explicit snapshot_row_writer( const T& data) + explicit snapshot_row_writer( const T& data ) :data(data) {} void write(std::ostream& out) const override { @@ -99,6 +96,8 @@ namespace eosio { namespace chain { virtual void write_end_section() = 0; }; + using snapshot_writer_ptr = std::shared_ptr; + namespace detail { struct abstract_snapshot_row_reader { virtual void provide(std::istream& in) const = 0; @@ -175,4 +174,6 @@ namespace eosio { namespace chain { virtual 
void clear_section() = 0; }; + using snapshot_reader_ptr = std::shared_ptr; + }} diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 363f23c1150..cc8fd4d0df0 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -72,9 +72,9 @@ void resource_limits_manager::calculate_integrity_hash( fc::sha256::encoder& enc }); } -void resource_limits_manager::add_to_snapshot( snapshot_writer& snapshot ) const { +void resource_limits_manager::add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { resource_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot.write_section([this]( auto& section ){ + snapshot->write_section([this]( auto& section ){ decltype(utils)::walk(_db, [§ion]( const auto &row ) { section.add_row(row); }); @@ -82,9 +82,9 @@ void resource_limits_manager::add_to_snapshot( snapshot_writer& snapshot ) const }); } -void resource_limits_manager::read_from_snapshot( snapshot_reader& snapshot ) { +void resource_limits_manager::read_from_snapshot( const snapshot_reader_ptr& snapshot ) { resource_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot.read_section([this]( auto& section ) { + snapshot->read_section([this]( auto& section ) { bool done = section.empty(); while(!done) { decltype(utils)::create(_db, [§ion]( auto &row ) { From 013acac499a128a409ea8dd644cdc7520f99543d Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Thu, 13 Sep 2018 10:26:28 -0400 Subject: [PATCH 015/161] beginnings of unit test for snapshot --- contracts/CMakeLists.txt | 1 + .../multi_index_test/multi_index_test.cpp | 4 +- contracts/snapshot_test/CMakeLists.txt | 8 ++ contracts/snapshot_test/snapshot_test.abi | 21 +++++ contracts/snapshot_test/snapshot_test.cpp | 80 +++++++++++++++++++ unittests/snapshot_tests.cpp | 22 +++++ 6 files changed, 134 insertions(+), 2 deletions(-) create mode 100644 contracts/snapshot_test/CMakeLists.txt create mode 100644 
contracts/snapshot_test/snapshot_test.abi create mode 100644 contracts/snapshot_test/snapshot_test.cpp create mode 100644 unittests/snapshot_tests.cpp diff --git a/contracts/CMakeLists.txt b/contracts/CMakeLists.txt index 99a32aa53fa..fbd912d0e64 100644 --- a/contracts/CMakeLists.txt +++ b/contracts/CMakeLists.txt @@ -12,6 +12,7 @@ add_subdirectory(eosio.token) add_subdirectory(eosio.msig) add_subdirectory(eosio.sudo) add_subdirectory(multi_index_test) +add_subdirectory(snapshot_test) add_subdirectory(eosio.system) add_subdirectory(identity) add_subdirectory(stltest) diff --git a/contracts/multi_index_test/multi_index_test.cpp b/contracts/multi_index_test/multi_index_test.cpp index 4b3c5a96702..531984f28fe 100644 --- a/contracts/multi_index_test/multi_index_test.cpp +++ b/contracts/multi_index_test/multi_index_test.cpp @@ -30,7 +30,7 @@ struct limit_order { EOSLIB_SERIALIZE( test_k256, (id)(val) ) }; - class multi_index_test { + class snapshot_test { public: ACTION(N(multitest), trigger) { @@ -170,7 +170,7 @@ namespace multi_index_test { /// The apply method implements the dispatch of events to this contract void apply( uint64_t /* receiver */, uint64_t code, uint64_t action ) { require_auth(code); - eosio_assert(eosio::dispatch(code, action), + eosio_assert(eosio::dispatch(code, action), "Could not dispatch"); } } diff --git a/contracts/snapshot_test/CMakeLists.txt b/contracts/snapshot_test/CMakeLists.txt new file mode 100644 index 00000000000..81af479e479 --- /dev/null +++ b/contracts/snapshot_test/CMakeLists.txt @@ -0,0 +1,8 @@ +file(GLOB ABI_FILES "*.abi") +configure_file("${ABI_FILES}" "${CMAKE_CURRENT_BINARY_DIR}" COPYONLY) +add_wast_executable(TARGET snapshot_test + INCLUDE_FOLDERS "${STANDARD_INCLUDE_FOLDERS}" + LIBRARIES libc++ libc eosiolib + DESTINATION_FOLDER ${CMAKE_CURRENT_BINARY_DIR} +) + diff --git a/contracts/snapshot_test/snapshot_test.abi b/contracts/snapshot_test/snapshot_test.abi new file mode 100644 index 00000000000..0bddc7293ce --- /dev/null 
+++ b/contracts/snapshot_test/snapshot_test.abi @@ -0,0 +1,21 @@ +{ + "version": "eosio::abi/1.0", + "types": [], + "structs": [{ + "name": "increment", + "base": "", + "fields": [ + {"name": "value", "type": "uint32" } + ] + } + ], + "actions": [{ + "name": "increment", + "type": "increment", + "ricaridian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "abi_extensions": [] +} diff --git a/contracts/snapshot_test/snapshot_test.cpp b/contracts/snapshot_test/snapshot_test.cpp new file mode 100644 index 00000000000..a63d05643b4 --- /dev/null +++ b/contracts/snapshot_test/snapshot_test.cpp @@ -0,0 +1,80 @@ +#include +#include + +using namespace eosio; + +namespace snapshot_test { + + struct main_record { + uint64_t id; + double index_f64 = 0.0; + long double index_f128 = 0.0L; + uint64_t index_i64 = 0ULL; + uint128_t index_i128 = 0ULL; + key256 index_i256 = key256(); + + auto primary_key() const { return id; } + + auto get_index_f64 () const { return index_f64 ; } + auto get_index_f128 () const { return index_f128; } + auto get_index_i64 () const { return index_i64 ; } + auto get_index_i128 () const { return index_i128; } + const key256& get_index_i256 () const { return index_i256; } + + EOSLIB_SERIALIZE( main_record, (id)(index_f64)(index_f128)(index_i64)(index_i128)(index_i256) ) + }; + + struct increment { + increment(): value(0) {} + increment(uint32_t v): value(v) {} + + uint32_t value; + + EOSLIB_SERIALIZE(increment, (value)) + }; + + using multi_index_type = eosio::multi_index>, + indexed_by< N(byff), const_mem_fun>, + indexed_by< N(byi ), const_mem_fun>, + indexed_by< N(byii), const_mem_fun>, + indexed_by< N(byiiii), const_mem_fun> + >; + + static void exec( uint64_t self, uint32_t value ) { + multi_index_type data(self, self); + auto current = data.begin( ); + if( current == data.end() ) { + data.emplace( self, [&]( auto& r ) { + r.id = value; + r.index_f64 = value; + r.index_f128 = value; + r.index_i64 = value; + r.index_i128 = value; + 
r.index_i256.data()[0] = value; + }); + + } else { + data.modify( current, self, [&]( auto& r ) { + r.id += value; + r.index_f64 += value; + r.index_f128 += value; + r.index_i64 += value; + r.index_i128 += value; + r.index_i256.data()[0] += value; + }); + } + } + +} /// multi_index_test + +namespace multi_index_test { + extern "C" { + /// The apply method implements the dispatch of events to this contract + void apply( uint64_t self, uint64_t code, uint64_t action ) { + require_auth(code); + eosio_assert(action == N(increment), "unsupported action"); + snapshot_test::exec(self, unpack_action_data().value); + } + } +} diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp new file mode 100644 index 00000000000..e5f6f09a5b9 --- /dev/null +++ b/unittests/snapshot_tests.cpp @@ -0,0 +1,22 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ + +#include +#include + +using namespace eosio; +using namespace testing; +using namespace chain; + +BOOST_AUTO_TEST_SUITE(snapshot_tests) + +BOOST_AUTO_TEST_CASE(test_multi_index_snapshot) +{ + tester main; + + +} + +BOOST_AUTO_TEST_SUITE_END() From f938aa3bc97bda08f5b05ccde3c22277c9e4d4e9 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 17 Sep 2018 20:47:25 -0400 Subject: [PATCH 016/161] Disable TCP port 8900 by default on keosd --- programs/keosd/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/keosd/main.cpp b/programs/keosd/main.cpp index 58a42d96b30..c457af3c0e3 100644 --- a/programs/keosd/main.cpp +++ b/programs/keosd/main.cpp @@ -43,7 +43,7 @@ int main(int argc, char** argv) http_plugin::set_defaults({ .address_config_prefix = "", .default_unix_socket_path = keosd::config::key_store_executable_name + ".sock", - .default_http_port = 8900 + .default_http_port = 0 }); app().register_plugin(); if(!app().initialize(argc, argv)) From 1f425f55c796865ae12f584d967c16c663430e95 Mon Sep 17 00:00:00 2001 From: Matt 
Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 17 Sep 2018 20:47:50 -0400 Subject: [PATCH 017/161] cleos use keosd's unix socket by default --- programs/cleos/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index db79b16164d..c240cc9e5fc 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -166,7 +166,7 @@ bfs::path determine_home_directory() } string url = "http://127.0.0.1:8888/"; -string wallet_url = "http://127.0.0.1:8900/"; +string wallet_url = "unix://" + (determine_home_directory() / "eosio-wallet" / (string(key_store_executable_name) + ".sock")).string(); bool no_verify = false; vector headers; From 117fc09232be8c92b1753c1cbd3c97b792c43aae Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 17 Sep 2018 23:43:30 -0400 Subject: [PATCH 018/161] cleos unix socket keosd auto launch Modify cleos to handle auto launch of keosd via unix socket. cleos will now only auto launch if the wallet_url is the default path for the unix socket. Additionally, auto launched keosd will explicitly have its http & https tcp ports disabled. 
--- programs/cleos/main.cpp | 49 ++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 27 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index c240cc9e5fc..ca5a9adccdf 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -166,7 +166,8 @@ bfs::path determine_home_directory() } string url = "http://127.0.0.1:8888/"; -string wallet_url = "unix://" + (determine_home_directory() / "eosio-wallet" / (string(key_store_executable_name) + ".sock")).string(); +string default_wallet_url = "unix://" + (determine_home_directory() / "eosio-wallet" / (string(key_store_executable_name) + ".sock")).string(); +string wallet_url; //to be set to default_wallet_url in main bool no_verify = false; vector headers; @@ -757,25 +758,22 @@ struct set_action_permission_subcommand { }; -bool local_port_used(const string& lo_address, uint16_t port) { +bool local_port_used() { using namespace boost::asio; io_service ios; - boost::asio::ip::tcp::endpoint endpoint(boost::asio::ip::address::from_string(lo_address), port); - boost::asio::ip::tcp::socket socket(ios); - boost::system::error_code ec = error::would_block; - //connecting/failing to connect to localhost should be always fast - don't care about timeouts - socket.async_connect(endpoint, [&](const boost::system::error_code& error) { ec = error; } ); - do { - ios.run_one(); - } while (ec == error::would_block); + local::stream_protocol::endpoint endpoint(wallet_url.substr(strlen("unix://"))); + local::stream_protocol::socket socket(ios); + boost::system::error_code ec; + socket.connect(endpoint, ec); + return !ec; } -void try_local_port( const string& lo_address, uint16_t port, uint32_t duration ) { +void try_local_port(uint32_t duration) { using namespace std::chrono; auto start_time = duration_cast( system_clock::now().time_since_epoch() ).count(); - while ( !local_port_used(lo_address, port)) { + while ( !local_port_used()) { if (duration_cast( 
system_clock::now().time_since_epoch()).count() - start_time > duration ) { std::cerr << "Unable to connect to keosd, if keosd is running please kill the process and try again.\n"; throw connection_exception(fc::log_messages{FC_LOG_MESSAGE(error, "Unable to connect to keosd")}); @@ -795,16 +793,11 @@ void ensure_keosd_running(CLI::App* app) { if (subapp->got_subcommand("listproducers") || subapp->got_subcommand("listbw") || subapp->got_subcommand("bidnameinfo")) // system list* do not require wallet return; } + if (wallet_url != default_wallet_url) + return; - auto parsed_url = parse_url(wallet_url); - auto resolved_url = resolve_url(context, parsed_url); - - if (!resolved_url.is_loopback) - return; - - for (const auto& addr: resolved_url.resolved_addresses) - if (local_port_used(addr, resolved_url.resolved_port)) // Hopefully taken by keosd - return; + if (local_port_used()) + return; boost::filesystem::path binPath = boost::dll::program_location(); binPath.remove_filename(); @@ -816,13 +809,15 @@ void ensure_keosd_running(CLI::App* app) { binPath.remove_filename().remove_filename().append("keosd").append(key_store_executable_name); } - const auto& lo_address = resolved_url.resolved_addresses.front(); if (boost::filesystem::exists(binPath)) { namespace bp = boost::process; binPath = boost::filesystem::canonical(binPath); vector pargs; - pargs.push_back("--http-server-address=" + lo_address + ":" + std::to_string(resolved_url.resolved_port)); + pargs.push_back("--http-server-address"); + pargs.push_back(""); + pargs.push_back("--https-server-address"); + pargs.push_back(""); ::boost::process::child keos(binPath, pargs, bp::std_in.close(), @@ -831,13 +826,12 @@ void ensure_keosd_running(CLI::App* app) { if (keos.running()) { std::cerr << binPath << " launched" << std::endl; keos.detach(); - try_local_port(lo_address, resolved_url.resolved_port, 2000); + try_local_port(2000); } else { - std::cerr << "No wallet service listening on " << lo_address << ":" - << 
std::to_string(resolved_url.resolved_port) << ". Failed to launch " << binPath << std::endl; + std::cerr << "No wallet service listening on " << wallet_url << ". Failed to launch " << binPath << std::endl; } } else { - std::cerr << "No wallet service listening on " << lo_address << ":" << std::to_string(resolved_url.resolved_port) + std::cerr << "No wallet service listening on " << ". Cannot automatically start keosd because keosd was not found." << std::endl; } } @@ -1727,6 +1721,7 @@ int main( int argc, char** argv ) { bindtextdomain(locale_domain, locale_path); textdomain(locale_domain); context = eosio::client::http::create_http_context(); + wallet_url = default_wallet_url; CLI::App app{"Command Line Interface to EOSIO Client"}; app.require_subcommand(); From d85e8c9a91650914f64cd9322b18ede7b66b7354 Mon Sep 17 00:00:00 2001 From: "kein.sung" Date: Tue, 18 Sep 2018 18:46:50 +0900 Subject: [PATCH 019/161] The code to convert `der bytes` to `ecdsa_sig` has been changed. --- plugins/wallet_plugin/se_wallet.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/plugins/wallet_plugin/se_wallet.cpp b/plugins/wallet_plugin/se_wallet.cpp index 6f612a17013..fc1013f776d 100644 --- a/plugins/wallet_plugin/se_wallet.cpp +++ b/plugins/wallet_plugin/se_wallet.cpp @@ -192,7 +192,6 @@ struct se_wallet_impl { return optional{}; fc::ecdsa_sig sig = ECDSA_SIG_new(); - BIGNUM *r = BN_new(), *s = BN_new(); CFErrorRef error = nullptr; CFDataRef digestData = CFDataCreateWithBytesNoCopy(nullptr, (UInt8*)d.data(), d.data_size(), kCFAllocatorNull); @@ -205,10 +204,8 @@ struct se_wallet_impl { } const UInt8* der_bytes = CFDataGetBytePtr(signature); - - BN_bin2bn(der_bytes+4, der_bytes[3], r); - BN_bin2bn(der_bytes+6+der_bytes[3], der_bytes[4+der_bytes[3]+1], s); - ECDSA_SIG_set0(sig, r, s); + long derSize = CFDataGetLength(signature); + d2i_ECDSA_SIG(&sig.obj, &der_bytes, derSize); public_key_data kd; compact_signature compact_sig; From 
196741a6f613e417e5ce4adce1b34dcadab2a921 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 19 Sep 2018 14:56:38 -0400 Subject: [PATCH 020/161] Enable Secure Enclave wallet for 2018 MBP models --- plugins/wallet_plugin/se_wallet.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/wallet_plugin/se_wallet.cpp b/plugins/wallet_plugin/se_wallet.cpp index fc1013f776d..6e1a4fe0e17 100644 --- a/plugins/wallet_plugin/se_wallet.cpp +++ b/plugins/wallet_plugin/se_wallet.cpp @@ -300,7 +300,7 @@ se_wallet::se_wallet() : my(new detail::se_wallet_impl()) { } unsigned int major, minor; if(sscanf(model, "MacBookPro%u,%u", &major, &minor) == 2) { - if(major >= 13 && minor >= 2) { + if((major >= 15) || (major >= 13 && minor >= 2)) { my->populate_existing_keys(); return; } From e702f950e2350a16881778611c0ad4c98a5d5148 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 19 Sep 2018 15:15:29 -0400 Subject: [PATCH 021/161] Remove some unused lambda captures These fill the output with warnings on newer clang --- plugins/chain_api_plugin/chain_api_plugin.cpp | 2 +- plugins/history_api_plugin/history_api_plugin.cpp | 2 +- plugins/test_control_api_plugin/test_control_api_plugin.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 31e576ae4d8..dabdf71f66a 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -37,7 +37,7 @@ struct async_result_visitor : public fc::visitor { #define CALL(api_name, api_handle, api_namespace, call_name, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ - [this, api_handle](string, string body, url_response_callback cb) mutable { \ + [api_handle](string, string body, url_response_callback cb) mutable { \ api_handle.validate(); \ 
try { \ if (body.empty()) body = "{}"; \ diff --git a/plugins/history_api_plugin/history_api_plugin.cpp b/plugins/history_api_plugin/history_api_plugin.cpp index 71f43701728..bd78dede086 100644 --- a/plugins/history_api_plugin/history_api_plugin.cpp +++ b/plugins/history_api_plugin/history_api_plugin.cpp @@ -21,7 +21,7 @@ void history_api_plugin::plugin_initialize(const variables_map&) {} #define CALL(api_name, api_handle, api_namespace, call_name) \ {std::string("/v1/" #api_name "/" #call_name), \ - [this, api_handle](string, string body, url_response_callback cb) mutable { \ + [api_handle](string, string body, url_response_callback cb) mutable { \ try { \ if (body.empty()) body = "{}"; \ auto result = api_handle.call_name(fc::json::from_string(body).as()); \ diff --git a/plugins/test_control_api_plugin/test_control_api_plugin.cpp b/plugins/test_control_api_plugin/test_control_api_plugin.cpp index 91d5535c796..16510b06460 100644 --- a/plugins/test_control_api_plugin/test_control_api_plugin.cpp +++ b/plugins/test_control_api_plugin/test_control_api_plugin.cpp @@ -37,7 +37,7 @@ struct async_result_visitor : public fc::visitor { #define CALL(api_name, api_handle, api_namespace, call_name, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ - [this, api_handle](string, string body, url_response_callback cb) mutable { \ + [api_handle](string, string body, url_response_callback cb) mutable { \ try { \ if (body.empty()) body = "{}"; \ auto result = api_handle.call_name(fc::json::from_string(body).as()); \ From 88d619b1dd1d9c509ff6790f6ac0b334bc188edf Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 19 Sep 2018 15:50:55 -0400 Subject: [PATCH 022/161] Make wabt the default wasm runtime --- libraries/chain/include/eosio/chain/config.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/config.hpp b/libraries/chain/include/eosio/chain/config.hpp 
index 9e1dcd0b073..c0e9806319e 100644 --- a/libraries/chain/include/eosio/chain/config.hpp +++ b/libraries/chain/include/eosio/chain/config.hpp @@ -93,7 +93,7 @@ const static uint32_t setcode_ram_bytes_multiplier = 10; ///< multip const static uint32_t hashing_checktime_block_size = 10*1024; /// call checktime from hashing intrinsic once per this number of bytes -const static eosio::chain::wasm_interface::vm_type default_wasm_runtime = eosio::chain::wasm_interface::vm_type::binaryen; +const static eosio::chain::wasm_interface::vm_type default_wasm_runtime = eosio::chain::wasm_interface::vm_type::wabt; const static uint32_t default_abi_serializer_max_time_ms = 15*1000; ///< default deadline for abi serialization methods /** From 6eb8362a16d4658cbfa09e5d0778dabbe5b19718 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 19 Sep 2018 18:03:26 -0400 Subject: [PATCH 023/161] refactor abi_serializer to simplify threading context around for binary to variant (and vice versa) conversions --- libraries/chain/abi_serializer.cpp | 137 +++++---- .../include/eosio/chain/abi_serializer.hpp | 268 ++++++++++-------- 2 files changed, 226 insertions(+), 179 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 1c654ddc4d8..ca0dcf1e3f1 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -104,7 +104,8 @@ namespace eosio { namespace chain { } void abi_serializer::set_abi(const abi_def& abi, const fc::microseconds& max_serialization_time) { - const fc::time_point deadline = fc::time_point::now() + max_serialization_time; + impl::abi_traverse_context ctx(max_serialization_time); + EOS_ASSERT(starts_with(abi.version, "eosio::abi/1."), unsupported_abi_version_exception, "ABI has an unsupported version"); typedefs.clear(); @@ -118,8 +119,8 @@ namespace eosio { namespace chain { structs[st.name] = st; for( const auto& td : abi.types ) { - EOS_ASSERT(_is_type(td.type, 0, deadline, max_serialization_time), 
invalid_type_inside_abi, "invalid type", ("type",td.type)); - EOS_ASSERT(!_is_type(td.new_type_name, 0, deadline, max_serialization_time), duplicate_abi_type_def_exception, "type already exists", ("new_type_name",td.new_type_name)); + EOS_ASSERT(_is_type(td.type, ctx), invalid_type_inside_abi, "invalid type", ("type",td.type)); + EOS_ASSERT(!_is_type(td.new_type_name, ctx), duplicate_abi_type_def_exception, "type already exists", ("new_type_name",td.new_type_name)); typedefs[td.new_type_name] = td.type; } @@ -146,7 +147,7 @@ namespace eosio { namespace chain { EOS_ASSERT( error_messages.size() == abi.error_messages.size(), duplicate_abi_err_msg_def_exception, "duplicate error message definition detected" ); EOS_ASSERT( variants.size() == abi.variants.value.size(), duplicate_abi_variant_def_exception, "duplicate variant definition detected" ); - validate(deadline, max_serialization_time); + validate(ctx); } bool abi_serializer::is_builtin_type(const type_name& type)const { @@ -180,6 +181,11 @@ namespace eosio { namespace chain { return ends_with(string(type), "?"); } + bool abi_serializer::is_type(const type_name& type, const fc::microseconds& max_serialization_time)const { + impl::abi_traverse_context ctx(max_serialization_time); + return _is_type(type, ctx); + } + type_name abi_serializer::fundamental_type(const type_name& type)const { if( is_array(type) ) { return type_name(string(type).substr(0, type.size()-2)); @@ -197,12 +203,11 @@ namespace eosio { namespace chain { return type; } - bool abi_serializer::_is_type(const type_name& rtype, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - if( ++recursion_depth > max_recursion_depth) return false; + bool abi_serializer::_is_type(const type_name& rtype, impl::abi_traverse_context& ctx )const { 
+ auto h = ctx.enter_scope(); auto type = fundamental_type(rtype); if( built_in_types.find(type) != built_in_types.end() ) return true; - if( typedefs.find(type) != typedefs.end() ) return _is_type(typedefs.find(type)->second, recursion_depth, deadline, max_serialization_time); + if( typedefs.find(type) != typedefs.end() ) return _is_type(typedefs.find(type)->second, ctx); if( structs.find(type) != structs.end() ) return true; if( variants.find(type) != variants.end() ) return true; return false; @@ -214,26 +219,26 @@ namespace eosio { namespace chain { return itr->second; } - void abi_serializer::validate(const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const { + void abi_serializer::validate( impl::abi_traverse_context& ctx )const { for( const auto& t : typedefs ) { try { vector types_seen{t.first, t.second}; auto itr = typedefs.find(t.second); while( itr != typedefs.end() ) { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + ctx.check_deadline(); EOS_ASSERT( find(types_seen.begin(), types_seen.end(), itr->second) == types_seen.end(), abi_circular_def_exception, "Circular reference in type ${type}", ("type",t.first) ); types_seen.emplace_back(itr->second); itr = typedefs.find(itr->second); } } FC_CAPTURE_AND_RETHROW( (t) ) } for( const auto& t : typedefs ) { try { - EOS_ASSERT(_is_type(t.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",t.second) ); + EOS_ASSERT(_is_type(t.second, ctx), invalid_type_inside_abi, "", ("type",t.second) ); } FC_CAPTURE_AND_RETHROW( (t) ) } for( const auto& s : structs ) { try { if( s.second.base != type_name() ) { struct_def current = s.second; vector types_seen{current.name}; while( current.base != type_name() ) { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", 
max_serialization_time) ); + ctx.check_deadline(); const auto& base = get_struct(current.base); //<-- force struct to inherit from another struct EOS_ASSERT( find(types_seen.begin(), types_seen.end(), base.name) == types_seen.end(), abi_circular_def_exception, "Circular reference in struct ${type}", ("type",s.second.name) ); types_seen.emplace_back(base.name); @@ -241,24 +246,24 @@ namespace eosio { namespace chain { } } for( const auto& field : s.second.fields ) { try { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - EOS_ASSERT(_is_type(_remove_bin_extension(field.type), 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",field.type) ); + ctx.check_deadline(); + EOS_ASSERT(_is_type(_remove_bin_extension(field.type), ctx), invalid_type_inside_abi, "", ("type",field.type) ); } FC_CAPTURE_AND_RETHROW( (field) ) } } FC_CAPTURE_AND_RETHROW( (s) ) } for( const auto& s : variants ) { try { for( const auto& type : s.second.types ) { try { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - EOS_ASSERT(_is_type(type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",type) ); + ctx.check_deadline(); + EOS_ASSERT(_is_type(type, ctx), invalid_type_inside_abi, "", ("type",type) ); } FC_CAPTURE_AND_RETHROW( (type) ) } } FC_CAPTURE_AND_RETHROW( (s) ) } for( const auto& a : actions ) { try { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - EOS_ASSERT(_is_type(a.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",a.second) ); + ctx.check_deadline(); + EOS_ASSERT(_is_type(a.second, ctx), invalid_type_inside_abi, "", ("type",a.second) ); } FC_CAPTURE_AND_RETHROW( (a) 
) } for( const auto& t : tables ) { try { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - EOS_ASSERT(_is_type(t.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",t.second) ); + ctx.check_deadline(); + EOS_ASSERT(_is_type(t.second, ctx), invalid_type_inside_abi, "", ("type",t.second) ); } FC_CAPTURE_AND_RETHROW( (t) ) } } @@ -275,27 +280,24 @@ namespace eosio { namespace chain { } void abi_serializer::_binary_to_variant( const type_name& type, fc::datastream& stream, - fc::mutable_variant_object& obj, size_t recursion_depth, - const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const + fc::mutable_variant_object& obj, impl::binary_to_variant_context& ctx )const { - EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); const auto& st = get_struct(type); if( st.base != type_name() ) { - _binary_to_variant(resolve_type(st.base), stream, obj, recursion_depth, deadline, max_serialization_time); + _binary_to_variant(resolve_type(st.base), stream, obj, ctx); } for( const auto& field : st.fields ) { if( !stream.remaining() && ends_with(field.type, "$") ) continue; - obj( field.name, _binary_to_variant(resolve_type(_remove_bin_extension(field.type)), stream, recursion_depth, deadline, max_serialization_time) ); + obj( field.name, _binary_to_variant(resolve_type(_remove_bin_extension(field.type)), stream, ctx) ); } } fc::variant abi_serializer::_binary_to_variant( const type_name& type, fc::datastream& stream, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& 
max_serialization_time )const + impl::binary_to_variant_context& ctx )const { - EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); type_name rtype = resolve_type(type); auto ftype = fundamental_type(rtype); auto btype = built_in_types.find(ftype ); @@ -307,7 +309,7 @@ namespace eosio { namespace chain { fc::raw::unpack(stream, size); vector vars; for( decltype(size.value) i = 0; i < size; ++i ) { - auto v = _binary_to_variant(ftype, stream, recursion_depth, deadline, max_serialization_time); + auto v = _binary_to_variant(ftype, stream, ctx); EOS_ASSERT( !v.is_null(), unpack_exception, "Invalid packed array" ); vars.emplace_back(std::move(v)); } @@ -319,37 +321,43 @@ namespace eosio { namespace chain { } else if ( is_optional(rtype) ) { char flag; fc::raw::unpack(stream, flag); - return flag ? _binary_to_variant(ftype, stream, recursion_depth, deadline, max_serialization_time) : fc::variant(); + return flag ? 
_binary_to_variant(ftype, stream, ctx) : fc::variant(); } else { auto v = variants.find(rtype); if( v != variants.end() ) { fc::unsigned_int select; fc::raw::unpack(stream, select); EOS_ASSERT( (size_t)select < v->second.types.size(), unpack_exception, "Invalid packed variant" ); - return vector{v->second.types[select], _binary_to_variant(v->second.types[select], stream, recursion_depth, deadline, max_serialization_time)}; + return vector{v->second.types[select], _binary_to_variant(v->second.types[select], stream, ctx)}; } } fc::mutable_variant_object mvo; - _binary_to_variant(rtype, stream, mvo, recursion_depth, deadline, max_serialization_time); + _binary_to_variant(rtype, stream, mvo, ctx); EOS_ASSERT( mvo.size() > 0, unpack_exception, "Unable to unpack stream ${type}", ("type", type) ); return fc::variant( std::move(mvo) ); } - fc::variant abi_serializer::_binary_to_variant( const type_name& type, const bytes& binary, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const + fc::variant abi_serializer::_binary_to_variant( const type_name& type, const bytes& binary, impl::binary_to_variant_context& ctx )const { - EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); fc::datastream ds( binary.data(), binary.size() ); - return _binary_to_variant(type, ds, recursion_depth, deadline, max_serialization_time); + return _binary_to_variant(type, ds, ctx); + } + + fc::variant abi_serializer::binary_to_variant(const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time)const { + impl::binary_to_variant_context ctx(max_serialization_time); + return _binary_to_variant(type, binary, ctx); + 
} + + fc::variant abi_serializer::binary_to_variant(const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time)const { + impl::binary_to_variant_context ctx(max_serialization_time); + return _binary_to_variant(type, binary, ctx); } - void abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, bool allow_extensions, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const + void abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, impl::variant_to_binary_context& ctx )const { try { - EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); auto rtype = resolve_type(type); auto btype = built_in_types.find(fundamental_type(rtype)); @@ -359,7 +367,8 @@ namespace eosio { namespace chain { vector vars = var.get_array(); fc::raw::pack(ds, (fc::unsigned_int)vars.size()); for (const auto& var : vars) { - _variant_to_binary(fundamental_type(rtype), var, ds, false, recursion_depth, deadline, max_serialization_time); + auto h2 = ctx.disallow_extensions_unless(false); + _variant_to_binary(fundamental_type(rtype), var, ds, ctx); } } else if ( variants.find(rtype) != variants.end() ) { EOS_ASSERT( var.is_array() && var.size() == 2 && var[size_t(0)].is_string(), abi_exception, "expected array containing variant" ); @@ -367,7 +376,7 @@ namespace eosio { namespace chain { auto it = find(v.types.begin(), v.types.end(), var[size_t(0)].get_string()); EOS_ASSERT( it != v.types.end(), abi_exception, "type is not valid within this variant" ); fc::raw::pack(ds, fc::unsigned_int(it - v.types.begin())); - 
_variant_to_binary( *it, var[size_t(1)], ds, allow_extensions, recursion_depth, deadline, max_serialization_time ); + _variant_to_binary( *it, var[size_t(1)], ds, ctx ); } else { const auto& st = get_struct(rtype); @@ -375,15 +384,19 @@ namespace eosio { namespace chain { const auto& vo = var.get_object(); if( st.base != type_name() ) { - _variant_to_binary(resolve_type(st.base), var, ds, false, recursion_depth, deadline, max_serialization_time); + auto h2 = ctx.disallow_extensions_unless(false); + _variant_to_binary(resolve_type(st.base), var, ds, ctx); } bool missing_extension = false; for( const auto& field : st.fields ) { if( vo.contains( string(field.name).c_str() ) ) { if( missing_extension ) EOS_THROW( pack_exception, "Unexpected '${f}' in variant object", ("f",field.name) ); - _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, allow_extensions && &field == &st.fields.back(), recursion_depth, deadline, max_serialization_time); - } else if( ends_with(field.type, "$") && allow_extensions ) { + { + auto h2 = ctx.disallow_extensions_unless( &field == &st.fields.back() ); + _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, ctx); + } + } else if( ends_with(field.type, "$") && ctx.extensions_allowed() ) { missing_extension = true; } else { EOS_THROW( pack_exception, "Missing '${f}' in variant object", ("f",field.name) ); @@ -394,13 +407,15 @@ namespace eosio { namespace chain { EOS_ASSERT( st.base == type_name(), invalid_type_inside_abi, "support for base class as array not yet implemented" ); uint32_t i = 0; for( const auto& field : st.fields ) { - if( va.size() > i ) - _variant_to_binary(_remove_bin_extension(field.type), va[i], ds, allow_extensions && &field == &st.fields.back(), recursion_depth, deadline, max_serialization_time); - else if( ends_with(field.type, "$") && allow_extensions ) + if( va.size() > i ) { + auto h2 = ctx.disallow_extensions_unless( &field == &st.fields.back() ); + 
_variant_to_binary(_remove_bin_extension(field.type), va[i], ds, ctx); + } else if( ends_with(field.type, "$") && ctx.extensions_allowed() ) { break; - else + } else { EOS_THROW( pack_exception, "Early end to array specifying the fields of struct '${t}'; require input for field '${f}'", ("t", st.name)("f", field.name) ); + } ++i; } } else { @@ -409,22 +424,30 @@ namespace eosio { namespace chain { } } FC_CAPTURE_AND_RETHROW( (type)(var) ) } - bytes abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, bool allow_extensions, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time )const + bytes abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, impl::variant_to_binary_context& ctx )const { try { - EOS_ASSERT( ++recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - if( !_is_type(type, recursion_depth, deadline, max_serialization_time) ) { + auto h = ctx.enter_scope(); + if( !_is_type(type, ctx) ) { return var.as(); } bytes temp( 1024*1024 ); fc::datastream ds(temp.data(), temp.size() ); - _variant_to_binary(type, var, ds, allow_extensions, recursion_depth, deadline, max_serialization_time); + _variant_to_binary(type, var, ds, ctx); temp.resize(ds.tellp()); return temp; } FC_CAPTURE_AND_RETHROW( (type)(var) ) } + bytes abi_serializer::variant_to_binary(const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time)const { + impl::variant_to_binary_context ctx(max_serialization_time); + return _variant_to_binary(type, var, ctx); + } + + void abi_serializer::variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& 
max_serialization_time)const { + impl::variant_to_binary_context ctx(max_serialization_time); + _variant_to_binary(type, var, ds, ctx); + } + type_name abi_serializer::get_action_type(name action)const { auto itr = actions.find(action); if( itr != actions.end() ) return itr->second; diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index a8e9ba3e520..f43128b2de9 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace eosio { namespace chain { @@ -19,6 +20,10 @@ using namespace fc; namespace impl { struct abi_from_variant; struct abi_to_variant; + + struct abi_traverse_context; + struct binary_to_variant_context; + struct variant_to_binary_context; } /** @@ -33,9 +38,7 @@ struct abi_serializer { type_name resolve_type(const type_name& t)const; bool is_array(const type_name& type)const; bool is_optional(const type_name& type)const; - bool is_type(const type_name& type, const fc::microseconds& max_serialization_time)const { - return _is_type(type, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); - } + bool is_type(const type_name& type, const fc::microseconds& max_serialization_time)const; bool is_builtin_type(const type_name& type)const; bool is_integer(const type_name& type) const; int get_integer_size(const type_name& type) const; @@ -49,19 +52,11 @@ struct abi_serializer { optional get_error_message( uint64_t error_code )const; - fc::variant binary_to_variant(const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time)const { - return _binary_to_variant(type, binary, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); - } - bytes variant_to_binary(const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time)const { - return 
_variant_to_binary(type, var, true, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); - } + fc::variant binary_to_variant(const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time)const; + fc::variant binary_to_variant(const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time)const; - fc::variant binary_to_variant(const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time)const { - return _binary_to_variant(type, binary, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); - } - void variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time)const { - _variant_to_binary(type, var, ds, true, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); - } + bytes variant_to_binary(const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time)const; + void variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time)const; template static void to_variant( const T& o, fc::variant& vo, Resolver resolver, const fc::microseconds& max_serialization_time ); @@ -105,29 +100,93 @@ struct abi_serializer { map> built_in_types; void configure_built_in_types(); - fc::variant _binary_to_variant(const type_name& type, const bytes& binary, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; - bytes _variant_to_binary(const type_name& type, const fc::variant& var, bool allow_extensions, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; + fc::variant _binary_to_variant( const type_name& type, const bytes& binary, impl::binary_to_variant_context& ctx )const; + fc::variant _binary_to_variant( const type_name& type, 
fc::datastream& binary, impl::binary_to_variant_context& ctx )const; + void _binary_to_variant( const type_name& type, fc::datastream& stream, + fc::mutable_variant_object& obj, impl::binary_to_variant_context& ctx )const; - fc::variant _binary_to_variant(const type_name& type, fc::datastream& binary, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; - void _variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, bool allow_extensions, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; - - void _binary_to_variant(const type_name& type, fc::datastream& stream, fc::mutable_variant_object& obj, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; + bytes _variant_to_binary( const type_name& type, const fc::variant& var, impl::variant_to_binary_context& ctx )const; + void _variant_to_binary( const type_name& type, const fc::variant& var, + fc::datastream& ds, impl::variant_to_binary_context& ctx )const; static type_name _remove_bin_extension(const type_name& type); - bool _is_type(const type_name& type, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; + bool _is_type( const type_name& type, impl::abi_traverse_context& ctx )const; - void validate(const fc::time_point& deadline, const fc::microseconds& max_serialization_time)const; + void validate( impl::abi_traverse_context& ctx )const; friend struct impl::abi_from_variant; friend struct impl::abi_to_variant; }; namespace impl { + + struct abi_traverse_context { + abi_traverse_context( fc::microseconds max_serialization_time ) + : max_serialization_time( max_serialization_time ), deadline( fc::time_point::now() + max_serialization_time ), recursion_depth(0) + {} + + abi_traverse_context( fc::microseconds max_serialization_time, fc::time_point deadline ) 
+ : max_serialization_time( max_serialization_time ), deadline( deadline ), recursion_depth(0) + {} + + void check_deadline()const { + EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + } + + fc::scoped_exit> enter_scope() { + std::function callback = [old_recursion_depth=recursion_depth, this](){ + recursion_depth = old_recursion_depth; + }; + + ++recursion_depth; + EOS_ASSERT( recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, + "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); + + EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, + "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + + return {std::move(callback)}; + } + + protected: + fc::microseconds max_serialization_time; + fc::time_point deadline; + size_t recursion_depth; + }; + + struct binary_to_variant_context : public abi_traverse_context { + using abi_traverse_context::abi_traverse_context; + + binary_to_variant_context( const abi_traverse_context& ctx ) + : abi_traverse_context(ctx) + {} + }; + + struct variant_to_binary_context : public abi_traverse_context { + using abi_traverse_context::abi_traverse_context; + + variant_to_binary_context( const abi_traverse_context& ctx ) + : abi_traverse_context(ctx) + {} + + fc::scoped_exit> disallow_extensions_unless( bool condition ) { + std::function callback = [old_recursion_depth=recursion_depth, old_allow_extensions=allow_extensions, this](){ + allow_extensions = old_allow_extensions; + }; + + if( !condition ) { + allow_extensions = false; + } + + return {std::move(callback)}; + } + + bool extensions_allowed()const { return allow_extensions; } + + protected: + bool allow_extensions = true; + }; + /** * Determine if a type contains ABI related info, perhaps deeply nested * @tparam T - the type to check @@ 
-187,11 +246,9 @@ namespace impl { * and can be degraded to the normal ::to_variant(...) processing */ template = 1> - static void add( mutable_variant_object &mvo, const char* name, const M& v, Resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &mvo, const char* name, const M& v, Resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); mvo(name,v); } @@ -200,25 +257,22 @@ namespace impl { * for these types we create new ABI aware visitors */ template = 1> - static void add( mutable_variant_object &mvo, const char* name, const M& v, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ); + static void add( mutable_variant_object &mvo, const char* name, const M& v, Resolver resolver, abi_traverse_context& ctx ); /** * template which overloads add for vectors of types which contain ABI information in their trees * for these members we call ::add in order to trigger further processing */ template = 1> - static void add( mutable_variant_object &mvo, const char* name, const vector& v, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &mvo, const char* name, const vector& v, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", 
abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); vector array; array.reserve(v.size()); for (const auto& iter: v) { mutable_variant_object elem_mvo; - add(elem_mvo, "_", iter, resolver, recursion_depth, deadline, max_serialization_time); + add(elem_mvo, "_", iter, resolver, ctx); array.emplace_back(std::move(elem_mvo["_"])); } mvo(name, std::move(array)); @@ -229,14 +283,12 @@ namespace impl { * for these members we call ::add in order to trigger further processing */ template = 1> - static void add( mutable_variant_object &mvo, const char* name, const std::shared_ptr& v, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &mvo, const char* name, const std::shared_ptr& v, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); if( !v ) return; mutable_variant_object obj_mvo; - add(obj_mvo, "_", *v, resolver, recursion_depth, deadline, max_serialization_time); + add(obj_mvo, "_", *v, resolver, ctx); mvo(name, std::move(obj_mvo["_"])); } @@ -245,27 +297,24 @@ namespace impl { { mutable_variant_object& obj_mvo; Resolver& resolver; - size_t recursion_depth; - fc::time_point deadline; - fc::microseconds max_serialization_time; - add_static_variant( mutable_variant_object& o, Resolver& r, size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) - 
:obj_mvo(o), resolver(r), recursion_depth(recursion_depth), deadline(deadline), max_serialization_time(max_serialization_time){} + abi_traverse_context& ctx; + + add_static_variant( mutable_variant_object& o, Resolver& r, abi_traverse_context& ctx ) + :obj_mvo(o), resolver(r), ctx(ctx) {} typedef void result_type; template void operator()( T& v )const { - add(obj_mvo, "_", v, resolver, recursion_depth, deadline, max_serialization_time); + add(obj_mvo, "_", v, resolver, ctx); } }; template - static void add( mutable_variant_object &mvo, const char* name, const fc::static_variant& v, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &mvo, const char* name, const fc::static_variant& v, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); mutable_variant_object obj_mvo; - add_static_variant adder(obj_mvo, resolver, recursion_depth, deadline, max_serialization_time); + add_static_variant adder(obj_mvo, resolver, ctx); v.visit(adder); mvo(name, std::move(obj_mvo["_"])); } @@ -278,11 +327,9 @@ namespace impl { * @return */ template - static void add( mutable_variant_object &out, const char* name, const action& act, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &out, const char* name, const action& act, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, 
"recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); mutable_variant_object mvo; mvo("account", act.account); mvo("name", act.name); @@ -294,7 +341,8 @@ namespace impl { auto type = abi->get_action_type(act.name); if (!type.empty()) { try { - mvo( "data", abi->_binary_to_variant( type, act.data, recursion_depth, deadline, max_serialization_time )); + binary_to_variant_context _ctx(ctx); + mvo( "data", abi->_binary_to_variant( type, act.data, _ctx )); mvo("hex_data", act.data); } catch(...) { // any failure to serialize data, then leave as not serailzed @@ -320,11 +368,9 @@ namespace impl { * @return */ template - static void add( mutable_variant_object &out, const char* name, const packed_transaction& ptrx, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void add( mutable_variant_object &out, const char* name, const packed_transaction& ptrx, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); mutable_variant_object mvo; auto trx = ptrx.get_transaction(); mvo("id", trx.id()); @@ -333,7 +379,7 @@ namespace impl { mvo("packed_context_free_data", ptrx.packed_context_free_data); mvo("context_free_data", ptrx.get_context_free_data()); mvo("packed_trx", ptrx.packed_trx); - add(mvo, "transaction", trx, resolver, recursion_depth, deadline, max_serialization_time); + add(mvo, 
"transaction", trx, resolver, ctx); out(name, std::move(mvo)); } @@ -350,14 +396,11 @@ namespace impl { class abi_to_variant_visitor { public: - abi_to_variant_visitor( mutable_variant_object& _mvo, const T& _val, Resolver _resolver, - size_t _recursion_depth, const fc::time_point& _deadline, const fc::microseconds& max_serialization_time ) + abi_to_variant_visitor( mutable_variant_object& _mvo, const T& _val, Resolver _resolver, abi_traverse_context& _ctx ) :_vo(_mvo) ,_val(_val) ,_resolver(_resolver) - ,_recursion_depth(_recursion_depth) - ,_deadline(_deadline) - ,_max_serialization_time(max_serialization_time) + ,_ctx(_ctx) {} /** @@ -370,16 +413,14 @@ namespace impl { template void operator()( const char* name )const { - abi_to_variant::add( _vo, name, (_val.*member), _resolver, _recursion_depth, _deadline, _max_serialization_time ); + abi_to_variant::add( _vo, name, (_val.*member), _resolver, _ctx ); } private: mutable_variant_object& _vo; const T& _val; Resolver _resolver; - size_t _recursion_depth; - fc::time_point _deadline; - fc::microseconds _max_serialization_time; + abi_traverse_context& _ctx; }; struct abi_from_variant { @@ -388,11 +429,9 @@ namespace impl { * and can be degraded to the normal ::from_variant(...) 
processing */ template = 1> - static void extract( const variant& v, M& o, Resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void extract( const variant& v, M& o, Resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); from_variant(v, o); } @@ -401,25 +440,22 @@ namespace impl { * for these types we create new ABI aware visitors */ template = 1> - static void extract( const variant& v, M& o, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ); + static void extract( const variant& v, M& o, Resolver resolver, abi_traverse_context& ctx ); /** * template which overloads extract for vectors of types which contain ABI information in their trees * for these members we call ::extract in order to trigger further processing */ template = 1> - static void extract( const variant& v, vector& o, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void extract( const variant& v, vector& o, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); const variants& array = v.get_array(); o.clear(); o.reserve( 
array.size() ); for( auto itr = array.begin(); itr != array.end(); ++itr ) { M o_iter; - extract(*itr, o_iter, resolver, recursion_depth, deadline, max_serialization_time); + extract(*itr, o_iter, resolver, ctx); o.emplace_back(std::move(o_iter)); } } @@ -429,14 +465,12 @@ namespace impl { * for these members we call ::extract in order to trigger further processing */ template = 1> - static void extract( const variant& v, std::shared_ptr& o, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void extract( const variant& v, std::shared_ptr& o, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); const variant_object& vo = v.get_object(); M obj; - extract(vo, obj, resolver, recursion_depth, deadline, max_serialization_time); + extract(vo, obj, resolver, ctx); o = std::make_shared(obj); } @@ -446,11 +480,9 @@ namespace impl { * exploded and processed explicitly */ template - static void extract( const variant& v, action& act, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void extract( const variant& v, action& act, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); 
+ auto h = ctx.enter_scope(); const variant_object& vo = v.get_object(); EOS_ASSERT(vo.contains("account"), packed_transaction_type_exception, "Missing account"); EOS_ASSERT(vo.contains("name"), packed_transaction_type_exception, "Missing name"); @@ -472,7 +504,8 @@ namespace impl { if (abi.valid()) { auto type = abi->get_action_type(act.name); if (!type.empty()) { - act.data = std::move( abi->_variant_to_binary( type, data, true, recursion_depth, deadline, max_serialization_time )); + variant_to_binary_context _ctx(ctx); + act.data = std::move( abi->_variant_to_binary( type, data, _ctx )); valid_empty_data = act.data.empty(); } } @@ -493,11 +526,9 @@ namespace impl { } template - static void extract( const variant& v, packed_transaction& ptrx, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + static void extract( const variant& v, packed_transaction& ptrx, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); const variant_object& vo = v.get_object(); EOS_ASSERT(vo.contains("signatures"), packed_transaction_type_exception, "Missing signatures"); EOS_ASSERT(vo.contains("compression"), packed_transaction_type_exception, "Missing compression"); @@ -519,7 +550,7 @@ namespace impl { EOS_ASSERT(vo.contains("transaction"), packed_transaction_type_exception, "Missing transaction"); transaction trx; vector context_free_data; - extract(vo["transaction"], trx, resolver, recursion_depth, deadline, max_serialization_time); + extract(vo["transaction"], trx, resolver, ctx); if( vo.contains("packed_context_free_data") && 
vo["packed_context_free_data"].is_string() && !vo["packed_context_free_data"].as_string().empty() ) { from_variant(vo["packed_context_free_data"], ptrx.packed_context_free_data ); context_free_data = ptrx.get_context_free_data(); @@ -542,14 +573,11 @@ namespace impl { class abi_from_variant_visitor : reflector_verifier_visitor { public: - abi_from_variant_visitor( const variant_object& _vo, T& v, Resolver _resolver, - size_t _recursion_depth, const fc::time_point& _deadline, const fc::microseconds& max_serialization_time ) + abi_from_variant_visitor( const variant_object& _vo, T& v, Resolver _resolver, abi_traverse_context& _ctx ) : reflector_verifier_visitor(v) ,_vo(_vo) ,_resolver(_resolver) - ,_recursion_depth(_recursion_depth) - ,_deadline(_deadline) - ,_max_serialization_time(max_serialization_time) + ,_ctx(_ctx) {} /** @@ -564,49 +592,45 @@ namespace impl { { auto itr = _vo.find(name); if( itr != _vo.end() ) - abi_from_variant::extract( itr->value(), this->obj.*member, _resolver, _recursion_depth, _deadline, _max_serialization_time ); + abi_from_variant::extract( itr->value(), this->obj.*member, _resolver, _ctx ); } private: const variant_object& _vo; Resolver _resolver; - size_t _recursion_depth; - fc::time_point _deadline; - fc::microseconds _max_serialization_time; + abi_traverse_context& _ctx; }; template> - void abi_to_variant::add( mutable_variant_object &mvo, const char* name, const M& v, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + void abi_to_variant::add( mutable_variant_object &mvo, const char* name, const M& v, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit 
${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); mutable_variant_object member_mvo; - fc::reflector::visit( impl::abi_to_variant_visitor( member_mvo, v, resolver, recursion_depth, deadline, max_serialization_time ) ); + fc::reflector::visit( impl::abi_to_variant_visitor( member_mvo, v, resolver, ctx) ); mvo(name, std::move(member_mvo)); } template> - void abi_from_variant::extract( const variant& v, M& o, Resolver resolver, - size_t recursion_depth, const fc::time_point& deadline, const fc::microseconds& max_serialization_time ) + void abi_from_variant::extract( const variant& v, M& o, Resolver resolver, abi_traverse_context& ctx ) { - EOS_ASSERT( ++recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + auto h = ctx.enter_scope(); const variant_object& vo = v.get_object(); - fc::reflector::visit( abi_from_variant_visitor( vo, o, resolver, recursion_depth, deadline, max_serialization_time ) ); + fc::reflector::visit( abi_from_variant_visitor( vo, o, resolver, ctx ) ); } -} +} /// namespace eosio::chain::impl template void abi_serializer::to_variant( const T& o, variant& vo, Resolver resolver, const fc::microseconds& max_serialization_time ) try { mutable_variant_object mvo; - impl::abi_to_variant::add(mvo, "_", o, resolver, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); + impl::abi_traverse_context ctx(max_serialization_time); + impl::abi_to_variant::add(mvo, "_", o, resolver, ctx); vo = std::move(mvo["_"]); } FC_RETHROW_EXCEPTIONS(error, "Failed to serialize type", ("object",o)) template void abi_serializer::from_variant( const variant& v, T& o, Resolver resolver, const fc::microseconds& max_serialization_time ) try { 
- impl::abi_from_variant::extract(v, o, resolver, 0, fc::time_point::now() + max_serialization_time, max_serialization_time); + impl::abi_traverse_context ctx(max_serialization_time); + impl::abi_from_variant::extract(v, o, resolver, ctx); } FC_RETHROW_EXCEPTIONS(error, "Failed to deserialize variant", ("variant",v)) From b7fe029f0194f2032c79defdb99a3cb02877a582 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 19 Sep 2018 20:22:24 -0400 Subject: [PATCH 024/161] major improvements to error messages thrown from `abi_serializer::variant_to_binary` --- libraries/chain/abi_serializer.cpp | 46 ++++++-- .../include/eosio/chain/abi_serializer.hpp | 50 +++++++- unittests/abi_tests.cpp | 111 ++++++++++++++++-- 3 files changed, 181 insertions(+), 26 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index ca0dcf1e3f1..6c6752dad6f 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -366,20 +366,36 @@ namespace eosio { namespace chain { } else if ( is_array(rtype) ) { vector vars = var.get_array(); fc::raw::pack(ds, (fc::unsigned_int)vars.size()); + auto h1 = ctx.push_to_path( fundamental_type(rtype), ctx.is_path_empty() ); + int64_t i = 0; for (const auto& var : vars) { + ctx.set_array_index_of_path_back(i); auto h2 = ctx.disallow_extensions_unless(false); _variant_to_binary(fundamental_type(rtype), var, ds, ctx); + ++i; } } else if ( variants.find(rtype) != variants.end() ) { - EOS_ASSERT( var.is_array() && var.size() == 2 && var[size_t(0)].is_string(), abi_exception, "expected array containing variant" ); auto& v = variants.find(rtype)->second; - auto it = find(v.types.begin(), v.types.end(), var[size_t(0)].get_string()); - EOS_ASSERT( it != v.types.end(), abi_exception, "type is not valid within this variant" ); + auto h1 = ctx.push_to_path( v.name, ctx.is_path_empty() ); + EOS_ASSERT( var.is_array() && var.size() == 2, pack_exception, + "Expected input to be an array of two items while 
processing variant '${p}'", ("p", ctx.get_path_string()) ); + EOS_ASSERT( var[size_t(0)].is_string(), pack_exception, + "Encountered non-string as first item of input array while processing variant '${p}'", ("p", ctx.get_path_string()) ); + auto variant_type_str = var[size_t(0)].get_string(); + auto it = find(v.types.begin(), v.types.end(), variant_type_str); + EOS_ASSERT( it != v.types.end(), pack_exception, + "Specified type '${t}' in input array is not valid within the variant '${p}'", + ("t", variant_type_str)("p", ctx.get_path_string()) ); fc::raw::pack(ds, fc::unsigned_int(it - v.types.begin())); + std::stringstream s; + s << ""; + auto h3 = ctx.push_to_path(s.str()); _variant_to_binary( *it, var[size_t(1)], ds, ctx ); } else { const auto& st = get_struct(rtype); + auto h1 = ctx.push_to_path( st.name, ctx.is_path_empty() ); + if( var.is_object() ) { const auto& vo = var.get_object(); @@ -387,39 +403,45 @@ namespace eosio { namespace chain { auto h2 = ctx.disallow_extensions_unless(false); _variant_to_binary(resolve_type(st.base), var, ds, ctx); } - bool missing_extension = false; + bool extension_encountered = false; for( const auto& field : st.fields ) { if( vo.contains( string(field.name).c_str() ) ) { - if( missing_extension ) - EOS_THROW( pack_exception, "Unexpected '${f}' in variant object", ("f",field.name) ); + if( extension_encountered ) + EOS_THROW( pack_exception, "Unexpected field '${f}' found in input object while processing struct '${p}'", ("f",field.name)("p",ctx.get_path_string()) ); { auto h2 = ctx.disallow_extensions_unless( &field == &st.fields.back() ); + auto h3 = ctx.push_to_path( field.name ); _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, ctx); } } else if( ends_with(field.type, "$") && ctx.extensions_allowed() ) { - missing_extension = true; + extension_encountered = true; + } else if( extension_encountered ) { + EOS_THROW( pack_exception, "Encountered field '${f}' without binary extension designation while 
processing struct '${p}'", ("f",field.name)("p",ctx.get_path_string()) ); } else { - EOS_THROW( pack_exception, "Missing '${f}' in variant object", ("f",field.name) ); + EOS_THROW( pack_exception, "Missing field '${f}' in input object while processing struct '${p}'", ("f",field.name)("p",ctx.get_path_string()) ); } } } else if( var.is_array() ) { const auto& va = var.get_array(); - EOS_ASSERT( st.base == type_name(), invalid_type_inside_abi, "support for base class as array not yet implemented" ); + EOS_ASSERT( st.base == type_name(), invalid_type_inside_abi, + "Using input array to specify the fields of the derived struct '${p}'; input arrays are currently only allowed for structs without a base", + ("p",ctx.get_path_string()) ); uint32_t i = 0; for( const auto& field : st.fields ) { if( va.size() > i ) { auto h2 = ctx.disallow_extensions_unless( &field == &st.fields.back() ); + auto h3 = ctx.push_to_path( field.name ); _variant_to_binary(_remove_bin_extension(field.type), va[i], ds, ctx); } else if( ends_with(field.type, "$") && ctx.extensions_allowed() ) { break; } else { - EOS_THROW( pack_exception, "Early end to array specifying the fields of struct '${t}'; require input for field '${f}'", - ("t", st.name)("f", field.name) ); + EOS_THROW( pack_exception, "Early end to input array specifying the fields of struct '${p}'; require input for field '${f}'", + ("p", ctx.get_path_string())("f", field.name) ); } ++i; } } else { - EOS_THROW( pack_exception, "Failed to serialize struct '${t}' in variant object", ("t", st.name)); + EOS_THROW( pack_exception, "Unexpected input encountered while processing struct '${p}'", ("p",ctx.get_path_string()) ); } } } FC_CAPTURE_AND_RETHROW( (type)(var) ) } diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index f43128b2de9..9682f0a1506 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ 
b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -181,10 +181,58 @@ namespace impl { return {std::move(callback)}; } + fc::scoped_exit> push_to_path( const string& n, bool condition = true ) { + + if( !condition ) { + fc::scoped_exit> h([](){}); + h.cancel(); + return h; + } + + std::function callback = [this](){ + EOS_ASSERT( path.size() > 0 && path_array_index.size() > 0, abi_exception, + "invariant failure in variant_to_binary_context: path is empty on scope exit" ); + path.pop_back(); + path_array_index.pop_back(); + }; + + path.push_back( n ); + path_array_index.push_back( -1 ); + + return {std::move(callback)}; + } + + void set_array_index_of_path_back( int64_t i ) { + EOS_ASSERT( path_array_index.size() > 0, abi_exception, "path is empty" ); + path_array_index.back() = i; + } + bool extensions_allowed()const { return allow_extensions; } + bool is_path_empty()const { return path.size() == 0; } + + string get_path_string()const { + EOS_ASSERT( path.size() == path_array_index.size(), abi_exception, + "invariant failure in variant_to_binary_context: mismatch in path vector sizes" ); + + std::stringstream s; + for( size_t i = 0, n = path.size(); i < n; ++i ) { + s << path[i]; + if( path_array_index[i] >= 0 ) { + s << "[" << path_array_index[i] << "]"; + } + if( (i + 1) != n ) { // if not the last element in the path + s << "."; + } + } + + return s.str(); + } + protected: - bool allow_extensions = true; + bool allow_extensions = true; + vector path; + vector path_array_index; }; /** diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index 44677b261c7..f2f415825c9 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -3528,6 +3528,8 @@ BOOST_AUTO_TEST_CASE(abi_deep_structs_validate) BOOST_AUTO_TEST_CASE(variants) { + using eosio::testing::fc_exception_message_starts_with; + auto duplicate_variant_abi = R"({ "version": "eosio::abi/1.1", "variants": [ @@ -3575,13 +3577,18 @@ BOOST_AUTO_TEST_CASE(variants) BOOST_CHECK_THROW( 
abi_serializer( fc::json::from_string(variant_abi_invalid_type).as(), max_serialization_time ), invalid_type_inside_abi ); // expected array containing variant - BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"(9)"), max_serialization_time), abi_exception ); - BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"([4])"), max_serialization_time), abi_exception ); - BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"([4, 5])"), max_serialization_time), abi_exception ); - BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"(["4", 5, 6])"), max_serialization_time), abi_exception ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(9)"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Expected input to be an array of two items while processing variant 'v1'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"([4])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Expected input to be an array of two items while processing variant 'v1") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"([4, 5])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Encountered non-string as first item of input array while processing variant 'v1") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["4", 5, 6])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Expected input to be an array of two items while processing variant 'v1'") ); // type is not valid within this variant - BOOST_CHECK_THROW( abis.variant_to_binary("v1", fc::json::from_string(R"(["int9", 21])"), max_serialization_time), abi_exception ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["int9", 21])"), max_serialization_time), + pack_exception, 
fc_exception_message_starts_with("Specified type 'int9' in input array is not valid within the variant 'v1'") ); verify_round_trip_conversion(abis, "v1", R"(["int8",21])", "0015"); verify_round_trip_conversion(abis, "v1", R"(["string","abcd"])", "010461626364"); @@ -3594,6 +3601,8 @@ BOOST_AUTO_TEST_CASE(variants) BOOST_AUTO_TEST_CASE(extend) { + using eosio::testing::fc_exception_message_starts_with; + auto abi = R"({ "version": "eosio::abi/1.1", "structs": [ @@ -3603,18 +3612,27 @@ BOOST_AUTO_TEST_CASE(extend) {"name": "i2", "type": "int8$"}, {"name": "a", "type": "int8[]$"}, {"name": "o", "type": "int8?$"}, + ]}, + {"name": "s2", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", "type": "int8$"}, + {"name": "i2", "type": "int8"}, ]} ], })"; + // NOTE: Ideally this ABI would be rejected during validation for an improper definition for struct "s2". + // Such a check is not yet implemented during validation, but it can check during serialization. try { abi_serializer abis(fc::json::from_string(abi).as(), max_serialization_time ); // missing i1 - BOOST_CHECK_THROW( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5})"), max_serialization_time), abi_exception ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5})"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Missing field 'i1' in input object while processing struct") ); // Unexpected 'a' - BOOST_CHECK_THROW( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5,"i1":6,"a":[8,9,10]})"), max_serialization_time), pack_exception ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5,"i1":6,"a":[8,9,10]})"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Unexpected field 'a' found in input object while processing struct") ); verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6})", "0506"); verify_round_trip_conversion(abis, "s", 
R"({"i0":5,"i1":6,"i2":7})", "050607"); @@ -3627,6 +3645,10 @@ BOOST_AUTO_TEST_CASE(extend) verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10]])", "0506070308090a", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10]})"); verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10],null])", "0506070308090a00", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":null})"); verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10],31])", "0506070308090a011f", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":31})"); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"i0":1})"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Encountered field 'i2' without binary extension designation while processing struct") ); + } FC_LOG_AND_RETHROW() } @@ -3660,10 +3682,10 @@ BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_array) abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"([])"), max_serialization_time), - pack_exception, fc_exception_message_starts_with("Early end to array specifying the fields of struct") ); + pack_exception, fc_exception_message_starts_with("Early end to input array specifying the fields of struct") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"([1,2])"), max_serialization_time), - pack_exception, fc_exception_message_starts_with("Early end to array specifying the fields of struct") ); + pack_exception, fc_exception_message_starts_with("Early end to input array specifying the fields of struct") ); verify_round_trip_conversion(abis, "s", R"([1,2,3])", "010203", R"({"i0":1,"i1":2,"i2":3})"); @@ -3672,7 +3694,7 @@ BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_array) BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_object) { - using eosio::testing::fc_exception_message_is; + using eosio::testing::fc_exception_message_starts_with; auto abi = R"({ "version": "eosio::abi/1.0", @@ 
-3692,10 +3714,10 @@ BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_object) abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({})"), max_serialization_time), - pack_exception, fc_exception_message_is("Missing 'f0' in variant object") ); + pack_exception, fc_exception_message_starts_with("Missing field 'f0' in input object") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":{"i0":1}})"), max_serialization_time), - pack_exception, fc_exception_message_is("Missing 'i1' in variant object") ); + pack_exception, fc_exception_message_starts_with("Missing field 'i1' in input object") ); verify_round_trip_conversion(abis, "s2", R"({"f0":{"i0":1,"i1":2},"i2":3})", "010203"); @@ -3723,12 +3745,75 @@ BOOST_AUTO_TEST_CASE(abi_serialize_json_mismatching_type) abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":1,"i1":2})"), max_serialization_time), - pack_exception, fc_exception_message_is("Failed to serialize struct 's1' in variant object") ); + pack_exception, fc_exception_message_is("Unexpected input encountered while processing struct 's2.f0'") ); verify_round_trip_conversion(abis, "s2", R"({"f0":{"i0":1},"i1":2})", "0102"); } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(abi_serialize_detailed_error_messages) +{ + using eosio::testing::fc_exception_message_is; + + auto abi = R"({ + "version": "eosio::abi/1.1", + "types": [ + {"new_type_name": "foo", "type": "s2"}, + {"new_type_name": "bar", "type": "foo"}, + ], + "structs": [ + {"name": "s1", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", "type": "int8"} + ]}, + {"name": "s2", "base": "", "fields": [ + {"name": "f0", "type": "s1"}, + {"name": "i2", "type": "int8"} + ]}, + {"name": "s3", "base": "s1", "fields": [ + {"name": "i2", "type": 
"int8"}, + {"name": "f3", "type": "v2"}, + {"name": "f4", "type": "foo$"}, + {"name": "f5", "type": "s1$"} + ]}, + {"name": "s4", "base": "", "fields": [ + {"name": "f0", "type": "int8[]"}, + {"name": "f1", "type": "s1[]"} + ]}, + ], + "variants": [ + {"name": "v1", "types": ["s3", "int8", "s4"]}, + {"name": "v2", "types": ["foo", "bar"]}, + ], + })"; + + try { + abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("bar", fc::json::from_string(R"({"f0":{"i0":1},"i2":3})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's2.f0'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i2":3})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's3'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["s2",{}]})"), max_serialization_time), + pack_exception, fc_exception_message_is("Specified type 's2' in input array is not valid within the variant 's3.f3'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11},"i2":13}]})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's3.f3..f0'") ); + + verify_round_trip_conversion(abis, "s3", R"({"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11,"i1":12},"i2":13}]})", "010203010b0c0d"); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["s3",{"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11,"i1":12},"i2":13}],"f5":0}])"), max_serialization_time), + pack_exception, fc_exception_message_is("Unexpected field 'f5' found in input object while processing struct 'v1.'") ); + + BOOST_CHECK_EXCEPTION( 
abis.variant_to_binary("v1", fc::json::from_string(R"(["s4",{"f0":[0,1],"f1":[{"i0":2,"i1":3},{"i1":5}]}])"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 'v1..f1[1]'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2[]", fc::json::from_string(R"([{"f0":{"i0":1,"i1":2},"i2":3},{"f0":{"i0":4},"i2":6}])"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's2[1].f0'") ); + } FC_LOG_AND_RETHROW() +} BOOST_AUTO_TEST_SUITE_END() From da473e0d647cc211a1710d8f54bb5f514be3c1f0 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 18 Sep 2018 06:52:26 -0500 Subject: [PATCH 025/161] Fixed reporting block walker output. GH #5674 --- tests/Node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index 2795ba63c5e..dfacfd56cc5 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -949,7 +949,7 @@ def processCleosCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, ex return None if exitOnError and trans is None: - Utils.cmdError("could not \"%s\" - %s" % (cmdDesc,exitMsg)) + Utils.cmdError("could not \"%s\". %s" % (cmdDesc,exitMsg)) errorExit("Failed to \"%s\"" % (cmdDesc)) return trans From 94b8167acd2532fa8cf93637cc1d71b335e7000d Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 18 Sep 2018 11:17:26 -0500 Subject: [PATCH 026/161] Refactored arePortsAvailable to Utils and added support for passing in single port. 
GH #5674 --- tests/Cluster.py | 31 +------------------------------ tests/testUtils.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 8a9a1cb4b8f..def90d69598 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -12,7 +12,6 @@ import sys import random import json -import socket import errno from core_symbol import CORE_SYMBOL @@ -131,7 +130,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne producerFlag="--producers %s" % (totalProducers) tries = 30 - while not Cluster.arePortsAvailable(set(range(self.port, self.port+totalNodes+1))): + while not Utils.arePortsAvailable(set(range(self.port, self.port+totalNodes+1))): Utils.Print("ERROR: Another process is listening on nodeos default port. wait...") if tries == 0: return False @@ -366,34 +365,6 @@ def initAccountKeys(account, keys): return True - @staticmethod - def arePortsAvailable(ports): - """Check if specified ports are available for listening on.""" - assert(ports) - assert(isinstance(ports, set)) - - for port in ports: - if Utils.Debug: Utils.Print("Checking if port %d is available." 
% (port)) - assert(isinstance(port, int)) - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - - try: - s.bind(("127.0.0.1", port)) - except socket.error as e: - if e.errno == errno.EADDRINUSE: - Utils.Print("ERROR: Port %d is already in use" % (port)) - else: - # something else raised the socket.error exception - Utils.Print("ERROR: Unknown exception while trying to listen on port %d" % (port)) - Utils.Print(e) - return False - finally: - s.close() - - return True - - # Initialize the default nodes (at present just the root node) def initializeNodes(self, defproduceraPrvtKey=None, defproducerbPrvtKey=None, onlyBios=False): port=Cluster.__BiosPort if onlyBios else self.port diff --git a/tests/testUtils.py b/tests/testUtils.py index d2a69231513..b048e96ce44 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -6,6 +6,7 @@ import inspect import json import shlex +import socket from sys import stdout from sys import exit import traceback @@ -171,6 +172,35 @@ def runCmdReturnJson(cmd, trace=False, silentErrors=False): cmdArr=shlex.split(cmd) return Utils.runCmdArrReturnJson(cmdArr, trace=trace, silentErrors=silentErrors) + @staticmethod + def arePortsAvailable(ports): + """Check if specified port (as int) or ports (as set) is/are available for listening on.""" + assert(ports) + if isinstance(ports, int): + ports={ports} + assert(isinstance(ports, set)) + + for port in ports: + if Utils.Debug: Utils.Print("Checking if port %d is available." 
% (port)) + assert(isinstance(port, int)) + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + try: + s.bind(("127.0.0.1", port)) + except socket.error as e: + if e.errno == errno.EADDRINUSE: + Utils.Print("ERROR: Port %d is already in use" % (port)) + else: + # something else raised the socket.error exception + Utils.Print("ERROR: Unknown exception while trying to listen on port %d" % (port)) + Utils.Print(e) + return False + finally: + s.close() + + return True + ########################################################################################### class Account(object): From 9aee5911bbe758bef8b380c6c9ee8c46a8569938 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 18 Sep 2018 11:18:49 -0500 Subject: [PATCH 027/161] Added pgrepCmd function to Utils to construct the appropriate pgrep command to find running processes. GH #5674 --- tests/Cluster.py | 8 +------- tests/testUtils.py | 10 ++++++++++ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index def90d69598..e21c15910e7 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -4,7 +4,6 @@ import glob import shutil import os -import platform import re import string import signal @@ -1175,12 +1174,7 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, dontKil @staticmethod def pgrepEosServers(timeout=None): - pgrepOpts="-fl" - # pylint: disable=deprecated-method - if platform.linux_distribution()[0] in ["Ubuntu", "LinuxMint", "Fedora","CentOS Linux","arch"]: - pgrepOpts="-a" - - cmd="pgrep %s %s" % (pgrepOpts, Utils.EosServerName) + cmd=Utils.pgrepCmd(Utils.EosServerName) def myFunc(): psOut=None diff --git a/tests/testUtils.py b/tests/testUtils.py index b048e96ce44..c9a7faa3702 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -1,6 +1,7 @@ import subprocess import time import os +import platform from collections import deque from collections import 
namedtuple import inspect @@ -201,6 +202,15 @@ def arePortsAvailable(ports): return True + @staticmethod + def pgrepCmd(serverName): + pgrepOpts="-fl" + # pylint: disable=deprecated-method + if platform.linux_distribution()[0] in ["Ubuntu", "LinuxMint", "Fedora","CentOS Linux","arch"]: + pgrepOpts="-a" + + return "pgrep %s %s" % (pgrepOpts, serverName) + ########################################################################################### class Account(object): From 0d160ad2b24d538d55ccaa3112c1873901e80707 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 18 Sep 2018 11:19:49 -0500 Subject: [PATCH 028/161] Changed all tests to use common string definition for wallet name. GH #5674 --- tests/launcher_test.py | 2 +- tests/nodeos_forked_chain_test.py | 2 +- tests/nodeos_run_test.py | 2 +- tests/nodeos_under_min_avail_ram.py | 2 +- tests/nodeos_voting_test.py | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/launcher_test.py b/tests/launcher_test.py index 4db21658aa8..65d963c9d2e 100755 --- a/tests/launcher_test.py +++ b/tests/launcher_test.py @@ -38,7 +38,7 @@ killEosInstances=not dontKill killWallet=not dontKill -WalletdName="keosd" +WalletdName=Utils.EosWalletName ClientName="cleos" timeout = .5 * 12 * 2 + 60 # time for finalization with 1 producer + 60 seconds padding Utils.setIrreversibleTimeout(timeout) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 4c4105721d5..66202561c28 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -122,7 +122,7 @@ def getMinHeadAndLib(prodNodes): killEosInstances=not dontKill killWallet=not dontKill -WalletdName="keosd" +WalletdName=Utils.EosWalletName ClientName="cleos" try: diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index f28f62a730a..aab17129c54 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -48,7 +48,7 @@ killWallet=not dontKill dontBootstrap=sanityTest 
-WalletdName="keosd" +WalletdName=Utils.EosWalletName ClientName="cleos" timeout = .5 * 12 * 2 + 60 # time for finalization with 1 producer + 60 seconds padding Utils.setIrreversibleTimeout(timeout) diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index c615b4fbb38..99001a73158 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -68,7 +68,7 @@ def setName(self, num): killEosInstances=not dontKill killWallet=not dontKill -WalletdName="keosd" +WalletdName=Utils.EosWalletName ClientName="cleos" try: diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index b6f176af8c9..20df85da346 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -156,7 +156,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): killEosInstances=not dontKill killWallet=not dontKill -WalletdName="keosd" +WalletdName=Utils.EosWalletName ClientName="cleos" try: From ed8bbb525f090b61f5e9f352d760e4197b81d90e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 18 Sep 2018 11:21:21 -0500 Subject: [PATCH 029/161] Added flag to Utils.checkOutput to not throw an exception when a return status is reported. 
GH #5674 --- tests/testUtils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/testUtils.py b/tests/testUtils.py index c9a7faa3702..88a3d9e1279 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -77,12 +77,12 @@ def getChainStrategies(): return chainSyncStrategies @staticmethod - def checkOutput(cmd): + def checkOutput(cmd, ignoreError=False): assert(isinstance(cmd, list)) popen=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output,error)=popen.communicate() Utils.CheckOutputDeque.append((output,error,cmd)) - if popen.returncode != 0: + if popen.returncode != 0 and not ignoreError: raise subprocess.CalledProcessError(returncode=popen.returncode, cmd=cmd, output=error) return output.decode("utf-8") From f72ee37c45707eeec3c6adaad6269b969b6fc1b9 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 18 Sep 2018 11:35:21 -0500 Subject: [PATCH 030/161] Added logging messages to provide status around keosd launching and creating wallets. GH #5674 --- tests/WalletMgr.py | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index c46dd78d6fd..ea5ee394a1e 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -36,6 +36,25 @@ def launch(self): Utils.Print("ERROR: Wallet Manager wasn't configured to launch keosd") return False + if Utils.Debug: + portStatus="N/A" + portTaken=False + if self.host=="localhost" or self.host=="127.0.0.1": + if Utils.arePortsAvailable(self.port): + portStatus="AVAILABLE" + portTaken=True + else: + portStatus="NOT AVAILABLE" + pgrepCmd=Utils.pgrepCmd(Utils.EosWalletName) + psOut=Utils.checkOutput(pgrepCmd.split(), ignoreError=True) + if psOut or portTaken: + statusMsg="" + if psOut: + statusMsg+=" %s - {%s}." % (pgrepCmd, psOut) + if portTaken: + statusMsg+=" port %d is NOT available." % (self.port) + Utils.Print("Launching %s, note similar processes running. 
%s" % (Utils.EosWalletName, statusMsg)) + cmd="%s --data-dir %s --config-dir %s --http-server-address=%s:%d --verbose-http-errors" % ( Utils.EosWalletPath, WalletMgr.__walletDataDir, WalletMgr.__walletDataDir, self.host, self.port) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) @@ -45,6 +64,11 @@ def launch(self): # Give keosd time to warm up time.sleep(2) + + if Utils.Debug: + psOut=Utils.checkOutput(pgrepCmd.split()) + Utils.Print("Launched %s. %s - {%s}" % (Utils.EosWalletName, pgrepCmd, psOut)) + return True def create(self, name, accounts=None, exitOnError=True): @@ -67,7 +91,15 @@ def create(self, name, accounts=None, exitOnError=True): retryCount+=1 if retryCount Date: Wed, 19 Sep 2018 11:52:10 -0500 Subject: [PATCH 031/161] Added printing out transaction information after Node commands that create transactions to provide traceability for missing transactions. GH #5674 --- tests/Node.py | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index dfacfd56cc5..cd7aebf09a1 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -140,10 +140,18 @@ def getTransId(trans): assert trans assert isinstance(trans, dict), print("Input type is %s" % type(trans)) - #Utils.Print("%s" % trans) + assert "transaction_id" in trans, print("trans does not contain key %s. 
trans={%s}" % ("transaction_id", json.dumps(trans, indent=2, sort_keys=True))) transId=trans["transaction_id"] return transId + @staticmethod + def isTrans(obj): + """Identify if this is a transaction dictionary.""" + if obj is None or not isinstance(obj, dict): + return False + + return True if "transaction_id" in obj else False + @staticmethod def byteArrToStr(arr): return arr.decode("utf-8") @@ -205,6 +213,7 @@ def getBlockByIdMdb(self, blockId, silentErrors=False): if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd)) try: trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand) + Node.logCmdTransaction(trans) if trans is not None: return trans except subprocess.CalledProcessError as ex: @@ -466,6 +475,7 @@ def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, w account.activePublicKey, stakeNet, CORE_SYMBOL, stakeCPU, CORE_SYMBOL, buyRAM, CORE_SYMBOL) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + Node.logCmdTransaction(trans) transId=Node.getTransId(trans) if stakedDeposit > 0: @@ -483,11 +493,13 @@ def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTran cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + Node.logCmdTransaction(trans) transId=Node.getTransId(trans) if stakedDeposit > 0: self.waitForTransInBlock(transId) # seems like account creation needs to be finlized before transfer can happen trans = self.transferFunds(creatorAccount, account, "%0.04f %s" % (stakedDeposit/10000, CORE_SYMBOL), "init") + Node.logCmdTransaction(trans) transId=Node.getTransId(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, 
exitOnError=exitOnError) @@ -638,6 +650,7 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False trans=None try: trans=Utils.runCmdArrReturnJson(cmdArr) + Node.logCmdTransaction(trans) except subprocess.CalledProcessError as ex: msg=ex.output.decode("utf-8") Utils.Print("ERROR: Exception during funds transfer. %s" % (msg)) @@ -819,6 +832,7 @@ def publishContract(self, account, contractDir, wasmFile, abiFile, waitForTransB trans=None try: trans=Utils.runCmdReturnJson(cmd, trace=False) + Node.logCmdTransaction(trans) except subprocess.CalledProcessError as ex: if not shouldFail: msg=ex.output.decode("utf-8") @@ -876,6 +890,7 @@ def pushMessage(self, account, action, data, opts, silentErrors=False): if Utils.Debug: Utils.Print("cmd: %s" % (cmdArr)) try: trans=Utils.runCmdArrReturnJson(cmdArr) + Node.logCmdTransaction(trans, ignoreNonTrans=True) return (True, trans) except subprocess.CalledProcessError as ex: msg=ex.output.decode("utf-8") @@ -887,6 +902,7 @@ def setPermission(self, account, code, pType, requirement, waitForTransBlock=Fal cmdDesc="set action permission" cmd="%s -j %s %s %s %s" % (cmdDesc, account, code, pType, requirement) trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError) + Node.logCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -900,6 +916,7 @@ def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, tran cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr) msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + Node.logCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -909,6 +926,7 @@ def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnEr cmdDesc, producer.name, 
producer.activePublicKey, url, location) msg="producer=%s" % (producer.name); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + Node.logCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -918,6 +936,7 @@ def vote(self, account, producers, waitForTransBlock=False, exitOnError=False): cmdDesc, account.name, " ".join(producers)) msg="account=%s, producers=[ %s ]" % (account.name, ", ".join(producers)); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + Node.logCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -967,6 +986,7 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head try: if returnType==ReturnType.json: trans=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) + Node.logCmdTransaction(trans) elif returnType==ReturnType.raw: trans=Utils.runCmdReturnStr(cmd) else: @@ -1227,6 +1247,20 @@ def isNodeAlive(): self.killed=False return True + @staticmethod + def logCmdTransaction(trans, ignoreNonTrans=False): + if not Utils.Debug: + return + + if trans is None: + Utils.Print(" cmd returned transaction: %s" % (trans)) + + if ignoreNonTrans and not Node.isTrans(trans): + return + + transId=Node.getTransId(trans) + Utils.Print(" cmd returned transaction id: %s" % (transId)) + def reportStatus(self): Utils.Print("Node State:") Utils.Print(" cmd : %s" % (self.cmd)) From 776dcbc996973bb7be85a45558a92c01bbe563dc Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 19 Sep 2018 12:26:15 -0500 Subject: [PATCH 032/161] Added more transaction information and added Context class for tracking context inside an object for error reporting. 
GH #5674 --- tests/Node.py | 83 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 77 insertions(+), 6 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index cd7aebf09a1..2f0ab393b7f 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -64,11 +64,80 @@ def validateTransaction(trans): assert trans assert isinstance(trans, dict), print("Input type is %s" % type(trans)) - def printTrans(trans): - Utils.Print("ERROR: Failure in transaction validation.") + executed="executed" + def printTrans(trans, status): + Utils.Print("ERROR: Valid transaction should be \"%s\" but it was \"%s\"." % (executed, status)) Utils.Print("Transaction: %s" % (json.dumps(trans, indent=1))) - assert trans["processed"]["receipt"]["status"] == "executed", printTrans(trans) + transStatus=Node.getTransStatus(trans) + assert transStatus == executed, printTrans(trans, transStatus) + + @staticmethod + def __printTransStructureError(trans, context): + Utils.Print("ERROR: Failure in expected transaction structure. Missing trans%s." % (context)) + Utils.Print("Transaction: %s" % (json.dumps(trans, indent=1))) + + class Context: + def __init__(self, obj, desc): + self.obj=obj + self.sections=[obj] + self.keyContext=[] + self.desc=desc + + def __json(self): + return "%s=\n%s" % (self.desc, json.dumps(self.obj, indent=1)) + + def __keyContext(self): + msg="" + for key in self.keyContext: + if msg=="": + msg="[" + else: + msg+="][" + msg+=key + if msg!="": + msg+="]" + return msg + + def __contextDesc(self): + return "%s%s" % (self.desc, self.__keyContext()) + + def add(self, newKey): + assert isinstance(newKey, str), print("ERROR: Trying to use %s as a key" % (newKey)) + subSection=self.sections[-1] + assert isinstance(subSection, dict), print("ERROR: Calling \"add\" method when context is not a dictionary. %s in %s" % (self.__contextDesc(), self.__json())) + assert newKey in subSection, print("ERROR: %s%s does not contain key \"%s\". 
%s" % (self.__contextDesc(), key, self.__json())) + current=subSection[newKey] + self.sections.append(current) + self.keyContext.append(newKey) + return current + + def index(self, i): + assert isinstance(i, int), print("ERROR: Trying to use \"%s\" as a list index" % (i)) + cur=self.getCurrent() + assert isinstance(cur, list), print("ERROR: Calling \"index\" method when context is not a list. %s in %s" % (self.__contextDesc(), self.__json())) + listLen=len(cur) + assert i < listLen, print("ERROR: Index %s is beyond the size of the current list (%s). %s in %s" % (i, listLen, self.__contextDesc(), self.__json())) + return self.sections.append(cur[i]) + + def getCurrent(self): + return self.sections[-1] + + @staticmethod + def getTransStatus(trans): + cntxt=Node.Context(trans, "trans") + cntxt.add("processed") + cntxt.add("receipt") + return cntxt.add("status") + + @staticmethod + def getTransBlockNum(trans): + cntxt=Node.Context(trans, "trans") + cntxt.add("processed") + cntxt.add("action_traces") + cntxt.index(0) + return cntxt.add("block_num") + @staticmethod def stdinAndCheckOutput(cmd, subcommand): @@ -911,7 +980,7 @@ def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, tran toAccount=fromAccount cmdDesc="system delegatebw" - transferStr="--transfer" if transferTo else "" + transferStr="--transfer" if transferTo else "" cmd="%s -j %s %s \"%s %s\" \"%s %s\" %s" % ( cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr) msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); @@ -1186,7 +1255,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim cmdArr=[] myCmd=self.cmd - toAddOrSwap=copy.deepcopy(addOrSwapFlags) if addOrSwapFlags is not None else {} + toAddOrSwap=copy.deepcopy(addOrSwapFlags) if addOrSwapFlags is not None else {} if not newChain: skip=False swapValue=None @@ -1259,7 +1328,9 @@ def logCmdTransaction(trans, 
ignoreNonTrans=False): return transId=Node.getTransId(trans) - Utils.Print(" cmd returned transaction id: %s" % (transId)) + status=Node.getTransStatus(trans) + blockNum=Node.getTransBlockNum(trans) + Utils.Print(" cmd returned transaction id: %s, status: %s, (possible) block num: %s" % (transId, status, blockNum)) def reportStatus(self): Utils.Print("Node State:") From 0e0e6d1b82bc012e92dd7d063505db3e7b681f2e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 20 Sep 2018 13:18:43 -0500 Subject: [PATCH 033/161] Fixed nonparallelizable_tests label on mongodb test. GH #5674 --- tests/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 417eb35f07c..4ef0ca047e6 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -59,7 +59,7 @@ add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_ set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) if(BUILD_MONGO_DB_PLUGIN) add_test(NAME nodeos_run_test-mongodb COMMAND tests/nodeos_run_test.py --mongodb -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) - set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) + set_property(TEST nodeos_run_test-mongodb PROPERTY LABELS nonparallelizable_tests) endif() add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) From 015b60fc08952f9c84e73cc0d24a027628400743 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 20 Sep 2018 14:13:00 -0500 Subject: [PATCH 034/161] Removed calls to Node.logCmdTransaction that were not for newly created transactions. 
GH #5674 --- tests/Node.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 2f0ab393b7f..5abf1a20c1e 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -282,7 +282,6 @@ def getBlockByIdMdb(self, blockId, silentErrors=False): if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd)) try: trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand) - Node.logCmdTransaction(trans) if trans is not None: return trans except subprocess.CalledProcessError as ex: @@ -1051,13 +1050,12 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head cmd="curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % \ (self.endpointHttp, producer, whereInSequence, basedOnLib) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - trans=None + rtn=None try: if returnType==ReturnType.json: - trans=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) - Node.logCmdTransaction(trans) + rtn=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) elif returnType==ReturnType.raw: - trans=Utils.runCmdReturnStr(cmd) + rtn=Utils.runCmdReturnStr(cmd) else: unhandledEnumType(returnType) except subprocess.CalledProcessError as ex: @@ -1075,11 +1073,11 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head exitMsg=": " + exitMsg else: exitMsg="" - if exitOnError and trans is None: + if exitOnError and rtn is None: Utils.cmdError("could not \"%s\" - %s" % (cmd,exitMsg)) Utils.errorExit("Failed to \"%s\"" % (cmd)) - return trans + return rtn def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False): if not waitForTransBlock: From 1d32ecb87b8d269a2a3e47a7e39f650f90d3f886 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 20 Sep 2018 15:45:57 -0500 Subject: [PATCH 035/161] Added reporting boot log to dumped error output. 
GH #5674 --- tests/Cluster.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index e21c15910e7..ceea15803bb 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -30,6 +30,7 @@ class Cluster(object): __BiosHost="localhost" __BiosPort=8788 __LauncherCmdArr=[] + __bootlog="eosio-ignition-wd/bootlog.txt" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. If not load the wallet plugin @@ -82,6 +83,8 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 self.defproducerbAccount.ownerPrivateKey=defproducerbPrvtKey self.defproducerbAccount.activePrivateKey=defproducerbPrvtKey + self.useBiosBootFile=False + def setChainStrategy(self, chainSyncStrategy=Utils.SyncReplayTag): self.__chainSyncStrategy=self.__chainSyncStrategies.get(chainSyncStrategy) @@ -333,6 +336,7 @@ def connectGroup(group, producerNodes, bridgeNodes) : Utils.Print("ERROR: Bootstrap failed.") return False else: + self.useBiosBootFile=True self.biosNode=Cluster.bios_bootstrap(totalNodes, Cluster.__BiosHost, Cluster.__BiosPort, dontKill) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") @@ -833,8 +837,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): return None p = re.compile('error', re.IGNORECASE) - bootlog="eosio-ignition-wd/bootlog.txt" - with open(bootlog) as bootFile: + with open(Cluster.__bootlog) as bootFile: for line in bootFile: if p.search(line): Utils.Print("ERROR: bios_boot.sh script resulted in errors. See %s" % (bootlog)) @@ -1289,6 +1292,9 @@ def dumpErrorDetails(self): fileName="var/lib/node_%02d/stderr.txt" % (i) Cluster.dumpErrorDetailImpl(fileName) + if self.useBiosBootFile: + Cluster.dumpErrorDetailImpl(Cluster.__bootlog) + def killall(self, silent=True, allInstances=False): """Kill cluster nodeos instances. 
allInstances will kill all nodeos instances running on the system.""" cmd="%s -k 9" % (Utils.EosLauncherPath) From de54f98bf80a1c822558113cb5a0ee5092e51029 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 20 Sep 2018 20:05:40 -0400 Subject: [PATCH 036/161] better error messages from `variant_to_binary` Most of the plumbing to support better messages for `binary_to_variant` is there, it just has not been used yet. Also added option to shorten error message output such that it is bounded by a fixed upper limit (protects against malicious user input via ABI and the JSON provided to convert to binary). The default behavior of `variant_to_binary` is to provide verbose error messages. However, when the function is called from `abi_json_to_bin` within chain_plugin, it overrides the default behavior to shorten the error messages. In the future, this behavior should be modified based on a configuration parameter to at the very least enable very useful error messages on local nodes for debugging purposes. 
--- libraries/chain/abi_serializer.cpp | 334 ++++++++++++++++-- .../include/eosio/chain/abi_serializer.hpp | 165 +++++---- plugins/chain_plugin/chain_plugin.cpp | 2 +- unittests/abi_tests.cpp | 95 ++++- 4 files changed, 484 insertions(+), 112 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 6c6752dad6f..59817288928 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -346,12 +346,12 @@ namespace eosio { namespace chain { } fc::variant abi_serializer::binary_to_variant(const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time)const { - impl::binary_to_variant_context ctx(max_serialization_time); + impl::binary_to_variant_context ctx(*this, max_serialization_time, type); return _binary_to_variant(type, binary, ctx); } fc::variant abi_serializer::binary_to_variant(const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time)const { - impl::binary_to_variant_context ctx(max_serialization_time); + impl::binary_to_variant_context ctx(*this, max_serialization_time, type); return _binary_to_variant(type, binary, ctx); } @@ -360,23 +360,29 @@ namespace eosio { namespace chain { auto h = ctx.enter_scope(); auto rtype = resolve_type(type); + auto v_itr = variants.end(); + auto s_itr = structs.end(); + auto btype = built_in_types.find(fundamental_type(rtype)); if( btype != built_in_types.end() ) { btype->second.second(var, ds, is_array(rtype), is_optional(rtype)); } else if ( is_array(rtype) ) { + ctx.hint_array_type_if_in_array(); vector vars = var.get_array(); fc::raw::pack(ds, (fc::unsigned_int)vars.size()); - auto h1 = ctx.push_to_path( fundamental_type(rtype), ctx.is_path_empty() ); + + auto h1 = ctx.push_to_path( impl::array_index_path_item{} ); + auto h2 = ctx.disallow_extensions_unless(false); + int64_t i = 0; for (const auto& var : vars) { ctx.set_array_index_of_path_back(i); - auto h2 = 
ctx.disallow_extensions_unless(false); _variant_to_binary(fundamental_type(rtype), var, ds, ctx); ++i; } - } else if ( variants.find(rtype) != variants.end() ) { - auto& v = variants.find(rtype)->second; - auto h1 = ctx.push_to_path( v.name, ctx.is_path_empty() ); + } else if( (v_itr = variants.find(rtype)) != variants.end() ) { + ctx.hint_variant_type_if_in_array( v_itr ); + auto& v = v_itr->second; EOS_ASSERT( var.is_array() && var.size() == 2, pack_exception, "Expected input to be an array of two items while processing variant '${p}'", ("p", ctx.get_path_string()) ); EOS_ASSERT( var[size_t(0)].is_string(), pack_exception, @@ -385,16 +391,13 @@ namespace eosio { namespace chain { auto it = find(v.types.begin(), v.types.end(), variant_type_str); EOS_ASSERT( it != v.types.end(), pack_exception, "Specified type '${t}' in input array is not valid within the variant '${p}'", - ("t", variant_type_str)("p", ctx.get_path_string()) ); + ("t", ctx.maybe_shorten(variant_type_str))("p", ctx.get_path_string()) ); fc::raw::pack(ds, fc::unsigned_int(it - v.types.begin())); - std::stringstream s; - s << ""; - auto h3 = ctx.push_to_path(s.str()); + auto h1 = ctx.push_to_path( impl::variant_path_item{ .variant_itr = v_itr, .variant_ordinal = static_cast(it - v.types.begin()) } ); _variant_to_binary( *it, var[size_t(1)], ds, ctx ); - } else { - const auto& st = get_struct(rtype); - - auto h1 = ctx.push_to_path( st.name, ctx.is_path_empty() ); + } else if( (s_itr = structs.find(rtype)) != structs.end() ) { + ctx.hint_struct_type_if_in_array( s_itr ); + const auto& st = s_itr->second; if( var.is_object() ) { const auto& vo = var.get_object(); @@ -404,22 +407,27 @@ namespace eosio { namespace chain { _variant_to_binary(resolve_type(st.base), var, ds, ctx); } bool extension_encountered = false; + uint32_t i = 0; for( const auto& field : st.fields ) { if( vo.contains( string(field.name).c_str() ) ) { if( extension_encountered ) - EOS_THROW( pack_exception, "Unexpected field '${f}' found 
in input object while processing struct '${p}'", ("f",field.name)("p",ctx.get_path_string()) ); + EOS_THROW( pack_exception, "Unexpected field '${f}' found in input object while processing struct '${p}'", + ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); { + auto h1 = ctx.push_to_path( impl::field_path_item{ .parent_struct_itr = s_itr, .field_ordinal = i } ); auto h2 = ctx.disallow_extensions_unless( &field == &st.fields.back() ); - auto h3 = ctx.push_to_path( field.name ); _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, ctx); } } else if( ends_with(field.type, "$") && ctx.extensions_allowed() ) { extension_encountered = true; } else if( extension_encountered ) { - EOS_THROW( pack_exception, "Encountered field '${f}' without binary extension designation while processing struct '${p}'", ("f",field.name)("p",ctx.get_path_string()) ); + EOS_THROW( pack_exception, "Encountered field '${f}' without binary extension designation while processing struct '${p}'", + ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); } else { - EOS_THROW( pack_exception, "Missing field '${f}' in input object while processing struct '${p}'", ("f",field.name)("p",ctx.get_path_string()) ); + EOS_THROW( pack_exception, "Missing field '${f}' in input object while processing struct '${p}'", + ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); } + ++i; } } else if( var.is_array() ) { const auto& va = var.get_array(); @@ -429,20 +437,22 @@ namespace eosio { namespace chain { uint32_t i = 0; for( const auto& field : st.fields ) { if( va.size() > i ) { + auto h1 = ctx.push_to_path( impl::field_path_item{ .parent_struct_itr = s_itr, .field_ordinal = i } ); auto h2 = ctx.disallow_extensions_unless( &field == &st.fields.back() ); - auto h3 = ctx.push_to_path( field.name ); _variant_to_binary(_remove_bin_extension(field.type), va[i], ds, ctx); } else if( ends_with(field.type, "$") && ctx.extensions_allowed() ) { break; } else { 
EOS_THROW( pack_exception, "Early end to input array specifying the fields of struct '${p}'; require input for field '${f}'", - ("p", ctx.get_path_string())("f", field.name) ); + ("p", ctx.get_path_string())("f", ctx.maybe_shorten(field.name)) ); } ++i; } } else { EOS_THROW( pack_exception, "Unexpected input encountered while processing struct '${p}'", ("p",ctx.get_path_string()) ); } + } else { + EOS_THROW( invalid_type_inside_abi, "Unknown type ${type}", ("type",type) ); } } FC_CAPTURE_AND_RETHROW( (type)(var) ) } @@ -460,13 +470,15 @@ namespace eosio { namespace chain { return temp; } FC_CAPTURE_AND_RETHROW( (type)(var) ) } - bytes abi_serializer::variant_to_binary(const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time)const { - impl::variant_to_binary_context ctx(max_serialization_time); + bytes abi_serializer::variant_to_binary(const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time, bool short_path)const { + impl::variant_to_binary_context ctx(*this, max_serialization_time, type); + ctx.short_path = short_path; return _variant_to_binary(type, var, ctx); } - void abi_serializer::variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time)const { - impl::variant_to_binary_context ctx(max_serialization_time); + void abi_serializer::variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time, bool short_path)const { + impl::variant_to_binary_context ctx(*this, max_serialization_time, type); + ctx.short_path = short_path; _variant_to_binary(type, var, ds, ctx); } @@ -489,4 +501,276 @@ namespace eosio { namespace chain { return itr->second; } + namespace impl { + + void abi_traverse_context::check_deadline()const { + EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", 
max_serialization_time) ); + } + + fc::scoped_exit> abi_traverse_context::enter_scope() { + std::function callback = [old_recursion_depth=recursion_depth, this](){ + recursion_depth = old_recursion_depth; + }; + + ++recursion_depth; + EOS_ASSERT( recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, + "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); + + EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, + "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + + return {std::move(callback)}; + } + + void abi_traverse_context_with_path::set_path_root( const type_name& type ) { + auto rtype = abis.resolve_type(type); + + if( abis.is_array(rtype) ) { + root_of_path = array_type_path_root{}; + } else { + auto itr1 = abis.structs.find(rtype); + if( itr1 != abis.structs.end() ) { + root_of_path = struct_type_path_root{ .struct_itr = itr1 }; + } else { + auto itr2 = abis.variants.find(rtype); + if( itr2 != abis.variants.end() ) { + root_of_path = variant_type_path_root{ .variant_itr = itr2 }; + } + } + } + } + + fc::scoped_exit> abi_traverse_context_with_path::push_to_path( const path_item& item ) { + std::function callback = [this](){ + EOS_ASSERT( path.size() > 0, abi_exception, + "invariant failure in variant_to_binary_context: path is empty on scope exit" ); + path.pop_back(); + }; + + path.push_back( item ); + + return {std::move(callback)}; + } + + void abi_traverse_context_with_path::set_array_index_of_path_back( uint32_t i ) { + EOS_ASSERT( path.size() > 0, abi_exception, "path is empty" ); + + auto& b = path.back(); + + EOS_ASSERT( b.contains(), abi_exception, "trying to set array index without first pushing new array index item" ); + + b.get().array_index = i; + } + + void abi_traverse_context_with_path::hint_array_type_if_in_array() { + if( path.size() == 0 || !path.back().contains() ) + return; + + path.back().get().type_hint 
= array_type_path_root{}; + } + + void abi_traverse_context_with_path::hint_struct_type_if_in_array( const map::const_iterator& itr ) { + if( path.size() == 0 || !path.back().contains() ) + return; + + path.back().get().type_hint = struct_type_path_root{ .struct_itr = itr }; + } + + void abi_traverse_context_with_path::hint_variant_type_if_in_array( const map::const_iterator& itr ) { + if( path.size() == 0 || !path.back().contains() ) + return; + + path.back().get().type_hint = variant_type_path_root{ .variant_itr = itr }; + } + + constexpr size_t const_strlen( const char* str ) + { + return (*str == 0) ? 0 : const_strlen(str + 1) + 1; + } + + void output_name( std::ostream& s, const string& str, bool shorten, size_t max_length = 64 ) { + constexpr size_t min_num_characters_at_ends = 4; + constexpr size_t preferred_num_tail_end_characters = 6; + constexpr const char* fill_in = "..."; + + static_assert( min_num_characters_at_ends <= preferred_num_tail_end_characters, + "preferred number of tail end characters cannot be less than the imposed absolute minimum" ); + + constexpr size_t fill_in_length = const_strlen( fill_in ); + constexpr size_t min_length = fill_in_length + 2*min_num_characters_at_ends; + constexpr size_t preferred_min_length = fill_in_length + 2*preferred_num_tail_end_characters; + + max_length = std::max( max_length, min_length ); + + if( !shorten || str.size() <= max_length ) { + s << str; + return; + } + + size_t actual_num_tail_end_characters = preferred_num_tail_end_characters; + if( max_length < preferred_min_length ) { + actual_num_tail_end_characters = min_num_characters_at_ends + (max_length - min_length)/2; + } + + s.write( str.data(), max_length - fill_in_length - actual_num_tail_end_characters ); + s.write( fill_in, fill_in_length ); + s.write( str.data() + (str.size() - actual_num_tail_end_characters), actual_num_tail_end_characters ); + } + + struct generate_path_string_visitor { + using result_type = void; + + 
generate_path_string_visitor( bool shorten_names, bool track_only ) + : shorten_names(shorten_names), track_only( track_only ) + {} + + std::stringstream s; + bool shorten_names = false; + bool track_only = false; + path_item last_non_array_path_item; + + void add_dot() { + s << "."; + } + + void operator()( const empty_path_item& item ) { + } + + void operator()( const array_index_path_item& item ) { + if( track_only ) { + last_non_array_path_item = item; // ????? + return; + } + + s << "[" << item.array_index << "]"; + } + + void operator()( const field_path_item& item ) { + if( track_only ) { + last_non_array_path_item = item; + return; + } + + const auto& str = item.parent_struct_itr->second.fields.at(item.field_ordinal).name; + output_name( s, str, shorten_names ); + } + + void operator()( const variant_path_item& item ) { + if( track_only ) { + last_non_array_path_item = item; + return; + } + + s << ""; + } + + void operator()( const empty_path_root& item ) { + } + + void operator()( const array_type_path_root& item ) { + s << "ARRAY"; + } + + void operator()( const struct_type_path_root& item ) { + const auto& str = item.struct_itr->first; + output_name( s, str, shorten_names ); + } + + void operator()( const variant_type_path_root& item ) { + const auto& str = item.variant_itr->first; + output_name( s, str, shorten_names ); + } + }; + + struct path_item_type_visitor { + using result_type = void; + + path_item_type_visitor( std::stringstream& s, bool shorten_names ) + : s(s), shorten_names(shorten_names) + {} + + std::stringstream& s; + bool shorten_names = false; + + void operator()( const empty_path_item& item ) { + } + + void operator()( const array_index_path_item& item ) { + const auto& th = item.type_hint; + if( th.contains() ) { + const auto& str = th.get().struct_itr->first; + output_name( s, str, shorten_names ); + } else if( th.contains() ) { + const auto& str = th.get().variant_itr->first; + output_name( s, str, shorten_names ); + } else if( 
th.contains() ) { + s << "ARRAY"; + } else { + s << "UNKNOWN"; + } + } + + void operator()( const field_path_item& item ) { + const auto& str = item.parent_struct_itr->second.fields.at(item.field_ordinal).type; + output_name( s, str, shorten_names ); + } + + void operator()( const variant_path_item& item ) { + const auto& str = item.variant_itr->second.types.at(item.variant_ordinal); + output_name( s, str, shorten_names ); + } + }; + + string abi_traverse_context_with_path::get_path_string()const { + bool full_path = !short_path; + bool shorten_names = short_path; + + generate_path_string_visitor visitor(shorten_names, !full_path); + if( full_path ) + root_of_path.visit( visitor ); + for( size_t i = 0, n = path.size(); i < n; ++i ) { + if( full_path && !path[i].contains() ) + visitor.add_dot(); + + path[i].visit( visitor ); + + } + + if( !full_path ) { + if( visitor.last_non_array_path_item.contains() ) { + root_of_path.visit( visitor ); + } else { + path_item_type_visitor vis2(visitor.s, shorten_names); + visitor.last_non_array_path_item.visit(vis2); + } + } + + return visitor.s.str(); + } + + string abi_traverse_context_with_path::maybe_shorten( const string& str ) { + if( !short_path ) + return str; + + std::stringstream s; + output_name( s, str, true ); + return s.str(); + } + + fc::scoped_exit> variant_to_binary_context::disallow_extensions_unless( bool condition ) { + std::function callback = [old_recursion_depth=recursion_depth, old_allow_extensions=allow_extensions, this](){ + allow_extensions = old_allow_extensions; + }; + + if( !condition ) { + allow_extensions = false; + } + + return {std::move(callback)}; + } + } + } } diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index 9682f0a1506..c3e1669fb20 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -18,12 +18,13 @@ using std::pair; using namespace 
fc; namespace impl { - struct abi_from_variant; - struct abi_to_variant; + struct abi_from_variant; + struct abi_to_variant; - struct abi_traverse_context; - struct binary_to_variant_context; - struct variant_to_binary_context; + struct abi_traverse_context; + struct abi_traverse_context_with_path; + struct binary_to_variant_context; + struct variant_to_binary_context; } /** @@ -52,11 +53,11 @@ struct abi_serializer { optional get_error_message( uint64_t error_code )const; - fc::variant binary_to_variant(const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time)const; - fc::variant binary_to_variant(const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time)const; + fc::variant binary_to_variant( const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time )const; + fc::variant binary_to_variant( const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time )const; - bytes variant_to_binary(const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time)const; - void variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time)const; + bytes variant_to_binary( const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time, bool short_path = false )const; + void variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time, bool short_path = false )const; template static void to_variant( const T& o, fc::variant& vo, Resolver resolver, const fc::microseconds& max_serialization_time ); @@ -116,6 +117,7 @@ struct abi_serializer { friend struct impl::abi_from_variant; friend struct impl::abi_to_variant; + friend struct impl::abi_traverse_context_with_path; }; namespace impl { @@ -129,24 +131,9 @@ namespace impl { : max_serialization_time( 
max_serialization_time ), deadline( deadline ), recursion_depth(0) {} - void check_deadline()const { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - } - - fc::scoped_exit> enter_scope() { - std::function callback = [old_recursion_depth=recursion_depth, this](){ - recursion_depth = old_recursion_depth; - }; - - ++recursion_depth; - EOS_ASSERT( recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, - "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); + void check_deadline()const; - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, - "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); - - return {std::move(callback)}; - } + fc::scoped_exit> enter_scope(); protected: fc::microseconds max_serialization_time; @@ -154,85 +141,93 @@ namespace impl { size_t recursion_depth; }; - struct binary_to_variant_context : public abi_traverse_context { - using abi_traverse_context::abi_traverse_context; + struct empty_path_root {}; - binary_to_variant_context( const abi_traverse_context& ctx ) - : abi_traverse_context(ctx) - {} + struct array_type_path_root { }; - struct variant_to_binary_context : public abi_traverse_context { - using abi_traverse_context::abi_traverse_context; + struct struct_type_path_root { + map::const_iterator struct_itr; + }; - variant_to_binary_context( const abi_traverse_context& ctx ) - : abi_traverse_context(ctx) - {} + struct variant_type_path_root { + map::const_iterator variant_itr; + }; - fc::scoped_exit> disallow_extensions_unless( bool condition ) { - std::function callback = [old_recursion_depth=recursion_depth, old_allow_extensions=allow_extensions, this](){ - allow_extensions = old_allow_extensions; - }; + using path_root = static_variant; - if( !condition ) { - allow_extensions = false; - } + 
struct empty_path_item {}; - return {std::move(callback)}; - } + struct array_index_path_item { + path_root type_hint; + uint32_t array_index = 0; + }; - fc::scoped_exit> push_to_path( const string& n, bool condition = true ) { + struct field_path_item { + map::const_iterator parent_struct_itr; + uint32_t field_ordinal = 0; + }; - if( !condition ) { - fc::scoped_exit> h([](){}); - h.cancel(); - return h; - } + struct variant_path_item { + map::const_iterator variant_itr; + uint32_t variant_ordinal = 0; + }; - std::function callback = [this](){ - EOS_ASSERT( path.size() > 0 && path_array_index.size() > 0, abi_exception, - "invariant failure in variant_to_binary_context: path is empty on scope exit" ); - path.pop_back(); - path_array_index.pop_back(); - }; + using path_item = static_variant; - path.push_back( n ); - path_array_index.push_back( -1 ); + struct abi_traverse_context_with_path : public abi_traverse_context { + abi_traverse_context_with_path( const abi_serializer& abis, fc::microseconds max_serialization_time, const type_name& type ) + : abi_traverse_context( max_serialization_time ), abis(abis) + { + set_path_root(type); + } - return {std::move(callback)}; + abi_traverse_context_with_path( const abi_serializer& abis, fc::microseconds max_serialization_time, fc::time_point deadline, const type_name& type ) + : abi_traverse_context( max_serialization_time, deadline ), abis(abis) + { + set_path_root(type); } - void set_array_index_of_path_back( int64_t i ) { - EOS_ASSERT( path_array_index.size() > 0, abi_exception, "path is empty" ); - path_array_index.back() = i; + abi_traverse_context_with_path( const abi_serializer& abis, const abi_traverse_context& ctx, const type_name& type ) + : abi_traverse_context(ctx), abis(abis) + { + set_path_root(type); } - bool extensions_allowed()const { return allow_extensions; } + void set_path_root( const type_name& type ); - bool is_path_empty()const { return path.size() == 0; } + fc::scoped_exit> push_to_path( const 
path_item& item ); - string get_path_string()const { - EOS_ASSERT( path.size() == path_array_index.size(), abi_exception, - "invariant failure in variant_to_binary_context: mismatch in path vector sizes" ); + void set_array_index_of_path_back( uint32_t i ); + void hint_array_type_if_in_array(); + void hint_struct_type_if_in_array( const map::const_iterator& itr ); + void hint_variant_type_if_in_array( const map::const_iterator& itr ); - std::stringstream s; - for( size_t i = 0, n = path.size(); i < n; ++i ) { - s << path[i]; - if( path_array_index[i] >= 0 ) { - s << "[" << path_array_index[i] << "]"; - } - if( (i + 1) != n ) { // if not the last element in the path - s << "."; - } - } + string get_path_string()const; - return s.str(); - } + string maybe_shorten( const string& str ); + + protected: + const abi_serializer& abis; + path_root root_of_path; + vector path; + public: + bool short_path = false; + }; + + struct binary_to_variant_context : public abi_traverse_context_with_path { + using abi_traverse_context_with_path::abi_traverse_context_with_path; + }; + + struct variant_to_binary_context : public abi_traverse_context_with_path { + using abi_traverse_context_with_path::abi_traverse_context_with_path; + + fc::scoped_exit> disallow_extensions_unless( bool condition ); + + bool extensions_allowed()const { return allow_extensions; } protected: - bool allow_extensions = true; - vector path; - vector path_array_index; + bool allow_extensions = true; }; /** @@ -389,7 +384,7 @@ namespace impl { auto type = abi->get_action_type(act.name); if (!type.empty()) { try { - binary_to_variant_context _ctx(ctx); + binary_to_variant_context _ctx(*abi, ctx, type); mvo( "data", abi->_binary_to_variant( type, act.data, _ctx )); mvo("hex_data", act.data); } catch(...) 
{ @@ -552,7 +547,7 @@ namespace impl { if (abi.valid()) { auto type = abi->get_action_type(act.name); if (!type.empty()) { - variant_to_binary_context _ctx(ctx); + variant_to_binary_context _ctx(*abi, ctx, type); act.data = std::move( abi->_variant_to_binary( type, data, _ctx )); valid_empty_data = act.data.empty(); } diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 183e7a48ecc..cff24929a6d 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1727,7 +1727,7 @@ read_only::abi_json_to_bin_result read_only::abi_json_to_bin( const read_only::a auto action_type = abis.get_action_type(params.action); EOS_ASSERT(!action_type.empty(), action_validate_exception, "Unknown action ${action} in contract ${contract}", ("action", params.action)("contract", params.code)); try { - result.binargs = abis.variant_to_binary(action_type, params.args, abi_serializer_max_time); + result.binargs = abis.variant_to_binary(action_type, params.args, abi_serializer_max_time, true); // TODO: Allow configuration to output verbose error messages } EOS_RETHROW_EXCEPTIONS(chain::invalid_action_args_exception, "'${args}' is invalid args for action '${action}' code '${code}'. 
expected '${proto}'", ("args", params.args)("action", params.action)("code", params.code)("proto", action_abi_to_variant(abi, action_type))) diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index f2f415825c9..dacce90cc74 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -3761,6 +3761,8 @@ BOOST_AUTO_TEST_CASE(abi_serialize_detailed_error_messages) "types": [ {"new_type_name": "foo", "type": "s2"}, {"new_type_name": "bar", "type": "foo"}, + {"new_type_name": "s1array", "type": "s1[]"}, + {"new_type_name": "s1arrayarray", "type": "s1array[]"} ], "structs": [ {"name": "s1", "base": "", "fields": [ @@ -3781,6 +3783,9 @@ BOOST_AUTO_TEST_CASE(abi_serialize_detailed_error_messages) {"name": "f0", "type": "int8[]"}, {"name": "f1", "type": "s1[]"} ]}, + {"name": "s5", "base": "", "fields": [ + {"name": "f0", "type": "v2[]"}, + ]}, ], "variants": [ {"name": "v1", "types": ["s3", "int8", "s4"]}, @@ -3812,7 +3817,95 @@ BOOST_AUTO_TEST_CASE(abi_serialize_detailed_error_messages) pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 'v1..f1[1]'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2[]", fc::json::from_string(R"([{"f0":{"i0":1,"i1":2},"i2":3},{"f0":{"i0":4},"i2":6}])"), max_serialization_time), - pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's2[1].f0'") ); + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 'ARRAY[1].f0'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s5", fc::json::from_string(R"({"f0":[["bar",{"f0":{"i0":1,"i1":2},"i2":3}],["foo",{"f0":{"i0":4},"i2":6}]]})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's5.f0[1]..f0'") ); + + verify_round_trip_conversion( abis, "s1arrayarray", R"([[{"i0":1,"i1":2},{"i0":3,"i1":4}],[{"i0":5,"i1":6},{"i0":7,"i1":8},{"i0":9,"i1":10}]])", 
"0202010203040305060708090a"); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s1arrayarray", fc::json::from_string(R"([[{"i0":1,"i1":2},{"i0":3,"i1":4}],[{"i0":6,"i1":6},{"i0":7,"i1":8},{"i1":10}]])"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 'ARRAY[1][2]'") ); + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(abi_serialize_short_error_messages) +{ + using eosio::testing::fc_exception_message_is; + + auto abi = R"({ + "version": "eosio::abi/1.1", + "types": [ + {"new_type_name": "foo", "type": "s2"}, + {"new_type_name": "bar", "type": "foo"}, + {"new_type_name": "s1array", "type": "s1[]"}, + {"new_type_name": "s1arrayarray", "type": "s1array[]"} + ], + "structs": [ + {"name": "s1", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", "type": "int8"} + ]}, + {"name": "s2", "base": "", "fields": [ + {"name": "f0", "type": "s1"}, + {"name": "i2", "type": "int8"} + ]}, + {"name": "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", "base": "s1", "fields": [ + {"name": "i2", "type": "int8"}, + {"name": "f3", "type": "v2"}, + {"name": "f4", "type": "foo$"}, + {"name": "very_very_very_very_very_very_very_very_very_very_long_field_name_f5", "type": "s1$"} + ]}, + {"name": "s4", "base": "", "fields": [ + {"name": "f0", "type": "int8[]"}, + {"name": "f1", "type": "s1[]"} + ]}, + {"name": "s5", "base": "", "fields": [ + {"name": "f0", "type": "v2[]"}, + ]}, + ], + "variants": [ + {"name": "v1", "types": ["very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", "int8", "s4"]}, + {"name": "v2", "types": ["foo", "bar"]}, + ], + })"; + + try { + abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("bar", fc::json::from_string(R"({"f0":{"i0":1},"i2":3})"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i1' in input 
object while processing struct 's1'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", + fc::json::from_string(R"({"i0":1,"i2":3})"), max_serialization_time, true ), + pack_exception, + fc_exception_message_is("Missing field 'i1' in input object while processing struct 'very_very_very_very_very_very_very_very_very_very_long_...ame_s3'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", + fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["s2",{}]})"), max_serialization_time, true ), + pack_exception, fc_exception_message_is("Specified type 's2' in input array is not valid within the variant 'v2'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", + fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11},"i2":13}]})"), max_serialization_time, true ), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "v1", + fc::json::from_string(R"(["very_very_very_very_very_very_very_very_very_very_long_struct_name_s3",{"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11,"i1":12},"i2":13}],"very_very_very_very_very_very_very_very_very_very_long_field_name_f5":0}])"), + max_serialization_time, true ), + pack_exception, + fc_exception_message_is("Unexpected field 'very_very_very_very_very_very_very_very_very_very_long_...ame_f5' found in input object while processing struct 'very_very_very_very_very_very_very_very_very_very_long_...ame_s3'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["s4",{"f0":[0,1],"f1":[{"i0":2,"i1":3},{"i1":5}]}])"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 's1'") ); + + 
BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2[]", fc::json::from_string(R"([{"f0":{"i0":1,"i1":2},"i2":3},{"f0":{"i0":4},"i2":6}])"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s5", fc::json::from_string(R"({"f0":[["bar",{"f0":{"i0":1,"i1":2},"i2":3}],["foo",{"f0":{"i0":4},"i2":6}]]})"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); + + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s1arrayarray", fc::json::from_string(R"([[{"i0":1,"i1":2},{"i0":3,"i1":4}],[{"i0":6,"i1":6},{"i0":7,"i1":8},{"i1":10}]])"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 's1'") ); } FC_LOG_AND_RETHROW() } From 01f978d3db3ee4290417594f6a59559cc6399a98 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 20 Sep 2018 20:35:23 -0400 Subject: [PATCH 037/161] Hide unaligned warnings unless console prints on Hide the contract unaligned parameter warning unless contracts-console is enabled. These warnings are not useful except in contract development. 
--- .../include/eosio/chain/webassembly/wabt.hpp | 18 ++++++++++++------ .../include/eosio/chain/webassembly/wavm.hpp | 19 +++++++++++++------ 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp index 5be568d4b01..bf33448a787 100644 --- a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp @@ -371,7 +371,8 @@ struct intrinsic_invoker_impl, size_t, Inputs...>> size_t length = args.at((uint32_t)offset).get_i32(); T* base = array_ptr_impl(vars, ptr, length); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of const values" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned array of const values" ); std::vector > copy(length > 0 ? length : 1); T* copy_ptr = ©[0]; memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); @@ -387,7 +388,8 @@ struct intrinsic_invoker_impl, size_t, Inputs...>> size_t length = args.at((uint32_t)offset).get_i32(); T* base = array_ptr_impl(vars, ptr, length); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of values" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned array of values" ); std::vector > copy(length > 0 ? 
length : 1); T* copy_ptr = ©[0]; memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); @@ -500,7 +502,8 @@ struct intrinsic_invoker_impl> { uint32_t ptr = args.at((uint32_t)offset).get_i32(); T* base = array_ptr_impl(vars, ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned const pointer" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned const pointer" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); @@ -514,7 +517,8 @@ struct intrinsic_invoker_impl> { uint32_t ptr = args.at((uint32_t)offset).get_i32(); T* base = array_ptr_impl(vars, ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned pointer" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned pointer" ); T copy; memcpy( (void*)©, (void*)base, sizeof(T) ); Ret ret = Then(vars, ©, rest..., args, (uint32_t)offset - 1); @@ -603,7 +607,8 @@ struct intrinsic_invoker_impl> { EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for null pointers"); T* base = array_ptr_impl(vars, ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned const reference" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned const reference" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); @@ -619,7 +624,8 @@ struct intrinsic_invoker_impl> { EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for null pointers"); T* base = array_ptr_impl(vars, ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned reference" ); + if(vars.ctx.control.contracts_console()) + wlog( "misaligned reference" ); T copy; memcpy( (void*)©, (void*)base, sizeof(T) ); Ret ret = Then(vars, copy, rest..., args, (uint32_t)offset - 1); diff --git a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp index 4674c97e2a8..2bffe3f621f 100644 
--- a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include "Runtime/Runtime.h" #include "IR/Types.h" @@ -382,7 +383,8 @@ struct intrinsic_invoker_impl, size_t, Inputs...>, const auto length = size_t(size); T* base = array_ptr_impl(ctx, (U32)ptr, length); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of const values" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned array of const values" ); std::vector > copy(length > 0 ? length : 1); T* copy_ptr = ©[0]; memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); @@ -397,7 +399,8 @@ struct intrinsic_invoker_impl, size_t, Inputs...>, const auto length = size_t(size); T* base = array_ptr_impl(ctx, (U32)ptr, length); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of values" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned array of values" ); std::vector > copy(length > 0 ? 
length : 1); T* copy_ptr = ©[0]; memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); @@ -508,7 +511,8 @@ struct intrinsic_invoker_impl, std::tuple std::enable_if_t::value, Ret> { T* base = array_ptr_impl(ctx, (U32)ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned const pointer" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned const pointer" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); @@ -521,7 +525,8 @@ struct intrinsic_invoker_impl, std::tuple std::enable_if_t::value, Ret> { T* base = array_ptr_impl(ctx, (U32)ptr, 1); if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned pointer" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned pointer" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); @@ -587,7 +592,8 @@ struct intrinsic_invoker_impl, std::tuple(&base) % alignof(T) != 0 ) { - wlog( "misaligned const reference" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned const reference" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)&base, sizeof(T) ); @@ -605,7 +611,8 @@ struct intrinsic_invoker_impl, std::tuple(&base) % alignof(T) != 0 ) { - wlog( "misaligned reference" ); + if(ctx.apply_ctx->control.contracts_console()) + wlog( "misaligned reference" ); std::remove_const_t copy; T* copy_ptr = © memcpy( (void*)copy_ptr, (void*)&base, sizeof(T) ); From 56934f015f3c1ff2473f00a481704b6b14e39427 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 21 Sep 2018 15:22:23 -0400 Subject: [PATCH 038/161] add support for better error messages in `binary_to_variant` as well; also some minor code cleanup --- libraries/chain/abi_serializer.cpp | 139 +++++++++++------- .../include/eosio/chain/abi_serializer.hpp | 4 +- plugins/chain_plugin/chain_plugin.cpp | 14 +- .../eosio/chain_plugin/chain_plugin.hpp | 4 +- unittests/abi_tests.cpp | 84 
++++++++++- 5 files changed, 183 insertions(+), 62 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 59817288928..a7370e3b1c1 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -283,14 +283,32 @@ namespace eosio { namespace chain { fc::mutable_variant_object& obj, impl::binary_to_variant_context& ctx )const { auto h = ctx.enter_scope(); - const auto& st = get_struct(type); + auto s_itr = structs.find(type); + EOS_ASSERT( s_itr != structs.end(), invalid_type_inside_abi, "Unknown type ${type}", ("type",ctx.maybe_shorten(type)) ); + ctx.hint_struct_type_if_in_array( s_itr ); + const auto& st = s_itr->second; if( st.base != type_name() ) { _binary_to_variant(resolve_type(st.base), stream, obj, ctx); } - for( const auto& field : st.fields ) { - if( !stream.remaining() && ends_with(field.type, "$") ) - continue; - obj( field.name, _binary_to_variant(resolve_type(_remove_bin_extension(field.type)), stream, ctx) ); + bool encountered_extension = false; + for( uint32_t i = 0; i < st.fields.size(); ++i ) { + const auto& field = st.fields[i]; + bool extension = ends_with(field.type, "$"); + encountered_extension |= extension; + if( !stream.remaining() ) { + if( extension ) { + continue; + } + if( encountered_extension ) { + EOS_THROW( abi_exception, "Encountered field '${f}' without binary extension designation while processing struct '${p}'", + ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); + } + EOS_THROW( unpack_exception, "Stream unexpectedly ended; unable to unpack field '${f}' of struct '${p}'", + ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); + + } + auto h1 = ctx.push_to_path( impl::field_path_item{ .parent_struct_itr = s_itr, .field_ordinal = i } ); + obj( field.name, _binary_to_variant(resolve_type( extension ? 
_remove_bin_extension(field.type) : field.type ), stream, ctx) ); } } @@ -302,39 +320,60 @@ namespace eosio { namespace chain { auto ftype = fundamental_type(rtype); auto btype = built_in_types.find(ftype ); if( btype != built_in_types.end() ) { - return btype->second.first(stream, is_array(rtype), is_optional(rtype)); + try { + return btype->second.first(stream, is_array(rtype), is_optional(rtype)); + } EOS_RETHROW_EXCEPTIONS( unpack_exception, "Unable to unpack ${class} type '${type}' while processing '${p}'", + ("class", is_array(rtype) ? "array of built-in" : is_optional(rtype) ? "optional of built-in" : "built-in") + ("type", ftype)("p", ctx.get_path_string()) ) } if ( is_array(rtype) ) { - fc::unsigned_int size; - fc::raw::unpack(stream, size); - vector vars; - for( decltype(size.value) i = 0; i < size; ++i ) { - auto v = _binary_to_variant(ftype, stream, ctx); - EOS_ASSERT( !v.is_null(), unpack_exception, "Invalid packed array" ); - vars.emplace_back(std::move(v)); - } - EOS_ASSERT( vars.size() == size.value, - unpack_exception, - "packed size does not match unpacked array size, packed size ${p} actual size ${a}", - ("p", size)("a", vars.size()) ); - return fc::variant( std::move(vars) ); + ctx.hint_array_type_if_in_array(); + fc::unsigned_int size; + try { + fc::raw::unpack(stream, size); + } EOS_RETHROW_EXCEPTIONS( unpack_exception, "Unable to unpack size of array '${p}'", ("p", ctx.get_path_string()) ) + vector vars; + auto h1 = ctx.push_to_path( impl::array_index_path_item{} ); + for( decltype(size.value) i = 0; i < size; ++i ) { + ctx.set_array_index_of_path_back(i); + auto v = _binary_to_variant(ftype, stream, ctx); + // QUESTION: Is it actually desired behavior to require the returned variant to not be null? + // This would disallow arrays of optionals in general (though if all optionals in the array were present it would be allowed). + // Is there any scenario in which the returned variant would be null other than in the case of an empty optional? 
+ EOS_ASSERT( !v.is_null(), unpack_exception, "Invalid packed array '${p}'", ("p", ctx.get_path_string()) ); + vars.emplace_back(std::move(v)); + } + // QUESTION: Why would the assert below ever fail? + EOS_ASSERT( vars.size() == size.value, + unpack_exception, + "packed size does not match unpacked array size, packed size ${p} actual size ${a}", + ("p", size)("a", vars.size()) ); + return fc::variant( std::move(vars) ); } else if ( is_optional(rtype) ) { - char flag; - fc::raw::unpack(stream, flag); - return flag ? _binary_to_variant(ftype, stream, ctx) : fc::variant(); + char flag; + try { + fc::raw::unpack(stream, flag); + } EOS_RETHROW_EXCEPTIONS( unpack_exception, "Unable to unpack presence flag of optional '${p}'", ("p", ctx.get_path_string()) ) + return flag ? _binary_to_variant(ftype, stream, ctx) : fc::variant(); } else { - auto v = variants.find(rtype); - if( v != variants.end() ) { + auto v_itr = variants.find(rtype); + if( v_itr != variants.end() ) { + ctx.hint_variant_type_if_in_array( v_itr ); fc::unsigned_int select; - fc::raw::unpack(stream, select); - EOS_ASSERT( (size_t)select < v->second.types.size(), unpack_exception, "Invalid packed variant" ); - return vector{v->second.types[select], _binary_to_variant(v->second.types[select], stream, ctx)}; + try { + fc::raw::unpack(stream, select); + } EOS_RETHROW_EXCEPTIONS( unpack_exception, "Unable to unpack tag of variant '${p}'", ("p", ctx.get_path_string()) ) + EOS_ASSERT( (size_t)select < v_itr->second.types.size(), unpack_exception, + "Unpacked invalid tag (${select}) for variant '${p}'", ("select", select.value)("p",ctx.get_path_string()) ); + auto h1 = ctx.push_to_path( impl::variant_path_item{ .variant_itr = v_itr, .variant_ordinal = static_cast(select) } ); + return vector{v_itr->second.types[select], _binary_to_variant(v_itr->second.types[select], stream, ctx)}; } } fc::mutable_variant_object mvo; _binary_to_variant(rtype, stream, mvo, ctx); - EOS_ASSERT( mvo.size() > 0, unpack_exception, 
"Unable to unpack stream ${type}", ("type", type) ); + // QUESTION: Is this assert actually desired? It disallows unpacking empty structs from datastream. + EOS_ASSERT( mvo.size() > 0, unpack_exception, "Unable to unpack '${p}' from stream", ("p", ctx.get_path_string()) ); return fc::variant( std::move(mvo) ); } @@ -345,13 +384,15 @@ namespace eosio { namespace chain { return _binary_to_variant(type, ds, ctx); } - fc::variant abi_serializer::binary_to_variant(const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time)const { + fc::variant abi_serializer::binary_to_variant( const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time, bool short_path )const { impl::binary_to_variant_context ctx(*this, max_serialization_time, type); + ctx.short_path = short_path; return _binary_to_variant(type, binary, ctx); } - fc::variant abi_serializer::binary_to_variant(const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time)const { + fc::variant abi_serializer::binary_to_variant( const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time, bool short_path )const { impl::binary_to_variant_context ctx(*this, max_serialization_time, type); + ctx.short_path = short_path; return _binary_to_variant(type, binary, ctx); } @@ -406,11 +447,11 @@ namespace eosio { namespace chain { auto h2 = ctx.disallow_extensions_unless(false); _variant_to_binary(resolve_type(st.base), var, ds, ctx); } - bool extension_encountered = false; - uint32_t i = 0; - for( const auto& field : st.fields ) { + bool disallow_additional_fields = false; + for( uint32_t i = 0; i < st.fields.size(); ++i ) { + const auto& field = st.fields[i]; if( vo.contains( string(field.name).c_str() ) ) { - if( extension_encountered ) + if( disallow_additional_fields ) EOS_THROW( pack_exception, "Unexpected field '${f}' found in input object while processing struct '${p}'", ("f", 
ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); { @@ -419,23 +460,22 @@ namespace eosio { namespace chain { _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, ctx); } } else if( ends_with(field.type, "$") && ctx.extensions_allowed() ) { - extension_encountered = true; - } else if( extension_encountered ) { - EOS_THROW( pack_exception, "Encountered field '${f}' without binary extension designation while processing struct '${p}'", + disallow_additional_fields = true; + } else if( disallow_additional_fields ) { + EOS_THROW( abi_exception, "Encountered field '${f}' without binary extension designation while processing struct '${p}'", ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); } else { EOS_THROW( pack_exception, "Missing field '${f}' in input object while processing struct '${p}'", ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); } - ++i; } } else if( var.is_array() ) { const auto& va = var.get_array(); EOS_ASSERT( st.base == type_name(), invalid_type_inside_abi, "Using input array to specify the fields of the derived struct '${p}'; input arrays are currently only allowed for structs without a base", ("p",ctx.get_path_string()) ); - uint32_t i = 0; - for( const auto& field : st.fields ) { + for( uint32_t i = 0; i < st.fields.size(); ++i ) { + const auto& field = st.fields[i]; if( va.size() > i ) { auto h1 = ctx.push_to_path( impl::field_path_item{ .parent_struct_itr = s_itr, .field_ordinal = i } ); auto h2 = ctx.disallow_extensions_unless( &field == &st.fields.back() ); @@ -446,13 +486,12 @@ namespace eosio { namespace chain { EOS_THROW( pack_exception, "Early end to input array specifying the fields of struct '${p}'; require input for field '${f}'", ("p", ctx.get_path_string())("f", ctx.maybe_shorten(field.name)) ); } - ++i; } } else { EOS_THROW( pack_exception, "Unexpected input encountered while processing struct '${p}'", ("p",ctx.get_path_string()) ); } } else { - EOS_THROW( 
invalid_type_inside_abi, "Unknown type ${type}", ("type",type) ); + EOS_THROW( invalid_type_inside_abi, "Unknown type ${type}", ("type",ctx.maybe_shorten(type)) ); } } FC_CAPTURE_AND_RETHROW( (type)(var) ) } @@ -470,13 +509,13 @@ namespace eosio { namespace chain { return temp; } FC_CAPTURE_AND_RETHROW( (type)(var) ) } - bytes abi_serializer::variant_to_binary(const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time, bool short_path)const { + bytes abi_serializer::variant_to_binary( const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time, bool short_path )const { impl::variant_to_binary_context ctx(*this, max_serialization_time, type); ctx.short_path = short_path; return _variant_to_binary(type, var, ctx); } - void abi_serializer::variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time, bool short_path)const { + void abi_serializer::variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time, bool short_path )const { impl::variant_to_binary_context ctx(*this, max_serialization_time, type); ctx.short_path = short_path; _variant_to_binary(type, var, ds, ctx); @@ -627,7 +666,7 @@ namespace eosio { namespace chain { std::stringstream s; bool shorten_names = false; bool track_only = false; - path_item last_non_array_path_item; + path_item last_path_item; void add_dot() { s << "."; @@ -638,7 +677,7 @@ namespace eosio { namespace chain { void operator()( const array_index_path_item& item ) { if( track_only ) { - last_non_array_path_item = item; // ????? 
+ last_path_item = item; return; } @@ -647,7 +686,7 @@ namespace eosio { namespace chain { void operator()( const field_path_item& item ) { if( track_only ) { - last_non_array_path_item = item; + last_path_item = item; return; } @@ -657,7 +696,7 @@ namespace eosio { namespace chain { void operator()( const variant_path_item& item ) { if( track_only ) { - last_non_array_path_item = item; + last_path_item = item; return; } @@ -740,11 +779,11 @@ namespace eosio { namespace chain { } if( !full_path ) { - if( visitor.last_non_array_path_item.contains() ) { + if( visitor.last_path_item.contains() ) { root_of_path.visit( visitor ); } else { path_item_type_visitor vis2(visitor.s, shorten_names); - visitor.last_non_array_path_item.visit(vis2); + visitor.last_path_item.visit(vis2); } } diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index c3e1669fb20..6d242746004 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -53,8 +53,8 @@ struct abi_serializer { optional get_error_message( uint64_t error_code )const; - fc::variant binary_to_variant( const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time )const; - fc::variant binary_to_variant( const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time )const; + fc::variant binary_to_variant( const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time, bool short_path = false )const; + fc::variant binary_to_variant( const type_name& type, fc::datastream& binary, const fc::microseconds& max_serialization_time, bool short_path = false )const; bytes variant_to_binary( const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time, bool short_path = false )const; void variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream& ds, 
const fc::microseconds& max_serialization_time, bool short_path = false )const; diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index cff24929a6d..346fdf6e50a 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1230,7 +1230,7 @@ static fc::variant get_global_row( const database& db, const abi_def& abi, const vector data; read_only::copy_inline_row(*it, data); - return abis.binary_to_variant(abis.get_table_type(N(global)), data, abi_serializer_max_time_ms); + return abis.binary_to_variant(abis.get_table_type(N(global)), data, abi_serializer_max_time_ms, true ); // TODO: Allow configuration to output verbose error messages } read_only::get_producers_result read_only::get_producers( const read_only::get_producers_params& p ) const { @@ -1275,7 +1275,7 @@ read_only::get_producers_result read_only::get_producers( const read_only::get_p } copy_inline_row(*kv_index.find(boost::make_tuple(table_id->id, it->primary_key)), data); if (p.json) - result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(N(producers)), data, abi_serializer_max_time)); + result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(N(producers)), data, abi_serializer_max_time, true)); // TODO: Allow configuration to output verbose error messages else result.rows.emplace_back(fc::variant(data)); } @@ -1668,7 +1668,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); - result.total_resources = abis.binary_to_variant( "user_resources", data, abi_serializer_max_time ); + result.total_resources = abis.binary_to_variant( "user_resources", data, abi_serializer_max_time, true ); // TODO: Allow configuration to output verbose error messages } } @@ -1679,7 +1679,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; copy_inline_row(*it, 
data); - result.self_delegated_bandwidth = abis.binary_to_variant( "delegated_bandwidth", data, abi_serializer_max_time ); + result.self_delegated_bandwidth = abis.binary_to_variant( "delegated_bandwidth", data, abi_serializer_max_time, true ); // TODO: Allow configuration to output verbose error messages } } @@ -1690,7 +1690,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); - result.refund_request = abis.binary_to_variant( "refund_request", data, abi_serializer_max_time ); + result.refund_request = abis.binary_to_variant( "refund_request", data, abi_serializer_max_time, true ); // TODO: Allow configuration to output verbose error messages } } @@ -1701,7 +1701,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); - result.voter_info = abis.binary_to_variant( "voter_info", data, abi_serializer_max_time ); + result.voter_info = abis.binary_to_variant( "voter_info", data, abi_serializer_max_time, true ); // TODO: Allow configuration to output verbose error messages } } } @@ -1744,7 +1744,7 @@ read_only::abi_bin_to_json_result read_only::abi_bin_to_json( const read_only::a abi_def abi; if( abi_serializer::to_abi(code_account.abi, abi) ) { abi_serializer abis( abi, abi_serializer_max_time ); - result.args = abis.binary_to_variant( abis.get_action_type( params.action ), params.binargs, abi_serializer_max_time ); + result.args = abis.binary_to_variant( abis.get_action_type( params.action ), params.binargs, abi_serializer_max_time, true ); // TODO: Allow configuration to output verbose error messages } else { EOS_ASSERT(false, abi_not_found_exception, "No ABI found for ${contract}", ("contract", params.code)); } diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 
5a1f7fc3ff8..141c7703177 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -434,7 +434,7 @@ class read_only { copy_inline_row(*itr2, data); if (p.json) { - result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(p.table), data, abi_serializer_max_time)); + result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(p.table), data, abi_serializer_max_time, true)); // TODO: Allow configuration to output verbose error messages); } else { result.rows.emplace_back(fc::variant(data)); } @@ -495,7 +495,7 @@ class read_only { copy_inline_row(*itr, data); if (p.json) { - result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(p.table), data, abi_serializer_max_time)); + result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(p.table), data, abi_serializer_max_time, true)); // TODO: Allow configuration to output verbose error messages } else { result.rows.emplace_back(fc::variant(data)); } diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index dacce90cc74..c3b46d3f847 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -3647,7 +3647,8 @@ BOOST_AUTO_TEST_CASE(extend) verify_round_trip_conversion(abis, "s", R"([5,6,7,[8,9,10],31])", "0506070308090a011f", R"({"i0":5,"i1":6,"i2":7,"a":[8,9,10],"o":31})"); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"i0":1})"), max_serialization_time), - pack_exception, fc_exception_message_starts_with("Encountered field 'i2' without binary extension designation while processing struct") ); + abi_exception, fc_exception_message_starts_with("Encountered field 'i2' without binary extension designation while processing struct") ); + } FC_LOG_AND_RETHROW() } @@ -3909,4 +3910,85 @@ BOOST_AUTO_TEST_CASE(abi_serialize_short_error_messages) } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(abi_deserialize_detailed_error_messages) +{ + using 
eosio::testing::fc_exception_message_is; + + auto abi = R"({ + "version": "eosio::abi/1.1", + "types": [ + {"new_type_name": "oint", "type": "int8?"}, + {"new_type_name": "os1", "type": "s1?"} + ], + "structs": [ + {"name": "s1", "base": "", "fields": [ + {"name": "i0", "type": "int8"}, + {"name": "i1", "type": "int8"} + ]}, + {"name": "s2", "base": "", "fields": [ + {"name": "f0", "type": "int8[]"}, + {"name": "f1", "type": "s1[]"} + ]}, + {"name": "s3", "base": "s1", "fields": [ + {"name": "i3", "type": "int8"}, + {"name": "i4", "type": "int8$"}, + {"name": "i5", "type": "int8"} + ]}, + {"name": "s4", "base": "", "fields": [ + {"name": "f0", "type": "oint[]"} + ]}, + {"name": "s5", "base": "", "fields": [ + {"name": "f0", "type": "os1[]"}, + {"name": "f1", "type": "v1[]"}, + ]}, + {"name": "s6", "base": "", "fields": [ + ]}, + ], + "variants": [ + {"name": "v1", "types": ["int8", "s1"]}, + ], + })"; + + try { + abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("020102").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'f1' of struct 's2'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("0201020103").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'i1' of struct 's2.f1[0]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("020102ff").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack size of array 's2.f1'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s3", fc::variant("010203").as(), max_serialization_time), + abi_exception, fc_exception_message_is("Encountered field 'i5' without binary extension designation while processing struct 's3'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s3", 
fc::variant("02010304").as(), max_serialization_time), + abi_exception, fc_exception_message_is("Encountered field 'i5' without binary extension designation while processing struct 's3'") ); + + // This check actually points to a problem with the current abi_serializer. + // An array of optionals (which is unfortunately not rejected in validation) leads to an unpack_exception here because one of the optional elements is not present. + // However, abis.binary_to_variant("s4", fc::variant("03010101020103").as(), max_serialization_time) would work just fine! + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s4", fc::variant("030101000103").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Invalid packed array 's4.f0[1]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s4", fc::variant("020101").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack optional of built-in type 'int8' while processing 's4.f0[1]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("02010102").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack presence flag of optional 's5.f0[1]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("0001").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack tag of variant 's5.f1[0]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("00010501").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unpacked invalid tag (5) for variant 's5.f1[0]'") ); + + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("00010101").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'i1' of struct 's5.f1[0].'") ); + + } FC_LOG_AND_RETHROW() +} + BOOST_AUTO_TEST_SUITE_END() From 7a567f4b177b27e3a54fa6d0c6c9a9e9b0966ea6 Mon Sep 17 00:00:00 2001 From: arhag 
Date: Fri, 21 Sep 2018 17:11:26 -0400 Subject: [PATCH 039/161] chain_plugin API calls should only return verbose ABI serialization errors if verbose-http-errors configuration flag of the http_plugin is set to true. --- .../include/eosio/chain/abi_serializer.hpp | 2 ++ plugins/chain_api_plugin/chain_api_plugin.cpp | 5 ++++- plugins/chain_plugin/chain_plugin.cpp | 20 +++++++++---------- .../eosio/chain_plugin/chain_plugin.hpp | 7 +++++-- plugins/http_plugin/http_plugin.cpp | 4 ++++ .../include/eosio/http_plugin/http_plugin.hpp | 8 +++++--- 6 files changed, 30 insertions(+), 16 deletions(-) diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index 6d242746004..221424e041e 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -385,6 +385,7 @@ namespace impl { if (!type.empty()) { try { binary_to_variant_context _ctx(*abi, ctx, type); + _ctx.short_path = true; // Just to be safe while avoiding the complexity of threading an override boolean all over the place mvo( "data", abi->_binary_to_variant( type, act.data, _ctx )); mvo("hex_data", act.data); } catch(...) 
{ @@ -548,6 +549,7 @@ namespace impl { auto type = abi->get_action_type(act.name); if (!type.empty()) { variant_to_binary_context _ctx(*abi, ctx, type); + _ctx.short_path = true; // Just to be safe while avoiding the complexity of threading an override boolean all over the place act.data = std::move( abi->_variant_to_binary( type, data, _ctx )); valid_empty_data = act.data.empty(); } diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 31e576ae4d8..aad3a52e5e6 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -79,7 +79,10 @@ void chain_api_plugin::plugin_startup() { auto ro_api = app().get_plugin().get_read_only_api(); auto rw_api = app().get_plugin().get_read_write_api(); - app().get_plugin().add_api({ + auto& _http_plugin = app().get_plugin(); + ro_api.set_shorten_abi_errors( !_http_plugin.verbose_errors() ); + + _http_plugin.add_api({ CHAIN_RO_CALL(get_info, 200l), CHAIN_RO_CALL(get_block, 200), CHAIN_RO_CALL(get_block_header_state, 200), diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 346fdf6e50a..07c048114ff 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1217,7 +1217,7 @@ static float64_t to_softfloat64( double d ) { return *reinterpret_cast(&d); } -static fc::variant get_global_row( const database& db, const abi_def& abi, const abi_serializer& abis, const fc::microseconds& abi_serializer_max_time_ms ) { +fc::variant get_global_row( const database& db, const abi_def& abi, const abi_serializer& abis, const fc::microseconds& abi_serializer_max_time_ms, bool shorten_abi_errors ) { const auto table_type = get_table_type(abi, N(global)); EOS_ASSERT(table_type == read_only::KEYi64, chain::contract_table_query_exception, "Invalid table type ${type} for table global", ("type",table_type)); @@ -1230,7 +1230,7 @@ static fc::variant get_global_row( 
const database& db, const abi_def& abi, const vector data; read_only::copy_inline_row(*it, data); - return abis.binary_to_variant(abis.get_table_type(N(global)), data, abi_serializer_max_time_ms, true ); // TODO: Allow configuration to output verbose error messages + return abis.binary_to_variant(abis.get_table_type(N(global)), data, abi_serializer_max_time_ms, shorten_abi_errors ); } read_only::get_producers_result read_only::get_producers( const read_only::get_producers_params& p ) const { @@ -1275,12 +1275,12 @@ read_only::get_producers_result read_only::get_producers( const read_only::get_p } copy_inline_row(*kv_index.find(boost::make_tuple(table_id->id, it->primary_key)), data); if (p.json) - result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(N(producers)), data, abi_serializer_max_time, true)); // TODO: Allow configuration to output verbose error messages + result.rows.emplace_back( abis.binary_to_variant( abis.get_table_type(N(producers)), data, abi_serializer_max_time, shorten_abi_errors ) ); else result.rows.emplace_back(fc::variant(data)); } - result.total_producer_vote_weight = get_global_row(d, abi, abis, abi_serializer_max_time)["total_producer_vote_weight"].as_double(); + result.total_producer_vote_weight = get_global_row(d, abi, abis, abi_serializer_max_time, shorten_abi_errors)["total_producer_vote_weight"].as_double(); return result; } @@ -1668,7 +1668,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); - result.total_resources = abis.binary_to_variant( "user_resources", data, abi_serializer_max_time, true ); // TODO: Allow configuration to output verbose error messages + result.total_resources = abis.binary_to_variant( "user_resources", data, abi_serializer_max_time, shorten_abi_errors ); } } @@ -1679,7 +1679,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; 
copy_inline_row(*it, data); - result.self_delegated_bandwidth = abis.binary_to_variant( "delegated_bandwidth", data, abi_serializer_max_time, true ); // TODO: Allow configuration to output verbose error messages + result.self_delegated_bandwidth = abis.binary_to_variant( "delegated_bandwidth", data, abi_serializer_max_time, shorten_abi_errors ); } } @@ -1690,7 +1690,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); - result.refund_request = abis.binary_to_variant( "refund_request", data, abi_serializer_max_time, true ); // TODO: Allow configuration to output verbose error messages + result.refund_request = abis.binary_to_variant( "refund_request", data, abi_serializer_max_time, shorten_abi_errors ); } } @@ -1701,7 +1701,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); - result.voter_info = abis.binary_to_variant( "voter_info", data, abi_serializer_max_time, true ); // TODO: Allow configuration to output verbose error messages + result.voter_info = abis.binary_to_variant( "voter_info", data, abi_serializer_max_time, shorten_abi_errors ); } } } @@ -1727,7 +1727,7 @@ read_only::abi_json_to_bin_result read_only::abi_json_to_bin( const read_only::a auto action_type = abis.get_action_type(params.action); EOS_ASSERT(!action_type.empty(), action_validate_exception, "Unknown action ${action} in contract ${contract}", ("action", params.action)("contract", params.code)); try { - result.binargs = abis.variant_to_binary(action_type, params.args, abi_serializer_max_time, true); // TODO: Allow configuration to output verbose error messages + result.binargs = abis.variant_to_binary( action_type, params.args, abi_serializer_max_time, shorten_abi_errors ); } EOS_RETHROW_EXCEPTIONS(chain::invalid_action_args_exception, "'${args}' is invalid args for action '${action}' code 
'${code}'. expected '${proto}'", ("args", params.args)("action", params.action)("code", params.code)("proto", action_abi_to_variant(abi, action_type))) @@ -1744,7 +1744,7 @@ read_only::abi_bin_to_json_result read_only::abi_bin_to_json( const read_only::a abi_def abi; if( abi_serializer::to_abi(code_account.abi, abi) ) { abi_serializer abis( abi, abi_serializer_max_time ); - result.args = abis.binary_to_variant( abis.get_action_type( params.action ), params.binargs, abi_serializer_max_time, true ); // TODO: Allow configuration to output verbose error messages + result.args = abis.binary_to_variant( abis.get_action_type( params.action ), params.binargs, abi_serializer_max_time, shorten_abi_errors ); } else { EOS_ASSERT(false, abi_not_found_exception, "No ABI found for ${contract}", ("contract", params.code)); } diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 141c7703177..47fdf4b4ffd 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -68,6 +68,7 @@ uint64_t convert_to_type(const string& str, const string& desc); class read_only { const controller& db; const fc::microseconds abi_serializer_max_time; + bool shorten_abi_errors = true; public: static const string KEYi64; @@ -77,6 +78,8 @@ class read_only { void validate() const {} + void set_shorten_abi_errors( bool f ) { shorten_abi_errors = f; } + using get_info_params = empty; struct get_info_results { @@ -434,7 +437,7 @@ class read_only { copy_inline_row(*itr2, data); if (p.json) { - result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(p.table), data, abi_serializer_max_time, true)); // TODO: Allow configuration to output verbose error messages); + result.rows.emplace_back( abis.binary_to_variant( abis.get_table_type(p.table), data, abi_serializer_max_time, shorten_abi_errors ) ); } else { 
result.rows.emplace_back(fc::variant(data)); } @@ -495,7 +498,7 @@ class read_only { copy_inline_row(*itr, data); if (p.json) { - result.rows.emplace_back(abis.binary_to_variant(abis.get_table_type(p.table), data, abi_serializer_max_time, true)); // TODO: Allow configuration to output verbose error messages + result.rows.emplace_back( abis.binary_to_variant( abis.get_table_type(p.table), data, abi_serializer_max_time, shorten_abi_errors ) ); } else { result.rows.emplace_back(fc::variant(data)); } diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 6a380da0d03..d9be006bb45 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -606,4 +606,8 @@ namespace eosio { return (!my->listen_endpoint || my->listen_endpoint->address().is_loopback()); } + bool http_plugin::verbose_errors()const { + return verbose_http_errors; + } + } diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp index e78300c6240..7f9aedb01e4 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp @@ -60,12 +60,12 @@ namespace eosio { * called with the response code and body. * * The handler will be called from the appbase application io_service - * thread. The callback can be called from any thread and will + * thread. The callback can be called from any thread and will * automatically propagate the call to the http thread. * * The HTTP service will run in its own thread with its own io_service to * make sure that HTTP request processing does not interfer with other - * plugins. + * plugins. 
*/ class http_plugin : public appbase::plugin { @@ -85,7 +85,7 @@ namespace eosio { void add_handler(const string& url, const url_handler&); void add_api(const api_description& api) { - for (const auto& call : api) + for (const auto& call : api) add_handler(call.first, call.second); } @@ -95,6 +95,8 @@ namespace eosio { bool is_on_loopback() const; bool is_secure() const; + bool verbose_errors()const; + private: std::unique_ptr my; }; From ceba7162dddee7b386aa7c04d85fffe7f9f2bd35 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 21 Sep 2018 17:19:29 -0400 Subject: [PATCH 040/161] replace duplicated code in `abi_traverse_context::enter_scope` with call to `check_deadline()` --- libraries/chain/abi_serializer.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index a7370e3b1c1..639e2eb430d 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -543,7 +543,8 @@ namespace eosio { namespace chain { namespace impl { void abi_traverse_context::check_deadline()const { - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, + "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); } fc::scoped_exit> abi_traverse_context::enter_scope() { @@ -555,8 +556,7 @@ namespace eosio { namespace chain { EOS_ASSERT( recursion_depth < abi_serializer::max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", abi_serializer::max_recursion_depth) ); - EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, - "serialization time limit ${t}us exceeded", ("t", max_serialization_time) ); + check_deadline(); return {std::move(callback)}; } From 
396dc27fc62591a6fd40cabe1fa123fdf57fe455 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 21 Sep 2018 18:54:20 -0400 Subject: [PATCH 041/161] update fc submodule to include static_variant changes; potential fix to Ubuntu 16.04 build errors? --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 4dc8375d7d3..4dae7e0fff0 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 4dc8375d7d3e02ab1177ab5c22835f75b45c845a +Subproject commit 4dae7e0fff05647ec06972bbce03b16e17cae4ac From 9c3dbebf20525772831fe73554c60e55a46a4f06 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 21 Sep 2018 15:33:45 -0500 Subject: [PATCH 042/161] Fixed logCmdTransaction to correctly handle None transaction. GH #5674 --- tests/Node.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/Node.py b/tests/Node.py index 5abf1a20c1e..93dc48282ad 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1321,8 +1321,10 @@ def logCmdTransaction(trans, ignoreNonTrans=False): if trans is None: Utils.Print(" cmd returned transaction: %s" % (trans)) + return if ignoreNonTrans and not Node.isTrans(trans): + Utils.Print(" cmd returned a non-transaction") return transId=Node.getTransId(trans) From 471d18299dd44aaa7096f4b5f917c0ea4c5375d6 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 21 Sep 2018 15:35:32 -0500 Subject: [PATCH 043/161] Added waitForTransBlock flags to ensure subsequent actions didn't occur before an action that must proceed it. 
GH #5674 --- tests/nodeos_forked_chain_test.py | 4 ++-- tests/nodeos_under_min_avail_ram.py | 4 ++-- tests/nodeos_voting_test.py | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 66202561c28..7e14073b629 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -212,7 +212,7 @@ def getMinHeadAndLib(prodNodes): transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer") - trans=node.delegatebw(account, 20000000.0000, 20000000.0000, exitOnError=True) + trans=node.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=True, exitOnError=True) # *** vote using accounts *** @@ -222,7 +222,7 @@ def getMinHeadAndLib(prodNodes): index=0 for account in accounts: Print("Vote for producers=%s" % (producers)) - trans=prodNodes[index % len(prodNodes)].vote(account, producers) + trans=prodNodes[index % len(prodNodes)].vote(account, producers, waitForTransBlock=True) index+=1 diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index 99001a73158..7ef19ab25a2 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -126,7 +126,7 @@ def setName(self, num): transferAmount="70000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) nodes[0].transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer") - trans=nodes[0].delegatebw(account, 1000000.0000, 68000000.0000, exitOnError=True) + trans=nodes[0].delegatebw(account, 1000000.0000, 68000000.0000, waitForTransBlock=True, exitOnError=True) contractAccount=cluster.createAccountKeys(1)[0] contractAccount.name="contracttest" @@ -136,7 
+136,7 @@ def setName(self, num): transferAmount="90000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, contractAccount.name)) nodes[0].transferFunds(cluster.eosioAccount, contractAccount, transferAmount, "test transfer") - trans=nodes[0].delegatebw(contractAccount, 1000000.0000, 88000000.0000, exitOnError=True) + trans=nodes[0].delegatebw(contractAccount, 1000000.0000, 88000000.0000, waitForTransBlock=True, exitOnError=True) contractDir="contracts/integration_test" wasmFile="integration_test.wasm" diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index 20df85da346..a786846099f 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -216,7 +216,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer") - trans=node.delegatebw(account, 20000000.0000, 20000000.0000, exitOnError=True) + trans=node.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=True, exitOnError=True) # containers for tracking producers prodsActive={} @@ -229,7 +229,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): #first account will vote for node0 producers, all others will vote for node1 producers node=node0 for account in accounts: - trans=node.vote(account, node.producers) + trans=node.vote(account, node.producers, waitForTransBlock=True) node=node1 setActiveProducers(prodsActive, node1.producers) @@ -240,7 +240,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): # first account will vote for node2 producers, all others will vote for node3 producers node1 for account in accounts: - trans=node.vote(account, node.producers) + trans=node.vote(account, node.producers, 
waitForTransBlock=True) node=node2 setActiveProducers(prodsActive, node2.producers) From 8218a70f0cddf428bb75c818c10a7d0d867fb53e Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 24 Sep 2018 10:04:17 -0400 Subject: [PATCH 044/161] update fc submodule to latest master --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 4dae7e0fff0..aac546b4198 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 4dae7e0fff05647ec06972bbce03b16e17cae4ac +Subproject commit aac546b419891ef6644e0d99dba5e8d33f70401d From 63ce338ce8e5bc4cadfdc55dde8ec40f6d413274 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 24 Sep 2018 11:25:36 -0500 Subject: [PATCH 045/161] Cleanly exiting and reporting errors for wallet that fails to start. GH #5674 --- tests/WalletMgr.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index ea5ee394a1e..c1f9917879d 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -65,9 +65,11 @@ def launch(self): # Give keosd time to warm up time.sleep(2) - if Utils.Debug: + try: psOut=Utils.checkOutput(pgrepCmd.split()) - Utils.Print("Launched %s. %s - {%s}" % (Utils.EosWalletName, pgrepCmd, psOut)) + if Utils.Debug: Utils.Print("Launched %s. %s - {%s}" % (Utils.EosWalletName, pgrepCmd, psOut)) + except subprocess.CalledProcessError as ex: + Utils.errorExit("Failed to launch the wallet manager on") return True From 272a7e3102d2942e0247bb6cea4c52be97349f90 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 24 Sep 2018 11:26:57 -0500 Subject: [PATCH 046/161] Added waiting for transaction to make it in block to ensure all transactions end up being processed in correct sequence. 
GH #5674 --- tests/nodeos_forked_chain_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 7e14073b629..3e9b3aa0957 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -208,10 +208,10 @@ def getMinHeadAndLib(prodNodes): # create accounts via eosio as otherwise a bid is needed for account in accounts: Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name)) - trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True) + trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=True, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True) transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) - node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer") + node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True) trans=node.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=True, exitOnError=True) From e55c7cb70449911a4aded5fb66636b4197351b62 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 24 Sep 2018 11:52:30 -0500 Subject: [PATCH 047/161] Fixed processing for paths that only have transaction id instead of transaction. 
GH #5674 --- tests/Node.py | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 93dc48282ad..647e34d4174 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -326,8 +326,14 @@ def isBlockFinalized(self, blockNum): return self.isBlockPresent(blockNum, blockType=BlockType.lib) class BlockWalker: - def __init__(self, node, trans, startBlockNum=None, endBlockNum=None): - self.trans=trans + def __init__(self, node, transOrTransId, startBlockNum=None, endBlockNum=None): + assert(isinstance(transOrTransId, (str,dict))) + if isinstance(transOrTransId, str): + self.trans=None + self.transId=transOrTransId + else: + self.trans=transOrTransId + self.transId=Node.getTransId(trans) self.node=node self.startBlockNum=startBlockNum self.endBlockNum=endBlockNum @@ -335,18 +341,34 @@ def __init__(self, node, trans, startBlockNum=None, endBlockNum=None): def walkBlocks(self): start=None end=None - blockNum=self.trans["processed"]["action_traces"][0]["block_num"] + if self.trans is not None: + cntxt=Node.Context(self.trans, "trans") + cntxt.add("processed") + cntxt.add("action_traces") + cntxt.index(0) + blockNum=cntxt.add("block_num") + else: + blockNum=None # it should be blockNum or later, but just in case the block leading up have any clues... 
+ start=None if self.startBlockNum is not None: start=self.startBlockNum - else: + elif blockNum is not None: start=blockNum-5 if self.endBlockNum is not None: end=self.endBlockNum else: info=self.node.getInfo() end=info["head_block_num"] - msg="Original transaction=\n%s\nExpected block_num=%s\n" % (json.dumps(trans, indent=2, sort_keys=True), blockNum) + if start is None: + if end > 100: + start=end-100 + else: + start=0 + transDesc=" id =%s" % (self.transId) + if self.trans is not None: + transDesc="=%s" % (json.dumps(self.trans, indent=2, sort_keys=True)) + msg="Original transaction%s\nExpected block_num=%s\n" % (transDesc, blockNum) for blockNum in range(start, end+1): block=self.node.getBlock(blockNum) msg+=json.dumps(block, indent=2, sort_keys=True)+"\n" @@ -373,7 +395,7 @@ def getTransaction(self, transOrTransId, silentErrors=False, exitOnError=False, if trans is not None or not delayedRetry: return trans if blockWalker is None: - blockWalker=Node.BlockWalker(self, trans) + blockWalker=Node.BlockWalker(self, transOrTransId) if Utils.Debug: Utils.Print("Could not find transaction with id %s, delay and retry" % (transId)) time.sleep(timeout) From 87c9c521f1476154ee9c69e5dee5af50907250a2 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 24 Sep 2018 17:50:02 -0400 Subject: [PATCH 048/161] Kernel timer based checktime Use a kernel timer for checktime in lieu of the current polled approach. It's lower overhead. 
--- .../eosio/chain/transaction_context.hpp | 43 +++++++++++++++++++ libraries/chain/transaction_context.cpp | 9 ++++ 2 files changed, 52 insertions(+) diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 3175994dedd..572bbba275f 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -1,9 +1,50 @@ #pragma once #include #include +#include namespace eosio { namespace chain { + struct deadline_timer { + deadline_timer() { + if(initialized) + return; + struct sigaction act; + act.sa_handler = timer_expired; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + sigaction(SIGALRM, &act, NULL); + initialized = true; + } + + void start(fc::time_point tp) { + microseconds x = tp.time_since_epoch() - fc::time_point::now().time_since_epoch(); + if(x.count() < 18) + expired = 1; + else if(x.count() < 1000000) { + struct itimerval enable = {{0, 0}, {0, (int)x.count()-15}}; + expired = 0; + setitimer(ITIMER_REAL, &enable, NULL); + } + } + + void stop() { + struct itimerval disable = {{0, 0}, {0, 0}}; + setitimer(ITIMER_REAL, &disable, NULL); + } + + ~deadline_timer() { + stop(); + } + + static volatile sig_atomic_t expired; + private: + static void timer_expired(int) { + expired = 1; + } + static bool initialized; + }; + class transaction_context { private: void init( uint64_t initial_net_usage); @@ -108,6 +149,8 @@ namespace eosio { namespace chain { fc::time_point pseudo_start; fc::microseconds billed_time; fc::microseconds billing_timer_duration_limit; + + deadline_timer _deadline_timer; }; } } diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index dd58f0364ec..6887ba1a2bc 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -9,6 +9,9 @@ namespace eosio { namespace chain { + volatile sig_atomic_t 
deadline_timer::expired = 0; + bool deadline_timer::initialized = false; + transaction_context::transaction_context( controller& c, const signed_transaction& t, const transaction_id_type& trx_id, @@ -132,6 +135,8 @@ namespace eosio { namespace chain { checktime(); // Fail early if deadline has already been exceeded + _deadline_timer.start(_deadline); + is_initialized = true; } @@ -293,6 +298,8 @@ namespace eosio { namespace chain { void transaction_context::checktime()const { if (!control.skip_trx_checks()) { + if(BOOST_LIKELY(_deadline_timer.expired == false)) + return; auto now = fc::time_point::now(); if( BOOST_UNLIKELY( now > _deadline ) ) { // edump((now-start)(now-pseudo_start)); @@ -330,6 +337,7 @@ namespace eosio { namespace chain { billed_time = now - pseudo_start; deadline_exception_code = deadline_exception::code_value; // Other timeout exceptions cannot be thrown while billable timer is paused. pseudo_start = fc::time_point(); + _deadline_timer.stop(); } void transaction_context::resume_billing_timer() { @@ -344,6 +352,7 @@ namespace eosio { namespace chain { _deadline = deadline; deadline_exception_code = deadline_exception::code_value; } + _deadline_timer.start(_deadline); } void transaction_context::validate_cpu_usage_to_bill( int64_t billed_us, bool check_minimum )const { From 2f8e1b1ab7204609ed0710e015767186f9b74fa8 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 24 Sep 2018 17:43:28 -0500 Subject: [PATCH 049/161] Changed logCmdTransaction to trackCmdTransaction and added a dictionary cache for transaction id to initial transaction lookup. 
GH #5674 --- tests/Cluster.py | 2 +- tests/Node.py | 39 +++++++++++++++++++++------------------ 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index ceea15803bb..f09978dcc9a 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -840,7 +840,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): with open(Cluster.__bootlog) as bootFile: for line in bootFile: if p.search(line): - Utils.Print("ERROR: bios_boot.sh script resulted in errors. See %s" % (bootlog)) + Utils.Print("ERROR: bios_boot.sh script resulted in errors. See %s" % (Cluster.__bootlog)) Utils.Print(line) return None diff --git a/tests/Node.py b/tests/Node.py index 647e34d4174..3eb6b40aaf2 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -49,6 +49,7 @@ def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost= self.infoValid=None self.lastRetrievedHeadBlockNum=None self.lastRetrievedLIB=None + self.transCache={} if self.enableMongo: self.mongoEndpointArgs += "--host %s --port %d %s" % (mongoHost, mongoPort, mongoDb) @@ -565,7 +566,7 @@ def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, w account.activePublicKey, stakeNet, CORE_SYMBOL, stakeCPU, CORE_SYMBOL, buyRAM, CORE_SYMBOL) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) - Node.logCmdTransaction(trans) + self.trackCmdTransaction(trans) transId=Node.getTransId(trans) if stakedDeposit > 0: @@ -583,13 +584,13 @@ def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTran cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) - Node.logCmdTransaction(trans) + 
self.trackCmdTransaction(trans) transId=Node.getTransId(trans) if stakedDeposit > 0: self.waitForTransInBlock(transId) # seems like account creation needs to be finlized before transfer can happen trans = self.transferFunds(creatorAccount, account, "%0.04f %s" % (stakedDeposit/10000, CORE_SYMBOL), "init") - Node.logCmdTransaction(trans) + self.trackCmdTransaction(trans) transId=Node.getTransId(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -740,7 +741,7 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False trans=None try: trans=Utils.runCmdArrReturnJson(cmdArr) - Node.logCmdTransaction(trans) + self.trackCmdTransaction(trans) except subprocess.CalledProcessError as ex: msg=ex.output.decode("utf-8") Utils.Print("ERROR: Exception during funds transfer. %s" % (msg)) @@ -922,7 +923,7 @@ def publishContract(self, account, contractDir, wasmFile, abiFile, waitForTransB trans=None try: trans=Utils.runCmdReturnJson(cmd, trace=False) - Node.logCmdTransaction(trans) + self.trackCmdTransaction(trans) except subprocess.CalledProcessError as ex: if not shouldFail: msg=ex.output.decode("utf-8") @@ -980,7 +981,7 @@ def pushMessage(self, account, action, data, opts, silentErrors=False): if Utils.Debug: Utils.Print("cmd: %s" % (cmdArr)) try: trans=Utils.runCmdArrReturnJson(cmdArr) - Node.logCmdTransaction(trans, ignoreNonTrans=True) + self.trackCmdTransaction(trans, ignoreNonTrans=True) return (True, trans) except subprocess.CalledProcessError as ex: msg=ex.output.decode("utf-8") @@ -992,7 +993,7 @@ def setPermission(self, account, code, pType, requirement, waitForTransBlock=Fal cmdDesc="set action permission" cmd="%s -j %s %s %s %s" % (cmdDesc, account, code, pType, requirement) trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError) - Node.logCmdTransaction(trans) + self.trackCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, 
exitOnError=exitOnError) @@ -1006,7 +1007,7 @@ def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, tran cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr) msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) - Node.logCmdTransaction(trans) + self.trackCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -1016,7 +1017,7 @@ def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnEr cmdDesc, producer.name, producer.activePublicKey, url, location) msg="producer=%s" % (producer.name); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) - Node.logCmdTransaction(trans) + self.trackCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -1026,7 +1027,7 @@ def vote(self, account, producers, waitForTransBlock=False, exitOnError=False): cmdDesc, account.name, " ".join(producers)) msg="account=%s, producers=[ %s ]" % (account.name, ", ".join(producers)); trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) - Node.logCmdTransaction(trans) + self.trackCmdTransaction(trans) return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) @@ -1336,23 +1337,25 @@ def isNodeAlive(): self.killed=False return True - @staticmethod - def logCmdTransaction(trans, ignoreNonTrans=False): - if not Utils.Debug: - return - + def trackCmdTransaction(self, trans, ignoreNonTrans=False): if trans is None: - Utils.Print(" cmd returned transaction: %s" % (trans)) + if Utils.Debug: Utils.Print(" cmd returned transaction: %s" % (trans)) return if ignoreNonTrans and not Node.isTrans(trans): - Utils.Print(" cmd returned a non-transaction") + if Utils.Debug: Utils.Print(" cmd returned a non-transaction") return 
transId=Node.getTransId(trans) status=Node.getTransStatus(trans) blockNum=Node.getTransBlockNum(trans) - Utils.Print(" cmd returned transaction id: %s, status: %s, (possible) block num: %s" % (transId, status, blockNum)) + if Utils.Debug: + if transId in self.transCache.keys(): + replaceMsg="replacing previous trans=\n%s" % json.dumps(self.transCache[transId], indent=2, sort_keys=True) + else: + replaceMsg="" + Utils.Print(" cmd returned transaction id: %s, status: %s, (possible) block num: %s" % (transId, status, blockNum)) + self.transCache[transId]=trans def reportStatus(self): Utils.Print("Node State:") From 60f4b01c4d2ab32cc2cc5aa50ec98100f9aa2210 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 24 Sep 2018 17:47:32 -0500 Subject: [PATCH 050/161] Changed back to only use transaction id for Node.getTransaction and NOde.getBlockIdByTransId and using transCache to get information for BlockWalker. GH #5674 --- tests/Node.py | 50 +++++++------------ ...onsensus-validation-malicious-producers.py | 2 +- tests/launcher_test.py | 2 +- tests/nodeos_run_test.py | 4 +- 4 files changed, 23 insertions(+), 35 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 3eb6b40aaf2..abab85f15c9 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -327,14 +327,10 @@ def isBlockFinalized(self, blockNum): return self.isBlockPresent(blockNum, blockType=BlockType.lib) class BlockWalker: - def __init__(self, node, transOrTransId, startBlockNum=None, endBlockNum=None): - assert(isinstance(transOrTransId, (str,dict))) - if isinstance(transOrTransId, str): - self.trans=None - self.transId=transOrTransId - else: - self.trans=transOrTransId - self.transId=Node.getTransId(trans) + def __init__(self, node, transId, startBlockNum=None, endBlockNum=None): + assert(isinstance(transId, str)) + self.trans=None + self.transId=transId self.node=node self.startBlockNum=startBlockNum self.endBlockNum=endBlockNum @@ -342,6 +338,8 @@ def __init__(self, node, transOrTransId, startBlockNum=None, 
endBlockNum=None): def walkBlocks(self): start=None end=None + if self.trans is None and self.transId in self.transCache: + self.trans=self.transCache[self.transId] if self.trans is not None: cntxt=Node.Context(self.trans, "trans") cntxt.add("processed") @@ -374,16 +372,11 @@ def walkBlocks(self): block=self.node.getBlock(blockNum) msg+=json.dumps(block, indent=2, sort_keys=True)+"\n" + return msg + # pylint: disable=too-many-branches - def getTransaction(self, transOrTransId, silentErrors=False, exitOnError=False, delayedRetry=True): - transId=None - trans=None - assert(isinstance(transOrTransId, (str,dict))) - if isinstance(transOrTransId, str): - transId=transOrTransId - else: - trans=transOrTransId - transId=Node.getTransId(trans) + def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayedRetry=True): + assert(isinstance(transId, str)) exitOnErrorForDelayed=not delayedRetry and exitOnError timeout=3 blockWalker=None @@ -396,7 +389,7 @@ def getTransaction(self, transOrTransId, silentErrors=False, exitOnError=False, if trans is not None or not delayedRetry: return trans if blockWalker is None: - blockWalker=Node.BlockWalker(self, transOrTransId) + blockWalker=Node.BlockWalker(self, transId) if Utils.Debug: Utils.Print("Could not find transaction with id %s, delay and retry" % (transId)) time.sleep(timeout) @@ -467,16 +460,11 @@ def isTransInBlock(self, transId, blockId): return False - def getBlockIdByTransId(self, transOrTransId, delayedRetry=True): - """Given a transaction (dictionary) or transaction Id (string), will return the actual block id (int) containing the transaction""" - assert(transOrTransId) - transId=None - assert(isinstance(transOrTransId, (str,dict))) - if isinstance(transOrTransId, str): - transId=transOrTransId - else: - transId=Node.getTransId(transOrTransId) - trans=self.getTransaction(transOrTransId, exitOnError=True, delayedRetry=delayedRetry) + def getBlockIdByTransId(self, transId, delayedRetry=True): + """Given a 
transaction Id (string), will return the actual block id (int) containing the transaction""" + assert(transId) + assert(isinstance(transId, str)) + trans=self.getTransaction(transId, exitOnError=True, delayedRetry=delayedRetry) refBlockNum=None key="" @@ -1347,14 +1335,14 @@ def trackCmdTransaction(self, trans, ignoreNonTrans=False): return transId=Node.getTransId(trans) - status=Node.getTransStatus(trans) - blockNum=Node.getTransBlockNum(trans) if Utils.Debug: + status=Node.getTransStatus(trans) + blockNum=Node.getTransBlockNum(trans) if transId in self.transCache.keys(): replaceMsg="replacing previous trans=\n%s" % json.dumps(self.transCache[transId], indent=2, sort_keys=True) else: replaceMsg="" - Utils.Print(" cmd returned transaction id: %s, status: %s, (possible) block num: %s" % (transId, status, blockNum)) + Utils.Print(" cmd returned transaction id: %s, status: %s, (possible) block num: %s %s" % (transId, status, blockNum, replaceMsg)) self.transCache[transId]=trans def reportStatus(self): diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py index 52e1bc27bcf..c92acfde04d 100755 --- a/tests/consensus-validation-malicious-producers.py +++ b/tests/consensus-validation-malicious-producers.py @@ -328,7 +328,7 @@ def myTest(transWillEnterBlock): return False Print("Get details for transaction %s" % (transId)) - transaction=node2.getTransaction(trans[1], exitOnError=True) + transaction=node2.getTransaction(transId, exitOnError=True) signature=transaction["transaction"]["signatures"][0] blockNum=int(transaction["transaction"]["ref_block_num"]) diff --git a/tests/launcher_test.py b/tests/launcher_test.py index 65d963c9d2e..999bd33d9af 100755 --- a/tests/launcher_test.py +++ b/tests/launcher_test.py @@ -191,7 +191,7 @@ node.waitForTransInBlock(transId) - transaction=node.getTransaction(trans, exitOnError=True, delayedRetry=False) + transaction=node.getTransaction(transId, exitOnError=True, 
delayedRetry=False) typeVal=None amountVal=None diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index aab17129c54..ad674dea5c8 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -282,7 +282,7 @@ node.waitForTransInBlock(transId) - transaction=node.getTransaction(trans, exitOnError=True, delayedRetry=False) + transaction=node.getTransaction(transId, exitOnError=True, delayedRetry=False) typeVal=None amountVal=None @@ -467,7 +467,7 @@ raise Print("Test for block decoded packed transaction (issue 2932)") - blockId=node.getBlockIdByTransId(trans[1]) + blockId=node.getBlockIdByTransId(transId) assert(blockId) block=node.getBlock(blockId, exitOnError=True) From 7f042eb1837ef0f5f3098703594258a13da9a1f5 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 24 Sep 2018 18:11:21 -0500 Subject: [PATCH 051/161] Fixed peer review error. GH #5674 --- tests/Node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index abab85f15c9..c7e348efdcf 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -338,7 +338,7 @@ def __init__(self, node, transId, startBlockNum=None, endBlockNum=None): def walkBlocks(self): start=None end=None - if self.trans is None and self.transId in self.transCache: + if self.trans is None and self.transId in self.transCache.keys(): self.trans=self.transCache[self.transId] if self.trans is not None: cntxt=Node.Context(self.trans, "trans") From da7fc221137f449abd488b6ea3e879363066b0a1 Mon Sep 17 00:00:00 2001 From: Paul Calabrese Date: Tue, 25 Sep 2018 11:43:34 -0500 Subject: [PATCH 052/161] Add specific-nodeos options to bounce command --- programs/eosio-launcher/main.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 73cba43f337..3895b0c0c83 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -1707,6 +1707,13 @@ launcher_def::bounce (const string& 
node_numbers) { const string node_num = node.get_node_num(); cout << "Bouncing " << node.name << endl; string cmd = "./scripts/eosio-tn_bounce.sh " + eosd_extra_args; + if (node_num != "bios" && !specific_nodeos_args.empty()) { + const auto node_num_i = boost::lexical_cast(node_num); + if (specific_nodeos_args.count(node_num_i)) { + cmd += " " + specific_nodeos_args[node_num_i]; + } + } + do_command(host, node.name, { { "EOSIO_HOME", host.eosio_home }, { "EOSIO_NODE", node_num } }, cmd); } } From b6d889582619dfbdaf88094ee76f3ff31d6e7133 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 25 Sep 2018 22:32:07 -0400 Subject: [PATCH 053/161] remove binaryen runtime from nodeos --- CMakeModules/EosioTester.cmake.in | 2 - CMakeModules/EosioTesterBuild.cmake.in | 2 - libraries/chain/CMakeLists.txt | 4 +- .../include/eosio/chain/wasm_interface.hpp | 3 +- .../eosio/chain/wasm_interface_private.hpp | 4 - .../eosio/chain/webassembly/binaryen.hpp | 701 ------------------ libraries/chain/wasm_interface.cpp | 2 - libraries/chain/webassembly/binaryen.cpp | 105 --- .../testing/include/eosio/testing/tester.hpp | 4 +- libraries/testing/tester.cpp | 6 +- plugins/chain_plugin/chain_plugin.cpp | 2 +- unittests/CMakeLists.txt | 5 +- unittests/whitelist_blacklist_tests.cpp | 6 +- 13 files changed, 7 insertions(+), 839 deletions(-) delete mode 100644 libraries/chain/include/eosio/chain/webassembly/binaryen.hpp delete mode 100644 libraries/chain/webassembly/binaryen.cpp diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index d2ca3afdf41..11289731508 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -47,7 +47,6 @@ else() find_library(libfc fc @CMAKE_INSTALL_FULL_LIBDIR@) endif() -find_library(libbinaryen binaryen @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libwasm WASM @CMAKE_INSTALL_FULL_LIBDIR@) find_library(libwast WAST @CMAKE_INSTALL_FULL_LIBDIR@) 
find_library(libwabt wabt @CMAKE_INSTALL_FULL_LIBDIR@) @@ -75,7 +74,6 @@ macro(add_eosio_test test_name) ${libtester} ${libchain} ${libfc} - ${libbinaryen} ${libwast} ${libwasm} ${libwabt} diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index fecd6c081ca..63fc7220aa0 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -47,7 +47,6 @@ else() find_library(libfc fc @CMAKE_BINARY_DIR@/libraries/fc) endif() -find_library(libbinaryen binaryen @CMAKE_BINARY_DIR@/externals/binaryen/lib) find_library(libwasm WASM @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/WASM) find_library(libwast WAST @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/WAST) find_library(libir IR @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/IR) @@ -75,7 +74,6 @@ macro(add_eosio_test test_name) ${libtester} ${libchain} ${libfc} - ${libbinaryen} ${libwast} ${libwasm} ${libwabt} diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index cf4c1be184d..8d7d9a775c2 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -37,7 +37,6 @@ add_library( eosio_chain asset.cpp webassembly/wavm.cpp - webassembly/binaryen.cpp webassembly/wabt.cpp # get_config.cpp @@ -51,12 +50,11 @@ add_library( eosio_chain ) target_link_libraries( eosio_chain eos_utilities fc chainbase Logging IR WAST WASM Runtime - wasm asmjs passes cfg ast emscripten-optimizer support softfloat builtins wabt + softfloat builtins wabt ) target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../wasm-jit/Include" - "${CMAKE_CURRENT_SOURCE_DIR}/../../externals/binaryen/src" "${CMAKE_SOURCE_DIR}/libraries/wabt" "${CMAKE_BINARY_DIR}/libraries/wabt" ) diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 17ac03fddfe..974ee92e5ac 100644 --- 
a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -53,7 +53,6 @@ namespace eosio { namespace chain { public: enum class vm_type { wavm, - binaryen, wabt }; @@ -77,4 +76,4 @@ namespace eosio{ namespace chain { std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime); }} -FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wavm)(binaryen)(wabt) ) +FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wavm)(wabt) ) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index df28d79a21b..c3af34d79ea 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -27,8 +26,6 @@ namespace eosio { namespace chain { wasm_interface_impl(wasm_interface::vm_type vm) { if(vm == wasm_interface::vm_type::wavm) runtime_interface = std::make_unique(); - else if(vm == wasm_interface::vm_type::binaryen) - runtime_interface = std::make_unique(); else if(vm == wasm_interface::vm_type::wabt) runtime_interface = std::make_unique(); else @@ -98,7 +95,6 @@ namespace eosio { namespace chain { #define _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ _REGISTER_WAVM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ - _REGISTER_BINARYEN_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ _REGISTER_WABT_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) #define _REGISTER_INTRINSIC4(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ diff --git a/libraries/chain/include/eosio/chain/webassembly/binaryen.hpp b/libraries/chain/include/eosio/chain/webassembly/binaryen.hpp deleted file mode 100644 index 51e908edbbd..00000000000 --- a/libraries/chain/include/eosio/chain/webassembly/binaryen.hpp +++ /dev/null @@ -1,701 +0,0 @@ -#pragma once - -#include -#include 
-#include -#include -#include -#include - - -namespace eosio { namespace chain { namespace webassembly { namespace binaryen { - -using namespace fc; -using namespace wasm; -using namespace eosio::chain::webassembly::common; - - -using linear_memory_type = fc::array; -using call_indirect_table_type = vector; - -struct interpreter_interface; - -struct intrinsic_registrator { - using intrinsic_fn = Literal(*)(interpreter_interface*, LiteralList&); - - static auto& get_map(){ - static map _map; - return _map; - }; - - intrinsic_registrator(const char* name, intrinsic_fn fn) - { - get_map()[string(name)] = fn; - } -}; - -using import_lut_type = unordered_map; - - -struct interpreter_interface : ModuleInstance::ExternalInterface { - interpreter_interface(linear_memory_type& memory, call_indirect_table_type& table, import_lut_type& import_lut, const unsigned& initial_memory_size, apply_context& context) - :memory(memory),table(table),import_lut(import_lut), current_memory_size(initial_memory_size), context(context) - {} - - void importGlobals(std::map& globals, Module& wasm) override - { - - } - - void init(Module& wasm, ModuleInstance& instance) override { - - } - - Literal callImport(Import *import, LiteralList &args) override - { - auto fn_iter = import_lut.find((uintptr_t)import); - EOS_ASSERT(fn_iter != import_lut.end(), wasm_execution_error, "unknown import ${m}:${n}", ("m", import->module.c_str())("n", import->module.c_str())); - return fn_iter->second(this, args); - } - - Literal callTable(Index index, LiteralList& arguments, WasmType result, ModuleInstance& instance) override - { - EOS_ASSERT(index < table.size(), wasm_execution_error, "callIndirect: bad pointer"); - auto* func = instance.wasm.getFunctionOrNull(table[index]); - EOS_ASSERT(func, wasm_execution_error, "callIndirect: uninitialized element"); - EOS_ASSERT(func->params.size() == arguments.size(), wasm_execution_error, "callIndirect: bad # of arguments"); - - for (size_t i = 0; i < func->params.size(); 
i++) { - EOS_ASSERT(func->params[i] == arguments[i].type, wasm_execution_error, "callIndirect: bad argument type"); - } - EOS_ASSERT(func->result == result, wasm_execution_error, "callIndirect: bad result type"); - return instance.callFunctionInternal(func->name, arguments); - } - - void trap(const char* why) override { - FC_THROW_EXCEPTION(wasm_execution_error, why); - } - - void assert_memory_is_accessible(uint32_t offset, uint32_t size) { - EOS_ASSERT(offset + size <= current_memory_size && offset + size >= offset, - wasm_execution_error, "access violation"); - } - - char* get_validated_pointer(uint32_t offset, uint32_t size) { - assert_memory_is_accessible(offset, size); - return memory.data + offset; - } - - template - static bool aligned_for(const void* address) { - return 0 == (reinterpret_cast(address) & (std::alignment_of::value - 1)); - } - - template - T load_memory(uint32_t offset) { - char *base = get_validated_pointer(offset, sizeof(T)); - if (aligned_for(base)) { - return *reinterpret_cast(base); - } else { - T temp; - memcpy(&temp, base, sizeof(T)); - return temp; - } - } - - template - void store_memory(uint32_t offset, T value) { - char *base = get_validated_pointer(offset, sizeof(T)); - if (aligned_for(base)) { - *reinterpret_cast(base) = value; - } else { - memcpy(base, &value, sizeof(T)); - } - } - - void growMemory(Address old_size, Address new_size) override { - memset(memory.data + old_size.addr, 0, new_size.addr - old_size.addr); - current_memory_size += new_size.addr - old_size.addr; - } - - int8_t load8s(Address addr) override { return load_memory(addr); } - uint8_t load8u(Address addr) override { return load_memory(addr); } - int16_t load16s(Address addr) override { return load_memory(addr); } - uint16_t load16u(Address addr) override { return load_memory(addr); } - int32_t load32s(Address addr) override { return load_memory(addr); } - uint32_t load32u(Address addr) override { return load_memory(addr); } - int64_t load64s(Address addr) 
override { return load_memory(addr); } - uint64_t load64u(Address addr) override { return load_memory(addr); } - - void store8(Address addr, int8_t value) override { store_memory(addr, value); } - void store16(Address addr, int16_t value) override { store_memory(addr, value); } - void store32(Address addr, int32_t value) override { store_memory(addr, value); } - void store64(Address addr, int64_t value) override { store_memory(addr, value); } - - linear_memory_type& memory; - call_indirect_table_type& table; - import_lut_type& import_lut; - unsigned current_memory_size; - apply_context& context; -}; - -class binaryen_runtime : public eosio::chain::wasm_runtime_interface { - public: - binaryen_runtime(); - std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) override; - - private: - linear_memory_type _memory __attribute__ ((aligned (4096))); -}; - -/** - * class to represent an in-wasm-memory array - * it is a hint to the transcriber that the next parameter will - * be a size (data bytes length) and that the pair are validated together - * This triggers the template specialization of intrinsic_invoker_impl - * @tparam T - */ -template -inline array_ptr array_ptr_impl (interpreter_interface* interface, uint32_t ptr, uint32_t length) -{ - EOS_ASSERT( length < INT_MAX/(uint32_t)sizeof(T), binaryen_exception, "length will overflow" ); - return array_ptr((T*)(interface->get_validated_pointer(ptr, length * (uint32_t)sizeof(T)))); -} - -/** - * class to represent an in-wasm-memory char array that must be null terminated - */ -inline null_terminated_ptr null_terminated_ptr_impl(interpreter_interface* interface, uint32_t ptr) -{ - char *value = interface->get_validated_pointer(ptr, 1); - const char* p = value; - const char* const top_of_memory = interface->memory.data + interface->current_memory_size; - while(p < top_of_memory) - if(*p++ == '\0') - return null_terminated_ptr(value); - - 
FC_THROW_EXCEPTION(wasm_execution_error, "unterminated string"); -} - - -template -struct is_reference_from_value { - static constexpr bool value = false; -}; - -template<> -struct is_reference_from_value { - static constexpr bool value = true; -}; - -template<> -struct is_reference_from_value { - static constexpr bool value = true; -}; - -template -constexpr bool is_reference_from_value_v = is_reference_from_value::value; - -template -T convert_literal_to_native(Literal& v); - -template<> -inline double convert_literal_to_native(Literal& v) { - return v.getf64(); -} - -template<> -inline float convert_literal_to_native(Literal& v) { - return v.getf32(); -} - -template<> -inline int64_t convert_literal_to_native(Literal& v) { - return v.geti64(); -} - -template<> -inline uint64_t convert_literal_to_native(Literal& v) { - return v.geti64(); -} - -template<> -inline int32_t convert_literal_to_native(Literal& v) { - return v.geti32(); -} - -template<> -inline uint32_t convert_literal_to_native(Literal& v) { - return v.geti32(); -} - -template<> -inline bool convert_literal_to_native(Literal& v) { - return v.geti32(); -} - -template<> -inline name convert_literal_to_native(Literal& v) { - int64_t val = v.geti64(); - return name(val); -} - -template -inline auto convert_native_to_literal(const interpreter_interface*, T val) { - return Literal(val); -} - -inline auto convert_native_to_literal(const interpreter_interface*, const name &val) { - return Literal(val.value); -} - -inline auto convert_native_to_literal(const interpreter_interface*, const fc::time_point_sec &val) { - return Literal(val.sec_since_epoch()); -} - -inline auto convert_native_to_literal(const interpreter_interface* interface, char* ptr) { - const char* base = interface->memory.data; - const char* top_of_memory = base + interface->current_memory_size; - EOS_ASSERT(ptr >= base && ptr < top_of_memory, wasm_execution_error, "returning pointer not in linear memory"); - return Literal((int)(ptr - base)); 
-} - -struct void_type { -}; - -/** - * Forward declaration of the invoker type which transcribes arguments to/from a native method - * and injects the appropriate checks - * - * @tparam Ret - the return type of the native function - * @tparam NativeParameters - a std::tuple of the remaining native parameters to transcribe - * @tparam WasmParameters - a std::tuple of the transribed parameters - */ -template -struct intrinsic_invoker_impl; - -/** - * Specialization for the fully transcribed signature - * @tparam Ret - the return type of the native function - */ -template -struct intrinsic_invoker_impl> { - using next_method_type = Ret (*)(interpreter_interface*, LiteralList&, int); - - template - static Literal invoke(interpreter_interface* interface, LiteralList& args) { - return convert_native_to_literal(interface, Method(interface, args, args.size() - 1)); - } - - template - static const auto fn() { - return invoke; - } -}; - -/** - * specialization of the fully transcribed signature for void return values - * @tparam Translated - the arguments to the wasm function - */ -template<> -struct intrinsic_invoker_impl> { - using next_method_type = void_type (*)(interpreter_interface*, LiteralList&, int); - - template - static Literal invoke(interpreter_interface* interface, LiteralList& args) { - Method(interface, args, args.size() - 1); - return Literal(); - } - - template - static const auto fn() { - return invoke; - } -}; - -/** - * Sepcialization for transcribing a simple type in the native method signature - * @tparam Ret - the return type of the native method - * @tparam Input - the type of the native parameter to transcribe - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret (*)(interpreter_interface*, Input, Inputs..., LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, Inputs... 
rest, LiteralList& args, int offset) { - auto& last = args.at(offset); - auto native = convert_literal_to_native(last); - return Then(interface, native, rest..., args, (uint32_t)offset - 1); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a array_ptr type in the native method signature - * This type transcribes into 2 wasm parameters: a pointer and byte length and checks the validity of that memory - * range before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl, size_t, Inputs...>> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret(*)(interpreter_interface*, array_ptr, size_t, Inputs..., LiteralList&, int); - - template - static auto translate_one(interpreter_interface* interface, Inputs... rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - static_assert(!std::is_pointer::value, "Currently don't support array of pointers"); - uint32_t ptr = args.at((uint32_t)offset - 1).geti32(); - size_t length = args.at((uint32_t)offset).geti32(); - T* base = array_ptr_impl(interface, ptr, length); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of const values" ); - std::vector > copy(length > 0 ? length : 1); - T* copy_ptr = ©[0]; - memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); - return Then(interface, static_cast>(copy_ptr), length, rest..., args, (uint32_t)offset - 2); - } - return Then(interface, static_cast>(base), length, rest..., args, (uint32_t)offset - 2); - }; - - template - static auto translate_one(interpreter_interface* interface, Inputs... 
rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - static_assert(!std::is_pointer::value, "Currently don't support array of pointers"); - uint32_t ptr = args.at((uint32_t)offset - 1).geti32(); - size_t length = args.at((uint32_t)offset).geti32(); - T* base = array_ptr_impl(interface, ptr, length); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned array of values" ); - std::vector > copy(length > 0 ? length : 1); - T* copy_ptr = ©[0]; - memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); - Ret ret = Then(interface, static_cast>(copy_ptr), length, rest..., args, (uint32_t)offset - 2); - memcpy( (void*)base, (void*)copy_ptr, length * sizeof(T) ); - return ret; - } - return Then(interface, static_cast>(base), length, rest..., args, (uint32_t)offset - 2); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a null_terminated_ptr type in the native method signature - * This type transcribes 1 wasm parameters: a char pointer which is validated to contain - * a null value before the end of the allocated memory. - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret(*)(interpreter_interface*, null_terminated_ptr, Inputs..., LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, Inputs... 
rest, LiteralList& args, int offset) { - uint32_t ptr = args.at((uint32_t)offset).geti32(); - return Then(interface, null_terminated_ptr_impl(interface, ptr), rest..., args, (uint32_t)offset - 1); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a pair of array_ptr types in the native method signature that share size - * This type transcribes into 3 wasm parameters: 2 pointers and byte length and checks the validity of those memory - * ranges before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl, array_ptr, size_t, Inputs...>> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret(*)(interpreter_interface*, array_ptr, array_ptr, size_t, Inputs..., LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, Inputs... 
rest, LiteralList& args, int offset) { - uint32_t ptr_t = args.at((uint32_t)offset - 2).geti32(); - uint32_t ptr_u = args.at((uint32_t)offset - 1).geti32(); - size_t length = args.at((uint32_t)offset).geti32(); - static_assert(std::is_same, char>::value && std::is_same, char>::value, "Currently only support array of (const)chars"); - return Then(interface, array_ptr_impl(interface, ptr_t, length), array_ptr_impl(interface, ptr_u, length), length, args, (uint32_t)offset - 3); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing memset parameters - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl, int, size_t>> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret(*)(interpreter_interface*, array_ptr, int, size_t, LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, LiteralList& args, int offset) { - uint32_t ptr = args.at((uint32_t)offset - 2).geti32(); - uint32_t value = args.at((uint32_t)offset - 1).geti32(); - size_t length = args.at((uint32_t)offset).geti32(); - return Then(interface, array_ptr_impl(interface, ptr, length), value, length, args, (uint32_t)offset - 3); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a pointer type in the native method signature - * This type transcribes into an int32 pointer checks the validity of that memory - * range before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret (*)(interpreter_interface*, T *, Inputs..., LiteralList&, int); - - template - static auto 
translate_one(interpreter_interface* interface, Inputs... rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - uint32_t ptr = args.at((uint32_t)offset).geti32(); - T* base = array_ptr_impl(interface, ptr, 1); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned const pointer" ); - std::remove_const_t copy; - T* copy_ptr = © - memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); - return Then(interface, copy_ptr, rest..., args, (uint32_t)offset - 1); - } - return Then(interface, base, rest..., args, (uint32_t)offset - 1); - }; - - template - static auto translate_one(interpreter_interface* interface, Inputs... rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - uint32_t ptr = args.at((uint32_t)offset).geti32(); - T* base = array_ptr_impl(interface, ptr, 1); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned pointer" ); - T copy; - memcpy( (void*)©, (void*)base, sizeof(T) ); - Ret ret = Then(interface, ©, rest..., args, (uint32_t)offset - 1); - memcpy( (void*)base, (void*)©, sizeof(T) ); - return ret; - } - return Then(interface, base, rest..., args, (uint32_t)offset - 1); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a reference to a name which can be passed as a native value - * This type transcribes into a native type which is loaded by value into a - * variable on the stack and then passed by reference to the intrinsic. - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret (*)(interpreter_interface*, const name&, Inputs..., LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, Inputs... 
rest, LiteralList& args, int offset) { - uint64_t wasm_value = args.at((uint32_t)offset).geti64(); - auto value = name(wasm_value); - return Then(interface, value, rest..., args, (uint32_t)offset - 1); - } - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a reference to a fc::time_point_sec which can be passed as a native value - * This type transcribes into a native type which is loaded by value into a - * variable on the stack and then passed by reference to the intrinsic. - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret (*)(interpreter_interface*, const fc::time_point_sec&, Inputs..., LiteralList&, int); - - template - static Ret translate_one(interpreter_interface* interface, Inputs... rest, LiteralList& args, int offset) { - uint32_t wasm_value = args.at((uint32_t)offset).geti32(); - auto value = fc::time_point_sec(wasm_value); - return Then(interface, value, rest..., args, (uint32_t)offset - 1); - } - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - - -/** - * Specialization for transcribing a reference type in the native method signature - * This type transcribes into an int32 pointer checks the validity of that memory - * range before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - */ -template -struct intrinsic_invoker_impl> { - using next_step = intrinsic_invoker_impl>; - using then_type = Ret (*)(interpreter_interface*, T &, Inputs..., LiteralList&, int); - - template - static auto translate_one(interpreter_interface* interface, Inputs... 
rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - // references cannot be created for null pointers - uint32_t ptr = args.at((uint32_t)offset).geti32(); - EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for null pointers"); - T* base = array_ptr_impl(interface, ptr, 1); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned const reference" ); - std::remove_const_t copy; - T* copy_ptr = © - memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); - return Then(interface, *copy_ptr, rest..., args, (uint32_t)offset - 1); - } - return Then(interface, *base, rest..., args, (uint32_t)offset - 1); - } - - template - static auto translate_one(interpreter_interface* interface, Inputs... rest, LiteralList& args, int offset) -> std::enable_if_t::value, Ret> { - // references cannot be created for null pointers - uint32_t ptr = args.at((uint32_t)offset).geti32(); - EOS_ASSERT(ptr != 0, binaryen_exception, "references cannot be created for null pointers"); - T* base = array_ptr_impl(interface, ptr, 1); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - wlog( "misaligned reference" ); - T copy; - memcpy( (void*)©, (void*)base, sizeof(T) ); - Ret ret = Then(interface, copy, rest..., args, (uint32_t)offset - 1); - memcpy( (void*)base, (void*)©, sizeof(T) ); - return ret; - } - return Then(interface, *base, rest..., args, (uint32_t)offset - 1); - } - - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * forward declaration of a wrapper class to call methods of the class - */ -template -struct intrinsic_function_invoker { - using impl = intrinsic_invoker_impl>; - - template - static Ret wrapper(interpreter_interface* interface, Params... 
params, LiteralList&, int) { - class_from_wasm::value(interface->context).checktime(); - return (class_from_wasm::value(interface->context).*Method)(params...); - } - - template - static const intrinsic_registrator::intrinsic_fn fn() { - return impl::template fn>(); - } -}; - -template -struct intrinsic_function_invoker { - using impl = intrinsic_invoker_impl>; - - template - static void_type wrapper(interpreter_interface* interface, Params... params, LiteralList& args, int offset) { - class_from_wasm::value(interface->context).checktime(); - (class_from_wasm::value(interface->context).*Method)(params...); - return void_type(); - } - - template - static const intrinsic_registrator::intrinsic_fn fn() { - return impl::template fn>(); - } - -}; - -template -struct intrinsic_function_invoker_wrapper; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -#define _ADD_PAREN_1(...) ((__VA_ARGS__)) _ADD_PAREN_2 -#define _ADD_PAREN_2(...) ((__VA_ARGS__)) _ADD_PAREN_1 -#define _ADD_PAREN_1_END -#define _ADD_PAREN_2_END -#define _WRAPPED_SEQ(SEQ) BOOST_PP_CAT(_ADD_PAREN_1 SEQ, _END) - -#define __INTRINSIC_NAME(LABEL, SUFFIX) LABEL##SUFFIX -#define _INTRINSIC_NAME(LABEL, SUFFIX) __INTRINSIC_NAME(LABEL,SUFFIX) - -#define _REGISTER_BINARYEN_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ - static eosio::chain::webassembly::binaryen::intrinsic_registrator _INTRINSIC_NAME(__binaryen_intrinsic_fn, __COUNTER__) (\ - MOD "." 
NAME,\ - eosio::chain::webassembly::binaryen::intrinsic_function_invoker_wrapper::type::fn<&CLS::METHOD>()\ - );\ - - -} } } }// eosio::chain::webassembly::wavm diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index ac580045277..67bf07430b1 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -1911,8 +1911,6 @@ std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime) { in >> s; if (s == "wavm") runtime = eosio::chain::wasm_interface::vm_type::wavm; - else if (s == "binaryen") - runtime = eosio::chain::wasm_interface::vm_type::binaryen; else if (s == "wabt") runtime = eosio::chain::wasm_interface::vm_type::wabt; else diff --git a/libraries/chain/webassembly/binaryen.cpp b/libraries/chain/webassembly/binaryen.cpp deleted file mode 100644 index ca3138db806..00000000000 --- a/libraries/chain/webassembly/binaryen.cpp +++ /dev/null @@ -1,105 +0,0 @@ -#include -#include - -#include - - -namespace eosio { namespace chain { namespace webassembly { namespace binaryen { - -class binaryen_instantiated_module : public wasm_instantiated_module_interface { - public: - binaryen_instantiated_module(linear_memory_type& shared_linear_memory, - std::vector initial_memory, - call_indirect_table_type table, - import_lut_type import_lut, - unique_ptr&& module) : - _shared_linear_memory(shared_linear_memory), - _initial_memory(initial_memory), - _table(forward(table)), - _import_lut(forward(import_lut)), - _module(forward(module)) { - - } - - void apply(apply_context& context) override { - LiteralList args = {Literal(uint64_t(context.receiver)), - Literal(uint64_t(context.act.account)), - Literal(uint64_t(context.act.name))}; - call("apply", args, context); - } - - private: - linear_memory_type& _shared_linear_memory; - std::vector _initial_memory; - call_indirect_table_type _table; - import_lut_type _import_lut; - unique_ptr _module; - - void call(const string& entry_point, LiteralList& args, 
apply_context& context){ - const unsigned initial_memory_size = _module->memory.initial*Memory::kPageSize; - interpreter_interface local_interface(_shared_linear_memory, _table, _import_lut, initial_memory_size, context); - - //zero out the initial pages - memset(_shared_linear_memory.data, 0, initial_memory_size); - //copy back in the initial data - memcpy(_shared_linear_memory.data, _initial_memory.data(), _initial_memory.size()); - - //be aware that construction of the ModuleInstance implictly fires the start function - ModuleInstance instance(*_module.get(), &local_interface); - instance.callExport(Name(entry_point), args); - } -}; - -binaryen_runtime::binaryen_runtime() { - -} - -std::unique_ptr binaryen_runtime::instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) { - try { - vector code(code_bytes, code_bytes + code_size); - unique_ptr module(new Module()); - WasmBinaryBuilder builder(*module, code, false); - builder.read(); - - EOS_ASSERT(module->memory.initial * Memory::kPageSize <= wasm_constraints::maximum_linear_memory, binaryen_exception, "exceeds maximum linear memory"); - - // create a temporary globals to use - TrivialGlobalManager globals; - for (auto& global : module->globals) { - globals[global->name] = ConstantExpressionRunner(globals).visit(global->init).value; - } - - call_indirect_table_type table; - table.resize(module->table.initial); - for (auto& segment : module->table.segments) { - Address offset = ConstantExpressionRunner(globals).visit(segment.offset).value.geti32(); - EOS_ASSERT( uint64_t(offset) + segment.data.size() <= module->table.initial, binaryen_exception, ""); - for (size_t i = 0; i != segment.data.size(); ++i) { - table[offset + i] = segment.data[i]; - } - } - - // initialize the import lut - import_lut_type import_lut; - import_lut.reserve(module->imports.size()); - for (auto& import : module->imports) { - std::string full_name = string(import->module.c_str()) + "." 
+ string(import->base.c_str()); - if (import->kind == ExternalKind::Function) { - auto& intrinsic_map = intrinsic_registrator::get_map(); - auto intrinsic_itr = intrinsic_map.find(full_name); - if (intrinsic_itr != intrinsic_map.end()) { - import_lut.emplace(make_pair((uintptr_t)import.get(), intrinsic_itr->second)); - continue; - } - } - - EOS_ASSERT( !"unresolvable", wasm_exception, "${module}.${export} unresolveable", ("module",import->module.c_str())("export",import->base.c_str()) ); - } - - return std::make_unique(_memory, initial_memory, move(table), move(import_lut), move(module)); - } catch (const ParseException &e) { - FC_THROW_EXCEPTION(wasm_execution_error, "Error building interpreter: ${s}", ("s", e.text)); - } -} - -}}}} diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 38e0827cdb1..82a4cb636e1 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -332,9 +332,7 @@ namespace eosio { namespace testing { vcfg.genesis.initial_key = get_public_key( config::system_account_name, "active" ); for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { - if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--binaryen")) - vcfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; - else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) + if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) vcfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) vcfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 19faea3e420..d91d55d3fb1 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -96,14 +96,10 @@ namespace eosio { 
namespace testing { cfg.genesis.initial_key = get_public_key( config::system_account_name, "active" ); for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { - if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--binaryen")) - cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; - else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) + if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) cfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; - else - cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; } open(); diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 07c048114ff..d2a25e777dd 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -212,7 +212,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip ("blocks-dir", bpo::value()->default_value("blocks"), "the location of the blocks directory (absolute path or relative to application data dir)") ("checkpoint", bpo::value>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.") - ("wasm-runtime", bpo::value()->value_name("wavm/binaryen/wabt"), "Override default WASM runtime") + ("wasm-runtime", bpo::value()->value_name("wavm/wabt"), "Override default WASM runtime") ("abi-serializer-max-time-ms", bpo::value()->default_value(config::default_abi_serializer_max_time_ms), "Override default maximum ABI serialization time allowed in ms") ("chain-state-db-size-mb", bpo::value()->default_value(config::default_state_size / (1024 * 1024)), "Maximum size (in MiB) of the chain state database") diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index 328b66a4462..c21c9597312 
100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -32,9 +32,6 @@ add_dependencies(unit_test asserter test_api test_api_mem test_api_db test_ram_l #Manually run unit_test for all supported runtimes #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. unit_test -- --verbose -add_test(NAME unit_test_binaryen COMMAND unit_test - -t \!wasm_tests/weighted_cpu_limit_tests - --report_level=detailed --color_output -- --binaryen) add_test(NAME unit_test_wavm COMMAND unit_test -t \!wasm_tests/weighted_cpu_limit_tests --report_level=detailed --color_output --catch_system_errors=no -- --wavm) @@ -59,7 +56,7 @@ if(ENABLE_COVERAGE_TESTING) endif() # NOT GENHTML_PATH # no spaces allowed within tests list - set(ctest_tests 'unit_test_binaryen|unit_test_wavm') + set(ctest_tests 'unit_test_wabt|unit_test_wavm') set(ctest_exclude_tests '') # Setup target diff --git a/unittests/whitelist_blacklist_tests.cpp b/unittests/whitelist_blacklist_tests.cpp index 1621e2ef916..9957b4ccccc 100644 --- a/unittests/whitelist_blacklist_tests.cpp +++ b/unittests/whitelist_blacklist_tests.cpp @@ -41,14 +41,10 @@ class whitelist_blacklist_tester { cfg.genesis.initial_key = base_tester::get_public_key( config::system_account_name, "active" ); for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { - if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--binaryen")) - cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; - else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) + if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) cfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; - else - cfg.wasm_runtime = chain::wasm_interface::vm_type::binaryen; } return cfg; From 
a83c8e3c27bf313eb09dff48e371c63bc293a082 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 25 Sep 2018 16:09:42 -0400 Subject: [PATCH 054/161] allow only read-only access to state DB for consumers outside of the chain library Currently an exception is made to the deprecated history_plugin until either that plugin is removed or redesigned to use its own independent chainbase DB rather than piggybacking off the main state DB. --- contracts/eosio.bios/eosio.bios.abi | 32 +++++++++++++++++++ contracts/eosio.bios/eosio.bios.cpp | 2 +- contracts/eosio.bios/eosio.bios.hpp | 5 +++ libraries/chain/apply_context.cpp | 7 ++-- libraries/chain/controller.cpp | 8 +++-- .../include/eosio/chain/apply_context.hpp | 2 +- .../chain/include/eosio/chain/controller.hpp | 8 +++-- libraries/chain/transaction_context.cpp | 6 ++-- .../db_size_api_plugin/db_size_api_plugin.cpp | 2 +- plugins/history_plugin/history_plugin.cpp | 16 ++++++---- unittests/api_tests.cpp | 20 ++---------- unittests/auth_tests.cpp | 2 +- unittests/database_tests.cpp | 4 ++- unittests/delay_tests.cpp | 14 +++----- unittests/special_accounts_tests.cpp | 2 +- 15 files changed, 78 insertions(+), 52 deletions(-) diff --git a/contracts/eosio.bios/eosio.bios.abi b/contracts/eosio.bios/eosio.bios.abi index c81d774c689..2dd3310fc67 100644 --- a/contracts/eosio.bios/eosio.bios.abi +++ b/contracts/eosio.bios/eosio.bios.abi @@ -53,6 +53,28 @@ {"name":"accounts", "type":"permission_level_weight[]"}, {"name":"waits", "type":"wait_weight[]"} ] + },{ + "name": "blockchain_parameters", + "base": "", + "fields": [ + {"name":"max_block_net_usage", "type":"uint64"}, + {"name":"target_block_net_usage_pct", "type":"uint32"}, + {"name":"max_transaction_net_usage", "type":"uint32"}, + {"name":"base_per_transaction_net_usage", "type":"uint32"}, + {"name":"net_usage_leeway", "type":"uint32"}, + {"name":"context_free_discount_net_usage_num", "type":"uint32"}, + {"name":"context_free_discount_net_usage_den", "type":"uint32"}, + 
{"name":"max_block_cpu_usage", "type":"uint32"}, + {"name":"target_block_cpu_usage_pct", "type":"uint32"}, + {"name":"max_transaction_cpu_usage", "type":"uint32"}, + {"name":"min_transaction_cpu_usage", "type":"uint32"}, + {"name":"max_transaction_lifetime", "type":"uint32"}, + {"name":"deferred_trx_expiration_window", "type":"uint32"}, + {"name":"max_transaction_delay", "type":"uint32"}, + {"name":"max_inline_action_size", "type":"uint32"}, + {"name":"max_inline_action_depth", "type":"uint16"}, + {"name":"max_authority_depth", "type":"uint16"} + ] },{ "name": "newaccount", "base": "", @@ -160,6 +182,12 @@ "fields": [ {"name":"schedule", "type":"producer_key[]"} ] + },{ + "name": "setparams", + "base": "", + "fields": [ + {"name":"params", "type":"blockchain_parameters"} + ] },{ "name": "require_auth", "base": "", @@ -219,6 +247,10 @@ "name": "setprods", "type": "set_producers", "ricardian_contract": "" + },{ + "name": "setparams", + "type": "setparams", + "ricardian_contract": "" },{ "name": "reqauth", "type": "require_auth", diff --git a/contracts/eosio.bios/eosio.bios.cpp b/contracts/eosio.bios/eosio.bios.cpp index 70279d6e460..66d70f0c47e 100644 --- a/contracts/eosio.bios/eosio.bios.cpp +++ b/contracts/eosio.bios/eosio.bios.cpp @@ -1,3 +1,3 @@ #include -EOSIO_ABI( eosio::bios, (setpriv)(setalimits)(setglimits)(setprods)(reqauth) ) +EOSIO_ABI( eosio::bios, (setpriv)(setalimits)(setglimits)(setprods)(setparams)(reqauth) ) diff --git a/contracts/eosio.bios/eosio.bios.hpp b/contracts/eosio.bios/eosio.bios.hpp index 99807d811c1..0abca64c90e 100644 --- a/contracts/eosio.bios/eosio.bios.hpp +++ b/contracts/eosio.bios/eosio.bios.hpp @@ -34,6 +34,11 @@ namespace eosio { set_proposed_producers(buffer, size); } + void setparams( const eosio::blockchain_parameters& params ) { + require_auth( _self ); + set_blockchain_parameters( params ); + } + void reqauth( action_name from ) { require_auth( from ); } diff --git a/libraries/chain/apply_context.cpp 
b/libraries/chain/apply_context.cpp index f7513debf62..9b92b8d2a1f 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -272,8 +272,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a } uint32_t trx_size = 0; - auto& d = control.db(); - if ( auto ptr = d.find(boost::make_tuple(receiver, sender_id)) ) { + if ( auto ptr = db.find(boost::make_tuple(receiver, sender_id)) ) { EOS_ASSERT( replace_existing, deferred_tx_duplicate, "deferred transaction with the same sender_id and payer already exists" ); // TODO: Remove the following subjective check when the deferred trx replacement RAM bug has been fixed with a hard fork. @@ -283,7 +282,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a // TODO: The logic of the next line needs to be incorporated into the next hard fork. // add_ram_usage( ptr->payer, -(config::billable_size_v + ptr->packed_trx.size()) ); - d.modify( *ptr, [&]( auto& gtx ) { + db.modify( *ptr, [&]( auto& gtx ) { gtx.sender = receiver; gtx.sender_id = sender_id; gtx.payer = payer; @@ -294,7 +293,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a trx_size = gtx.set( trx ); }); } else { - d.create( [&]( auto& gtx ) { + db.create( [&]( auto& gtx ) { gtx.trx_id = trx.id(); gtx.sender = receiver; gtx.sender_id = sender_id; diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 91decde652b..1aa20aa70ee 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1051,7 +1051,7 @@ struct controller_impl { // on replay irreversible is not emitted by fork database, so emit it explicitly here if( s == controller::block_status::irreversible ) emit( self.irreversible_block, new_header_state ); - + } FC_LOG_AND_RETHROW( ) } @@ -1399,9 +1399,11 @@ void controller::startup() { my->init(); } -chainbase::database& controller::db()const { return my->db; } +const chainbase::database& 
controller::db()const { return my->db; } + +chainbase::database& controller::mutable_db()const { return my->db; } -fork_database& controller::fork_db()const { return my->fork_db; } +const fork_database& controller::fork_db()const { return my->fork_db; } void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count) { diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 8a4f98a7caa..70fb198b1e7 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -453,7 +453,7 @@ class apply_context { public: apply_context(controller& con, transaction_context& trx_ctx, const action& a, uint32_t depth=0) :control(con) - ,db(con.db()) + ,db(con.mutable_db()) ,trx_context(trx_ctx) ,act(a) ,receiver(act.account) diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 6b5cf3b613f..6d2baa9165f 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -146,9 +146,9 @@ namespace eosio { namespace chain { */ void push_confirmation( const header_confirmation& c ); - chainbase::database& db()const; + const chainbase::database& db()const; - fork_database& fork_db()const; + const fork_database& fork_db()const; const account_object& get_account( account_name n )const; const global_property_object& get_global_properties()const; @@ -286,6 +286,10 @@ namespace eosio { namespace chain { } private: + friend class apply_context; + friend class transaction_context; + + chainbase::database& mutable_db()const; std::unique_ptr my; diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index dd58f0364ec..806ce5d3f8e 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -23,7 +23,7 @@ namespace eosio { namespace chain 
{ ,pseudo_start(s) { if (!c.skip_db_sessions()) { - undo_session = c.db().start_undo_session(true); + undo_session = c.mutable_db().start_undo_session(true); } trace->id = id; trace->block_num = c.pending_block_state()->block_num; @@ -451,7 +451,7 @@ namespace eosio { namespace chain { auto first_auth = trx.first_authorizor(); uint32_t trx_size = 0; - const auto& cgto = control.db().create( [&]( auto& gto ) { + const auto& cgto = control.mutable_db().create( [&]( auto& gto ) { gto.trx_id = id; gto.payer = first_auth; gto.sender = account_name(); /// delayed transactions have no sender @@ -467,7 +467,7 @@ namespace eosio { namespace chain { void transaction_context::record_transaction( const transaction_id_type& id, fc::time_point_sec expire ) { try { - control.db().create([&](transaction_object& transaction) { + control.mutable_db().create([&](transaction_object& transaction) { transaction.trx_id = id; transaction.expiration = expire; }); diff --git a/plugins/db_size_api_plugin/db_size_api_plugin.cpp b/plugins/db_size_api_plugin/db_size_api_plugin.cpp index 8dd5b50f48e..13b717c0789 100644 --- a/plugins/db_size_api_plugin/db_size_api_plugin.cpp +++ b/plugins/db_size_api_plugin/db_size_api_plugin.cpp @@ -36,7 +36,7 @@ void db_size_api_plugin::plugin_startup() { } db_size_stats db_size_api_plugin::get() { - chainbase::database& db = app().get_plugin().chain().db(); + const chainbase::database& db = app().get_plugin().chain().db(); db_size_stats ret; ret.free_bytes = db.get_segment_manager()->get_free_memory(); diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp index 3c3650b9e59..e3292672e44 100644 --- a/plugins/history_plugin/history_plugin.cpp +++ b/plugins/history_plugin/history_plugin.cpp @@ -206,7 +206,7 @@ namespace eosio { void record_account_action( account_name n, const base_action_trace& act ) { auto& chain = chain_plug->chain(); - auto& db = chain.db(); + chainbase::database& db = const_cast( chain.db() ); // 
Override read-only access to state DB (highly unrecommended practice!) const auto& idx = db.get_index(); auto itr = idx.lower_bound( boost::make_tuple( name(n.value+1), 0 ) ); @@ -227,7 +227,7 @@ namespace eosio { void on_system_action( const action_trace& at ) { auto& chain = chain_plug->chain(); - auto& db = chain.db(); + chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) if( at.act.name == N(newaccount) ) { const auto create = at.act.data_as(); @@ -256,7 +256,7 @@ namespace eosio { if( filter( at ) ) { //idump((fc::json::to_pretty_string(at))); auto& chain = chain_plug->chain(); - auto& db = chain.db(); + chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) db.create( [&]( auto& aho ) { auto ps = fc::raw::pack_size( at ); @@ -344,10 +344,12 @@ namespace eosio { EOS_ASSERT( my->chain_plug, chain::missing_chain_plugin_exception, "" ); auto& chain = my->chain_plug->chain(); - chain.db().add_index(); - chain.db().add_index(); - chain.db().add_index(); - chain.db().add_index(); + chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) 
+ // TODO: Use separate chainbase database for managing the state of the history_plugin (or remove deprecated history_plugin entirely) + db.add_index(); + db.add_index(); + db.add_index(); + db.add_index(); my->applied_transaction_connection.emplace( chain.applied_transaction.connect( [&]( const transaction_trace_ptr& p ) { diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index c9811adf071..60bde1edd99 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1964,23 +1964,9 @@ BOOST_FIXTURE_TEST_CASE(new_api_feature_tests, TESTER) { try { }); // change privilege - { - chainbase::database &db = control->db(); - const account_object &account = db.get(N(testapi)); - db.modify(account, [&](account_object &v) { - v.privileged = true; - }); - } - -#ifndef NON_VALIDATING_TEST - { - chainbase::database &db = validating_node->db(); - const account_object &account = db.get(N(testapi)); - db.modify(account, [&](account_object &v) { - v.privileged = true; - }); - } -#endif + push_action(config::system_account_name, N(setpriv), config::system_account_name, mutable_variant_object() + ("account", "testapi") + ("is_priv", 1)); CALL_TEST_FUNCTION( *this, "test_transaction", "new_feature", {} ); diff --git a/unittests/auth_tests.cpp b/unittests/auth_tests.cpp index bec8ab67b18..e54964b87b4 100644 --- a/unittests/auth_tests.cpp +++ b/unittests/auth_tests.cpp @@ -383,7 +383,7 @@ try { chain.create_account(acc1a); chain.produce_block(); - chainbase::database &db = chain.control->db(); + const chainbase::database &db = chain.control->db(); using resource_usage_object = eosio::chain::resource_limits::resource_usage_object; using by_owner = eosio::chain::resource_limits::by_owner; diff --git a/unittests/database_tests.cpp b/unittests/database_tests.cpp index d2192d980f2..ac97f6c21a6 100644 --- a/unittests/database_tests.cpp +++ b/unittests/database_tests.cpp @@ -26,7 +26,9 @@ BOOST_AUTO_TEST_SUITE(database_tests) BOOST_AUTO_TEST_CASE(undo_test) { try { TESTER 
test; - auto &db = test.control->db(); + + // Bypass read-only restriction on state DB access for this unit test which really needs to mutate the DB to properly conduct its test. + eosio::chain::database& db = const_cast( test.control->db() ); auto ses = db.start_undo_session(true); diff --git a/unittests/delay_tests.cpp b/unittests/delay_tests.cpp index 6d2d0ea63cc..095ba93dfbe 100644 --- a/unittests/delay_tests.cpp +++ b/unittests/delay_tests.cpp @@ -2317,16 +2317,10 @@ BOOST_AUTO_TEST_CASE( max_transaction_delay_execute ) { try { chain.produce_blocks(); //change max_transaction_delay to 60 sec - chain.control->db().modify( chain.control->get_global_properties(), - [&]( auto& gprops ) { - gprops.configuration.max_transaction_delay = 60; - }); -#ifndef NON_VALIDATING_TEST - chain.validating_node->db().modify( chain.validating_node->get_global_properties(), - [&]( auto& gprops ) { - gprops.configuration.max_transaction_delay = 60; - }); -#endif + auto params = chain.control->get_global_properties().configuration; + params.max_transaction_delay = 60; + chain.push_action( config::system_account_name, N(setparams), config::system_account_name, mutable_variant_object() + ("params", params) ); chain.produce_blocks(); //should be able to create transaction with delay 60 sec, despite permission delay being 30 days, because max_transaction_delay is 60 sec diff --git a/unittests/special_accounts_tests.cpp b/unittests/special_accounts_tests.cpp index 9bdbc588e3a..5f5ebe198e8 100644 --- a/unittests/special_accounts_tests.cpp +++ b/unittests/special_accounts_tests.cpp @@ -35,7 +35,7 @@ BOOST_FIXTURE_TEST_CASE(accounts_exists, tester) tester test; chain::controller *control = test.control.get(); - chain::database &chain1_db = control->db(); + const chain::database& chain1_db = control->db(); auto nobody = chain1_db.find(config::null_account_name); BOOST_CHECK(nobody != nullptr); From 1c3b4c169d02212fe1e543e35f94c41eec66f42a Mon Sep 17 00:00:00 2001 From: Matt Witherspoon 
<32485495+spoonincode@users.noreply.github.com> Date: Wed, 26 Sep 2018 16:39:54 -0400 Subject: [PATCH 055/161] Remove unused lambda capture in chain_api_plugin warning b-gone --- plugins/chain_api_plugin/chain_api_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 93197140e51..f07e48a7f04 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -50,7 +50,7 @@ struct async_result_visitor : public fc::visitor { #define CALL_ASYNC(api_name, api_handle, api_namespace, call_name, call_result, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ - [this, api_handle](string, string body, url_response_callback cb) mutable { \ + [api_handle](string, string body, url_response_callback cb) mutable { \ if (body.empty()) body = "{}"; \ api_handle.validate(); \ api_handle.call_name(fc::json::from_string(body).as(),\ From ae3206ac4e9c934fa3f3eade5e4d262c87c3033c Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 26 Sep 2018 17:03:50 -0400 Subject: [PATCH 056/161] Install license (and third party licenses) in filesystem & binary packages --- CMakeLists.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 376cdf972ea..06033ab2962 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -232,6 +232,13 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules/EosioTester.cmake.in ${C configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules/EosioTesterBuild.cmake.in ${CMAKE_BINARY_DIR}/lib/cmake/EosioTester.cmake @ONLY) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/EosioTester.cmake DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/cmake/) +install(FILES LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/) +install(FILES libraries/wabt/LICENSE DESTINATION 
${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.wabt) +install(FILES libraries/softfloat/COPYING.txt DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.softfloat) +install(FILES libraries/wasm-jit/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.wavm) +install(FILES libraries/fc/secp256k1/upstream/COPYING DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.secp256k1) +install(FILES externals/binaryen/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.binaryen) + include(installer) include(doxygen) From a4f7f85f9dead08b22d04312093f850a7c60ee3b Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Fri, 28 Sep 2018 14:15:50 -0400 Subject: [PATCH 057/161] restored fc changes lost in merge --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index bc6e6b75de1..8114051b088 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit bc6e6b75de1862c01c88e3d5c0e46db9122468ee +Subproject commit 8114051b088d8242babdf3678ac45dc7ea84edec From 6146f95c28e8dd8ff377db0207bf1cb40744984e Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Sat, 29 Sep 2018 03:42:21 +0900 Subject: [PATCH 058/161] Add options for buying ram in bytes to newaccount, delegatebw and buyram --- programs/cleos/main.cpp | 47 +++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 25 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index c28502aab53..a8d2eaf418c 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -890,6 +890,7 @@ struct create_account_subcommand { string stake_net; string stake_cpu; uint32_t buy_ram_bytes_in_kbytes = 0; + uint32_t buy_ram_bytes = 0; string buy_ram_eos; bool transfer; bool simple; @@ -907,7 +908,9 @@ struct create_account_subcommand { createAccount->add_option("--stake-cpu", stake_cpu, (localized("The amount of EOS 
delegated for CPU bandwidth")))->required(); createAccount->add_option("--buy-ram-kbytes", buy_ram_bytes_in_kbytes, - (localized("The amount of RAM bytes to purchase for the new account in kibibytes (KiB), default is 8 KiB"))); + (localized("The amount of RAM bytes to purchase for the new account in kibibytes (KiB)"))); + createAccount->add_option("--buy-ram-bytes", buy_ram_bytes, + (localized("The amount of RAM bytes to purchase for the new account in bytes"))); createAccount->add_option("--buy-ram", buy_ram_eos, (localized("The amount of RAM bytes to purchase for the new account in EOS"))); createAccount->add_flag("--transfer", transfer, @@ -928,12 +931,10 @@ struct create_account_subcommand { } EOS_RETHROW_EXCEPTIONS(public_key_type_exception, "Invalid active public key: ${public_key}", ("public_key", active_key_str)); auto create = create_newaccount(creator, account_name, owner_key, active_key); if (!simple) { - if ( buy_ram_eos.empty() && buy_ram_bytes_in_kbytes == 0) { - std::cerr << "ERROR: Either --buy-ram or --buy-ram-kbytes with non-zero value is required" << std::endl; - return; - } + EOSC_ASSERT( buy_ram_eos.size() || buy_ram_bytes_in_kbytes || buy_ram_bytes, "ERROR: One of --buy-ram, --buy-ram-kbytes or --buy-ram-bytes should have non-zero value" ); + EOSC_ASSERT( !buy_ram_bytes_in_kbytes || !buy_ram_bytes, "ERROR: --buy-ram-kbytes and --buy-ram-bytes cannot be set at the same time" ); action buyram = !buy_ram_eos.empty() ? create_buyram(creator, account_name, to_asset(buy_ram_eos)) - : create_buyrambytes(creator, account_name, buy_ram_bytes_in_kbytes * 1024); + : create_buyrambytes(creator, account_name, (buy_ram_bytes_in_kbytes) ? 
(buy_ram_bytes_in_kbytes * 1024) : buy_ram_bytes); auto net = to_asset(stake_net); auto cpu = to_asset(stake_cpu); if ( net.get_amount() != 0 || cpu.get_amount() != 0 ) { @@ -1194,6 +1195,7 @@ struct delegate_bandwidth_subcommand { string stake_cpu_amount; string stake_storage_amount; string buy_ram_amount; + uint32_t buy_ram_bytes = 0; bool transfer = false; delegate_bandwidth_subcommand(CLI::App* actionRoot) { @@ -1203,6 +1205,7 @@ struct delegate_bandwidth_subcommand { delegate_bandwidth->add_option("stake_net_quantity", stake_net_amount, localized("The amount of EOS to stake for network bandwidth"))->required(); delegate_bandwidth->add_option("stake_cpu_quantity", stake_cpu_amount, localized("The amount of EOS to stake for CPU bandwidth"))->required(); delegate_bandwidth->add_option("--buyram", buy_ram_amount, localized("The amount of EOS to buyram")); + delegate_bandwidth->add_option("--buy-ram-bytes", buy_ram_bytes, localized("The amount of RAM to buy in number of bytes")); delegate_bandwidth->add_flag("--transfer", transfer, localized("Transfer voting power and right to unstake EOS to receiver")); add_standard_transaction_options(delegate_bandwidth); @@ -1214,12 +1217,11 @@ struct delegate_bandwidth_subcommand { ("stake_cpu_quantity", to_asset(stake_cpu_amount)) ("transfer", transfer); std::vector acts{create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(delegatebw), act_payload)}; - if (buy_ram_amount.length()) { - fc::variant act_payload2 = fc::mutable_variant_object() - ("payer", from_str) - ("receiver", receiver_str) - ("quant", to_asset(buy_ram_amount)); - acts.push_back(create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyram), act_payload2)); + EOSC_ASSERT( !(buy_ram_amount.size()) || !buy_ram_bytes, "ERROR: --buyram and --buy-ram-bytes cannot be set at the same time" ); + if (buy_ram_amount.size()) { + acts.push_back( create_buyram(from_str, receiver_str, 
to_asset(buy_ram_amount)) ); + } else if (buy_ram_bytes) { + acts.push_back( create_buyrambytes(from_str, receiver_str, buy_ram_bytes) ); } send_actions(std::move(acts)); }); @@ -1347,27 +1349,22 @@ struct buyram_subcommand { string receiver_str; string amount; bool kbytes = false; + bool bytes = false; buyram_subcommand(CLI::App* actionRoot) { auto buyram = actionRoot->add_subcommand("buyram", localized("Buy RAM")); buyram->add_option("payer", from_str, localized("The account paying for RAM"))->required(); buyram->add_option("receiver", receiver_str, localized("The account receiving bought RAM"))->required(); - buyram->add_option("amount", amount, localized("The amount of EOS to pay for RAM, or number of kbytes of RAM if --kbytes is set"))->required(); - buyram->add_flag("--kbytes,-k", kbytes, localized("buyram in number of kbytes")); + buyram->add_option("amount", amount, localized("The amount of EOS to pay for RAM, or number of bytes/kibibytes of RAM if --bytes/--kbytes is set"))->required(); + buyram->add_flag("--kbytes,-k", kbytes, localized("buyram in number of kibibytes (KiB)")); + buyram->add_flag("--bytes,-b", bytes, localized("buyram in number of bytes")); add_standard_transaction_options(buyram); buyram->set_callback([this] { - if (kbytes) { - fc::variant act_payload = fc::mutable_variant_object() - ("payer", from_str) - ("receiver", receiver_str) - ("bytes", fc::to_uint64(amount) * 1024ull); - send_actions({create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyrambytes), act_payload)}); + EOSC_ASSERT( !kbytes || !bytes, "ERROR: --kbytes and --bytes cannot be set at the same time" ); + if (kbytes || bytes) { + send_actions( { create_buyrambytes(from_str, receiver_str, fc::to_uint64(amount) * ((kbytes) ? 
1024ull : 1ull)) } ); } else { - fc::variant act_payload = fc::mutable_variant_object() - ("payer", from_str) - ("receiver", receiver_str) - ("quant", to_asset(amount)); - send_actions({create_action({permission_level{from_str,config::active_name}}, config::system_account_name, N(buyram), act_payload)}); + send_actions( { create_buyram(from_str, receiver_str, to_asset(amount)) } ); } }); } From 650e0a9682e3c1a21396a8863e3be33b813ca888 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 26 Sep 2018 12:51:04 -0500 Subject: [PATCH 059/161] Fixed launcher to use --genesis file for the initial genesis file. GH #5674 --- programs/eosio-launcher/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 3895b0c0c83..ad283dca94a 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -1157,7 +1157,7 @@ launcher_def::write_logging_config_file(tn_node_def &node) { void launcher_def::init_genesis () { - bfs::path genesis_path = bfs::current_path() / "genesis.json"; + bfs::path genesis_path = genesis.is_complete() ? genesis : bfs::current_path() / genesis; bfs::ifstream src(genesis_path); if (!src.good()) { cout << "generating default genesis file " << genesis_path << endl; From 458e19468ac24eeae23dae50b0816c636966ce88 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 26 Sep 2018 12:52:45 -0500 Subject: [PATCH 060/161] Editing genesis.json to increase max_block_cpu_usage. 
GH #5674 --- tests/Cluster.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index f09978dcc9a..70c77965e06 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -84,6 +84,7 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 self.defproducerbAccount.activePrivateKey=defproducerbPrvtKey self.useBiosBootFile=False + self.filesToCleanup=[] def setChainStrategy(self, chainSyncStrategy=Utils.SyncReplayTag): @@ -146,7 +147,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne if self.staging: cmdArr.append("--nogen") - nodeosArgs="--max-transaction-time 990000 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) + nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes) if not self.walletd: nodeosArgs += " --plugin eosio::wallet_api_plugin" if self.enableMongo: @@ -171,6 +172,23 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append("--specific-nodeos") cmdArr.append(arg) + genesisFile=open("./genesis.json", "r") + genesisJsonStr=genesisFile.read() + genesisFile.close() + genesisObject=json.loads(genesisJsonStr) + initialConfiguration=genesisObject["initial_configuration"] + maxBlockCpuUsage=initialConfiguration.get("max_block_cpu_usage",200000) + initialConfiguration["max_block_cpu_usage"]=maxBlockCpuUsage*10 + + + tempGenesisFileName="./tempGenesis.json" + genesisFile=open(tempGenesisFileName,"w") + genesisFile.write(json.dumps(genesisObject, indent=2)) + genesisFile.close() + self.filesToCleanup.append(tempGenesisFileName) + cmdArr.append("--genesis") + cmdArr.append(tempGenesisFileName) + # must be last cmdArr.append before subprocess.call, so that everything is on the command line # before constructing the shape.json file for "bridge" if topo=="bridge": @@ -1366,6 
+1384,9 @@ def cleanup(self): for f in glob.glob("etc/eosio/node_*"): shutil.rmtree(f) + for f in self.filesToCleanup: + os.remove(f) + if self.enableMongo: cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs) subcommand="db.dropDatabase()" From 69cf2a280cb120cd6bcb5faf6f2e6153b7facb31 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 27 Sep 2018 08:20:11 -0500 Subject: [PATCH 061/161] Changed to only create one wallet manager per test run. GH #5674 --- tests/Cluster.py | 492 +++++++++--------- tests/Node.py | 5 +- tests/WalletMgr.py | 36 +- ...onsensus-validation-malicious-producers.py | 5 - tests/distributed-transactions-test.py | 14 +- tests/launcher_test.py | 19 +- tests/nodeos_forked_chain_test.py | 9 +- tests/nodeos_run_remote_test.py | 2 +- tests/nodeos_run_test.py | 20 +- tests/nodeos_under_min_avail_ram.py | 9 +- tests/nodeos_voting_test.py | 9 +- tests/restart-scenarios-test.py | 6 +- 12 files changed, 299 insertions(+), 327 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 70c77965e06..a9519fe6c13 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -100,8 +100,8 @@ def setWalletMgr(self, walletMgr): # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontKill=False - , dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None): + def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, + totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None): """Launch cluster. 
pnodes: producer nodes count totalNodes: producer + non-producer nodes count @@ -348,14 +348,16 @@ def connectGroup(group, producerNodes, bridgeNodes) : return True Utils.Print("Bootstrap cluster.") + if self.walletMgr is None: + self.walletMgr=WalletMgr(True) if onlyBios or not useBiosBootFile: - self.biosNode=Cluster.bootstrap(totalNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, dontKill, onlyBios) + self.biosNode=Cluster.bootstrap(totalNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False else: self.useBiosBootFile=True - self.biosNode=Cluster.bios_bootstrap(totalNodes, Cluster.__BiosHost, Cluster.__BiosPort, dontKill) + self.biosNode=Cluster.bios_bootstrap(totalNodes, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False @@ -839,7 +841,7 @@ def parseClusterKeys(totalNodes): return producerKeys @staticmethod - def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): + def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr): """Bootstrap cluster using the bios_boot.sh script generated by eosio-launcher.""" Utils.Print("Starting cluster bootstrap.") @@ -868,66 +870,60 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, dontKill=False): Utils.Print("ERROR: Failed to parse private keys from cluster config files.") return None - walletMgr=WalletMgr(True) walletMgr.killall() walletMgr.cleanup() if not walletMgr.launch(): Utils.Print("ERROR: Failed to launch bootstrap wallet.") return None - biosNode.setWalletEndpointArgs(walletMgr.walletEndpointArgs) + biosNode.setWalletEndpointArgs(walletMgr.getWalletEndpointArgs()) - try: - ignWallet=walletMgr.create("ignition") - if ignWallet is None: - Utils.Print("ERROR: Failed to create ignition wallet.") - return None + ignWallet=walletMgr.create("ignition") + if ignWallet is None: 
+ Utils.Print("ERROR: Failed to create ignition wallet.") + return None - eosioName="eosio" - eosioKeys=producerKeys[eosioName] - eosioAccount=Account(eosioName) - eosioAccount.ownerPrivateKey=eosioKeys["private"] - eosioAccount.ownerPublicKey=eosioKeys["public"] - eosioAccount.activePrivateKey=eosioKeys["private"] - eosioAccount.activePublicKey=eosioKeys["public"] - producerKeys.pop(eosioName) - - if not walletMgr.importKey(eosioAccount, ignWallet): - Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) - return None + eosioName="eosio" + eosioKeys=producerKeys[eosioName] + eosioAccount=Account(eosioName) + eosioAccount.ownerPrivateKey=eosioKeys["private"] + eosioAccount.ownerPublicKey=eosioKeys["public"] + eosioAccount.activePrivateKey=eosioKeys["private"] + eosioAccount.activePublicKey=eosioKeys["public"] + producerKeys.pop(eosioName) + + if not walletMgr.importKey(eosioAccount, ignWallet): + Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) + return None - initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) - Utils.Print("Transfer initial fund %s to individual accounts." % (initialFunds)) - trans=None - contract="eosio.token" - action="transfer" - for name, keys in producerKeys.items(): - data="{\"from\":\"eosio\",\"to\":\"%s\",\"quantity\":\"%s\",\"memo\":\"%s\"}" % (name, initialFunds, "init transfer") - opts="--permission eosio@active" - if name != "eosio": - trans=biosNode.pushMessage(contract, action, data, opts) - if trans is None or not trans[0]: - Utils.Print("ERROR: Failed to transfer funds from eosio.token to %s." % (name)) - return None + initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) + Utils.Print("Transfer initial fund %s to individual accounts." 
% (initialFunds)) + trans=None + contract="eosio.token" + action="transfer" + for name, keys in producerKeys.items(): + data="{\"from\":\"eosio\",\"to\":\"%s\",\"quantity\":\"%s\",\"memo\":\"%s\"}" % (name, initialFunds, "init transfer") + opts="--permission eosio@active" + if name != "eosio": + trans=biosNode.pushMessage(contract, action, data, opts) + if trans is None or not trans[0]: + Utils.Print("ERROR: Failed to transfer funds from eosio.token to %s." % (name)) + return None - Node.validateTransaction(trans[1]) + Node.validateTransaction(trans[1]) - Utils.Print("Wait for last transfer transaction to become finalized.") - transId=Node.getTransId(trans[1]) - if not biosNode.waitForTransInBlock(transId): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) - return None + Utils.Print("Wait for last transfer transaction to become finalized.") + transId=Node.getTransId(trans[1]) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) + return None - Utils.Print("Cluster bootstrap done.") - finally: - if not dontKill: - walletMgr.killall() - walletMgr.cleanup() + Utils.Print("Cluster bootstrap done.") return biosNode @staticmethod - def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, dontKill=False, onlyBios=False): + def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, onlyBios=False): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call. 
One way to validate this will be to check if every node has block 1.""" @@ -949,247 +945,241 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, dontKil Utils.Print("ERROR: Failed to parse %d producer keys from cluster config files, only found %d." % (totalProducers+1,len(producerKeys))) return None - walletMgr=WalletMgr(True) walletMgr.killall() walletMgr.cleanup() if not walletMgr.launch(): Utils.Print("ERROR: Failed to launch bootstrap wallet.") return None - biosNode.setWalletEndpointArgs(walletMgr.walletEndpointArgs) + biosNode.setWalletEndpointArgs(walletMgr.getWalletEndpointArgs()) - try: - ignWallet=walletMgr.create("ignition") - - eosioName="eosio" - eosioKeys=producerKeys[eosioName] - eosioAccount=Account(eosioName) - eosioAccount.ownerPrivateKey=eosioKeys["private"] - eosioAccount.ownerPublicKey=eosioKeys["public"] - eosioAccount.activePrivateKey=eosioKeys["private"] - eosioAccount.activePublicKey=eosioKeys["public"] - - if not walletMgr.importKey(eosioAccount, ignWallet): - Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) - return None + ignWallet=walletMgr.create("ignition") + + eosioName="eosio" + eosioKeys=producerKeys[eosioName] + eosioAccount=Account(eosioName) + eosioAccount.ownerPrivateKey=eosioKeys["private"] + eosioAccount.ownerPublicKey=eosioKeys["public"] + eosioAccount.activePrivateKey=eosioKeys["private"] + eosioAccount.activePublicKey=eosioKeys["public"] + + if not walletMgr.importKey(eosioAccount, ignWallet): + Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) + return None + + contract="eosio.bios" + contractDir="contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + Utils.Print("Publish %s contract" % (contract)) + trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + Utils.Print("ERROR: Failed to publish contract %s." 
% (contract)) + return None + + Node.validateTransaction(trans) - contract="eosio.bios" - contractDir="contracts/%s" % (contract) - wasmFile="%s.wasm" % (contract) - abiFile="%s.abi" % (contract) - Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + Utils.Print("Creating accounts: %s " % ", ".join(producerKeys.keys())) + producerKeys.pop(eosioName) + accounts=[] + for name, keys in producerKeys.items(): + initx = None + initx = Account(name) + initx.ownerPrivateKey=keys["private"] + initx.ownerPublicKey=keys["public"] + initx.activePrivateKey=keys["private"] + initx.activePublicKey=keys["public"] + trans=biosNode.createAccount(initx, eosioAccount, 0) if trans is None: - Utils.Print("ERROR: Failed to publish contract %s." % (contract)) + Utils.Print("ERROR: Failed to create account %s" % (name)) return None - Node.validateTransaction(trans) + accounts.append(initx) + + transId=Node.getTransId(trans) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) + return None + + Utils.Print("Validating system accounts within bootstrap") + biosNode.validateAccounts(accounts) - Utils.Print("Creating accounts: %s " % ", ".join(producerKeys.keys())) - producerKeys.pop(eosioName) - accounts=[] - for name, keys in producerKeys.items(): - initx = None - initx = Account(name) - initx.ownerPrivateKey=keys["private"] - initx.ownerPublicKey=keys["public"] - initx.activePrivateKey=keys["private"] - initx.activePublicKey=keys["public"] - trans=biosNode.createAccount(initx, eosioAccount, 0) - if trans is None: - Utils.Print("ERROR: Failed to create account %s" % (name)) + if not onlyBios: + if prodCount == -1: + setProdsFile="setprods.json" + if Utils.Debug: Utils.Print("Reading in setprods file %s." 
% (setProdsFile)) + with open(setProdsFile, "r") as f: + setProdsStr=f.read() + + Utils.Print("Setting producers.") + opts="--permission eosio@active" + myTrans=biosNode.pushMessage("eosio", "setprods", setProdsStr, opts) + if myTrans is None or not myTrans[0]: + Utils.Print("ERROR: Failed to set producers.") + return None + else: + counts=dict.fromkeys(range(totalNodes), 0) #initialize node prods count to 0 + setProdsStr='{"schedule": [' + firstTime=True + prodNames=[] + for name, keys in producerKeys.items(): + if counts[keys["node"]] >= prodCount: + continue + if firstTime: + firstTime = False + else: + setProdsStr += ',' + + setProdsStr += ' { "producer_name": "%s", "block_signing_key": "%s" }' % (keys["name"], keys["public"]) + prodNames.append(keys["name"]) + counts[keys["node"]] += 1 + + setProdsStr += ' ] }' + if Utils.Debug: Utils.Print("setprods: %s" % (setProdsStr)) + Utils.Print("Setting producers: %s." % (", ".join(prodNames))) + opts="--permission eosio@active" + # pylint: disable=redefined-variable-type + trans=biosNode.pushMessage("eosio", "setprods", setProdsStr, opts) + if trans is None or not trans[0]: + Utils.Print("ERROR: Failed to set producer %s." % (keys["name"])) return None - Node.validateTransaction(trans) - accounts.append(initx) + trans=trans[1] transId=Node.getTransId(trans) if not biosNode.waitForTransInBlock(transId): Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) return None - Utils.Print("Validating system accounts within bootstrap") - biosNode.validateAccounts(accounts) - - if not onlyBios: - if prodCount == -1: - setProdsFile="setprods.json" - if Utils.Debug: Utils.Print("Reading in setprods file %s." 
% (setProdsFile)) - with open(setProdsFile, "r") as f: - setProdsStr=f.read() - - Utils.Print("Setting producers.") - opts="--permission eosio@active" - myTrans=biosNode.pushMessage("eosio", "setprods", setProdsStr, opts) - if myTrans is None or not myTrans[0]: - Utils.Print("ERROR: Failed to set producers.") - return None - else: - counts=dict.fromkeys(range(totalNodes), 0) #initialize node prods count to 0 - setProdsStr='{"schedule": [' - firstTime=True - prodNames=[] - for name, keys in producerKeys.items(): - if counts[keys["node"]] >= prodCount: - continue - if firstTime: - firstTime = False - else: - setProdsStr += ',' + # wait for block production handover (essentially a block produced by anyone but eosio). + lam = lambda: biosNode.getInfo(exitOnError=True)["head_block_producer"] != "eosio" + ret=Utils.waitForBool(lam) + if not ret: + Utils.Print("ERROR: Block production handover failed.") + return None - setProdsStr += ' { "producer_name": "%s", "block_signing_key": "%s" }' % (keys["name"], keys["public"]) - prodNames.append(keys["name"]) - counts[keys["node"]] += 1 + eosioTokenAccount=copy.deepcopy(eosioAccount) + eosioTokenAccount.name="eosio.token" + trans=biosNode.createAccount(eosioTokenAccount, eosioAccount, 0) + if trans is None: + Utils.Print("ERROR: Failed to create account %s" % (eosioTokenAccount.name)) + return None - setProdsStr += ' ] }' - if Utils.Debug: Utils.Print("setprods: %s" % (setProdsStr)) - Utils.Print("Setting producers: %s." % (", ".join(prodNames))) - opts="--permission eosio@active" - # pylint: disable=redefined-variable-type - trans=biosNode.pushMessage("eosio", "setprods", setProdsStr, opts) - if trans is None or not trans[0]: - Utils.Print("ERROR: Failed to set producer %s." 
% (keys["name"])) - return None + eosioRamAccount=copy.deepcopy(eosioAccount) + eosioRamAccount.name="eosio.ram" + trans=biosNode.createAccount(eosioRamAccount, eosioAccount, 0) + if trans is None: + Utils.Print("ERROR: Failed to create account %s" % (eosioRamAccount.name)) + return None - trans=trans[1] - transId=Node.getTransId(trans) - if not biosNode.waitForTransInBlock(transId): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) - return None + eosioRamfeeAccount=copy.deepcopy(eosioAccount) + eosioRamfeeAccount.name="eosio.ramfee" + trans=biosNode.createAccount(eosioRamfeeAccount, eosioAccount, 0) + if trans is None: + Utils.Print("ERROR: Failed to create account %s" % (eosioRamfeeAccount.name)) + return None - # wait for block production handover (essentially a block produced by anyone but eosio). - lam = lambda: biosNode.getInfo(exitOnError=True)["head_block_producer"] != "eosio" - ret=Utils.waitForBool(lam) - if not ret: - Utils.Print("ERROR: Block production handover failed.") - return None + eosioStakeAccount=copy.deepcopy(eosioAccount) + eosioStakeAccount.name="eosio.stake" + trans=biosNode.createAccount(eosioStakeAccount, eosioAccount, 0) + if trans is None: + Utils.Print("ERROR: Failed to create account %s" % (eosioStakeAccount.name)) + return None - eosioTokenAccount=copy.deepcopy(eosioAccount) - eosioTokenAccount.name="eosio.token" - trans=biosNode.createAccount(eosioTokenAccount, eosioAccount, 0) - if trans is None: - Utils.Print("ERROR: Failed to create account %s" % (eosioTokenAccount.name)) - return None + Node.validateTransaction(trans) + transId=Node.getTransId(trans) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." 
% (transId, biosNode.port)) + return None - eosioRamAccount=copy.deepcopy(eosioAccount) - eosioRamAccount.name="eosio.ram" - trans=biosNode.createAccount(eosioRamAccount, eosioAccount, 0) - if trans is None: - Utils.Print("ERROR: Failed to create account %s" % (eosioRamAccount.name)) - return None + contract="eosio.token" + contractDir="contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + Utils.Print("Publish %s contract" % (contract)) + trans=biosNode.publishContract(eosioTokenAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + Utils.Print("ERROR: Failed to publish contract %s." % (contract)) + return None - eosioRamfeeAccount=copy.deepcopy(eosioAccount) - eosioRamfeeAccount.name="eosio.ramfee" - trans=biosNode.createAccount(eosioRamfeeAccount, eosioAccount, 0) - if trans is None: - Utils.Print("ERROR: Failed to create account %s" % (eosioRamfeeAccount.name)) - return None + # Create currency0000, followed by issue currency0000 + contract=eosioTokenAccount.name + Utils.Print("push create action to %s contract" % (contract)) + action="create" + data="{\"issuer\":\"%s\",\"maximum_supply\":\"1000000000.0000 %s\",\"can_freeze\":\"0\",\"can_recall\":\"0\",\"can_whitelist\":\"0\"}" % (eosioTokenAccount.name, CORE_SYMBOL) + opts="--permission %s@active" % (contract) + trans=biosNode.pushMessage(contract, action, data, opts) + if trans is None or not trans[0]: + Utils.Print("ERROR: Failed to push create action to eosio contract.") + return None - eosioStakeAccount=copy.deepcopy(eosioAccount) - eosioStakeAccount.name="eosio.stake" - trans=biosNode.createAccount(eosioStakeAccount, eosioAccount, 0) - if trans is None: - Utils.Print("ERROR: Failed to create account %s" % (eosioStakeAccount.name)) - return None + Node.validateTransaction(trans[1]) + transId=Node.getTransId(trans[1]) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled 
into a block on server port %d." % (transId, biosNode.port)) + return None - Node.validateTransaction(trans) - transId=Node.getTransId(trans) - if not biosNode.waitForTransInBlock(transId): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) - return None + contract=eosioTokenAccount.name + Utils.Print("push issue action to %s contract" % (contract)) + action="issue" + data="{\"to\":\"%s\",\"quantity\":\"1000000000.0000 %s\",\"memo\":\"initial issue\"}" % (eosioAccount.name, CORE_SYMBOL) + opts="--permission %s@active" % (contract) + trans=biosNode.pushMessage(contract, action, data, opts) + if trans is None or not trans[0]: + Utils.Print("ERROR: Failed to push issue action to eosio contract.") + return None - contract="eosio.token" - contractDir="contracts/%s" % (contract) - wasmFile="%s.wasm" % (contract) - abiFile="%s.abi" % (contract) - Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioTokenAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - Utils.Print("ERROR: Failed to publish contract %s." % (contract)) - return None + Node.validateTransaction(trans[1]) + Utils.Print("Wait for issue action transaction to become finalized.") + transId=Node.getTransId(trans[1]) + # biosNode.waitForTransInBlock(transId) + # guesstimating block finalization timeout. Two production rounds of 12 blocks per node, plus 60 seconds buffer + timeout = .5 * 12 * 2 * len(producerKeys) + 60 + if not biosNode.waitForTransFinalization(transId, timeout=timeout): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a finalized block on server port %d." 
% (transId, biosNode.port)) + return None - # Create currency0000, followed by issue currency0000 - contract=eosioTokenAccount.name - Utils.Print("push create action to %s contract" % (contract)) - action="create" - data="{\"issuer\":\"%s\",\"maximum_supply\":\"1000000000.0000 %s\",\"can_freeze\":\"0\",\"can_recall\":\"0\",\"can_whitelist\":\"0\"}" % (eosioTokenAccount.name, CORE_SYMBOL) - opts="--permission %s@active" % (contract) - trans=biosNode.pushMessage(contract, action, data, opts) - if trans is None or not trans[0]: - Utils.Print("ERROR: Failed to push create action to eosio contract.") - return None + expectedAmount="1000000000.0000 {0}".format(CORE_SYMBOL) + Utils.Print("Verify eosio issue, Expected: %s" % (expectedAmount)) + actualAmount=biosNode.getAccountEosBalanceStr(eosioAccount.name) + if expectedAmount != actualAmount: + Utils.Print("ERROR: Issue verification failed. Excepted %s, actual: %s" % + (expectedAmount, actualAmount)) + return None - Node.validateTransaction(trans[1]) - transId=Node.getTransId(trans[1]) - if not biosNode.waitForTransInBlock(transId): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) - return None + contract="eosio.system" + contractDir="contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + Utils.Print("Publish %s contract" % (contract)) + trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + Utils.Print("ERROR: Failed to publish contract %s." 
% (contract)) + return None + + Node.validateTransaction(trans) - contract=eosioTokenAccount.name - Utils.Print("push issue action to %s contract" % (contract)) - action="issue" - data="{\"to\":\"%s\",\"quantity\":\"1000000000.0000 %s\",\"memo\":\"initial issue\"}" % (eosioAccount.name, CORE_SYMBOL) - opts="--permission %s@active" % (contract) + initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) + Utils.Print("Transfer initial fund %s to individual accounts." % (initialFunds)) + trans=None + contract=eosioTokenAccount.name + action="transfer" + for name, keys in producerKeys.items(): + data="{\"from\":\"%s\",\"to\":\"%s\",\"quantity\":\"%s\",\"memo\":\"%s\"}" % (eosioAccount.name, name, initialFunds, "init transfer") + opts="--permission %s@active" % (eosioAccount.name) trans=biosNode.pushMessage(contract, action, data, opts) if trans is None or not trans[0]: - Utils.Print("ERROR: Failed to push issue action to eosio contract.") + Utils.Print("ERROR: Failed to transfer funds from %s to %s." % (eosioTokenAccount.name, name)) return None Node.validateTransaction(trans[1]) - Utils.Print("Wait for issue action transaction to become finalized.") - transId=Node.getTransId(trans[1]) - # biosNode.waitForTransInBlock(transId) - # guesstimating block finalization timeout. Two production rounds of 12 blocks per node, plus 60 seconds buffer - timeout = .5 * 12 * 2 * len(producerKeys) + 60 - if not biosNode.waitForTransFinalization(transId, timeout=timeout): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a finalized block on server port %d." % (transId, biosNode.port)) - return None - - expectedAmount="1000000000.0000 {0}".format(CORE_SYMBOL) - Utils.Print("Verify eosio issue, Expected: %s" % (expectedAmount)) - actualAmount=biosNode.getAccountEosBalanceStr(eosioAccount.name) - if expectedAmount != actualAmount: - Utils.Print("ERROR: Issue verification failed. 
Excepted %s, actual: %s" % - (expectedAmount, actualAmount)) - return None - - contract="eosio.system" - contractDir="contracts/%s" % (contract) - wasmFile="%s.wasm" % (contract) - abiFile="%s.abi" % (contract) - Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - Utils.Print("ERROR: Failed to publish contract %s." % (contract)) - return None - Node.validateTransaction(trans) - - initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) - Utils.Print("Transfer initial fund %s to individual accounts." % (initialFunds)) - trans=None - contract=eosioTokenAccount.name - action="transfer" - for name, keys in producerKeys.items(): - data="{\"from\":\"%s\",\"to\":\"%s\",\"quantity\":\"%s\",\"memo\":\"%s\"}" % (eosioAccount.name, name, initialFunds, "init transfer") - opts="--permission %s@active" % (eosioAccount.name) - trans=biosNode.pushMessage(contract, action, data, opts) - if trans is None or not trans[0]: - Utils.Print("ERROR: Failed to transfer funds from %s to %s." % (eosioTokenAccount.name, name)) - return None - - Node.validateTransaction(trans[1]) - - Utils.Print("Wait for last transfer transaction to become finalized.") - transId=Node.getTransId(trans[1]) - if not biosNode.waitForTransInBlock(transId): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) - return None + Utils.Print("Wait for last transfer transaction to become finalized.") + transId=Node.getTransId(trans[1]) + if not biosNode.waitForTransInBlock(transId): + Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." 
% (transId, biosNode.port)) + return None - Utils.Print("Cluster bootstrap done.") - finally: - if not dontKill: - walletMgr.killall() - walletMgr.cleanup() + Utils.Print("Cluster bootstrap done.") return biosNode diff --git a/tests/Node.py b/tests/Node.py index c7e348efdcf..4a081021ce4 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -50,11 +50,12 @@ def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost= self.lastRetrievedHeadBlockNum=None self.lastRetrievedLIB=None self.transCache={} + self.walletEndpointArgs="" if self.enableMongo: self.mongoEndpointArgs += "--host %s --port %d %s" % (mongoHost, mongoPort, mongoDb) def eosClientArgs(self): - return self.endpointArgs + " " + self.miscEosClientArgs + return self.endpointArgs + self.walletEndpointArgs + " " + self.miscEosClientArgs def __str__(self): #return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd) @@ -227,7 +228,7 @@ def byteArrToStr(arr): return arr.decode("utf-8") def setWalletEndpointArgs(self, args): - self.endpointArgs="--url http://%s:%d %s" % (self.host, self.port, args) + self.walletEndpointArgs=args def validateAccounts(self, accounts): assert(accounts) diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index c1f9917879d..74460fb0a17 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -25,17 +25,27 @@ def __init__(self, walletd, nodeosPort=8888, nodeosHost="localhost", port=9899, self.host=host self.wallets={} self.__walletPid=None - self.endpointArgs="--url http://%s:%d" % (self.nodeosHost, self.nodeosPort) - self.walletEndpointArgs="" - if self.walletd: - self.walletEndpointArgs += " --wallet-url http://%s:%d" % (self.host, self.port) - self.endpointArgs += self.walletEndpointArgs + + def getWalletEndpointArgs(self): + if not self.walletd: + return "" + + return " --wallet-url http://%s:%d" % (self.host, self.port) + + def getEndpointArgs(self): + return " --url http://%s:%d%s" % (self.nodeosHost, self.nodeosPort, 
self.getWalletEndpointArgs()) + + def isLaunched(self): + return self.__walletPid is not None def launch(self): if not self.walletd: Utils.Print("ERROR: Wallet Manager wasn't configured to launch keosd") return False + if self.isLaunched(): + return True + if Utils.Debug: portStatus="N/A" portTaken=False @@ -80,7 +90,7 @@ def create(self, name, accounts=None, exitOnError=True): return wallet p = re.compile(r'\n\"(\w+)\"\n', re.MULTILINE) cmdDesc="wallet create" - cmd="%s %s %s --name %s --to-console" % (Utils.EosClientPath, self.endpointArgs, cmdDesc, name) + cmd="%s %s %s --name %s --to-console" % (Utils.EosClientPath, self.getEndpointArgs(), cmdDesc, name) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) retStr=None maxRetryCount=4 @@ -139,7 +149,7 @@ def importKeys(self, accounts, wallet, ignoreDupKeyWarning=False): def importKey(self, account, wallet, ignoreDupKeyWarning=False): warningMsg="Key already in wallet" cmd="%s %s wallet import --name %s --private-key %s" % ( - Utils.EosClientPath, self.endpointArgs, wallet.name, account.ownerPrivateKey) + Utils.EosClientPath, self.getEndpointArgs(), wallet.name, account.ownerPrivateKey) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) try: Utils.checkOutput(cmd.split()) @@ -156,7 +166,7 @@ def importKey(self, account, wallet, ignoreDupKeyWarning=False): Utils.Print("WARNING: Active private key is not defined for account \"%s\"" % (account.name)) else: cmd="%s %s wallet import --name %s --private-key %s" % ( - Utils.EosClientPath, self.endpointArgs, wallet.name, account.activePrivateKey) + Utils.EosClientPath, self.getEndpointArgs(), wallet.name, account.activePrivateKey) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) try: Utils.checkOutput(cmd.split()) @@ -173,7 +183,7 @@ def importKey(self, account, wallet, ignoreDupKeyWarning=False): return True def lockWallet(self, wallet): - cmd="%s %s wallet lock --name %s" % (Utils.EosClientPath, self.endpointArgs, wallet.name) + cmd="%s %s wallet lock --name %s" % 
(Utils.EosClientPath, self.getEndpointArgs(), wallet.name) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull): Utils.Print("ERROR: Failed to lock wallet %s." % (wallet.name)) @@ -182,7 +192,7 @@ def lockWallet(self, wallet): return True def unlockWallet(self, wallet): - cmd="%s %s wallet unlock --name %s" % (Utils.EosClientPath, self.endpointArgs, wallet.name) + cmd="%s %s wallet unlock --name %s" % (Utils.EosClientPath, self.getEndpointArgs(), wallet.name) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) popen=subprocess.Popen(cmd.split(), stdout=Utils.FNull, stdin=subprocess.PIPE) _, errs = popen.communicate(input=wallet.password.encode("utf-8")) @@ -193,7 +203,7 @@ def unlockWallet(self, wallet): return True def lockAllWallets(self): - cmd="%s %s wallet lock_all" % (Utils.EosClientPath, self.endpointArgs) + cmd="%s %s wallet lock_all" % (Utils.EosClientPath, self.getEndpointArgs()) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull): Utils.Print("ERROR: Failed to lock all wallets.") @@ -205,7 +215,7 @@ def getOpenWallets(self): wallets=[] p = re.compile(r'\s+\"(\w+)\s\*\",?\n', re.MULTILINE) - cmd="%s %s wallet list" % (Utils.EosClientPath, self.endpointArgs) + cmd="%s %s wallet list" % (Utils.EosClientPath, self.getEndpointArgs()) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) retStr=None try: @@ -227,7 +237,7 @@ def getKeys(self, wallet): keys=[] p = re.compile(r'\n\s+\"(\w+)\"\n', re.MULTILINE) - cmd="%s %s wallet private_keys --name %s --password %s " % (Utils.EosClientPath, self.endpointArgs, wallet.name, wallet.password) + cmd="%s %s wallet private_keys --name %s --password %s " % (Utils.EosClientPath, self.getEndpointArgs(), wallet.name, wallet.password) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) retStr=None try: diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py index 
c92acfde04d..971228854d9 100755 --- a/tests/consensus-validation-malicious-producers.py +++ b/tests/consensus-validation-malicious-producers.py @@ -257,11 +257,6 @@ def myTest(transWillEnterBlock): currencyAccount=accounts[0] currencyAccount.name="currency0000" - Print("Stand up walletd") - if walletMgr.launch() is False: - error("Failed to stand up eos walletd.") - return False - testWalletName="test" Print("Creating wallet \"%s\"." % (testWalletName)) testWallet=walletMgr.create(testWalletName) diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index f83cb3aa0e7..c6192774c79 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -47,11 +47,15 @@ if not cluster.initializeNodesFromJson(jsonStr): errorExit("Failed to initilize nodes from Json string.") total_nodes=len(cluster.getNodes()) + + walletMgr.killall(allInstances=killAll) + walletMgr.cleanup() + print("Stand up walletd") + if walletMgr.launch() is False: + errorExit("Failed to stand up keosd.") else: cluster.killall(allInstances=killAll) cluster.cleanup() - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % (pnodes, total_nodes-pnodes, topo, delay)) @@ -65,12 +69,6 @@ if not cluster.waitOnClusterBlockNumSync(3): errorExit("Cluster never stabilized") - Print("Stand up EOS wallet keosd") - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - errorExit("Failed to stand up keosd.") - accountsCount=total_nodes walletName="MyWallet-%d" % (random.randrange(10000)) Print("Creating wallet %s if one doesn't already exist." 
% walletName) diff --git a/tests/launcher_test.py b/tests/launcher_test.py index 999bd33d9af..9179a69b58d 100755 --- a/tests/launcher_test.py +++ b/tests/launcher_test.py @@ -46,20 +46,26 @@ try: TestHelper.printSystemInfo("BEGIN") - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() + cluster.setWalletMgr(walletMgr) if not dontLaunch: cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(pnodes=4, dontKill=dontKill, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=4, p2pPlugin=p2pPlugin) is False: cmdError("launcher") errorExit("Failed to stand up eos cluster.") else: + walletMgr.killall(allInstances=killAll) + walletMgr.cleanup() cluster.initializeNodes(defproduceraPrvtKey=defproduceraPrvtKey) killEosInstances=False + print("Stand up walletd") + if walletMgr.launch() is False: + cmdError("%s" % (WalletdName)) + errorExit("Failed to stand up eos walletd.") + Print("Validating system accounts after bootstrap") cluster.validateAccounts(None) @@ -86,13 +92,6 @@ exchangeAccount.ownerPrivateKey=PRV_KEY2 exchangeAccount.ownerPublicKey=PUB_KEY2 - Print("Stand up %s" % (WalletdName)) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - cmdError("%s" % (WalletdName)) - errorExit("Failed to stand up eos walletd.") - testWalletName="test" Print("Creating wallet \"%s\"." 
% (testWalletName)) testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,cluster.defproduceraAccount]) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 3e9b3aa0957..36420654dd5 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -128,6 +128,7 @@ def getMinHeadAndLib(prodNodes): try: TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") @@ -141,7 +142,7 @@ def getMinHeadAndLib(prodNodes): # "bridge" shape connects defprocera through defproducerk (in node0) to each other and defproducerl through defproduceru (in node01) # and the only connection between those 2 groups is through the bridge node - if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, topo="bridge", pnodes=totalProducerNodes, + if cluster.launch(prodCount=prodCount, onlyBios=False, topo="bridge", pnodes=totalProducerNodes, totalNodes=totalNodes, totalProducers=totalProducers, p2pPlugin=p2pPlugin, useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: Utils.cmdError("launcher") @@ -164,12 +165,6 @@ def getMinHeadAndLib(prodNodes): testWalletName="test" Print("Creating wallet \"%s\"." 
% (testWalletName)) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - Utils.cmdError("%s" % (WalletdName)) - Utils.errorExit("Failed to stand up eos walletd.") - testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]]) for _, account in cluster.defProducerAccounts.items(): diff --git a/tests/nodeos_run_remote_test.py b/tests/nodeos_run_remote_test.py index 5b3459e780c..0aaf3cc502e 100755 --- a/tests/nodeos_run_remote_test.py +++ b/tests/nodeos_run_remote_test.py @@ -42,7 +42,7 @@ Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, prodCount, topo, delay, onlyBios=onlyBios, dontKill=dontKill) is False: + if cluster.launch(pnodes, total_nodes, prodCount, topo, delay, onlyBios=onlyBios) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index ad674dea5c8..c50d4cb5dce 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -55,25 +55,30 @@ try: TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) Print("SERVER: %s" % (server)) Print("PORT: %d" % (port)) if enableMongo and not cluster.isMongodDbRunning(): errorExit("MongoDb doesn't seem to be running.") - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if localTest and not dontLaunch: cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=onlyBios, dontKill=dontKill, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin) is False: + if cluster.launch(prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin) is False: cmdError("launcher") errorExit("Failed to stand up eos 
cluster.") else: cluster.initializeNodes(defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey) killEosInstances=False + Print("Stand up %s" % (WalletdName)) + walletMgr.killall(allInstances=killAll) + walletMgr.cleanup() + print("Stand up walletd") + if walletMgr.launch() is False: + cmdError("%s" % (WalletdName)) + errorExit("Failed to stand up eos walletd.") if sanityTest: testSuccessful=True @@ -105,13 +110,6 @@ exchangeAccount.ownerPrivateKey=PRV_KEY2 exchangeAccount.ownerPublicKey=PUB_KEY2 - Print("Stand up %s" % (WalletdName)) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - cmdError("%s" % (WalletdName)) - errorExit("Failed to stand up eos walletd.") - testWalletName="test" Print("Creating wallet \"%s\"." % (testWalletName)) testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,cluster.defproduceraAccount,cluster.defproducerbAccount]) diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index 7ef19ab25a2..e3ca6325338 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -73,6 +73,7 @@ def setName(self, num): try: TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) cluster.killall(allInstances=killAll) cluster.cleanup() @@ -82,7 +83,7 @@ def setName(self, num): maxRAMFlag="--chain-state-db-size-mb" maxRAMValue=1010 extraNodeosArgs=" %s %d %s %d " % (minRAMFlag, minRAMValue, maxRAMFlag, maxRAMValue) - if cluster.launch(onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs, useBiosBootFile=False) is False: + if cluster.launch(onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs, useBiosBootFile=False) is False: Utils.cmdError("launcher") errorExit("Failed to stand up eos cluster.") @@ -96,12 +97,6 @@ def setName(self, num): 
testWalletName="test" Print("Creating wallet \"%s\"." % (testWalletName)) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - Utils.cmdError("%s" % (WalletdName)) - errorExit("Failed to stand up eos walletd.") - testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount]) for _, account in cluster.defProducerAccounts.items(): diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index a786846099f..f709f646a8e 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -161,11 +161,12 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): try: TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=False, dontKill=dontKill, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin, useBiosBootFile=False) is False: + if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin, useBiosBootFile=False) is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") @@ -184,12 +185,6 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): testWalletName="test" Print("Creating wallet \"%s\"." 
% (testWalletName)) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - Utils.cmdError("%s" % (WalletdName)) - Utils.errorExit("Failed to stand up eos walletd.") - testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]]) for _, account in cluster.defProducerAccounts.items(): diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py index d3329c8b65f..a8ae17d78b7 100755 --- a/tests/restart-scenarios-test.py +++ b/tests/restart-scenarios-test.py @@ -52,6 +52,7 @@ try: TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) cluster.setChainStrategy(chainSyncStrategyStr) cluster.setWalletMgr(walletMgr) @@ -74,11 +75,6 @@ errorExit("Cluster never stabilized") Print("Stand up EOS wallet keosd") - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - if walletMgr.launch() is False: - errorExit("Failed to stand up keosd.") - accountsCount=total_nodes walletName="MyWallet" Print("Creating wallet %s if one doesn't already exist." % walletName) From 90dab4351280a76881a7e10036f46d61b57b8359 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 27 Sep 2018 08:24:45 -0500 Subject: [PATCH 062/161] Adding --no-auto-keosd flag to wallet methods. 
GH #5674 --- tests/Node.py | 3 +-- tests/WalletMgr.py | 20 ++++++++++---------- tests/testUtils.py | 1 + 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 4a081021ce4..1948408772f 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -44,7 +44,6 @@ def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost= self.mongoDb=mongoDb self.endpointHttp="http://%s:%d" % (self.host, self.port) self.endpointArgs="--url %s" % (self.endpointHttp) - self.miscEosClientArgs="--no-auto-keosd" self.mongoEndpointArgs="" self.infoValid=None self.lastRetrievedHeadBlockNum=None @@ -55,7 +54,7 @@ def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost= self.mongoEndpointArgs += "--host %s --port %d %s" % (mongoHost, mongoPort, mongoDb) def eosClientArgs(self): - return self.endpointArgs + self.walletEndpointArgs + " " + self.miscEosClientArgs + return self.endpointArgs + self.walletEndpointArgs + " " + Utils.MiscEosClientArgs def __str__(self): #return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd) diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index 74460fb0a17..87062fb81e8 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -32,8 +32,8 @@ def getWalletEndpointArgs(self): return " --wallet-url http://%s:%d" % (self.host, self.port) - def getEndpointArgs(self): - return " --url http://%s:%d%s" % (self.nodeosHost, self.nodeosPort, self.getWalletEndpointArgs()) + def getArgs(self): + return " --url http://%s:%d%s %s" % (self.nodeosHost, self.nodeosPort, self.getWalletEndpointArgs(), Utils.MiscEosClientArgs) def isLaunched(self): return self.__walletPid is not None @@ -90,7 +90,7 @@ def create(self, name, accounts=None, exitOnError=True): return wallet p = re.compile(r'\n\"(\w+)\"\n', re.MULTILINE) cmdDesc="wallet create" - cmd="%s %s %s --name %s --to-console" % (Utils.EosClientPath, self.getEndpointArgs(), cmdDesc, name) + cmd="%s %s %s --name %s 
--to-console" % (Utils.EosClientPath, self.getArgs(), cmdDesc, name) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) retStr=None maxRetryCount=4 @@ -149,7 +149,7 @@ def importKeys(self, accounts, wallet, ignoreDupKeyWarning=False): def importKey(self, account, wallet, ignoreDupKeyWarning=False): warningMsg="Key already in wallet" cmd="%s %s wallet import --name %s --private-key %s" % ( - Utils.EosClientPath, self.getEndpointArgs(), wallet.name, account.ownerPrivateKey) + Utils.EosClientPath, self.getArgs(), wallet.name, account.ownerPrivateKey) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) try: Utils.checkOutput(cmd.split()) @@ -166,7 +166,7 @@ def importKey(self, account, wallet, ignoreDupKeyWarning=False): Utils.Print("WARNING: Active private key is not defined for account \"%s\"" % (account.name)) else: cmd="%s %s wallet import --name %s --private-key %s" % ( - Utils.EosClientPath, self.getEndpointArgs(), wallet.name, account.activePrivateKey) + Utils.EosClientPath, self.getArgs(), wallet.name, account.activePrivateKey) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) try: Utils.checkOutput(cmd.split()) @@ -183,7 +183,7 @@ def importKey(self, account, wallet, ignoreDupKeyWarning=False): return True def lockWallet(self, wallet): - cmd="%s %s wallet lock --name %s" % (Utils.EosClientPath, self.getEndpointArgs(), wallet.name) + cmd="%s %s wallet lock --name %s" % (Utils.EosClientPath, self.getArgs(), wallet.name) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull): Utils.Print("ERROR: Failed to lock wallet %s." 
% (wallet.name)) @@ -192,7 +192,7 @@ def lockWallet(self, wallet): return True def unlockWallet(self, wallet): - cmd="%s %s wallet unlock --name %s" % (Utils.EosClientPath, self.getEndpointArgs(), wallet.name) + cmd="%s %s wallet unlock --name %s" % (Utils.EosClientPath, self.getArgs(), wallet.name) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) popen=subprocess.Popen(cmd.split(), stdout=Utils.FNull, stdin=subprocess.PIPE) _, errs = popen.communicate(input=wallet.password.encode("utf-8")) @@ -203,7 +203,7 @@ def unlockWallet(self, wallet): return True def lockAllWallets(self): - cmd="%s %s wallet lock_all" % (Utils.EosClientPath, self.getEndpointArgs()) + cmd="%s %s wallet lock_all" % (Utils.EosClientPath, self.getArgs()) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull): Utils.Print("ERROR: Failed to lock all wallets.") @@ -215,7 +215,7 @@ def getOpenWallets(self): wallets=[] p = re.compile(r'\s+\"(\w+)\s\*\",?\n', re.MULTILINE) - cmd="%s %s wallet list" % (Utils.EosClientPath, self.getEndpointArgs()) + cmd="%s %s wallet list" % (Utils.EosClientPath, self.getArgs()) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) retStr=None try: @@ -237,7 +237,7 @@ def getKeys(self, wallet): keys=[] p = re.compile(r'\n\s+\"(\w+)\"\n', re.MULTILINE) - cmd="%s %s wallet private_keys --name %s --password %s " % (Utils.EosClientPath, self.getEndpointArgs(), wallet.name, wallet.password) + cmd="%s %s wallet private_keys --name %s --password %s " % (Utils.EosClientPath, self.getArgs(), wallet.name, wallet.password) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) retStr=None try: diff --git a/tests/testUtils.py b/tests/testUtils.py index 88a3d9e1279..f1225ba1fe9 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -18,6 +18,7 @@ class Utils: FNull = open(os.devnull, 'w') EosClientPath="programs/cleos/cleos" + MiscEosClientArgs="--no-auto-keosd" EosWalletName="keosd" EosWalletPath="programs/keosd/"+ EosWalletName From 
a10b34680f5e5a03ef09813c804ca1e5c0276284 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 27 Sep 2018 11:59:31 -0500 Subject: [PATCH 063/161] Changing WalletMgr to try the next port if a port is not available. GH #5674 --- tests/Cluster.py | 27 +++++++++------------------ tests/Node.py | 10 ++++------ tests/WalletMgr.py | 24 +++++++++++++++++++++--- tests/testUtils.py | 1 + 4 files changed, 35 insertions(+), 27 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index a9519fe6c13..561b9f3e61f 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -11,7 +11,6 @@ import sys import random import json -import errno from core_symbol import CORE_SYMBOL from testUtils import Utils @@ -63,9 +62,6 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 self.port=port self.walletHost=walletHost self.walletPort=walletPort - self.walletEndpointArgs="" - if self.walletd: - self.walletEndpointArgs += " --wallet-url http://%s:%d" % (self.walletHost, self.walletPort) self.mongoEndpointArgs="" self.mongoUri="" if self.enableMongo: @@ -127,6 +123,9 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne if len(self.nodes) > 0: raise RuntimeError("Cluster already running.") + if self.walletMgr is None: + self.walletMgr=WalletMgr(True) + producerFlag="" if totalProducers: assert(isinstance(totalProducers, (str,int))) @@ -329,8 +328,7 @@ def connectGroup(group, producerNodes, bridgeNodes) : self.nodes=nodes if onlyBios: - biosNode=Node(Cluster.__BiosHost, Cluster.__BiosPort) - biosNode.setWalletEndpointArgs(self.walletEndpointArgs) + biosNode=Node(Cluster.__BiosHost, Cluster.__BiosPort, walletMgr=self.walletMgr) if not biosNode.checkPulse(): Utils.Print("ERROR: Bios node doesn't appear to be running...") return False @@ -348,8 +346,6 @@ def connectGroup(group, producerNodes, bridgeNodes) : return True Utils.Print("Bootstrap cluster.") - if self.walletMgr is None: - self.walletMgr=WalletMgr(True) if onlyBios or 
not useBiosBootFile: self.biosNode=Cluster.bootstrap(totalNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios) if self.biosNode is None: @@ -392,8 +388,7 @@ def initAccountKeys(account, keys): def initializeNodes(self, defproduceraPrvtKey=None, defproducerbPrvtKey=None, onlyBios=False): port=Cluster.__BiosPort if onlyBios else self.port host=Cluster.__BiosHost if onlyBios else self.host - node=Node(host, port, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) - node.setWalletEndpointArgs(self.walletEndpointArgs) + node=Node(host, port, walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) if Utils.Debug: Utils.Print("Node: %s", str(node)) node.checkPulse(exitOnError=True) @@ -432,8 +427,7 @@ def initializeNodesFromJson(self, nodesJsonStr): for n in nArr: port=n["port"] host=n["host"] - node=Node(host, port) - node.setWalletEndpointArgs(self.walletEndpointArgs) + node=Node(host, port, walletMgr=self.walletMgr) if Utils.Debug: Utils.Print("Node:", node) node.checkPulse(exitOnError=True) @@ -845,7 +839,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr): """Bootstrap cluster using the bios_boot.sh script generated by eosio-launcher.""" Utils.Print("Starting cluster bootstrap.") - biosNode=Node(biosHost, biosPort) + biosNode=Node(biosHost, biosPort, walletMgr=walletMgr) if not biosNode.checkPulse(): Utils.Print("ERROR: Bios node doesn't appear to be running...") return None @@ -876,7 +870,6 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr): if not walletMgr.launch(): Utils.Print("ERROR: Failed to launch bootstrap wallet.") return None - biosNode.setWalletEndpointArgs(walletMgr.getWalletEndpointArgs()) ignWallet=walletMgr.create("ignition") if ignWallet is None: @@ -931,7 +924,7 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM if 
totalProducers is None: totalProducers=totalNodes - biosNode=Node(biosHost, biosPort) + biosNode=Node(biosHost, biosPort, walletMgr=walletMgr) if not biosNode.checkPulse(): Utils.Print("ERROR: Bios node doesn't appear to be running...") return None @@ -951,7 +944,6 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM if not walletMgr.launch(): Utils.Print("ERROR: Failed to launch bootstrap wallet.") return None - biosNode.setWalletEndpointArgs(walletMgr.getWalletEndpointArgs()) ignWallet=walletMgr.create("ignition") @@ -1229,8 +1221,7 @@ def discoverLocalNodes(self, totalNodes, timeout=None): if m is None: Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) break - instance=Node(self.host, self.port + i, pid=int(m.group(1)), cmd=m.group(2), enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) - instance.setWalletEndpointArgs(self.walletEndpointArgs) + instance=Node(self.host, self.port + i, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) if Utils.Debug: Utils.Print("Node>", instance) nodes.append(instance) diff --git a/tests/Node.py b/tests/Node.py index 1948408772f..93ab8502555 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -31,7 +31,7 @@ class Node(object): # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-arguments - def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost="localhost", mongoPort=27017, mongoDb="EOStest"): + def __init__(self, host, port, pid=None, cmd=None, walletMgr=None, enableMongo=False, mongoHost="localhost", mongoPort=27017, mongoDb="EOStest"): self.host=host self.port=port self.pid=pid @@ -49,12 +49,13 @@ def __init__(self, host, port, pid=None, cmd=None, enableMongo=False, mongoHost= self.lastRetrievedHeadBlockNum=None self.lastRetrievedLIB=None 
self.transCache={} - self.walletEndpointArgs="" + self.walletMgr=walletMgr if self.enableMongo: self.mongoEndpointArgs += "--host %s --port %d %s" % (mongoHost, mongoPort, mongoDb) def eosClientArgs(self): - return self.endpointArgs + self.walletEndpointArgs + " " + Utils.MiscEosClientArgs + walletArgs=" " + self.walletMgr.getWalletEndpointArgs() if self.walletMgr is not None else "" + return self.endpointArgs + walletArgs + " " + Utils.MiscEosClientArgs def __str__(self): #return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd) @@ -226,9 +227,6 @@ def isTrans(obj): def byteArrToStr(arr): return arr.decode("utf-8") - def setWalletEndpointArgs(self, args): - self.walletEndpointArgs=args - def validateAccounts(self, accounts): assert(accounts) assert(isinstance(accounts, list)) diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index 87062fb81e8..4a4524b233c 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -14,6 +14,7 @@ class WalletMgr(object): __walletLogFile="test_keosd_output.log" __walletDataDir="test_wallet_0" + __MaxPort=9999 # pylint: disable=too-many-arguments # walletd [True|False] True=Launch wallet(keosd) process; False=Manage launch process externally. 
@@ -27,7 +28,7 @@ def __init__(self, walletd, nodeosPort=8888, nodeosHost="localhost", port=9899, self.__walletPid=None def getWalletEndpointArgs(self): - if not self.walletd: + if not self.walletd or not self.isLaunched(): return "" return " --wallet-url http://%s:%d" % (self.host, self.port) @@ -38,6 +39,20 @@ def getArgs(self): def isLaunched(self): return self.__walletPid is not None + def isLocal(self): + return self.host=="localhost" or self.host=="127.0.0.1" + + def findAvailablePort(self): + for i in range(WalletMgr.__MaxPort): + port=self.port+i + if port > WalletMgr.__MaxPort: + port-=WalletMgr.__MaxPort + if Utils.arePortsAvailable(port): + return port + if Utils.Debug: Utils.Print("Port %d not available for %s" % (port, Utils.EosWalletPath)) + + Utils.errorExit("Failed to find free port to use for %s" % (Utils.EosWalletPath)) + def launch(self): if not self.walletd: Utils.Print("ERROR: Wallet Manager wasn't configured to launch keosd") @@ -46,10 +61,13 @@ def launch(self): if self.isLaunched(): return True + if self.isLocal(): + self.port=self.findAvailablePort() + if Utils.Debug: portStatus="N/A" portTaken=False - if self.host=="localhost" or self.host=="127.0.0.1": + if self.isLocal(): if Utils.arePortsAvailable(self.port): portStatus="AVAILABLE" portTaken=True @@ -106,7 +124,7 @@ def create(self, name, accounts=None, exitOnError=True): pgrepCmd=Utils.pgrepCmd(Utils.EosWalletName) psOut=Utils.checkOutput(pgrepCmd.split()) portStatus="N/A" - if self.host=="localhost" or self.host=="127.0.0.1": + if self.isLocal(): if Utils.arePortsAvailable(self.port): portStatus="AVAILABLE" else: diff --git a/tests/testUtils.py b/tests/testUtils.py index f1225ba1fe9..9c52fe3796a 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -1,3 +1,4 @@ +import errno import subprocess import time import os From 959d0c1e08e183f416d2a3752b5fa5070efdbfe4 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 28 Sep 2018 08:11:12 -0500 Subject: [PATCH 064/161] Reverted 
manipulating genesis file in script and added flag to launcher to edit genesis file. GH #5674 --- programs/eosio-launcher/main.cpp | 175 +++++++++++++++---------------- tests/Cluster.py | 18 +--- 2 files changed, 87 insertions(+), 106 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index ad283dca94a..dac65624c7c 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -387,63 +388,64 @@ string producer_names::producer_name(unsigned int producer_number) { } struct launcher_def { - bool force_overwrite; - size_t total_nodes; - size_t prod_nodes; - size_t producers; - size_t next_node; - string shape; - p2p_plugin p2p; - allowed_connection allowed_connections = PC_NONE; - bfs::path genesis; - bfs::path output; - bfs::path host_map_file; - bfs::path server_ident_file; - bfs::path stage; - - string erd; - bfs::path config_dir_base; - bfs::path data_dir_base; - bool skip_transaction_signatures = false; - string eosd_extra_args; - std::map specific_nodeos_args; - testnet_def network; - string gelf_endpoint; - vector aliases; - vector bindings; - int per_host = 0; - last_run_def last_run; - int start_delay = 0; - bool gelf_enabled; - bool nogen; - bool boot; - bool add_enable_stale_production = false; - string launch_name; - string launch_time; - server_identities servers; - producer_set_def producer_set; - vector genesis_block; + bool force_overwrite; + size_t total_nodes; + size_t prod_nodes; + size_t producers; + size_t next_node; + string shape; + p2p_plugin p2p; + allowed_connection allowed_connections = PC_NONE; + bfs::path genesis; + bfs::path output; + bfs::path host_map_file; + bfs::path server_ident_file; + bfs::path stage; + + string erd; + bfs::path config_dir_base; + bfs::path data_dir_base; + bool skip_transaction_signatures = false; + string eosd_extra_args; + std::map specific_nodeos_args; + 
testnet_def network; + string gelf_endpoint; + vector aliases; + vector bindings; + int per_host = 0; + last_run_def last_run; + int start_delay = 0; + bool gelf_enabled; + bool nogen; + bool boot; + bool add_enable_stale_production = false; + string launch_name; + string launch_time; + server_identities servers; + producer_set_def producer_set; string start_temp; string start_script; + fc::optional max_block_cpu_usage; + eosio::chain::genesis_state genesis_from_file; void assign_name (eosd_def &node, bool is_bios); - void set_options (bpo::options_description &cli); - void initialize (const variables_map &vmap); + void set_options (bpo::options_description &cli); + void initialize (const variables_map &vmap); void init_genesis (); - void load_servers (); - bool generate (); - void define_network (); - void bind_nodes (); - host_def *find_host (const string &name); - host_def *find_host_by_name_or_address (const string &name); - host_def *deploy_config_files (tn_node_def &node); - string compose_scp_command (const host_def &host, const bfs::path &source, - const bfs::path &destination); - void write_config_file (tn_node_def &node); - void write_logging_config_file (tn_node_def &node); - void write_genesis_file (tn_node_def &node); - void write_setprods_file (); + void load_servers (); + bool generate (); + void define_network (); + void bind_nodes (); + host_def *find_host (const string &name); + host_def *find_host_by_name_or_address (const string &name); + host_def *deploy_config_files (tn_node_def &node); + string compose_scp_command (const host_def &host, const bfs::path &source, + const bfs::path &destination); + void write_config_file (tn_node_def &node); + void write_logging_config_file (tn_node_def &node); + void write_genesis_file (tn_node_def &node); + void write_setprods_file (); void write_bios_boot (); bool is_bios_ndx (size_t ndx); @@ -451,25 +453,25 @@ struct launcher_def { bool next_ndx(size_t &ndx); size_t skip_ndx (size_t from, size_t offset); - 
void make_ring (); - void make_star (); - void make_mesh (); - void make_custom (); - void write_dot_file (); - void format_ssh (const string &cmd, const string &host_name, string &ssh_cmd_line); - void do_command(const host_def& host, const string& name, vector> env_pairs, const string& cmd); - bool do_ssh (const string &cmd, const string &host_name); - void prep_remote_config_dir (eosd_def &node, host_def *host); - void launch (eosd_def &node, string >s); - void kill (launch_modes mode, string sig_opt); - static string get_node_num(uint16_t node_num); - pair find_node(uint16_t node_num); - vector> get_nodes(const string& node_number_list); - void bounce (const string& node_numbers); - void down (const string& node_numbers); - void roll (const string& host_names); - void start_all (string >s, launch_modes mode); - void ignite (); + void make_ring (); + void make_star (); + void make_mesh (); + void make_custom (); + void write_dot_file (); + void format_ssh (const string &cmd, const string &host_name, string &ssh_cmd_line); + void do_command(const host_def& host, const string& name, vector> env_pairs, const string& cmd); + bool do_ssh (const string &cmd, const string &host_name); + void prep_remote_config_dir (eosd_def &node, host_def *host); + void launch (eosd_def &node, string >s); + void kill (launch_modes mode, string sig_opt); + static string get_node_num(uint16_t node_num); + pair find_node(uint16_t node_num); + vector> get_nodes(const string& node_number_list); + void bounce (const string& node_numbers); + void down (const string& node_numbers); + void roll (const string& host_names); + void start_all (string >s, launch_modes mode); + void ignite (); }; void @@ -498,6 +500,7 @@ launcher_def::set_options (bpo::options_description &cfg) { ("gelf-endpoint",bpo::value(&gelf_endpoint)->default_value("10.160.11.21:12201"),"hostname:port or ip:port of GELF endpoint") ("template",bpo::value(&start_temp)->default_value("testnet.template"),"the startup script 
template") ("script",bpo::value(&start_script)->default_value("bios_boot.sh"),"the generated startup script name") + ("max-block-cpu-usage",bpo::value(),"") ; } @@ -529,6 +532,10 @@ launcher_def::initialize (const variables_map &vmap) { } } + if (vmap.count("max-block-cpu-usage")) { + max_block_cpu_usage = vmap["max-block-cpu-usage"].as(); + } + if (vmap.count("specific-num")) { const auto specific_nums = vmap["specific-num"].as>(); const auto specific_args = vmap["specific-nodeos"].as>(); @@ -1157,27 +1164,18 @@ launcher_def::write_logging_config_file(tn_node_def &node) { void launcher_def::init_genesis () { - bfs::path genesis_path = genesis.is_complete() ? genesis : bfs::current_path() / genesis; - bfs::ifstream src(genesis_path); - if (!src.good()) { + const bfs::path genesis_path = genesis.is_complete() ? genesis : bfs::current_path() / genesis; + if (!bfs::exists(genesis_path)) { cout << "generating default genesis file " << genesis_path << endl; eosio::chain::genesis_state default_genesis; fc::json::save_to_file( default_genesis, genesis_path, true ); - src.open(genesis_path); } string bioskey = string(network.nodes["bios"].keys[0].get_public_key()); - string str; - string prefix("initial_key"); - while(getline(src,str)) { - size_t pos = str.find(prefix); - if (pos != string::npos) { - size_t cut = str.find("EOS",pos); - genesis_block.push_back(str.substr(0,cut) + bioskey + "\","); - } - else { - genesis_block.push_back(str); - } - } + + fc::json::from_file(genesis_path).as(genesis_from_file); + genesis_from_file.initial_key = public_key_type(bioskey); + if (max_block_cpu_usage) + genesis_from_file.initial_configuration.max_block_cpu_usage = *max_block_cpu_usage; } void @@ -1191,10 +1189,7 @@ launcher_def::write_genesis_file(tn_node_def &node) { } filename = dd / "genesis.json"; - bfs::ofstream gf ( dd / "genesis.json"); - for (auto &line : genesis_block) { - gf << line << "\n"; - } + fc::json::save_to_file( genesis_from_file, dd / "genesis.json", true ); } 
void diff --git a/tests/Cluster.py b/tests/Cluster.py index 561b9f3e61f..e31e0476346 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -171,22 +171,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append("--specific-nodeos") cmdArr.append(arg) - genesisFile=open("./genesis.json", "r") - genesisJsonStr=genesisFile.read() - genesisFile.close() - genesisObject=json.loads(genesisJsonStr) - initialConfiguration=genesisObject["initial_configuration"] - maxBlockCpuUsage=initialConfiguration.get("max_block_cpu_usage",200000) - initialConfiguration["max_block_cpu_usage"]=maxBlockCpuUsage*10 - - - tempGenesisFileName="./tempGenesis.json" - genesisFile=open(tempGenesisFileName,"w") - genesisFile.write(json.dumps(genesisObject, indent=2)) - genesisFile.close() - self.filesToCleanup.append(tempGenesisFileName) - cmdArr.append("--genesis") - cmdArr.append(tempGenesisFileName) + cmdArr.append("--max-block-cpu-usage") + cmdArr.append(str(2000000)) # must be last cmdArr.append before subprocess.call, so that everything is on the command line # before constructing the shape.json file for "bridge" From fbef5f12ecfedd062651fdca03f19aaa07f6971d Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 28 Sep 2018 18:00:50 -0500 Subject: [PATCH 065/161] Fixed config parameters to use strings and then load into boost filesystem paths to work around problem with spaces in file names. 
GH #5674 --- programs/eosio-launcher/main.cpp | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index dac65624c7c..c4378d99ac9 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -484,7 +484,7 @@ launcher_def::set_options (bpo::options_description &cfg) { ("mode,m",bpo::value>()->multitoken()->default_value({"any"}, "any"),"connection mode, combination of \"any\", \"producers\", \"specified\", \"none\"") ("shape,s",bpo::value(&shape)->default_value("star"),"network topology, use \"star\" \"mesh\" or give a filename for custom") ("p2p-plugin", bpo::value()->default_value("net"),"select a p2p plugin to use (either net or bnet). Defaults to net.") - ("genesis,g",bpo::value(&genesis)->default_value("./genesis.json"),"set the path to genesis.json") + ("genesis,g",bpo::value()->default_value("./genesis.json"),"set the path to genesis.json") ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), "nodeos does not require transaction signatures.") ("nodeos", bpo::value(&eosd_extra_args), "forward nodeos command line argument(s) to each instance of nodeos, enclose arg(s) in quotes") ("specific-num", bpo::value>()->composing(), "forward nodeos command line argument(s) (using \"--specific-nodeos\" flag) to this specific instance of nodeos. 
This parameter can be entered multiple times and requires a paired \"--specific-nodeos\" flag") @@ -492,8 +492,8 @@ launcher_def::set_options (bpo::options_description &cfg) { ("delay,d",bpo::value(&start_delay)->default_value(0),"seconds delay before starting each node after the first") ("boot",bpo::bool_switch(&boot)->default_value(false),"After deploying the nodes and generating a boot script, invoke it.") ("nogen",bpo::bool_switch(&nogen)->default_value(false),"launch nodes without writing new config files") - ("host-map",bpo::value(&host_map_file)->default_value(""),"a file containing mapping specific nodes to hosts. Used to enhance the custom shape argument") - ("servers",bpo::value(&server_ident_file)->default_value(""),"a file containing ip addresses and names of individual servers to deploy as producers or non-producers ") + ("host-map",bpo::value(),"a file containing mapping specific nodes to hosts. Used to enhance the custom shape argument") + ("servers",bpo::value(),"a file containing ip addresses and names of individual servers to deploy as producers or non-producers ") ("per-host",bpo::value(&per_host)->default_value(0),"specifies how many nodeos instances will run on a single host. 
Use 0 to indicate all on one.") ("network-name",bpo::value(&network.name)->default_value("testnet_"),"network name prefix used in GELF logging source") ("enable-gelf-logging",bpo::value(&gelf_enabled)->default_value(true),"enable gelf logging appender in logging configuration file") @@ -536,6 +536,14 @@ launcher_def::initialize (const variables_map &vmap) { max_block_cpu_usage = vmap["max-block-cpu-usage"].as(); } + genesis = vmap["genesis"].as(); + if (vmap.count("host-map")) { + host_map_file = vmap["host-map"].as(); + } + if (vmap.count("servers")) { + server_ident_file = vmap["servers"].as(); + } + if (vmap.count("specific-num")) { const auto specific_nums = vmap["specific-num"].as>(); const auto specific_args = vmap["specific-nodeos"].as>(); From 75aa8ff6c562e55c09dc5876a9c1a27637d4fecc Mon Sep 17 00:00:00 2001 From: Ville Sundell Date: Sat, 29 Sep 2018 23:22:28 +0300 Subject: [PATCH 066/161] Improved language: "in micro second" -> "in microseconds" Microsecond is the correct word: https://en.wiktionary.org/wiki/microsecond I would suggest plural here, since these are not happening in a microsecond, these are specified in microseconds. --- plugins/producer_plugin/producer_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index b9fbd0489d0..85f1d4615e7 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -514,9 +514,9 @@ void producer_plugin::set_program_options( ("greylist-account", boost::program_options::value>()->composing()->multitoken(), "account that can not access to extended CPU/NET virtual resources") ("produce-time-offset-us", boost::program_options::value()->default_value(0), - "offset of non last block producing time in micro second. 
Negative number results in blocks to go out sooner, and positive number results in blocks to go out later") + "offset of non last block producing time in microseconds. Negative number results in blocks to go out sooner, and positive number results in blocks to go out later") ("last-block-time-offset-us", boost::program_options::value()->default_value(0), - "offset of last block producing time in micro second. Negative number results in blocks to go out sooner, and positive number results in blocks to go out later") + "offset of last block producing time in microseconds. Negative number results in blocks to go out sooner, and positive number results in blocks to go out later") ("incoming-defer-ratio", bpo::value()->default_value(1.0), "ratio between incoming transations and deferred transactions when both are exhausted") ; From 4876647af4fe798ffb8390bb4b41ca0e794e2b2d Mon Sep 17 00:00:00 2001 From: mooninwater Date: Sun, 30 Sep 2018 21:35:37 +0800 Subject: [PATCH 067/161] modify the sync numbers of start_sync --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index bb3b33a8f23..f6b2fc014d9 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1537,7 +1537,7 @@ namespace eosio { else { c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; reset_lib_num (c); - start_sync(c, msg.known_blocks.pending); + start_sync(c, msg.known_trx.pending); } } From 97e4d3ae7f3686870da53b87396757cecc3d2e29 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 29 Sep 2018 07:20:02 -0500 Subject: [PATCH 068/161] Split up wallet stdout and stderr to go to separate files. 
GH #5674 --- tests/WalletMgr.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index 4a4524b233c..084a7e9ca55 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -12,7 +12,8 @@ Wallet=namedtuple("Wallet", "name password host port") # pylint: disable=too-many-instance-attributes class WalletMgr(object): - __walletLogFile="test_keosd_output.log" + __walletLogOutFile="test_keosd_out.log" + __walletLogErrFile="test_keosd_err.log" __walletDataDir="test_wallet_0" __MaxPort=9999 @@ -70,9 +71,9 @@ def launch(self): if self.isLocal(): if Utils.arePortsAvailable(self.port): portStatus="AVAILABLE" - portTaken=True else: portStatus="NOT AVAILABLE" + portTaken=True pgrepCmd=Utils.pgrepCmd(Utils.EosWalletName) psOut=Utils.checkOutput(pgrepCmd.split(), ignoreError=True) if psOut or portTaken: @@ -86,7 +87,7 @@ def launch(self): cmd="%s --data-dir %s --config-dir %s --http-server-address=%s:%d --verbose-http-errors" % ( Utils.EosWalletPath, WalletMgr.__walletDataDir, WalletMgr.__walletDataDir, self.host, self.port) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - with open(WalletMgr.__walletLogFile, 'w') as sout, open(WalletMgr.__walletLogFile, 'w') as serr: + with open(WalletMgr.__walletLogOutFile, 'w') as sout, open(WalletMgr.__walletLogErrFile, 'w') as serr: popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) self.__walletPid=popen.pid @@ -276,9 +277,13 @@ def getKeys(self, wallet): def dumpErrorDetails(self): Utils.Print("=================================================================") if self.__walletPid is not None: - Utils.Print("Contents of %s:" % (WalletMgr.__walletLogFile)) + Utils.Print("Contents of %s:" % (WalletMgr.__walletLogOutFile)) + Utils.Print("=================================================================") + with open(WalletMgr.__walletLogOutFile, "r") as f: + shutil.copyfileobj(f, sys.stdout) + Utils.Print("Contents of %s:" % (WalletMgr.__walletLogErrFile)) 
Utils.Print("=================================================================") - with open(WalletMgr.__walletLogFile, "r") as f: + with open(WalletMgr.__walletLogErrFile, "r") as f: shutil.copyfileobj(f, sys.stdout) def killall(self, allInstances=False): From fd6d31a7942e158de9a68e65091de5617c9b845d Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 1 Oct 2018 16:02:06 -0500 Subject: [PATCH 069/161] Added flag for launcher to set max_transaction_cpu_usage in genesis file and increased parameters for cluster launch. GH #5674 --- programs/eosio-launcher/main.cpp | 10 +++++++++- tests/Cluster.py | 6 +++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index c4378d99ac9..597e473b677 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -426,6 +426,7 @@ struct launcher_def { string start_temp; string start_script; fc::optional max_block_cpu_usage; + fc::optional max_transaction_cpu_usage; eosio::chain::genesis_state genesis_from_file; void assign_name (eosd_def &node, bool is_bios); @@ -500,7 +501,8 @@ launcher_def::set_options (bpo::options_description &cfg) { ("gelf-endpoint",bpo::value(&gelf_endpoint)->default_value("10.160.11.21:12201"),"hostname:port or ip:port of GELF endpoint") ("template",bpo::value(&start_temp)->default_value("testnet.template"),"the startup script template") ("script",bpo::value(&start_script)->default_value("bios_boot.sh"),"the generated startup script name") - ("max-block-cpu-usage",bpo::value(),"") + ("max-block-cpu-usage",bpo::value(),"Provide the \"max-block-cpu-usage\" value to use in the genesis.json file") + ("max-transaction-cpu-usage",bpo::value(),"Provide the \"max-transaction-cpu-usage\" value to use in the genesis.json file") ; } @@ -536,6 +538,10 @@ launcher_def::initialize (const variables_map &vmap) { max_block_cpu_usage = vmap["max-block-cpu-usage"].as(); } + if (vmap.count("max-transaction-cpu-usage")) 
{ + max_transaction_cpu_usage = vmap["max-transaction-cpu-usage"].as(); + } + genesis = vmap["genesis"].as(); if (vmap.count("host-map")) { host_map_file = vmap["host-map"].as(); @@ -1184,6 +1190,8 @@ launcher_def::init_genesis () { genesis_from_file.initial_key = public_key_type(bioskey); if (max_block_cpu_usage) genesis_from_file.initial_configuration.max_block_cpu_usage = *max_block_cpu_usage; + if (max_transaction_cpu_usage) + genesis_from_file.initial_configuration.max_transaction_cpu_usage = *max_transaction_cpu_usage; } void diff --git a/tests/Cluster.py b/tests/Cluster.py index e31e0476346..b8041227c3f 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -172,7 +172,9 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append(arg) cmdArr.append("--max-block-cpu-usage") - cmdArr.append(str(2000000)) + cmdArr.append(str(160000000)) + cmdArr.append("--max-transaction-cpu-usage") + cmdArr.append(str(150000000)) # must be last cmdArr.append before subprocess.call, so that everything is on the command line # before constructing the shape.json file for "bridge" @@ -1274,6 +1276,8 @@ def dumpErrorDetails(self): for i in range(0, len(self.nodes)): fileName="etc/eosio/node_%02d/config.ini" % (i) Cluster.dumpErrorDetailImpl(fileName) + fileName="etc/eosio/node_%02d/genesis.json" % (i) + Cluster.dumpErrorDetailImpl(fileName) fileName="var/lib/node_%02d/stderr.txt" % (i) Cluster.dumpErrorDetailImpl(fileName) From 0c5c9e5de0ce5fea0670e6587dfbaf37bb6ca64a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 2 Oct 2018 13:44:36 -0500 Subject: [PATCH 070/161] Added code to allow setting wallet port for some tests. 
GH #5674 --- tests/TestHelper.py | 7 +++++++ tests/WalletMgr.py | 13 +++++-------- tests/nodeos_forked_chain_test.py | 6 ++++-- tests/nodeos_run_test.py | 7 ++++--- tests/nodeos_under_min_avail_ram.py | 5 +++-- tests/nodeos_voting_test.py | 6 ++++-- 6 files changed, 27 insertions(+), 17 deletions(-) diff --git a/tests/TestHelper.py b/tests/TestHelper.py index 6e00645e9dc..471c397beff 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -26,6 +26,7 @@ def add(self, flag, type, help, default, choices=None): class TestHelper(object): LOCAL_HOST="localhost" DEFAULT_PORT=8888 + DEFAULT_WALLET_PORT=9899 @staticmethod # pylint: disable=too-many-branches @@ -70,6 +71,12 @@ def parse_args(includeArgs, applicationSpecificArgs=AppArgs()): if "--port" in includeArgs: parser.add_argument("-p", "--port", type=int, help="%s host port" % Utils.EosServerName, default=TestHelper.DEFAULT_PORT) + if "--wallet-host" in includeArgs: + parser.add_argument("--wallet-host", type=str, help="%s host" % Utils.EosWalletName, + default=TestHelper.LOCAL_HOST) + if "--wallet-port" in includeArgs: + parser.add_argument("--wallet-port", type=int, help="%s port" % Utils.EosWalletName, + default=TestHelper.DEFAULT_WALLET_PORT) if "--prod-count" in includeArgs: parser.add_argument("-c", "--prod-count", type=int, help="Per node producer count", default=1) if "--defproducera_prvt_key" in includeArgs: diff --git a/tests/WalletMgr.py b/tests/WalletMgr.py index 084a7e9ca55..8b7e4957277 100644 --- a/tests/WalletMgr.py +++ b/tests/WalletMgr.py @@ -65,16 +65,12 @@ def launch(self): if self.isLocal(): self.port=self.findAvailablePort() + pgrepCmd=Utils.pgrepCmd(Utils.EosWalletName) if Utils.Debug: - portStatus="N/A" portTaken=False if self.isLocal(): - if Utils.arePortsAvailable(self.port): - portStatus="AVAILABLE" - else: - portStatus="NOT AVAILABLE" + if not Utils.arePortsAvailable(self.port): portTaken=True - pgrepCmd=Utils.pgrepCmd(Utils.EosWalletName) psOut=Utils.checkOutput(pgrepCmd.split(), 
ignoreError=True) if psOut or portTaken: statusMsg="" @@ -95,10 +91,11 @@ def launch(self): time.sleep(2) try: + if Utils.Debug: Utils.Print("Checking if %s launched. %s" % (Utils.EosWalletName, pgrepCmd)) psOut=Utils.checkOutput(pgrepCmd.split()) - if Utils.Debug: Utils.Print("Launched %s. %s - {%s}" % (Utils.EosWalletName, pgrepCmd, psOut)) + if Utils.Debug: Utils.Print("Launched %s. {%s}" % (Utils.EosWalletName, psOut)) except subprocess.CalledProcessError as ex: - Utils.errorExit("Failed to launch the wallet manager on") + Utils.errorExit("Failed to launch the wallet manager") return True diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 36420654dd5..13d2d5cf70e 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -102,7 +102,8 @@ def getMinHeadAndLib(prodNodes): -args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--p2p-plugin"}) +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", + "--p2p-plugin","--wallet-port"}) Utils.Debug=args.v totalProducerNodes=2 totalNonProducerNodes=1 @@ -116,8 +117,9 @@ def getMinHeadAndLib(prodNodes): prodCount=args.prod_count killAll=args.clean_run p2pPlugin=args.p2p_plugin +walletPort=args.wallet_port -walletMgr=WalletMgr(True) +walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False killEosInstances=not dontKill killWallet=not dontKill diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index c50d4cb5dce..c2f59234bd8 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -22,7 +22,7 @@ args = TestHelper.parse_args({"--host","--port","--prod-count","--defproducera_prvt_key","--defproducerb_prvt_key","--mongodb" ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios","--clean-run" - ,"--sanity-test","--p2p-plugin"}) + 
,"--sanity-test","--p2p-plugin","--wallet-port"}) server=args.host port=args.port debug=args.v @@ -38,15 +38,16 @@ killAll=args.clean_run sanityTest=args.sanity_test p2pPlugin=args.p2p_plugin +walletPort=args.wallet_port Utils.Debug=debug localTest=True if server == TestHelper.LOCAL_HOST else False cluster=Cluster(walletd=True, enableMongo=enableMongo, defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey) -walletMgr=WalletMgr(True) +walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False killEosInstances=not dontKill killWallet=not dontKill -dontBootstrap=sanityTest +dontBootstrap=sanityTest # intent is to limit the scope of the sanity test to just verifying that nodes can be started WalletdName=Utils.EosWalletName ClientName="cleos" diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index e3ca6325338..064296ed282 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -54,7 +54,7 @@ def setName(self, num): # --keep-logs ############################################################### -args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"}) +args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--wallet-port"}) Utils.Debug=args.v totalNodes=4 cluster=Cluster(walletd=True) @@ -62,8 +62,9 @@ def setName(self, num): keepLogs=args.keep_logs dontKill=args.leave_running killAll=args.clean_run +walletPort=args.wallet_port -walletMgr=WalletMgr(True) +walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False killEosInstances=not dontKill killWallet=not dontKill diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index f709f646a8e..d4781d0eefe 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -140,7 +140,8 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): from core_symbol import CORE_SYMBOL -args = 
TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--p2p-plugin"}) +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", + "--p2p-plugin","--wallet-port"}) Utils.Debug=args.v totalNodes=4 cluster=Cluster(walletd=True) @@ -150,8 +151,9 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): prodCount=args.prod_count killAll=args.clean_run p2pPlugin=args.p2p_plugin +walletPort=args.wallet_port -walletMgr=WalletMgr(True) +walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False killEosInstances=not dontKill killWallet=not dontKill From 2d27e5b96f994e9ba0a84a4691a72690f0a2949d Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 2 Oct 2018 13:46:34 -0500 Subject: [PATCH 071/161] Changed long_running_tests to use different ports to identify why keosd is not starting on mac. GH #5674 --- tests/CMakeLists.txt | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 4ef0ca047e6..7b5bf7311ed 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -81,20 +81,24 @@ add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity set_property(TEST nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) add_test(NAME bnet_nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST bnet_nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_run_check_lr_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_run_check_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_run_check2_lr_test COMMAND tests/nodeos_run_test.py -v --wallet-port 9900 --clean-run --dump-error-detail WORKING_DIRECTORY 
${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_run_check2_lr_test PROPERTY LABELS long_running_tests) #add_test(NAME distributed_transactions_lr_test COMMAND tests/distributed-transactions-test.py -d 2 -p 21 -n 21 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) #set_property(TEST distributed_transactions_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_forked_chain_lr_test COMMAND tests/nodeos_forked_chain_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_forked_chain_lr_test COMMAND tests/nodeos_forked_chain_test.py -v --wallet-port 9901 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME bnet_nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME bnet_nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9903 --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST bnet_nodeos_voting_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST 
nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) From 2fbb5129e3dc2dd38166caf24277fa6d617a9830 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 2 Oct 2018 16:29:58 -0400 Subject: [PATCH 072/161] On macOS populate root CAs via system cert store Previously on macOS the HTTPS root CAs that were trusted was loaded from the /private/etc/ssl/cert.pem file. This is 1. Not documented or supported 2. Out of date 3. Doesn't honor distrust of certs 4. Doesn't know of user added certs in the keychain Now, populate the root CAs from the keychain. This solves all of the above. This takes effect for both cleos and the http_client used by producer_plugin for out-of-process keosd signing. Note the behavior for http_client has markedly changed: previously it would not consider any root CAs other than the one the user explicitly provided. Now on macOS and Linux it will consider the system certificate store. This behavior can be reverted if it is controversial. Much of the logic behind the macOS keychain cert loading was taken from golang's library. License included as appropriate. 
--- CMakeLists.txt | 1 + libraries/fc | 2 +- programs/cleos/httpc.cpp | 10 ++-------- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 06033ab2962..3985b8fc7c2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -238,6 +238,7 @@ install(FILES libraries/softfloat/COPYING.txt DESTINATION ${CMAKE_INSTALL_FULL_D install(FILES libraries/wasm-jit/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.wavm) install(FILES libraries/fc/secp256k1/upstream/COPYING DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.secp256k1) install(FILES externals/binaryen/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.binaryen) +install(FILES libraries/fc/src/network/LICENSE.go DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ ) include(installer) diff --git a/libraries/fc b/libraries/fc index aac546b4198..6a83237f9e3 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit aac546b419891ef6644e0d99dba5e8d33f70401d +Subproject commit 6a83237f9e3e71160bbd64d3c87c3418d057624c diff --git a/programs/cleos/httpc.cpp b/programs/cleos/httpc.cpp index e2196d2bc9c..6379ee564c6 100644 --- a/programs/cleos/httpc.cpp +++ b/programs/cleos/httpc.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -229,14 +230,7 @@ namespace eosio { namespace client { namespace http { } else { //https boost::asio::ssl::context ssl_context(boost::asio::ssl::context::sslv23_client); -#if defined( __APPLE__ ) - //TODO: this is undocumented/not supported; fix with keychain based approach - ssl_context.load_verify_file("/private/etc/ssl/cert.pem"); -#elif defined( _WIN32 ) - EOS_THROW(http_exception, "HTTPS on Windows not supported"); -#else - ssl_context.set_default_verify_paths(); -#endif + fc::add_platform_root_cas_to_context(ssl_context); boost::asio::ssl::stream socket(cp.context->ios, ssl_context); 
SSL_set_tlsext_host_name(socket.native_handle(), url.server.c_str()); From 910a9cd146fe238e635f9a0bfa45fa163d89c5bb Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 3 Oct 2018 12:35:46 -0400 Subject: [PATCH 073/161] (chainbase sync) ifdef out unused LOCKING variables to squelch warnings --- libraries/chainbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index 4724baf2095..8ca96ad6b18 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 4724baf2095cdc1bb1722254874b51070adf0e74 +Subproject commit 8ca96ad6b18709d65a7d1f67f8893978f25babcf From df391f0706d59d88eb34003839a5c3386a8049d6 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 3 Oct 2018 12:53:53 -0400 Subject: [PATCH 074/161] Remove unused eosio::sort_names() --- libraries/chain/include/eosio/chain/name.hpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/libraries/chain/include/eosio/chain/name.hpp b/libraries/chain/include/eosio/chain/name.hpp index af5af89ec23..81c13145dde 100644 --- a/libraries/chain/include/eosio/chain/name.hpp +++ b/libraries/chain/include/eosio/chain/name.hpp @@ -91,12 +91,6 @@ namespace eosio { namespace chain { operator unsigned __int128()const { return value; } }; - - inline std::vector sort_names( std::vector&& names ) { - fc::deduplicate(names); - return names; - } - } } // eosio::chain namespace std { From cdbe373494584b50d659e01f2885a1fbdbdb327f Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Thu, 4 Oct 2018 11:49:11 -0400 Subject: [PATCH 075/161] add basic snapshot test, modify block log to support partial logs, fix several bugs that result from that concept --- libraries/chain/authorization_manager.cpp | 8 +- libraries/chain/block_log.cpp | 123 ++++++---- libraries/chain/controller.cpp | 211 +++++++----------- .../chain/include/eosio/chain/block_log.hpp | 14 +- 
.../chain/include/eosio/chain/controller.hpp | 1 + .../chain/include/eosio/chain/snapshot.hpp | 76 +++++++ libraries/chain/resource_limits.cpp | 8 +- .../testing/include/eosio/testing/tester.hpp | 8 +- libraries/testing/tester.cpp | 10 +- unittests/CMakeLists.txt | 2 +- unittests/snapshot_tests.cpp | 71 +++++- 11 files changed, 345 insertions(+), 187 deletions(-) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index eb84d856cad..61f094d816e 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -56,10 +56,10 @@ namespace eosio { namespace chain { void authorization_manager::read_from_snapshot( const snapshot_reader_ptr& snapshot ) { authorization_index_set::walk_indices([this, &snapshot]( auto utils ){ snapshot->read_section([this]( auto& section ) { - bool done = section.empty(); - while(!done) { - decltype(utils)::create(_db, [§ion]( auto &row ) { - section.read_row(row); + bool more = !section.empty(); + while(more) { + decltype(utils)::create(_db, [§ion, &more]( auto &row ) { + more = section.read_row(row); }); } }); diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index fc96d309354..1167b478570 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -12,7 +12,15 @@ namespace eosio { namespace chain { - const uint32_t block_log::supported_version = 1; + const uint32_t block_log::min_supported_version = 1; + + /** + * History: + * Version 1: complete block log from genesis + * Version 2: adds optional partial block log, cannot be used for replay without snapshot + * this is in the form of an first_block_num that is written immediately after the version + */ + const uint32_t block_log::max_supported_version = 2; namespace detail { class block_log_impl { @@ -26,6 +34,8 @@ namespace eosio { namespace chain { bool block_write; bool index_write; bool genesis_written_to_block_log = false; + uint32_t version = 0; + 
uint32_t first_block_num = 0; inline void check_block_read() { if (block_write) { @@ -124,14 +134,21 @@ namespace eosio { namespace chain { ilog("Log is nonempty"); my->check_block_read(); my->block_stream.seekg( 0 ); - uint32_t version = 0; - my->block_stream.read( (char*)&version, sizeof(version) ); - EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly with genesis information." ); - EOS_ASSERT( version == block_log::supported_version, block_log_unsupported_version, - "Unsupported version of block log. Block log version is ${version} while code supports version ${supported}", - ("version", version)("supported", block_log::supported_version) ); + my->version = 0; + my->block_stream.read( (char*)&my->version, sizeof(my->version) ); + EOS_ASSERT( my->version > 0, block_log_exception, "Block log was not setup properly" ); + EOS_ASSERT( my->version >= min_supported_version && my->version <= max_supported_version, block_log_unsupported_version, + "Unsupported version of block log. Block log version is ${version} while code supports version(s) [${min},${max}]", + ("version", my->version)("min", block_log::min_supported_version)("max", block_log::max_supported_version) ); + my->genesis_written_to_block_log = true; // Assume it was constructed properly. 
+ if (my->version > 1){ + my->first_block_num = 0; + my->block_stream.read( (char*)&my->first_block_num, sizeof(my->first_block_num) ); + EOS_ASSERT(my->first_block_num > 0, block_log_exception, "Block log is malformed, first recorded block number is 0 but must be greater than or equal to 1"); + } + my->head = read_head(); my->head_id = my->head->id(); @@ -176,11 +193,11 @@ namespace eosio { namespace chain { my->check_index_write(); uint64_t pos = my->block_stream.tellp(); - EOS_ASSERT(my->index_stream.tellp() == sizeof(uint64_t) * (b->block_num() - 1), + EOS_ASSERT(my->index_stream.tellp() == sizeof(uint64_t) * (b->block_num() - my->first_block_num), block_log_append_fail, "Append to index file occuring at wrong position.", ("position", (uint64_t) my->index_stream.tellp()) - ("expected", (b->block_num() - 1) * sizeof(uint64_t))); + ("expected", (b->block_num() - my->first_block_num) * sizeof(uint64_t))); auto data = fc::raw::pack(*b); my->block_stream.write(data.data(), data.size()); my->block_stream.write((char*)&pos, sizeof(pos)); @@ -200,44 +217,50 @@ namespace eosio { namespace chain { my->index_stream.flush(); } - uint64_t block_log::reset_to_genesis( const genesis_state& gs, const signed_block_ptr& genesis_block ) { - if( my->block_stream.is_open() ) + void block_log::reset( const genesis_state& gs, const signed_block_ptr& first_block, uint32_t first_block_num ) { + if (my->block_stream.is_open()) my->block_stream.close(); - if( my->index_stream.is_open() ) + if (my->index_stream.is_open()) my->index_stream.close(); - fc::remove_all( my->block_file ); - fc::remove_all( my->index_file ); + fc::remove_all(my->block_file); + fc::remove_all(my->index_file); my->block_stream.open(my->block_file.generic_string().c_str(), LOG_WRITE); my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE); my->block_write = true; my->index_write = true; - auto data = fc::raw::pack( gs ); - uint32_t version = 0; // version of 0 is invalid; it indicates that the 
genesis was not properly written to the block log - my->block_stream.write( (char*)&version, sizeof(version) ); - my->block_stream.write( data.data(), data.size() ); + auto data = fc::raw::pack(gs); + my->version = 0; // version of 0 is invalid; it indicates that the genesis was not properly written to the block log + my->first_block_num = first_block_num; + my->block_stream.write((char*)&my->version, sizeof(my->version)); + my->block_stream.write((char*)&my->first_block_num, sizeof(my->first_block_num)); + my->block_stream.write(data.data(), data.size()); my->genesis_written_to_block_log = true; - auto ret = append( genesis_block ); + // append a totem to indicate the division between blocks and header + auto totem = npos; + my->block_stream.write((char*)&totem, sizeof(totem)); + + if (first_block) { + auto ret = append(first_block); + } auto pos = my->block_stream.tellp(); my->block_stream.close(); my->block_stream.open(my->block_file.generic_string().c_str(), std::ios::in | std::ios::out | std::ios::binary ); // Bypass append-only writing just once - static_assert( block_log::supported_version > 0, "a version number of zero is not supported" ); - version = block_log::supported_version; + static_assert( block_log::max_supported_version > 0, "a version number of zero is not supported" ); + my->version = block_log::max_supported_version; my->block_stream.seekp( 0 ); - my->block_stream.write( (char*)&version, sizeof(version) ); // Finally write actual version to disk. + my->block_stream.write( (char*)&my->version, sizeof(my->version) ); my->block_stream.seekp( pos ); flush(); my->block_write = false; my->check_block_write(); // Reset to append-only writing. 
- - return ret; } std::pair block_log::read_block(uint64_t pos)const { @@ -266,10 +289,9 @@ namespace eosio { namespace chain { uint64_t block_log::get_block_pos(uint32_t block_num) const { my->check_index_read(); - - if (!(my->head && block_num <= block_header::num_from_id(my->head_id) && block_num > 0)) + if (!(my->head && block_num <= block_header::num_from_id(my->head_id) && block_num >= my->first_block_num)) return npos; - my->index_stream.seekg(sizeof(uint64_t) * (block_num - 1)); + my->index_stream.seekg(sizeof(uint64_t) * (block_num - my->first_block_num)); uint64_t pos; my->index_stream.read((char*)&pos, sizeof(pos)); return pos; @@ -287,13 +309,21 @@ namespace eosio { namespace chain { my->block_stream.seekg(-sizeof(pos), std::ios::end); my->block_stream.read((char*)&pos, sizeof(pos)); - return read_block(pos).first; + if (pos != npos) { + return read_block(pos).first; + } else { + return {}; + } } const signed_block_ptr& block_log::head()const { return my->head; } + uint32_t block_log::first_block_num() const { + return my->first_block_num; + } + void block_log::construct_index() { ilog("Reconstructing Block Log Index..."); my->index_stream.close(); @@ -308,7 +338,12 @@ namespace eosio { namespace chain { my->block_stream.read((char*)&end_pos, sizeof(end_pos)); signed_block tmp; - uint64_t pos = 4; // Skip version which should have already been checked. + uint64_t pos = 0; + if (my->version == 1) { + pos = 4; // Skip version which should have already been checked. + } else { + pos = 8; // Skip version and first block offset which should have already been checked + } my->block_stream.seekg(pos); genesis_state gs; @@ -361,16 +396,23 @@ namespace eosio { namespace chain { uint32_t version = 0; old_block_stream.read( (char*)&version, sizeof(version) ); - EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly with genesis information." 
); - EOS_ASSERT( version == block_log::supported_version, block_log_unsupported_version, - "Unsupported version of block log. Block log version is ${version} while code supports version ${supported}", - ("version", version)("supported", block_log::supported_version) ); + EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly" ); + EOS_ASSERT( version >= min_supported_version && version <= max_supported_version, block_log_unsupported_version, + "Unsupported version of block log. Block log version is ${version} while code supports version(s) [${min},${max}]", + ("version", version)("min", block_log::min_supported_version)("max", block_log::max_supported_version) ); + + new_block_stream.write( (char*)&version, sizeof(version) ); + + uint32_t first_block_num = 1; + if (version != 1) { + old_block_stream.read ( (char*)&first_block_num, sizeof(first_block_num) ); + new_block_stream.write( (char*)&first_block_num, sizeof(first_block_num) ); + } genesis_state gs; fc::raw::unpack(old_block_stream, gs); auto data = fc::raw::pack( gs ); - new_block_stream.write( (char*)&version, sizeof(version) ); new_block_stream.write( data.data(), data.size() ); std::exception_ptr except_ptr; @@ -472,10 +514,15 @@ namespace eosio { namespace chain { uint32_t version = 0; block_stream.read( (char*)&version, sizeof(version) ); - EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly with genesis information." ); - EOS_ASSERT( version == block_log::supported_version, block_log_unsupported_version, - "Unsupported version of block log. Block log version is ${version} while code supports version ${supported}", - ("version", version)("supported", block_log::supported_version) ); + EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly." ); + EOS_ASSERT( version >= min_supported_version && version <= max_supported_version, block_log_unsupported_version, + "Unsupported version of block log. 
Block log version is ${version} while code supports version(s) [${min},${max}]", + ("version", version)("min", block_log::min_supported_version)("max", block_log::max_supported_version) ); + + uint32_t first_block_num = 1; + if (version != 1) { + block_stream.read ( (char*)&first_block_num, sizeof(first_block_num) ); + } genesis_state gs; fc::raw::unpack(block_stream, gs); diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 1555f5946a5..3b72a44377a 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -236,21 +236,31 @@ struct controller_impl { blog.read_head(); const auto& log_head = blog.head(); - EOS_ASSERT( log_head, block_log_exception, "block log head can not be found" ); - auto lh_block_num = log_head->block_num(); + bool append_to_blog = false; + if (!log_head) { + if (s->block) { + EOS_ASSERT(s->block_num == blog.first_block_num(), block_log_exception, "block log has no blocks and is appending the wrong first block. Expected ${expecgted}, but received: ${actual}", + ("expected", blog.first_block_num())("actual", s->block_num)); + append_to_blog = true; + } else { + EOS_ASSERT(s->block_num == blog.first_block_num() - 1, block_log_exception, "block log has no blocks and is not properly set up to start after the snapshot"); + } + } else { + auto lh_block_num = log_head->block_num(); + if (s->block_num > lh_block_num) { + EOS_ASSERT(s->block_num - 1 == lh_block_num, unlinkable_block_exception, "unlinkable block", ("s->block_num", s->block_num)("lh_block_num", lh_block_num)); + EOS_ASSERT(s->block->previous == log_head->id(), unlinkable_block_exception, "irreversible doesn't link to block log head"); + append_to_blog = true; + } + } + db.commit( s->block_num ); - if( s->block_num <= lh_block_num ) { -// edump((s->block_num)("double call to on_irr")); -// edump((s->block_num)(s->block->previous)(log_head->id())); - return; + if( append_to_blog ) { + blog.append(s->block); } - EOS_ASSERT( s->block_num - 
1 == lh_block_num, unlinkable_block_exception, "unlinkable block", ("s->block_num",s->block_num)("lh_block_num", lh_block_num) ); - EOS_ASSERT( s->block->previous == log_head->id(), unlinkable_block_exception, "irreversible doesn't link to block log head" ); - blog.append(s->block); - const auto& ubi = reversible_blocks.get_index(); auto objitr = ubi.begin(); while( objitr != ubi.end() && objitr->blocknum <= s->block_num ) { @@ -258,13 +268,54 @@ struct controller_impl { objitr = ubi.begin(); } - if ( read_mode == db_read_mode::IRREVERSIBLE ) { - apply_block( s->block, controller::block_status::complete ); - fork_db.mark_in_current_chain( s, true ); - fork_db.set_validity( s, true ); - head = s; + // the "head" block when a snapshot is loaded is virtual and has no block data, all of its effects + // should already have been loaded from the snapshot so, it cannot be applied + if (s->block) { + if (read_mode == db_read_mode::IRREVERSIBLE) { + apply_block(s->block, controller::block_status::complete); + fork_db.mark_in_current_chain(s, true); + fork_db.set_validity(s, true); + head = s; + } + emit(self.irreversible_block, s); } - emit( self.irreversible_block, s ); + } + + void replay() { + auto blog_head = blog.read_head(); + auto blog_head_time = blog_head->timestamp.to_time_point(); + replaying = true; + replay_head_time = blog_head_time; + ilog( "existing block log, attempting to replay ${n} blocks", ("n",blog_head->block_num()) ); + + auto start = fc::time_point::now(); + while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) { + self.push_block( next, controller::block_status::irreversible ); + if( next->block_num() % 100 == 0 ) { + std::cerr << std::setw(10) << next->block_num() << " of " << blog_head->block_num() <<"\r"; + } + } + std::cerr<< "\n"; + ilog( "${n} blocks replayed", ("n", head->block_num) ); + + // if the irreverible log is played without undo sessions enabled, we need to sync the + // revision ordinal to the appropriate expected 
value here. + if( self.skip_db_sessions( controller::block_status::irreversible ) ) + db.set_revision(head->block_num); + + int rev = 0; + while( auto obj = reversible_blocks.find(head->block_num+1) ) { + ++rev; + self.push_block( obj->get_block(), controller::block_status::validated ); + } + + ilog( "${n} reversible blocks replayed", ("n",rev) ); + auto end = fc::time_point::now(); + ilog( "replayed ${n} blocks in ${duration} seconds, ${mspb} ms/block", + ("n", head->block_num)("duration", (end-start).count()/1000000) + ("mspb", ((end-start).count()/1000.0)/head->block_num) ); + replaying = false; + replay_head_time.reset(); } void init(const snapshot_reader_ptr& snapshot) { @@ -274,47 +325,21 @@ struct controller_impl { read_from_snapshot(snapshot); + auto end = blog.read_head(); + if( !end ) { + blog.reset(conf.genesis, signed_block_ptr(), head->block_num + 1); + } else if ( end->block_num() > head->block_num) { + replay(); + } + } else if( !head ) { initialize_fork_db(); // set head to genesis state auto end = blog.read_head(); if( end && end->block_num() > 1 ) { - auto end_time = end->timestamp.to_time_point(); - replaying = true; - replay_head_time = end_time; - ilog( "existing block log, attempting to replay ${n} blocks", ("n",end->block_num()) ); - - auto start = fc::time_point::now(); - while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) { - self.push_block( next, controller::block_status::irreversible ); - if( next->block_num() % 100 == 0 ) { - std::cerr << std::setw(10) << next->block_num() << " of " << end->block_num() <<"\r"; - } - } - std::cerr<< "\n"; - ilog( "${n} blocks replayed", ("n", head->block_num) ); - - // if the irreverible log is played without undo sessions enabled, we need to sync the - // revision ordinal to the appropriate expected value here. 
- if( self.skip_db_sessions( controller::block_status::irreversible ) ) - db.set_revision(head->block_num); - - int rev = 0; - while( auto obj = reversible_blocks.find(head->block_num+1) ) { - ++rev; - self.push_block( obj->get_block(), controller::block_status::validated ); - } - - ilog( "${n} reversible blocks replayed", ("n",rev) ); - auto end = fc::time_point::now(); - ilog( "replayed ${n} blocks in ${duration} seconds, ${mspb} ms/block", - ("n", head->block_num)("duration", (end-start).count()/1000000) - ("mspb", ((end-start).count()/1000.0)/head->block_num) ); - replaying = false; - replay_head_time.reset(); - + replay(); } else if( !end ) { - blog.reset_to_genesis( conf.genesis, head->block ); + blog.reset( conf.genesis, head->block ); } } @@ -388,70 +413,6 @@ struct controller_impl { return enc.result(); } - struct json_snapshot_writer : public snapshot_writer { - json_snapshot_writer() - : snapshot(fc::mutable_variant_object()("sections", fc::variants())) - { - - } - - void write_start_section( const string& section_name ) override { - current_rows.clear(); - current_section_name = section_name; - } - - void write_row( const detail::abstract_snapshot_row_writer& row_writer ) override { - current_rows.emplace_back(row_writer.to_variant()); - } - - void write_end_section( ) override { - snapshot["sections"].get_array().emplace_back(mutable_variant_object()("name", std::move(current_section_name))("rows", std::move(current_rows))); - } - - fc::mutable_variant_object snapshot; - string current_section_name; - fc::variants current_rows; - }; - - struct json_snapshot_reader : public snapshot_reader { - json_snapshot_reader(const fc::variant& snapshot) - :snapshot(snapshot) - ,cur_row(0) - { - - } - - void set_section( const string& section_name ) override { - const auto& sections = snapshot["sections"].get_array(); - for( const auto& section: sections ) { - if (section["name"].as_string() == section_name) { - cur_section = §ion.get_object(); - break; - } - } - } 
- - bool read_row( detail::abstract_snapshot_row_reader& row_reader ) override { - const auto& rows = (*cur_section)["rows"].get_array(); - row_reader.provide(rows.at(cur_row++)); - return cur_row < rows.size(); - } - - bool empty ( ) override { - const auto& rows = (*cur_section)["rows"].get_array(); - return rows.empty(); - } - - void clear_section() override { - cur_section = nullptr; - cur_row = 0; - } - - const fc::variant& snapshot; - const fc::variant_object* cur_section; - int cur_row; - }; - void add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { snapshot->write_section([this]( auto §ion ){ section.template add_row(*fork_db.head()); @@ -469,13 +430,6 @@ struct controller_impl { resource_limits.add_to_snapshot(snapshot); } - void print_json_snapshot() const { - json_snapshot_writer snapshot; - auto snapshot_ptr = shared_ptr(&snapshot, [](snapshot_writer *) {}); - add_to_snapshot(snapshot_ptr); - std::cerr << fc::json::to_pretty_string(snapshot.snapshot) << std::endl; - } - void read_from_snapshot( const snapshot_reader_ptr& snapshot ) { snapshot->read_section([this]( auto §ion ){ block_header_state head_header_state; @@ -490,10 +444,10 @@ struct controller_impl { controller_index_set::walk_indices([this, &snapshot]( auto utils ){ snapshot->read_section([this]( auto& section ) { - bool done = section.empty(); - while(!done) { - decltype(utils)::create(db, [§ion]( auto &row ) { - section.read_row(row); + bool more = !section.empty(); + while(more) { + decltype(utils)::create(db, [§ion, &more]( auto &row ) { + more = section.read_row(row); }); } }); @@ -1749,6 +1703,11 @@ sha256 controller::calculate_integrity_hash()const { try { return my->calculate_integrity_hash(); } FC_LOG_AND_RETHROW() } +void controller::write_snapshot( const snapshot_writer_ptr& snapshot ) const { + EOS_ASSERT( !my->pending, block_validate_exception, "cannot take a consistent snapshot with a pending block" ); + return my->add_to_snapshot(snapshot); +} + void 
controller::pop_block() { my->pop_block(); } diff --git a/libraries/chain/include/eosio/chain/block_log.hpp b/libraries/chain/include/eosio/chain/block_log.hpp index 95560f94789..24ff0ad2835 100644 --- a/libraries/chain/include/eosio/chain/block_log.hpp +++ b/libraries/chain/include/eosio/chain/block_log.hpp @@ -11,10 +11,10 @@ namespace eosio { namespace chain { namespace detail { class block_log_impl; } - /* The block log is an external append only log of the blocks. Blocks should only be written - * to the log after they irreverisble as the log is append only. The log is a doubly linked - * list of blocks. There is a secondary index file of only block positions that enables O(1) - * random access lookup by block number. + /* The block log is an external append only log of the blocks with a header. Blocks should only + * be written to the log after they irreverisble as the log is append only. The log is a doubly + * linked list of blocks. There is a secondary index file of only block positions that enables + * O(1) random access lookup by block number. * * +---------+----------------+---------+----------------+-----+------------+-------------------+ * | Block 1 | Pos of Block 1 | Block 2 | Pos of Block 2 | ... 
| Head Block | Pos of Head Block | @@ -44,7 +44,7 @@ namespace eosio { namespace chain { uint64_t append(const signed_block_ptr& b); void flush(); - uint64_t reset_to_genesis( const genesis_state& gs, const signed_block_ptr& genesis_block ); + void reset( const genesis_state& gs, const signed_block_ptr& genesis_block, uint32_t first_block_num = 1 ); std::pair read_block(uint64_t file_pos)const; signed_block_ptr read_block_by_num(uint32_t block_num)const; @@ -58,10 +58,12 @@ namespace eosio { namespace chain { uint64_t get_block_pos(uint32_t block_num) const; signed_block_ptr read_head()const; const signed_block_ptr& head()const; + uint32_t first_block_num() const; static const uint64_t npos = std::numeric_limits::max(); - static const uint32_t supported_version; + static const uint32_t min_supported_version; + static const uint32_t max_supported_version; static fc::path repair_log( const fc::path& data_dir, uint32_t truncate_at_block = 0 ); diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 41134a90799..753d460d716 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -206,6 +206,7 @@ namespace eosio { namespace chain { block_id_type get_block_id_for_num( uint32_t block_num )const; sha256 calculate_integrity_hash()const; + void write_snapshot( const snapshot_writer_ptr& snapshot )const; void check_contract_list( account_name code )const; void check_action_list( account_name code, action_name action )const; diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 2904dae50be..f16ea747025 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -90,6 +90,8 @@ namespace eosio { namespace chain { write_end_section(); } + virtual ~snapshot_writer(){}; + protected: virtual void write_start_section( const 
std::string& section_name ) = 0; virtual void write_row( const detail::abstract_snapshot_row_writer& row_writer ) = 0; @@ -167,6 +169,8 @@ namespace eosio { namespace chain { clear_section(); } + virtual ~snapshot_reader(){}; + protected: virtual void set_section( const std::string& section_name ) = 0; virtual bool read_row( detail::abstract_snapshot_row_reader& row_reader ) = 0; @@ -176,4 +180,76 @@ namespace eosio { namespace chain { using snapshot_reader_ptr = std::shared_ptr; + class variant_snapshot_writer : public snapshot_writer { + public: + variant_snapshot_writer() + : snapshot(fc::mutable_variant_object()("sections", fc::variants())) + { + + } + + void write_start_section( const std::string& section_name ) override { + current_rows.clear(); + current_section_name = section_name; + } + + void write_row( const detail::abstract_snapshot_row_writer& row_writer ) override { + current_rows.emplace_back(row_writer.to_variant()); + } + + void write_end_section( ) override { + snapshot["sections"].get_array().emplace_back(fc::mutable_variant_object()("name", std::move(current_section_name))("rows", std::move(current_rows))); + } + + fc::variant finalize() { + fc::variant result = std::move(snapshot); + return result; + } + private: + fc::mutable_variant_object snapshot; + std::string current_section_name; + fc::variants current_rows; + }; + + class variant_snapshot_reader : public snapshot_reader { + public: + variant_snapshot_reader(const fc::variant& snapshot) + :snapshot(snapshot) + ,cur_row(0) + { + + } + + void set_section( const string& section_name ) override { + const auto& sections = snapshot["sections"].get_array(); + for( const auto& section: sections ) { + if (section["name"].as_string() == section_name) { + cur_section = §ion.get_object(); + break; + } + } + } + + bool read_row( detail::abstract_snapshot_row_reader& row_reader ) override { + const auto& rows = (*cur_section)["rows"].get_array(); + row_reader.provide(rows.at(cur_row++)); + return 
cur_row < rows.size(); + } + + bool empty ( ) override { + const auto& rows = (*cur_section)["rows"].get_array(); + return rows.empty(); + } + + void clear_section() override { + cur_section = nullptr; + cur_row = 0; + } + + private: + const fc::variant& snapshot; + const fc::variant_object* cur_section; + int cur_row; + }; + }} diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 1a805e8c066..3bfaecb61e0 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -86,10 +86,10 @@ void resource_limits_manager::add_to_snapshot( const snapshot_writer_ptr& snapsh void resource_limits_manager::read_from_snapshot( const snapshot_reader_ptr& snapshot ) { resource_index_set::walk_indices([this, &snapshot]( auto utils ){ snapshot->read_section([this]( auto& section ) { - bool done = section.empty(); - while(!done) { - decltype(utils)::create(_db, [§ion]( auto &row ) { - section.read_row(row); + bool more = !section.empty(); + while(more) { + decltype(utils)::create(_db, [§ion, &more]( auto &row ) { + more = section.read_row(row); }); } }); diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 772ca726e31..9a4f4094330 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -83,10 +83,10 @@ namespace eosio { namespace testing { virtual ~base_tester() {}; void init(bool push_genesis = true, db_read_mode read_mode = db_read_mode::SPECULATIVE); - void init(controller::config config); + void init(controller::config config, const snapshot_reader_ptr& snapshot = nullptr); void close(); - void open(); + void open( const snapshot_reader_ptr& snapshot ); bool is_same_chain( base_tester& other ); virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ ) = 0; @@ -264,6 
+264,10 @@ namespace eosio { namespace testing { return true; } + const controller::config& get_config() const { + return cfg; + } + protected: signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false, uint32_t skip_flag = 0 ); void _start_block(fc::time_point block_time); diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 69c9d8f47e4..b116d1a4969 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -102,16 +102,16 @@ namespace eosio { namespace testing { cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; } - open(); + open(nullptr); if (push_genesis) push_genesis_block(); } - void base_tester::init(controller::config config) { + void base_tester::init(controller::config config, const snapshot_reader_ptr& snapshot) { cfg = config; - open(); + open(snapshot); } @@ -121,10 +121,10 @@ namespace eosio { namespace testing { } - void base_tester::open() { + void base_tester::open( const snapshot_reader_ptr& snapshot) { control.reset( new controller(cfg) ); control->add_indices(); - control->startup(); + control->startup(snapshot); chain_transactions.clear(); control->accepted_block.connect([this]( const block_state_ptr& block_state ){ FC_ASSERT( block_state->block ); diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index c21c9597312..f0ca7fe788d 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -28,7 +28,7 @@ target_include_directories( unit_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/contracts ${CMAKE_CURRENT_BINARY_DIR}/contracts ${CMAKE_CURRENT_BINARY_DIR}/include ) -add_dependencies(unit_test asserter test_api test_api_mem test_api_db test_ram_limit test_api_multi_index eosio.token proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig payloadless tic_tac_toe deferred_test) +add_dependencies(unit_test asserter test_api test_api_mem test_api_db test_ram_limit 
test_api_multi_index eosio.token proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig payloadless tic_tac_toe deferred_test snapshot_test) #Manually run unit_test for all supported runtimes #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. unit_test -- --verbose diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp index e5f6f09a5b9..27f63abdda4 100644 --- a/unittests/snapshot_tests.cpp +++ b/unittests/snapshot_tests.cpp @@ -6,17 +6,86 @@ #include #include +#include + +#include +#include + + using namespace eosio; using namespace testing; using namespace chain; +class snapshotted_tester : public base_tester { +public: + snapshotted_tester(controller::config config, const snapshot_reader_ptr& snapshot, int ordinal) { + FC_ASSERT(config.blocks_dir.filename().generic_string() != "." + && config.state_dir.filename().generic_string() != ".", "invalid path names in controller::config"); + + controller::config copied_config = config; + copied_config.blocks_dir = + config.blocks_dir.parent_path() / std::to_string(ordinal).append(config.blocks_dir.filename().generic_string()); + copied_config.state_dir = + config.state_dir.parent_path() / std::to_string(ordinal).append(config.state_dir.filename().generic_string()); + + init(copied_config, snapshot); + } + + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { + return _produce_block(skip_time, false, skip_flag); + } + + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { + control->abort_block(); + return _produce_block(skip_time, true, skip_flag); + } + + bool validate() { return true; } +}; + BOOST_AUTO_TEST_SUITE(snapshot_tests) 
BOOST_AUTO_TEST_CASE(test_multi_index_snapshot) { - tester main; + tester chain; + + chain.create_account(N(snapshot)); + chain.produce_blocks(1); + chain.set_code(N(snapshot), snapshot_test_wast); + chain.set_abi(N(snapshot), snapshot_test_abi); + chain.produce_blocks(1); + chain.control->abort_block(); + + + static const int generation_count = 20; + std::list sub_testers; + + for (int generation = 0; generation < generation_count; generation++) { + // create a new snapshot child + variant_snapshot_writer writer; + auto writer_p = std::shared_ptr(&writer, [](snapshot_writer *){}); + chain.control->write_snapshot(writer_p); + auto snapshot = writer.finalize(); + + // create a new child at this snapshot + sub_testers.emplace_back(chain.get_config(), std::make_shared(snapshot), generation); + + // increment the test contract + + // produce block + auto new_block = chain.produce_block(); + + // undo the auto-pending from tester + chain.control->abort_block(); + auto integrity_value = chain.control->calculate_integrity_hash(); + // push that block to all sub testers and validate the integrity of the database after it. 
+ for (auto& other: sub_testers) { + other.push_block(new_block); + BOOST_REQUIRE_EQUAL(integrity_value.str(), other.control->calculate_integrity_hash().str()); + } + } } BOOST_AUTO_TEST_SUITE_END() From cb903a5cab87ffaac6732014196a6345206f13ac Mon Sep 17 00:00:00 2001 From: William LeGate Date: Thu, 4 Oct 2018 17:06:04 -0700 Subject: [PATCH 076/161] Adds reference to ZeroMQ community plugin --- plugins/COMMUNITY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md index 9d568adc9a1..5b7a5eb5d1d 100644 --- a/plugins/COMMUNITY.md +++ b/plugins/COMMUNITY.md @@ -10,6 +10,7 @@ Third parties are encouraged to make pull requests to this file (`develop` branc | Kafka | https://github.com/TP-Lab/kafka_plugin | | SQL | https://github.com/asiniscalchi/eosio_sql_plugin | | ElasticSearch | https://github.com/EOSLaoMao/elasticsearch_plugin | +| ZeroMQ | https://github.com/cc32d9/eos_zmq_plugin | ## DISCLAIMER: From 53e5e2f67983b140e465be1121e3a7a7739d4d27 Mon Sep 17 00:00:00 2001 From: stephen Date: Fri, 5 Oct 2018 09:41:48 +0900 Subject: [PATCH 077/161] introduce eosio_mysql_plugin by eosBLACK team --- plugins/COMMUNITY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md index 9d568adc9a1..3408168ef3d 100644 --- a/plugins/COMMUNITY.md +++ b/plugins/COMMUNITY.md @@ -10,6 +10,7 @@ Third parties are encouraged to make pull requests to this file (`develop` branc | Kafka | https://github.com/TP-Lab/kafka_plugin | | SQL | https://github.com/asiniscalchi/eosio_sql_plugin | | ElasticSearch | https://github.com/EOSLaoMao/elasticsearch_plugin | +| MySQL | https://github.com/eosBLACK/eosio_mysqldb_plugin | ## DISCLAIMER: From 10d2929b67bc12ba893781d478a03f887c275fdc Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 5 Oct 2018 10:20:58 -0500 Subject: [PATCH 078/161] Added program to printout binary block log in json format. 
GH #5656 --- programs/CMakeLists.txt | 1 + programs/eosio-blocklog/CMakeLists.txt | 25 ++++ programs/eosio-blocklog/main.cpp | 158 +++++++++++++++++++++++++ 3 files changed, 184 insertions(+) create mode 100644 programs/eosio-blocklog/CMakeLists.txt create mode 100644 programs/eosio-blocklog/main.cpp diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index 3457b5679b2..b3a656c3b10 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -3,3 +3,4 @@ add_subdirectory( cleos ) add_subdirectory( keosd ) add_subdirectory( eosio-launcher ) add_subdirectory( eosio-abigen ) +add_subdirectory( eosio-blocklog ) diff --git a/programs/eosio-blocklog/CMakeLists.txt b/programs/eosio-blocklog/CMakeLists.txt new file mode 100644 index 00000000000..b883e493f85 --- /dev/null +++ b/programs/eosio-blocklog/CMakeLists.txt @@ -0,0 +1,25 @@ +add_executable( eosio-blocklog main.cpp ) + +if( UNIX AND NOT APPLE ) + set(rt_library rt ) +endif() + +find_package( Gperftools QUIET ) +if( GPERFTOOLS_FOUND ) + message( STATUS "Found gperftools; compiling eosio-blocklog with TCMalloc") + list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) +endif() + +target_include_directories(eosio-blocklog PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) + +target_link_libraries( eosio-blocklog + PRIVATE appbase + PRIVATE eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + +install( TARGETS + eosio-blocklog + + RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR} +) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp new file mode 100644 index 00000000000..20c99f17409 --- /dev/null +++ b/programs/eosio-blocklog/main.cpp @@ -0,0 +1,158 @@ +/** + * @file + * @copyright defined in eosio/LICENSE.txt + */ +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +using namespace eosio::chain; +namespace bfs = boost::filesystem; 
+namespace bpo = boost::program_options; +using bpo::options_description; +using bpo::variables_map; + +struct blocklog { + blocklog() + {} + + void read_log(); + void set_program_options(options_description& cli); + void initialize(const variables_map& options); + + bfs::path blocks_dir; + bfs::path output_file; + uint32_t first_block; + uint32_t last_block; + bool no_pretty_print; +}; + +void blocklog::read_log() { + block_log block_logger(blocks_dir); + const auto end = block_logger.read_head(); + EOS_ASSERT( end, block_log_exception, "No blocks found in block log" ); + EOS_ASSERT( end->block_num() > 1, block_log_exception, "Only one block found in block log" ); + + auto end_time = end->timestamp.to_time_point(); + ilog( "existing block log with ${n} blocks", ("n",end->block_num()) ); + + std::ofstream output_blocks; + std::ostream* out; + if (!output_file.empty()) { + output_blocks.open(output_file.generic_string().c_str()); + if (output_blocks.fail()) { + std::ostringstream ss; + ss << "Unable to open file '" << output_file.string() << "'"; + throw std::runtime_error(ss.str()); + } + out = &output_blocks; + } + else + out = &std::cout; + + uint32_t block_num = (first_block < 1) ? 1 : first_block; + const uint32_t end_block = (last_block < end->block_num() ? 
last_block : end->block_num()) + 1; + signed_block_ptr next; + fc::variant pretty_output; + const fc::microseconds deadline = fc::seconds(10); + while((block_num < end_block) && (next = block_logger.read_block_by_num( block_num ))) { + abi_serializer::to_variant(*next, + pretty_output, + []( account_name n ) { return optional(); }, + deadline); + const auto block_id = next->id(); + const uint32_t ref_block_prefix = block_id._hash[1]; + const auto enhanced_object = fc::mutable_variant_object + ("block_num",next->block_num()) + ("id", block_id) + ("ref_block_prefix", ref_block_prefix) + (pretty_output.get_object()); + fc::variant v(std::move(enhanced_object)); + if (no_pretty_print) + fc::json::to_stream(*out, v, fc::json::stringify_large_ints_and_doubles); + else + *out << fc::json::to_pretty_string(v) << "\n"; + + ++block_num; + } +} + +void blocklog::set_program_options(options_description& cli) +{ + cli.add_options() + ("blocks-dir", bpo::value()->default_value("blocks"), + "the location of the blocks directory (absolute path or relative to the current directory)") + ("output-file,o", bpo::value(), + "the file to write the block log output to (absolute or relative path). If not specified then output is to stdout.") + ("first", bpo::value(&first_block)->default_value(1), + "the first block number to log") + ("last", bpo::value(&last_block)->default_value(std::numeric_limits::max()), + "the last block number (inclusive) to log") + ("no-pretty-print", bpo::bool_switch(&no_pretty_print)->default_value(false), + "Do not pretty print the output. 
Useful if piping to jq to improve performance.") + ("help", "Print this help message and exit.") + ; + +} + +void blocklog::initialize(const variables_map& options) { + try { + auto bld = options.at( "blocks-dir" ).as(); + if( bld.is_relative()) + blocks_dir = bfs::current_path() / bld; + else + blocks_dir = bld; + + if (options.count( "output-file" )) { + bld = options.at( "output-file" ).as(); + if( bld.is_relative()) + output_file = bfs::current_path() / bld; + else + output_file = bld; + } + } FC_LOG_AND_RETHROW() + +} + + +int main(int argc, char** argv) +{ + std::ios::sync_with_stdio(false); // for potential performance boost for large block log files + options_description cli ("eosio-blocklog command line options"); + try { + blocklog blog; + blog.set_program_options(cli); + variables_map vmap; + bpo::store(bpo::parse_command_line(argc, argv, cli), vmap); + bpo::notify(vmap); + if (vmap.count("help") > 0) { + cli.print(std::cerr); + return 0; + } + blog.initialize(vmap); + blog.read_log(); + } catch( const fc::exception& e ) { + elog( "${e}", ("e", e.to_detail_string())); + return -1; + } catch( const boost::exception& e ) { + elog("${e}", ("e",boost::diagnostic_information(e))); + return -1; + } catch( const std::exception& e ) { + elog("${e}", ("e",e.what())); + return -1; + } catch( ... 
) { + elog("unknown exception"); + return -1; + } + + return 0; +} From 0d3f26424c54dfe9fc835bb10390e352b80f9aef Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Fri, 5 Oct 2018 15:29:12 -0400 Subject: [PATCH 079/161] add contract state manipulation to the test, add test for replaying from a snapshot, fix bugs that bubbled up as a result --- contracts/snapshot_test/snapshot_test.cpp | 1 - libraries/chain/block_log.cpp | 6 ++ unittests/snapshot_tests.cpp | 84 ++++++++++++++++++++++- 3 files changed, 87 insertions(+), 4 deletions(-) diff --git a/contracts/snapshot_test/snapshot_test.cpp b/contracts/snapshot_test/snapshot_test.cpp index a63d05643b4..0ef6939f07e 100644 --- a/contracts/snapshot_test/snapshot_test.cpp +++ b/contracts/snapshot_test/snapshot_test.cpp @@ -56,7 +56,6 @@ namespace snapshot_test { } else { data.modify( current, self, [&]( auto& r ) { - r.id += value; r.index_f64 += value; r.index_f128 += value; r.index_i64 += value; diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index 1167b478570..9f86b0e7de5 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -349,6 +349,12 @@ namespace eosio { namespace chain { genesis_state gs; fc::raw::unpack(my->block_stream, gs); + // skip the totem + if (my->version > 1) { + uint64_t totem; + my->block_stream.read((char*) &totem, sizeof(totem)); + } + while( pos < end_pos ) { fc::raw::unpack(my->block_stream, tmp); my->block_stream.read((char*)&pos, sizeof(pos)); diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp index 27f63abdda4..4389edf2d78 100644 --- a/unittests/snapshot_tests.cpp +++ b/unittests/snapshot_tests.cpp @@ -31,6 +31,24 @@ class snapshotted_tester : public base_tester { init(copied_config, snapshot); } + snapshotted_tester(controller::config config, const snapshot_reader_ptr& snapshot, int ordinal, int copy_block_log_from_ordinal) { + FC_ASSERT(config.blocks_dir.filename().generic_string() != "." 
+ && config.state_dir.filename().generic_string() != ".", "invalid path names in controller::config"); + + controller::config copied_config = config; + copied_config.blocks_dir = + config.blocks_dir.parent_path() / std::to_string(ordinal).append(config.blocks_dir.filename().generic_string()); + copied_config.state_dir = + config.state_dir.parent_path() / std::to_string(ordinal).append(config.state_dir.filename().generic_string()); + + // create a copy of the desired block log and reversible + auto block_log_path = config.blocks_dir.parent_path() / std::to_string(copy_block_log_from_ordinal).append(config.blocks_dir.filename().generic_string()); + fc::create_directories(copied_config.blocks_dir); + fc::copy(block_log_path / "blocks.log", copied_config.blocks_dir / "blocks.log"); + fc::copy(block_log_path / config::reversible_blocks_dir_name, copied_config.blocks_dir / config::reversible_blocks_dir_name ); + + init(copied_config, snapshot); + } signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { return _produce_block(skip_time, false, skip_flag); } @@ -45,7 +63,7 @@ class snapshotted_tester : public base_tester { BOOST_AUTO_TEST_SUITE(snapshot_tests) -BOOST_AUTO_TEST_CASE(test_multi_index_snapshot) +BOOST_AUTO_TEST_CASE(test_exhaustive_snapshot) { tester chain; @@ -56,8 +74,7 @@ BOOST_AUTO_TEST_CASE(test_multi_index_snapshot) chain.produce_blocks(1); chain.control->abort_block(); - - static const int generation_count = 20; + static const int generation_count = 8; std::list sub_testers; for (int generation = 0; generation < generation_count; generation++) { @@ -71,6 +88,9 @@ BOOST_AUTO_TEST_CASE(test_multi_index_snapshot) sub_testers.emplace_back(chain.get_config(), std::make_shared(snapshot), generation); // increment the test contract + chain.push_action(N(snapshot), N(increment), N(snapshot), mutable_variant_object() + ( "value", 1 ) + ); // produce 
block auto new_block = chain.produce_block(); @@ -88,4 +108,62 @@ BOOST_AUTO_TEST_CASE(test_multi_index_snapshot) } } +BOOST_AUTO_TEST_CASE(test_replay_over_snapshot) +{ + tester chain; + + chain.create_account(N(snapshot)); + chain.produce_blocks(1); + chain.set_code(N(snapshot), snapshot_test_wast); + chain.set_abi(N(snapshot), snapshot_test_abi); + chain.produce_blocks(1); + chain.control->abort_block(); + + static const int pre_snapshot_block_count = 12; + static const int post_snapshot_block_count = 12; + + for (int itr = 0; itr < pre_snapshot_block_count; itr++) { + // increment the contract + chain.push_action(N(snapshot), N(increment), N(snapshot), mutable_variant_object() + ( "value", 1 ) + ); + + // produce block + chain.produce_block(); + } + + chain.control->abort_block(); + auto expected_pre_integrity_hash = chain.control->calculate_integrity_hash(); + + // create a new snapshot child + variant_snapshot_writer writer; + auto writer_p = std::shared_ptr(&writer, [](snapshot_writer *){}); + chain.control->write_snapshot(writer_p); + auto snapshot = writer.finalize(); + + // create a new child at this snapshot + snapshotted_tester snap_chain(chain.get_config(), std::make_shared(snapshot), 1); + BOOST_REQUIRE_EQUAL(expected_pre_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); + + // push more blocks to build up a block log + for (int itr = 0; itr < post_snapshot_block_count; itr++) { + // increment the contract + chain.push_action(N(snapshot), N(increment), N(snapshot), mutable_variant_object() + ( "value", 1 ) + ); + + // produce & push block + snap_chain.push_block(chain.produce_block()); + } + + // verify the hash at the end + chain.control->abort_block(); + auto expected_post_integrity_hash = chain.control->calculate_integrity_hash(); + BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); + + // replay the block log from the snapshot child, from the snapshot + 
snapshotted_tester replay_chain(chain.get_config(), std::make_shared(snapshot), 2, 1); + BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); +} + BOOST_AUTO_TEST_SUITE_END() From 3e9162546c292d93d21cd2c6fdc813b25edfa555 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 5 Oct 2018 15:11:48 -0500 Subject: [PATCH 080/161] Added reading reversible blocks. GH #5656 --- programs/eosio-blocklog/main.cpp | 44 ++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index 20c99f17409..bc2de200ed5 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -42,8 +43,30 @@ void blocklog::read_log() { EOS_ASSERT( end, block_log_exception, "No blocks found in block log" ); EOS_ASSERT( end->block_num() > 1, block_log_exception, "Only one block found in block log" ); - auto end_time = end->timestamp.to_time_point(); - ilog( "existing block log with ${n} blocks", ("n",end->block_num()) ); + ilog( "existing block log contains block num 1 through block num ${n}", ("n",end->block_num()) ); + + optional reversible_blocks; + try { + reversible_blocks.emplace(blocks_dir / config::reversible_blocks_dir_name, chainbase::database::read_only, config::default_reversible_cache_size); + reversible_blocks->add_index(); + const auto& idx = reversible_blocks->get_index(); + auto first = idx.lower_bound(end->block_num()); + auto last = idx.rbegin(); + if (first != idx.end() && last != idx.rend()) + ilog( "existing reversible block num ${first} through block num ${last} ", ("first",first->get_block()->block_num())("last",last->get_block()->block_num()) ); + else { + elog( "no blocks available in reversible block database: only block_log blocks are available" ); + reversible_blocks.reset(); + } + } catch( const std::runtime_error& e ) 
{ + if( std::string(e.what()) == "database dirty flag set" ) { + elog( "database dirty flag set (likely due to unclean shutdown): only block_log blocks are available" ); + } else if( std::string(e.what()) == "database metadata dirty flag set" ) { + elog( "database metadata dirty flag set (likely due to unclean shutdown): only block_log blocks are available" ); + } else { + throw; + } + } std::ofstream output_blocks; std::ostream* out; @@ -60,11 +83,10 @@ void blocklog::read_log() { out = &std::cout; uint32_t block_num = (first_block < 1) ? 1 : first_block; - const uint32_t end_block = (last_block < end->block_num() ? last_block : end->block_num()) + 1; signed_block_ptr next; fc::variant pretty_output; const fc::microseconds deadline = fc::seconds(10); - while((block_num < end_block) && (next = block_logger.read_block_by_num( block_num ))) { + auto print_block = [&](signed_block_ptr& next) { abi_serializer::to_variant(*next, pretty_output, []( account_name n ) { return optional(); }, @@ -81,7 +103,19 @@ void blocklog::read_log() { fc::json::to_stream(*out, v, fc::json::stringify_large_ints_and_doubles); else *out << fc::json::to_pretty_string(v) << "\n"; - + }; + while((block_num <= last_block) && (next = block_logger.read_block_by_num( block_num ))) { + print_block(next); + ++block_num; + out->flush(); + } + if (!reversible_blocks) { + return; + } + const reversible_block_object* obj = nullptr; + while( (block_num <= last_block) && (obj = reversible_blocks->find(block_num)) ) { + auto next = obj->get_block(); + print_block(next); ++block_num; } } From 51df14855231e5cf1c77342baaba5d9bf19b2a49 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Fri, 5 Oct 2018 17:32:22 -0400 Subject: [PATCH 081/161] add validation to the snapshot reader abstraction, hide more implementation details in source files or detail namespaces --- libraries/chain/CMakeLists.txt | 1 + libraries/chain/controller.cpp | 1 + .../chain/include/eosio/chain/exceptions.hpp | 5 + 
.../chain/include/eosio/chain/snapshot.hpp | 125 +++++++----------- libraries/chain/snapshot.cpp | 104 +++++++++++++++ 5 files changed, 157 insertions(+), 79 deletions(-) create mode 100644 libraries/chain/snapshot.cpp diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 8d7d9a775c2..a8eeadaa94a 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -35,6 +35,7 @@ add_library( eosio_chain apply_context.cpp abi_serializer.cpp asset.cpp + snapshot.cpp webassembly/wavm.cpp webassembly/wabt.cpp diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 3b72a44377a..9901fbe40d7 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -322,6 +322,7 @@ struct controller_impl { if (snapshot) { EOS_ASSERT(!head, fork_database_exception, ""); + snapshot->validate(); read_from_snapshot(snapshot); diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 91467e746f6..b0eac36d465 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -510,4 +510,9 @@ namespace eosio { namespace chain { 3230002, "Database API Exception" ) FC_DECLARE_DERIVED_EXCEPTION( arithmetic_exception, contract_api_exception, 3230003, "Arithmetic Exception" ) + + FC_DECLARE_DERIVED_EXCEPTION( snapshot_exception, chain_exception, + 3240000, "Snapshot exception" ) + FC_DECLARE_DERIVED_EXCEPTION( snapshot_validation_exception, snapshot_exception, + 3240001, "Snapshot Validation Exception" ) } } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index f16ea747025..8ee4d8a195a 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -10,30 +10,36 @@ #include namespace eosio { namespace chain { - template - struct 
snapshot_section_traits { - static std::string section_name() { - return boost::core::demangle(typeid(T).name()); - } - }; + /** + * History: + * Version 1: initial version with string identified sections and rows + */ + static const uint32_t current_snapshot_version = 1; - template - struct snapshot_row_traits { - using row_type = std::decay_t; - using value_type = const row_type&; - }; + namespace detail { + template + struct snapshot_section_traits { + static std::string section_name() { + return boost::core::demangle(typeid(T).name()); + } + }; - template - auto to_snapshot_row( const T& value ) -> typename snapshot_row_traits::value_type { - return value; - }; + template + struct snapshot_row_traits { + using row_type = std::decay_t; + using value_type = const row_type&; + }; - template - auto from_snapshot_row( typename snapshot_row_traits::value_type&& row, T& value ) { - value = row; - } + template + auto to_snapshot_row( const T& value ) -> typename snapshot_row_traits::value_type { + return value; + }; + + template + auto from_snapshot_row( typename snapshot_row_traits::value_type&& row, T& value ) { + value = row; + } - namespace detail { struct abstract_snapshot_row_writer { virtual void write(std::ostream& out) const = 0; virtual variant to_variant() const = 0; @@ -69,7 +75,7 @@ namespace eosio { namespace chain { public: template void add_row( const T& row ) { - _writer.write_row(detail::make_row_writer(to_snapshot_row(row))); + _writer.write_row(detail::make_row_writer(detail::to_snapshot_row(row))); } private: @@ -84,7 +90,7 @@ namespace eosio { namespace chain { template void write_section(F f) { - write_start_section(snapshot_section_traits::section_name()); + write_start_section(detail::snapshot_section_traits::section_name()); auto section = section_writer(*this); f(section); write_end_section(); @@ -133,17 +139,17 @@ namespace eosio { namespace chain { class section_reader { public: template - auto read_row( T& out ) -> std::enable_if_t, 
typename snapshot_row_traits::row_type>::value,bool> { + auto read_row( T& out ) -> std::enable_if_t, typename detail::snapshot_row_traits::row_type>::value,bool> { auto reader = detail::make_row_reader(out); return _reader.read_row(reader); } template - auto read_row( T& out ) -> std::enable_if_t, typename snapshot_row_traits::row_type>::value,bool> { - auto temp = typename snapshot_row_traits::row_type(); + auto read_row( T& out ) -> std::enable_if_t, typename detail::snapshot_row_traits::row_type>::value,bool> { + auto temp = typename detail::snapshot_row_traits::row_type(); auto reader = detail::make_row_reader(temp); bool result = _reader.read_row(reader); - from_snapshot_row(std::move(temp), out); + detail::from_snapshot_row(std::move(temp), out); return result; } @@ -163,12 +169,14 @@ namespace eosio { namespace chain { template void read_section(F f) { - set_section(snapshot_section_traits::section_name()); + set_section(detail::snapshot_section_traits::section_name()); auto section = section_reader(*this); f(section); clear_section(); } + virtual void validate() const = 0; + virtual ~snapshot_reader(){}; protected: @@ -182,29 +190,13 @@ namespace eosio { namespace chain { class variant_snapshot_writer : public snapshot_writer { public: - variant_snapshot_writer() - : snapshot(fc::mutable_variant_object()("sections", fc::variants())) - { + variant_snapshot_writer(); - } - - void write_start_section( const std::string& section_name ) override { - current_rows.clear(); - current_section_name = section_name; - } - - void write_row( const detail::abstract_snapshot_row_writer& row_writer ) override { - current_rows.emplace_back(row_writer.to_variant()); - } + void write_start_section( const std::string& section_name ) override; + void write_row( const detail::abstract_snapshot_row_writer& row_writer ) override; + void write_end_section( ) override; + fc::variant finalize(); - void write_end_section( ) override { - 
snapshot["sections"].get_array().emplace_back(fc::mutable_variant_object()("name", std::move(current_section_name))("rows", std::move(current_rows))); - } - - fc::variant finalize() { - fc::variant result = std::move(snapshot); - return result; - } private: fc::mutable_variant_object snapshot; std::string current_section_name; @@ -213,38 +205,13 @@ namespace eosio { namespace chain { class variant_snapshot_reader : public snapshot_reader { public: - variant_snapshot_reader(const fc::variant& snapshot) - :snapshot(snapshot) - ,cur_row(0) - { - - } + variant_snapshot_reader(const fc::variant& snapshot); - void set_section( const string& section_name ) override { - const auto& sections = snapshot["sections"].get_array(); - for( const auto& section: sections ) { - if (section["name"].as_string() == section_name) { - cur_section = §ion.get_object(); - break; - } - } - } - - bool read_row( detail::abstract_snapshot_row_reader& row_reader ) override { - const auto& rows = (*cur_section)["rows"].get_array(); - row_reader.provide(rows.at(cur_row++)); - return cur_row < rows.size(); - } - - bool empty ( ) override { - const auto& rows = (*cur_section)["rows"].get_array(); - return rows.empty(); - } - - void clear_section() override { - cur_section = nullptr; - cur_row = 0; - } + void validate() const override; + void set_section( const string& section_name ) override; + bool read_row( detail::abstract_snapshot_row_reader& row_reader ) override; + bool empty ( ) override; + void clear_section() override; private: const fc::variant& snapshot; diff --git a/libraries/chain/snapshot.cpp b/libraries/chain/snapshot.cpp new file mode 100644 index 00000000000..4181ac3ec94 --- /dev/null +++ b/libraries/chain/snapshot.cpp @@ -0,0 +1,104 @@ +#include +#include + +namespace eosio { namespace chain { + +variant_snapshot_writer::variant_snapshot_writer() +: snapshot(fc::mutable_variant_object()("sections", fc::variants())("version", current_snapshot_version )) +{ + +} + +void 
variant_snapshot_writer::write_start_section( const std::string& section_name ) { + current_rows.clear(); + current_section_name = section_name; +} + +void variant_snapshot_writer::write_row( const detail::abstract_snapshot_row_writer& row_writer ) { + current_rows.emplace_back(row_writer.to_variant()); +} + +void variant_snapshot_writer::write_end_section( ) { + snapshot["sections"].get_array().emplace_back(fc::mutable_variant_object()("name", std::move(current_section_name))("rows", std::move(current_rows))); +} + +fc::variant variant_snapshot_writer::finalize() { + fc::variant result = std::move(snapshot); + return result; +} + +variant_snapshot_reader::variant_snapshot_reader(const fc::variant& snapshot) +:snapshot(snapshot) +,cur_row(0) +{ +} + +void variant_snapshot_reader::validate() const { + EOS_ASSERT(snapshot.is_object(), snapshot_validation_exception, + "Variant snapshot is not an object"); + const fc::variant_object& o = snapshot.get_object(); + + EOS_ASSERT(o.contains("version"), snapshot_validation_exception, + "Variant snapshot has no version"); + + const auto& version = o["version"]; + EOS_ASSERT(version.is_integer(), snapshot_validation_exception, + "Variant snapshot version is not an integer"); + + EOS_ASSERT(version.as_uint64() == (uint64_t)current_snapshot_version, snapshot_validation_exception, + "Variant snapshot is an unsuppored version. 
Expected : ${expected}, Got: ${actual}", + ("expected", current_snapshot_version)("actual",o["version"].as_uint64())); + + EOS_ASSERT(o.contains("sections"), snapshot_validation_exception, + "Variant snapshot has no sections"); + + const auto& sections = o["sections"]; + EOS_ASSERT(sections.is_array(), snapshot_validation_exception, "Variant snapshot sections is not an array"); + + const auto& section_array = sections.get_array(); + for( const auto& section: section_array ) { + EOS_ASSERT(section.is_object(), snapshot_validation_exception, "Variant snapshot section is not an object"); + + const auto& so = section.get_object(); + EOS_ASSERT(so.contains("name"), snapshot_validation_exception, + "Variant snapshot section has no name"); + + EOS_ASSERT(so["name"].is_string(), snapshot_validation_exception, + "Variant snapshot section name is not a string"); + + EOS_ASSERT(so.contains("rows"), snapshot_validation_exception, + "Variant snapshot section has no rows"); + + EOS_ASSERT(so["rows"].is_array(), snapshot_validation_exception, + "Variant snapshot section rows is not an array"); + } +} + +void variant_snapshot_reader::set_section( const string& section_name ) { + const auto& sections = snapshot["sections"].get_array(); + for( const auto& section: sections ) { + if (section["name"].as_string() == section_name) { + cur_section = §ion.get_object(); + break; + } + } +} + +bool variant_snapshot_reader::read_row( detail::abstract_snapshot_row_reader& row_reader ) { + const auto& rows = (*cur_section)["rows"].get_array(); + row_reader.provide(rows.at(cur_row++)); + return cur_row < rows.size(); +} + +bool variant_snapshot_reader::empty ( ) { + const auto& rows = (*cur_section)["rows"].get_array(); + return rows.empty(); +} + +void variant_snapshot_reader::clear_section() { + cur_section = nullptr; + cur_row = 0; +} + + +}} \ No newline at end of file From 44815b45b8c57052f39173f70d8b01fa0a6876a0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 3 Oct 2018 08:21:59 
-0500 Subject: [PATCH 082/161] Add mongodb-update-via-block-num option --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 32 +++++++++++++++++---- 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index f0e27401dfe..05f77887efa 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -127,6 +127,7 @@ class mongo_db_plugin_impl { bool filter_on_star = true; std::set filter_on; std::set filter_out; + bool update_blocks_via_block_num = false; bool store_blocks = true; bool store_block_states = true; bool store_transactions = true; @@ -930,9 +931,16 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr block_state_doc.append( kvp( "createdAt", b_date{now} ) ); try { - if( !_block_states.update_one( make_document( kvp( "block_id", block_id_str ) ), - make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) { - EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block_state ${bid}", ("bid", block_id) ); + if( update_blocks_via_block_num ) { + if( !_block_states.update_one( make_document( kvp( "block_num", b_int32{static_cast(block_num)} ) ), + make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) { + EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block_state ${num}", ("num", block_num) ); + } + } else { + if( !_block_states.update_one( make_document( kvp( "block_id", block_id_str ) ), + make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) { + EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block_state ${bid}", ("bid", block_id) ); + } } } catch( ... 
) { handle_mongo_exception( "block_states insert: " + json, __LINE__ ); @@ -963,9 +971,16 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr block_doc.append( kvp( "createdAt", b_date{now} ) ); try { - if( !_blocks.update_one( make_document( kvp( "block_id", block_id_str ) ), - make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) { - EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block ${bid}", ("bid", block_id) ); + if( update_blocks_via_block_num ) { + if( !_blocks.update_one( make_document( kvp( "block_num", b_int32{static_cast(block_num)} ) ), + make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) { + EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block ${num}", ("num", block_num) ); + } + } else { + if( !_blocks.update_one( make_document( kvp( "block_id", block_id_str ) ), + make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) { + EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block ${bid}", ("bid", block_id) ); + } } } catch( ... ) { handle_mongo_exception( "blocks insert: " + json, __LINE__ ); @@ -1427,6 +1442,8 @@ void mongo_db_plugin::set_program_options(options_description& cli, options_desc "MongoDB URI connection string, see: https://docs.mongodb.com/master/reference/connection-string/." " If not specified then plugin is disabled. Default database 'EOS' is used if not specified in URI." 
" Example: mongodb://127.0.0.1:27017/EOS") + ("mongodb-update-via-block-num", bpo::value()->default_value(false), + "Update blocks/block_state with latest via block number so that duplicates are overwritten.") ("mongodb-store-blocks", bpo::value()->default_value(true), "Enables storing blocks in mongodb.") ("mongodb-store-block-states", bpo::value()->default_value(true), @@ -1476,6 +1493,9 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) if( options.count( "mongodb-block-start" )) { my->start_block_num = options.at( "mongodb-block-start" ).as(); } + if( options.count( "mongodb-update-via-block-num" )) { + my->update_blocks_via_block_num = options.at( "mongodb-update-via-block-num" ).as(); + } if( options.count( "mongodb-store-blocks" )) { my->store_blocks = options.at( "mongodb-store-blocks" ).as(); } From 8f13ff3258f4236be9a8756c416576a888f17376 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 3 Oct 2018 15:57:48 -0500 Subject: [PATCH 083/161] Add filter-on filter-out to transactions collection --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 53 +++++++++++++++------ 1 file changed, 39 insertions(+), 14 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 05f77887efa..a23eeda311f 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -111,7 +111,8 @@ class mongo_db_plugin_impl { void remove_account_control( const account_name& name, const permission_name& permission ); /// @return true if act should be added to mongodb, false to skip it - bool filter_include( const chain::action_trace& action_trace ) const; + bool filter_include( const account_name& receiver, const action_name& act_name, + const vector& authorization ) const; void init(); void wipe_database(); @@ -218,20 +219,22 @@ const std::string mongo_db_plugin_impl::accounts_col = "accounts"; const std::string mongo_db_plugin_impl::pub_keys_col = 
"pub_keys"; const std::string mongo_db_plugin_impl::account_controls_col = "account_controls"; -bool mongo_db_plugin_impl::filter_include( const chain::action_trace& action_trace ) const { +bool mongo_db_plugin_impl::filter_include( const account_name& receiver, const action_name& act_name, + const vector& authorization ) const +{ bool include = false; if( filter_on_star ) { include = true; } else { - auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&action_trace]( const auto& filter ) { - return filter.match( action_trace.receipt.receiver, action_trace.act.name, 0 ); + auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&receiver, &act_name]( const auto& filter ) { + return filter.match( receiver, act_name, 0 ); } ); if( itr != filter_on.cend() ) { include = true; } else { - for( const auto& a : action_trace.act.authorization ) { - auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&action_trace, &a]( const auto& filter ) { - return filter.match( action_trace.receipt.receiver, action_trace.act.name, a.actor ); + for( const auto& a : authorization ) { + auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&receiver, &act_name, &a]( const auto& filter ) { + return filter.match( receiver, act_name, a.actor ); } ); if( itr != filter_on.cend() ) { include = true; @@ -242,15 +245,16 @@ bool mongo_db_plugin_impl::filter_include( const chain::action_trace& action_tra } if( !include ) { return false; } + if( filter_out.empty() ) { return true; } - auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&action_trace]( const auto& filter ) { - return filter.match( action_trace.receipt.receiver, action_trace.act.name, 0 ); + auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&receiver, &act_name]( const auto& filter ) { + return filter.match( receiver, act_name, 0 ); } ); if( itr != filter_out.cend() ) { return false; } - for( const auto& a : action_trace.act.authorization ) { - auto itr = 
std::find_if( filter_out.cbegin(), filter_out.cend(), [&action_trace, &a]( const auto& filter ) { - return filter.match( action_trace.receipt.receiver, action_trace.act.name, a.actor ); + for( const auto& a : authorization ) { + auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&receiver, &act_name, &a]( const auto& filter ) { + return filter.match( receiver, act_name, a.actor ); } ); if( itr != filter_out.cend() ) { return false; } } @@ -695,6 +699,27 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti using bsoncxx::builder::basic::make_array; namespace bbb = bsoncxx::builder::basic; + const auto& trx = t->trx; + + if( !filter_on_star || !filter_out.empty() ) { + bool include = false; + for( const auto& a : trx.actions ) { + if( filter_include( a.account, a.name, a.authorization ) ) { + include = true; + break; + } + } + if( !include ) { + for( const auto& a : trx.context_free_actions ) { + if( filter_include( a.account, a.name, a.authorization ) ) { + include = true; + break; + } + } + } + if( !include ) return; + } + auto trans_doc = bsoncxx::builder::basic::document{}; auto now = std::chrono::duration_cast( @@ -702,7 +727,6 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti const auto& trx_id = t->id; const auto trx_id_str = trx_id.str(); - const auto& trx = t->trx; trans_doc.append( kvp( "trx_id", trx_id_str ) ); @@ -777,7 +801,8 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces } bool added = false; - if( start_block_reached && store_action_traces && filter_include( atrace ) ) { + if( start_block_reached && store_action_traces && + filter_include( atrace.receipt.receiver, atrace.act.name, atrace.act.authorization ) ) { auto action_traces_doc = bsoncxx::builder::basic::document{}; const chain::base_action_trace& base = atrace; // without inline action traces From 218973405968a64cf2e2f990db82d19111e8b545 Mon Sep 17 00:00:00 2001 From: Tal Muskal 
Date: Sat, 6 Oct 2018 14:24:42 -0700 Subject: [PATCH 084/161] added link to the BP heartbeat plugin Added link to the heartbeat plugin (details here: https://medium.com/@liquideos/increasing-stability-of-the-eos-blockchain-11921477549e) Some BPs have started using it (as can be seen here: http://heartbeat.liquideos.com/), so people in the community requested that I add it here as well. A curated list of some more plugins can be found here: https://github.com/tmuskal/awesome-eosio-plugins --- plugins/COMMUNITY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md index 9d568adc9a1..8fad2103166 100644 --- a/plugins/COMMUNITY.md +++ b/plugins/COMMUNITY.md @@ -10,6 +10,7 @@ Third parties are encouraged to make pull requests to this file (`develop` branc | Kafka | https://github.com/TP-Lab/kafka_plugin | | SQL | https://github.com/asiniscalchi/eosio_sql_plugin | | ElasticSearch | https://github.com/EOSLaoMao/elasticsearch_plugin | +| BP Heartbeat | https://github.com/bancorprotocol/eos-producer-heartbeat-plugin | ## DISCLAIMER: From 9dfafceaf332a2d6861416b314c532096cfb22e4 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 6 Oct 2018 08:28:47 -0500 Subject: [PATCH 085/161] Removed diagnostic test and increased time to allow long running tests to run.
GH #5674 --- .buildkite/long_running_tests.yml | 12 ++++++------ tests/CMakeLists.txt | 2 -- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index d4bc7244193..ed3e2da2850 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -107,7 +107,7 @@ steps: - "mongod.log" - "build/genesis.json" - "build/config.ini" - timeout: 60 + timeout: 100 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -131,7 +131,7 @@ steps: docker#v1.4.0: image: "eosio/ci:ubuntu" workdir: /data/job - timeout: 60 + timeout: 100 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -155,7 +155,7 @@ steps: docker#v1.4.0: image: "eosio/ci:ubuntu18" workdir: /data/job - timeout: 60 + timeout: 100 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -179,7 +179,7 @@ steps: docker#v1.4.0: image: "eosio/ci:fedora" workdir: /data/job - timeout: 60 + timeout: 100 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -203,7 +203,7 @@ steps: docker#v1.4.0: image: "eosio/ci:centos" workdir: /data/job - timeout: 60 + timeout: 100 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -227,4 +227,4 @@ steps: docker#v1.4.0: image: "eosio/ci:amazonlinux" workdir: /data/job - timeout: 60 + timeout: 100 diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 7b5bf7311ed..cc9eec4e538 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -83,8 +83,6 @@ add_test(NAME bnet_nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --s set_property(TEST bnet_nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_run_check_lr_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_check_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_run_check2_lr_test COMMAND 
tests/nodeos_run_test.py -v --wallet-port 9900 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_run_check2_lr_test PROPERTY LABELS long_running_tests) #add_test(NAME distributed_transactions_lr_test COMMAND tests/distributed-transactions-test.py -d 2 -p 21 -n 21 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) #set_property(TEST distributed_transactions_lr_test PROPERTY LABELS long_running_tests) From 8b191e52491e8f347a95df06e16df51542e172df Mon Sep 17 00:00:00 2001 From: Kayan Date: Thu, 4 Oct 2018 15:09:23 +0800 Subject: [PATCH 086/161] fix nodeos_under_min_avail_ram_lr_test --- tests/nodeos_under_min_avail_ram.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index 064296ed282..040be402ca3 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -48,6 +48,7 @@ def setName(self, num): Print("NamedAccounts Name for %d is %s" % (temp, retStr)) return retStr + ############################################################### # nodeos_voting_test # --dump-error-details @@ -151,6 +152,7 @@ def setName(self, num): count=0 while keepProcessing: numAmount+=1 + timeOutCount=0 for fromIndex in range(namedAccounts.numAccounts): count+=1 toIndex=fromIndex+1 @@ -163,8 +165,15 @@ def setName(self, num): try: trans=nodes[0].pushMessage(contract, action, data, opts) if trans is None or not trans[0]: + timeOutCount+=1 + if timeOutCount>=3: + Print("Failed to push create action to eosio contract for %d consecutive times, looks like nodeos already exited." % (timeOutCount)) + keepProcessing=False + break Print("Failed to push create action to eosio contract. 
sleep for 60 seconds") time.sleep(60) + else: + timeOutCount=0 time.sleep(1) except TypeError as ex: keepProcessing=False From 5432f669b83501864627c132455892507c456c88 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 8 Oct 2018 15:36:17 -0500 Subject: [PATCH 087/161] Make blocklog output proper json (blocks as array). GH #5674 --- programs/eosio-blocklog/main.cpp | 33 ++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index bc2de200ed5..f8becf986e9 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -85,6 +85,7 @@ void blocklog::read_log() { uint32_t block_num = (first_block < 1) ? 1 : first_block; signed_block_ptr next; fc::variant pretty_output; + std::vector outputs; const fc::microseconds deadline = fc::seconds(10); auto print_block = [&](signed_block_ptr& next) { abi_serializer::to_variant(*next, @@ -98,26 +99,30 @@ void blocklog::read_log() { ("id", block_id) ("ref_block_prefix", ref_block_prefix) (pretty_output.get_object()); - fc::variant v(std::move(enhanced_object)); - if (no_pretty_print) - fc::json::to_stream(*out, v, fc::json::stringify_large_ints_and_doubles); - else - *out << fc::json::to_pretty_string(v) << "\n"; + outputs.emplace_back(std::move(enhanced_object)); }; while((block_num <= last_block) && (next = block_logger.read_block_by_num( block_num ))) { print_block(next); ++block_num; - out->flush(); - } - if (!reversible_blocks) { - return; } - const reversible_block_object* obj = nullptr; - while( (block_num <= last_block) && (obj = reversible_blocks->find(block_num)) ) { - auto next = obj->get_block(); - print_block(next); - ++block_num; + if (reversible_blocks) { + const reversible_block_object* obj = nullptr; + while( (block_num <= last_block) && (obj = reversible_blocks->find(block_num)) ) { + auto next = obj->get_block(); + print_block(next); + ++block_num; + } } + + fc::variant v; + 
abi_serializer::to_variant(outputs, + v, + []( account_name n ) { return optional(); }, + deadline); + if (no_pretty_print) + fc::json::to_stream(*out, v, fc::json::stringify_large_ints_and_doubles); + else + *out << fc::json::to_pretty_string(v) << "\n"; } void blocklog::set_program_options(options_description& cli) From 9758eea39c79c96b2bcf6819dd0f4df9bc577268 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 8 Oct 2018 16:51:22 -0500 Subject: [PATCH 088/161] Removed BlockWalker and just setting flag to indicate a transaction wasn't found. GH #5674 --- tests/Node.py | 54 ++------------------------------------------------- 1 file changed, 2 insertions(+), 52 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 93ab8502555..29a9810771e 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -50,6 +50,7 @@ def __init__(self, host, port, pid=None, cmd=None, walletMgr=None, enableMongo=F self.lastRetrievedLIB=None self.transCache={} self.walletMgr=walletMgr + self.missingTransaction=False if self.enableMongo: self.mongoEndpointArgs += "--host %s --port %d %s" % (mongoHost, mongoPort, mongoDb) @@ -324,60 +325,11 @@ def isBlockFinalized(self, blockNum): """Is blockNum finalized""" return self.isBlockPresent(blockNum, blockType=BlockType.lib) - class BlockWalker: - def __init__(self, node, transId, startBlockNum=None, endBlockNum=None): - assert(isinstance(transId, str)) - self.trans=None - self.transId=transId - self.node=node - self.startBlockNum=startBlockNum - self.endBlockNum=endBlockNum - - def walkBlocks(self): - start=None - end=None - if self.trans is None and self.transId in self.transCache.keys(): - self.trans=self.transCache[self.transId] - if self.trans is not None: - cntxt=Node.Context(self.trans, "trans") - cntxt.add("processed") - cntxt.add("action_traces") - cntxt.index(0) - blockNum=cntxt.add("block_num") - else: - blockNum=None - # it should be blockNum or later, but just in case the block leading up have any clues... 
- start=None - if self.startBlockNum is not None: - start=self.startBlockNum - elif blockNum is not None: - start=blockNum-5 - if self.endBlockNum is not None: - end=self.endBlockNum - else: - info=self.node.getInfo() - end=info["head_block_num"] - if start is None: - if end > 100: - start=end-100 - else: - start=0 - transDesc=" id =%s" % (self.transId) - if self.trans is not None: - transDesc="=%s" % (json.dumps(self.trans, indent=2, sort_keys=True)) - msg="Original transaction%s\nExpected block_num=%s\n" % (transDesc, blockNum) - for blockNum in range(start, end+1): - block=self.node.getBlock(blockNum) - msg+=json.dumps(block, indent=2, sort_keys=True)+"\n" - - return msg - # pylint: disable=too-many-branches def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayedRetry=True): assert(isinstance(transId, str)) exitOnErrorForDelayed=not delayedRetry and exitOnError timeout=3 - blockWalker=None if not self.enableMongo: cmdDesc="get transaction" cmd="%s %s" % (cmdDesc, transId) @@ -386,12 +338,10 @@ def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayed trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnErrorForDelayed, exitMsg=msg) if trans is not None or not delayedRetry: return trans - if blockWalker is None: - blockWalker=Node.BlockWalker(self, transId) if Utils.Debug: Utils.Print("Could not find transaction with id %s, delay and retry" % (transId)) time.sleep(timeout) - msg+="\nBlock printout -->>\n%s" % blockWalker.walkBlocks(); + self.missingTransaction=True # either it is there or the transaction has timed out return self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) else: From cc10c50a3ffa5c5ddea01f9c20c4c1850565e71a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 8 Oct 2018 16:55:10 -0500 Subject: [PATCH 089/161] Replaced hard coded directory names with common parameters and methods. 
GH #5674 --- tests/Cluster.py | 47 +++++++++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 20 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index b8041227c3f..786beab691c 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -30,6 +30,8 @@ class Cluster(object): __BiosPort=8788 __LauncherCmdArr=[] __bootlog="eosio-ignition-wd/bootlog.txt" + __configDir="etc/eosio/" + __dataDir="var/lib/" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. If not load the wallet plugin @@ -745,6 +747,14 @@ def nodeNameToId(name): m=re.search(r"node_([\d]+)", name) return int(m.group(1)) + @staticmethod + def nodeExtensionToName(ext): + r"""Convert node extension (bios, 0, 1, etc) to node name. """ + prefix="node_" + if ext == "bios": + return prefix + ext + + return "node_%02d" % (ext) @staticmethod def parseProducerKeys(configFile, nodeName): @@ -783,8 +793,7 @@ def parseProducerKeys(configFile, nodeName): def parseProducers(nodeNum): """Parse node config file for producers.""" - node="node_%02d" % (nodeNum) - configFile="etc/eosio/%s/config.ini" % (node) + configFile=Cluster.__configDir + Cluster.nodeExtensionToName(nodeNum) + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) configStr=None with open(configFile, 'r') as f: @@ -802,20 +811,20 @@ def parseProducers(nodeNum): def parseClusterKeys(totalNodes): """Parse cluster config file. 
Updates producer keys data members.""" - node="node_bios" - configFile="etc/eosio/%s/config.ini" % (node) + nodeName=Cluster.nodeExtensionToName("bios") + configFile=Cluster.__configDir + nodeName + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) - producerKeys=Cluster.parseProducerKeys(configFile, node) + producerKeys=Cluster.parseProducerKeys(configFile, nodeName) if producerKeys is None: Utils.Print("ERROR: Failed to parse eosio private keys from cluster config files.") return None for i in range(0, totalNodes): - node="node_%02d" % (i) - configFile="etc/eosio/%s/config.ini" % (node) + nodeName=Cluster.nodeExtensionToName(i) + configFile=Cluster.__configDir + nodeName + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) - keys=Cluster.parseProducerKeys(configFile, node) + keys=Cluster.parseProducerKeys(configFile, nodeName) if keys is not None: producerKeys.update(keys) keyMsg="None" if keys is None else len(keys) @@ -1183,11 +1192,8 @@ def myFunc(): @staticmethod def pgrepEosServerPattern(nodeInstance): - if isinstance(nodeInstance, str): - return r"[\n]?(\d+) (.* --data-dir var/lib/node_%s .*)\n" % nodeInstance - else: - nodeInstanceStr="%02d" % nodeInstance - return Cluster.pgrepEosServerPattern(nodeInstanceStr) + dataLocation=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeInstance) + return r"[\n]?(\d+) (.* --data-dir %s .*)\n" % (dataLocation) # Populates list of EosInstanceInfo objects, matched to actual running instances def discoverLocalNodes(self, totalNodes, timeout=None): @@ -1268,17 +1274,18 @@ def dumpErrorDetailImpl(fileName): Utils.Print("File %s not found." 
% (fileName)) def dumpErrorDetails(self): - fileName="etc/eosio/node_bios/config.ini" + fileName=Cluster.__configDir + Cluster.nodeExtensionToName("bios") + "/config.ini" Cluster.dumpErrorDetailImpl(fileName) - fileName="var/lib/node_bios/stderr.txt" + fileName=Cluster.__dataDir + Cluster.nodeExtensionToName("bios") + "/stderr.txt" Cluster.dumpErrorDetailImpl(fileName) for i in range(0, len(self.nodes)): - fileName="etc/eosio/node_%02d/config.ini" % (i) + configLocation=Cluster.__configDir + Cluster.nodeExtensionToName(i) + "/" + fileName=configLocation + "config.ini" Cluster.dumpErrorDetailImpl(fileName) - fileName="etc/eosio/node_%02d/genesis.json" % (i) + fileName=configLocation + "genesis.json" Cluster.dumpErrorDetailImpl(fileName) - fileName="var/lib/node_%02d/stderr.txt" % (i) + fileName=Cluster.__dataDir + Cluster.nodeExtensionToName(i) + "/stderr.txt" Cluster.dumpErrorDetailImpl(fileName) if self.useBiosBootFile: @@ -1350,9 +1357,9 @@ def waitForNextBlock(self, timeout=None): return node.waitForNextBlock(timeout) def cleanup(self): - for f in glob.glob("var/lib/node_*"): + for f in glob.glob(Cluster.__dataDir + "node_*"): shutil.rmtree(f) - for f in glob.glob("etc/eosio/node_*"): + for f in glob.glob(Cluster.__configDir + "node_*"): shutil.rmtree(f) for f in self.filesToCleanup: From 9019424edef64f4a0dbd5aa7f82aa7ee9dacf3a0 Mon Sep 17 00:00:00 2001 From: Ciju John Date: Mon, 8 Oct 2018 22:53:43 -0500 Subject: [PATCH 090/161] Don't import eosio key in remote runs as it won't be available. Use defproducera account instead of eosio for user accounts creation as remote runs don't have eosio keys. 
--- tests/distributed-transactions-remote-test.py | 6 +++--- tests/distributed-transactions-test.py | 10 +++++++--- tests/nodeos_run_remote_test.py | 7 ++++--- tests/nodeos_run_test.py | 18 +++++++++++------- 4 files changed, 25 insertions(+), 16 deletions(-) diff --git a/tests/distributed-transactions-remote-test.py b/tests/distributed-transactions-remote-test.py index f94d609fb46..1b678f3be03 100755 --- a/tests/distributed-transactions-remote-test.py +++ b/tests/distributed-transactions-remote-test.py @@ -47,7 +47,7 @@ } """ -cluster=Cluster() +cluster=Cluster(walletd=True) (fd, nodesFile) = tempfile.mkstemp() try: @@ -58,7 +58,7 @@ Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, prodCount, topo, delay) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, prodCount=prodCount, topo=topo, delay=delay, dontKill=dontKill) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") @@ -76,7 +76,7 @@ tfile.write(clusterMapJson) tfile.close() - cmd="%s --nodes-file %s %s %s" % (actualTest, nodesFile, "-v" if debug else "", "--dont-kill" if dontKill else "") + cmd="%s --nodes-file %s %s %s" % (actualTest, nodesFile, "-v" if debug else "", "--leave-running" if dontKill else "") Print("Starting up distributed transactions test: %s" % (actualTest)) Print("cmd: %s\n" % (cmd)) if 0 != subprocess.call(cmd, shell=True): diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index f83cb3aa0e7..39e083ddc46 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -19,6 +19,7 @@ total_nodes = pnodes if args.n == 0 else args.n debug=args.v nodesFile=args.nodes_file +dontLaunch=nodesFile is not None seed=args.seed dontKill=args.leave_running dumpErrorDetails=args.dump_error_details 
@@ -40,7 +41,7 @@ try: cluster.setWalletMgr(walletMgr) - if nodesFile is not None: + if dontLaunch: # run test against remote cluster jsonStr=None with open(nodesFile, "r") as f: jsonStr=f.read() @@ -74,7 +75,10 @@ accountsCount=total_nodes walletName="MyWallet-%d" % (random.randrange(10000)) Print("Creating wallet %s if one doesn't already exist." % walletName) - wallet=walletMgr.create(walletName, [cluster.eosioAccount,cluster.defproduceraAccount,cluster.defproducerbAccount]) + walletAccounts=[cluster.defproduceraAccount,cluster.defproducerbAccount] + if not dontLaunch: + walletAccounts.append(cluster.eosioAccount) + wallet=walletMgr.create(walletName, walletAccounts) if wallet is None: errorExit("Failed to create wallet %s" % (walletName)) @@ -95,7 +99,7 @@ errorExit("Failed to spread and validate funds.") print("Funds spread validated") - + testSuccessful=True finally: TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) diff --git a/tests/nodeos_run_remote_test.py b/tests/nodeos_run_remote_test.py index 5b3459e780c..6c918f71c64 100755 --- a/tests/nodeos_run_remote_test.py +++ b/tests/nodeos_run_remote_test.py @@ -33,7 +33,7 @@ actualTest="tests/nodeos_run_test.py" testSuccessful=False -cluster=Cluster() +cluster=Cluster(walletd=True) try: Print("BEGIN") cluster.killall(allInstances=killAll) @@ -42,7 +42,8 @@ Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, prodCount, topo, delay, onlyBios=onlyBios, dontKill=dontKill) is False: + + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, prodCount=prodCount, topo=topo, delay=delay, onlyBios=onlyBios, dontKill=dontKill) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") @@ -54,7 +55,7 @@ 
defproduceraPrvtKey=producerKeys["defproducera"]["private"] defproducerbPrvtKey=producerKeys["defproducerb"]["private"] - cmd="%s --dont-launch --defproducera_prvt_key %s --defproducerb_prvt_key %s %s %s %s" % (actualTest, defproduceraPrvtKey, defproducerbPrvtKey, "-v" if debug else "", "--dont-kill" if dontKill else "", "--only-bios" if onlyBios else "") + cmd="%s --dont-launch --defproducera_prvt_key %s --defproducerb_prvt_key %s %s %s %s" % (actualTest, defproduceraPrvtKey, defproducerbPrvtKey, "-v" if debug else "", "--leave-running" if dontKill else "", "--only-bios" if onlyBios else "") Print("Starting up %s test: %s" % ("nodeos", actualTest)) Print("cmd: %s\n" % (cmd)) if 0 != subprocess.call(cmd, shell=True): diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index f28f62a730a..f4bd815f792 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -72,6 +72,7 @@ cmdError("launcher") errorExit("Failed to stand up eos cluster.") else: + Print("Collecting cluster info.") cluster.initializeNodes(defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey) killEosInstances=False @@ -114,7 +115,10 @@ testWalletName="test" Print("Creating wallet \"%s\"." % (testWalletName)) - testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,cluster.defproduceraAccount,cluster.defproducerbAccount]) + walletAccounts=[cluster.defproduceraAccount,cluster.defproducerbAccount] + if not dontLaunch: + walletAccounts.append(cluster.eosioAccount) + testWallet=walletMgr.create(testWalletName, walletAccounts) Print("Wallet \"%s\" password=%s." 
% (testWalletName, testWallet.password.encode("utf-8"))) @@ -202,14 +206,14 @@ cluster.validateAccounts(None) # create accounts via eosio as otherwise a bid is needed - Print("Create new account %s via %s" % (testeraAccount.name, cluster.eosioAccount.name)) - transId=node.createInitializeAccount(testeraAccount, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) + Print("Create new account %s via %s" % (testeraAccount.name, cluster.defproduceraAccount.name)) + transId=node.createInitializeAccount(testeraAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) - Print("Create new account %s via %s" % (currencyAccount.name, cluster.eosioAccount.name)) - transId=node.createInitializeAccount(currencyAccount, cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000, exitOnError=True) + Print("Create new account %s via %s" % (currencyAccount.name, cluster.defproduceraAccount.name)) + transId=node.createInitializeAccount(currencyAccount, cluster.defproduceraAccount, buyRAM=200000, stakedDeposit=5000, exitOnError=True) - Print("Create new account %s via %s" % (exchangeAccount.name, cluster.eosioAccount.name)) - transId=node.createInitializeAccount(exchangeAccount, cluster.eosioAccount, buyRAM=1000000, waitForTransBlock=True, exitOnError=True) + Print("Create new account %s via %s" % (exchangeAccount.name, cluster.defproduceraAccount.name)) + transId=node.createInitializeAccount(exchangeAccount, cluster.defproduceraAccount, buyRAM=200000, waitForTransBlock=True, exitOnError=True) Print("Validating accounts after user accounts creation") accounts=[testeraAccount, currencyAccount, exchangeAccount] From af719da7e485dee63f50849f2e73b106c8337f63 Mon Sep 17 00:00:00 2001 From: Ciju John Date: Tue, 9 Oct 2018 09:18:52 -0500 Subject: [PATCH 091/161] Remove legacy comment. 
--- tests/nodeos_run_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index f4bd815f792..3fe8002dd11 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -205,7 +205,6 @@ Print("Validating accounts before user accounts creation") cluster.validateAccounts(None) - # create accounts via eosio as otherwise a bid is needed Print("Create new account %s via %s" % (testeraAccount.name, cluster.defproduceraAccount.name)) transId=node.createInitializeAccount(testeraAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) From 64574226b357d36b99f07c3aba3236eac5680d79 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 9 Oct 2018 15:47:00 -0400 Subject: [PATCH 092/161] refactor eosio_exit for WAVM; improve long running performance WAVM becomes increasingly slow as more and more contracts are activated. The ultimate reason is due to high exception overhead from eosio_exit. I believe this stems from the huge number of memory mappings and interaction with the unwinder. Refactor eosio_exit implementation on WAVM to not use exceptions and instead longjmp out of the running wasm code. 
--- libraries/chain/include/eosio/chain/wasm_interface.hpp | 3 +++ .../include/eosio/chain/webassembly/runtime_interface.hpp | 3 +++ libraries/chain/include/eosio/chain/webassembly/wabt.hpp | 2 ++ libraries/chain/include/eosio/chain/webassembly/wavm.hpp | 2 ++ libraries/chain/wasm_interface.cpp | 6 +++++- libraries/chain/webassembly/wabt.cpp | 4 ++++ libraries/chain/webassembly/wavm.cpp | 8 ++++++++ libraries/wasm-jit/Include/Platform/Platform.h | 1 + libraries/wasm-jit/Source/Platform/POSIX.cpp | 4 ++++ 9 files changed, 32 insertions(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 974ee92e5ac..7e6991996af 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -65,6 +65,9 @@ namespace eosio { namespace chain { //Calls apply or error on a given code void apply(const digest_type& code_id, const shared_string& code, apply_context& context); + //Immediately exits currently running wasm. UB is called when no wasm running + void exit(); + private: unique_ptr my; friend class eosio::chain::webassembly::common::intrinsics_accessor; diff --git a/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp b/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp index 8158c727829..2a9b8119b67 100644 --- a/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp @@ -17,6 +17,9 @@ class wasm_runtime_interface { public: virtual std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) = 0; + //immediately exit the currently running wasm_instantiated_module_interface. Yep, this assumes only one can possibly run at a time. 
+ virtual void immediately_exit_currently_running_module() = 0; + virtual ~wasm_runtime_interface(); }; diff --git a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp index bf33448a787..31456dc1dda 100644 --- a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp @@ -52,6 +52,8 @@ class wabt_runtime : public eosio::chain::wasm_runtime_interface { wabt_runtime(); std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) override; + void immediately_exit_currently_running_module() override; + private: wabt::ReadBinaryOptions read_binary_options; //note default ctor will look at each option in feature.def and default to DISABLED for the feature }; diff --git a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp index 2bffe3f621f..f619e318b3f 100644 --- a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp @@ -22,6 +22,8 @@ class wavm_runtime : public eosio::chain::wasm_runtime_interface { ~wavm_runtime(); std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) override; + void immediately_exit_currently_running_module() override; + struct runtime_guard { runtime_guard(); ~runtime_guard(); diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 67bf07430b1..0ea29d3c6d8 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -57,6 +57,10 @@ namespace eosio { namespace chain { my->get_instantiated_module(code_id, code, context.trx_context)->apply(context); } + void wasm_interface::exit() { + my->runtime_interface->immediately_exit_currently_running_module(); + } + wasm_instantiated_module_interface::~wasm_instantiated_module_interface() {} 
wasm_runtime_interface::~wasm_runtime_interface() {} @@ -948,7 +952,7 @@ class context_free_system_api : public context_aware_api { } void eosio_exit(int32_t code) { - throw wasm_exit{code}; + context.control.get_wasm_interface().exit(); } }; diff --git a/libraries/chain/webassembly/wabt.cpp b/libraries/chain/webassembly/wabt.cpp index bf5e1c9d6c8..2d45fa4ee01 100644 --- a/libraries/chain/webassembly/wabt.cpp +++ b/libraries/chain/webassembly/wabt.cpp @@ -96,4 +96,8 @@ std::unique_ptr wabt_runtime::instantiate_mo return std::make_unique(std::move(env), initial_memory, instantiated_module); } +void wabt_runtime::immediately_exit_currently_running_module() { + throw wasm_exit(); +} + }}}} diff --git a/libraries/chain/webassembly/wavm.cpp b/libraries/chain/webassembly/wavm.cpp index 9844cb2373f..e614398c74e 100644 --- a/libraries/chain/webassembly/wavm.cpp +++ b/libraries/chain/webassembly/wavm.cpp @@ -127,4 +127,12 @@ std::unique_ptr wavm_runtime::instantiate_mo return std::make_unique(instance, std::move(module), initial_memory); } +void wavm_runtime::immediately_exit_currently_running_module() { +#ifdef _WIN32 + throw wasm_exit(); +#else + Platform::immediately_exit(); +#endif +} + }}}} diff --git a/libraries/wasm-jit/Include/Platform/Platform.h b/libraries/wasm-jit/Include/Platform/Platform.h index 5f33133a5fb..8d8769d4834 100644 --- a/libraries/wasm-jit/Include/Platform/Platform.h +++ b/libraries/wasm-jit/Include/Platform/Platform.h @@ -134,6 +134,7 @@ namespace Platform Uptr& outTrapOperand, const std::function& thunk ); + PLATFORM_API void immediately_exit(); // // Threading diff --git a/libraries/wasm-jit/Source/Platform/POSIX.cpp b/libraries/wasm-jit/Source/Platform/POSIX.cpp index 8dac984bb2c..4305381b39f 100644 --- a/libraries/wasm-jit/Source/Platform/POSIX.cpp +++ b/libraries/wasm-jit/Source/Platform/POSIX.cpp @@ -276,6 +276,10 @@ namespace Platform return signalType; } + void immediately_exit() { + siglongjmp(signalReturnEnv,1); + } + CallStack 
captureCallStack(Uptr numOmittedFramesFromTop) { #if 0 From 10a5b5ae0c7d16ec5c227b5ad41ccb2b0954d10e Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 9 Oct 2018 15:54:26 -0400 Subject: [PATCH 093/161] Remove unneeded lambda capture in abi serializer --- libraries/chain/abi_serializer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 639e2eb430d..93cc69fe5a3 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -800,7 +800,7 @@ namespace eosio { namespace chain { } fc::scoped_exit> variant_to_binary_context::disallow_extensions_unless( bool condition ) { - std::function callback = [old_recursion_depth=recursion_depth, old_allow_extensions=allow_extensions, this](){ + std::function callback = [old_allow_extensions=allow_extensions, this](){ allow_extensions = old_allow_extensions; }; From 45580435103129f3d9f23ced4825f0ab0e3c23b6 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 9 Oct 2018 14:58:14 -0500 Subject: [PATCH 094/161] Reverted in memory storing of json objects and manually adding json array indication and adding flag to request json array format. GH #5674 --- programs/eosio-blocklog/main.cpp | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index f8becf986e9..31db5b25c70 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -35,6 +35,7 @@ struct blocklog { uint32_t first_block; uint32_t last_block; bool no_pretty_print; + bool as_json_array; }; void blocklog::read_log() { @@ -82,10 +83,11 @@ void blocklog::read_log() { else out = &std::cout; + if (as_json_array) + *out << "["; uint32_t block_num = (first_block < 1) ? 
1 : first_block; signed_block_ptr next; fc::variant pretty_output; - std::vector outputs; const fc::microseconds deadline = fc::seconds(10); auto print_block = [&](signed_block_ptr& next) { abi_serializer::to_variant(*next, @@ -99,30 +101,33 @@ void blocklog::read_log() { ("id", block_id) ("ref_block_prefix", ref_block_prefix) (pretty_output.get_object()); - outputs.emplace_back(std::move(enhanced_object)); + fc::variant v(std::move(enhanced_object)); + if (no_pretty_print) + fc::json::to_stream(*out, v, fc::json::stringify_large_ints_and_doubles); + else + *out << fc::json::to_pretty_string(v) << "\n"; }; + bool contains_obj = false; while((block_num <= last_block) && (next = block_logger.read_block_by_num( block_num ))) { + if (as_json_array && contains_obj) + *out << ","; print_block(next); ++block_num; + contains_obj = true; } if (reversible_blocks) { const reversible_block_object* obj = nullptr; while( (block_num <= last_block) && (obj = reversible_blocks->find(block_num)) ) { + if (as_json_array && contains_obj) + *out << ","; auto next = obj->get_block(); print_block(next); ++block_num; + contains_obj = true; } } - - fc::variant v; - abi_serializer::to_variant(outputs, - v, - []( account_name n ) { return optional(); }, - deadline); - if (no_pretty_print) - fc::json::to_stream(*out, v, fc::json::stringify_large_ints_and_doubles); - else - *out << fc::json::to_pretty_string(v) << "\n"; + if (as_json_array) + *out << "]"; } void blocklog::set_program_options(options_description& cli) @@ -138,6 +143,8 @@ void blocklog::set_program_options(options_description& cli) "the last block number (inclusive) to log") ("no-pretty-print", bpo::bool_switch(&no_pretty_print)->default_value(false), "Do not pretty print the output. 
Useful if piping to jq to improve performance.") + ("as-json-array", bpo::bool_switch(&as_json_array)->default_value(false), + "Print out json blocks wrapped in json array (otherwise the output is free-standing json objects).") ("help", "Print this help message and exit.") ; From 173139c8c068ee7a51a560d21a35b18495ba0e22 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 9 Oct 2018 15:02:19 -0500 Subject: [PATCH 095/161] Added code to retrieve a block log using the tool and improved filterJsonObject to also handle json arrays. GH #5674 --- tests/Node.py | 2 +- tests/testUtils.py | 45 ++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 41 insertions(+), 6 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 29a9810771e..82d6581bb02 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -189,7 +189,7 @@ def runMongoCmdReturnJson(cmd, subcommand, trace=False, exitOnError=False): outStr=Node.byteArrToStr(outs) if not outStr: return None - extJStr=Utils.filterJsonObject(outStr) + extJStr=Utils.filterJsonObjectOrArray(outStr) if not extJStr: return None jStr=Node.normalizeJsonObject(extJStr) diff --git a/tests/testUtils.py b/tests/testUtils.py index 9c52fe3796a..9302aeda8b5 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -32,6 +32,8 @@ class Utils: ShuttingDown=False CheckOutputDeque=deque(maxlen=10) + EosBlockLogPath="programs/eosio-blocklog/eosio-blocklog" + @staticmethod def Print(*args, **kwargs): stackDepth=len(inspect.stack())-2 @@ -136,16 +138,25 @@ def waitForBool(lam, timeout=None): return False if ret is None else ret @staticmethod - def filterJsonObject(data): - firstIdx=data.find('{') - lastIdx=data.rfind('}') - retStr=data[firstIdx:lastIdx+1] + def filterJsonObjectOrArray(data): + firstObjIdx=data.find('{') + lastObjIdx=data.rfind('}') + firstArrayIdx=data.find('[') + lastArrayIdx=data.rfind(']') + if firstArrayIdx==-1 or lastArrayIdx==-1: + retStr=data[firstObjIdx:lastObjIdx+1] + elif firstObjIdx==-1 or lastObjIdx==-1: + 
retStr=data[firstArrayIdx:lastArrayIdx+1] + elif lastArrayIdx < lastObjIdx: + retStr=data[firstObjIdx:lastObjIdx+1] + else: + retStr=data[firstArrayIdx:lastArrayIdx+1] return retStr @staticmethod def runCmdArrReturnJson(cmdArr, trace=False, silentErrors=True): retStr=Utils.checkOutput(cmdArr) - jStr=Utils.filterJsonObject(retStr) + jStr=Utils.filterJsonObjectOrArray(retStr) if trace: Utils.Print ("RAW > %s" % (retStr)) if trace: Utils.Print ("JSON> %s" % (jStr)) if not jStr: @@ -213,6 +224,30 @@ def pgrepCmd(serverName): return "pgrep %s %s" % (pgrepOpts, serverName) + @staticmethod + def getBlockLog(blockLogLocation, silentErrors=False, exitOnError=False): + assert(isinstance(blockLogLocation, str)) + cmd="%s --blocks-dir %s --as-json-array" % (Utils.EosBlockLogPath, blockLogLocation) + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + rtn=None + try: + rtn=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) + except subprocess.CalledProcessError as ex: + if not silentErrors: + msg=ex.output.decode("utf-8") + errorMsg="Exception during \"%s\". %s" % (cmd, msg) + if exitOnError: + Utils.cmdError(errorMsg) + Utils.errorExit(errorMsg) + else: + Utils.Print("ERROR: %s" % (errorMsg)) + return None + + if exitOnError and rtn is None: + Utils.cmdError("could not \"%s\"" % (cmd)) + Utils.errorExit("Failed to \"%s\"" % (cmd)) + + return rtn ########################################################################################### class Account(object): From 983d6019a18035ab6d3385c9203a22df4956601b Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 9 Oct 2018 15:07:09 -0500 Subject: [PATCH 096/161] Added printing out block logs if a transaction was missed. 
GH #5674 --- tests/Cluster.py | 38 +++++++++++++++++++++++++++++++++++++- tests/TestHelper.py | 1 + 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 786beab691c..ace5ec0e927 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -32,6 +32,7 @@ class Cluster(object): __bootlog="eosio-ignition-wd/bootlog.txt" __configDir="etc/eosio/" __dataDir="var/lib/" + __fileDivider="=================================================================" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. If not load the wallet plugin @@ -1265,7 +1266,7 @@ def relaunchEosInstances(self): @staticmethod def dumpErrorDetailImpl(fileName): - Utils.Print("=================================================================") + Utils.Print(Cluster.__fileDivider) Utils.Print("Contents of %s:" % (fileName)) if os.path.exists(fileName): with open(fileName, "r") as f: @@ -1414,3 +1415,38 @@ def reportStatus(self): node.reportStatus() except: Utils.Print("No reportStatus") + + def printBlockLogIfNeeded(self): + printBlockLog=False + if hasattr(self, "nodes"): + for node in self.nodes: + if node.missingTransaction: + printBlockLog=True + break + + if hasattr(self, "biosNode") and self.biosNode.missingTransaction: + printBlockLog=True + + if not printBlockLog: + return + + self.printBlockLog() + + def printBlockLog(self): + blockLogDir=Cluster.__dataDir + Cluster.nodeExtensionToName("bios") + "/blocks/" + blockLogBios=Utils.getBlockLog(blockLogDir, exitOnError=False) + Utils.Print(Cluster.__fileDivider) + Utils.Print("Block log from %s:\n%s" % (blockLogDir, json.dumps(blockLogBios, indent=1))) + + if not hasattr(self, "nodes"): + return + + + numNodes=len(self.nodes) + for i in range(numNodes): + node=self.nodes[i] + blockLogDir=Cluster.__dataDir + Cluster.nodeExtensionToName(i) + "/blocks/" + blockLog=Utils.getBlockLog(blockLogDir, exitOnError=False) + Utils.Print(Cluster.__fileDivider) + Utils.Print("Block log from 
%s:\n%s" % (blockLogDir, json.dumps(blockLog, indent=1))) + diff --git a/tests/TestHelper.py b/tests/TestHelper.py index 471c397beff..1650597dee5 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -148,6 +148,7 @@ def shutdown(cluster, walletMgr, testSuccessful=True, killEosInstances=True, kil cluster.dumpErrorDetails() if walletMgr: walletMgr.dumpErrorDetails() + cluster.printBlockLogIfNeeded() Utils.Print("== Errors see above ==") if len(Utils.CheckOutputDeque)>0: Utils.Print("== cout/cerr pairs from last %d calls to Utils. ==" % len(Utils.CheckOutputDeque)) From e4f4cb6c98b00fd9a52f5cd1f098fe014f7c8898 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Tue, 9 Oct 2018 17:05:02 -0400 Subject: [PATCH 097/161] add binary snapshot and tests --- .../chain/include/eosio/chain/snapshot.hpp | 49 ++++- libraries/chain/snapshot.cpp | 197 +++++++++++++++++- unittests/snapshot_tests.cpp | 68 ++++-- 3 files changed, 289 insertions(+), 25 deletions(-) diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 8ee4d8a195a..2b0f2d66248 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -190,22 +190,22 @@ namespace eosio { namespace chain { class variant_snapshot_writer : public snapshot_writer { public: - variant_snapshot_writer(); + variant_snapshot_writer(fc::mutable_variant_object& snapshot); void write_start_section( const std::string& section_name ) override; void write_row( const detail::abstract_snapshot_row_writer& row_writer ) override; void write_end_section( ) override; - fc::variant finalize(); + void finalize(); private: - fc::mutable_variant_object snapshot; + fc::mutable_variant_object& snapshot; std::string current_section_name; fc::variants current_rows; }; class variant_snapshot_reader : public snapshot_reader { public: - variant_snapshot_reader(const fc::variant& snapshot); + explicit variant_snapshot_reader(const 
fc::variant& snapshot); void validate() const override; void set_section( const string& section_name ) override; @@ -216,7 +216,46 @@ namespace eosio { namespace chain { private: const fc::variant& snapshot; const fc::variant_object* cur_section; - int cur_row; + uint64_t cur_row; + }; + + class ostream_snapshot_writer : public snapshot_writer { + public: + explicit ostream_snapshot_writer(std::ostream& snapshot); + + void write_start_section( const std::string& section_name ) override; + void write_row( const detail::abstract_snapshot_row_writer& row_writer ) override; + void write_end_section( ) override; + void finalize(); + + static const uint32_t magic_number = 0x30510550; + + private: + + std::ostream& snapshot; + std::streampos header_pos; + std::streampos section_pos; + uint64_t row_count; + + }; + + class istream_snapshot_reader : public snapshot_reader { + public: + explicit istream_snapshot_reader(std::istream& snapshot); + + void validate() const override; + void set_section( const string& section_name ) override; + bool read_row( detail::abstract_snapshot_row_reader& row_reader ) override; + bool empty ( ) override; + void clear_section() override; + + private: + bool validate_section() const; + + std::istream& snapshot; + std::streampos header_pos; + uint64_t num_rows; + uint64_t cur_row; }; }} diff --git a/libraries/chain/snapshot.cpp b/libraries/chain/snapshot.cpp index 4181ac3ec94..15ae3abefc8 100644 --- a/libraries/chain/snapshot.cpp +++ b/libraries/chain/snapshot.cpp @@ -1,12 +1,14 @@ #include #include +#include namespace eosio { namespace chain { -variant_snapshot_writer::variant_snapshot_writer() -: snapshot(fc::mutable_variant_object()("sections", fc::variants())("version", current_snapshot_version )) +variant_snapshot_writer::variant_snapshot_writer(fc::mutable_variant_object& snapshot) +: snapshot(snapshot) { - + snapshot.set("sections", fc::variants()); + snapshot.set("version", current_snapshot_version ); } void 
variant_snapshot_writer::write_start_section( const std::string& section_name ) { @@ -22,9 +24,8 @@ void variant_snapshot_writer::write_end_section( ) { snapshot["sections"].get_array().emplace_back(fc::mutable_variant_object()("name", std::move(current_section_name))("rows", std::move(current_rows))); } -fc::variant variant_snapshot_writer::finalize() { - fc::variant result = std::move(snapshot); - return result; +void variant_snapshot_writer::finalize() { + } variant_snapshot_reader::variant_snapshot_reader(const fc::variant& snapshot) @@ -79,9 +80,11 @@ void variant_snapshot_reader::set_section( const string& section_name ) { for( const auto& section: sections ) { if (section["name"].as_string() == section_name) { cur_section = §ion.get_object(); - break; + return; } } + + EOS_THROW(snapshot_exception, "Variant snapshot has no section named ${n}", ("n", section_name)); } bool variant_snapshot_reader::read_row( detail::abstract_snapshot_row_reader& row_reader ) { @@ -100,5 +103,185 @@ void variant_snapshot_reader::clear_section() { cur_row = 0; } +ostream_snapshot_writer::ostream_snapshot_writer(std::ostream& snapshot) +:snapshot(snapshot) +,header_pos(snapshot.tellp()) +,section_pos(-1) +,row_count(0) +{ + // write magic number + auto totem = magic_number; + snapshot.write((char*)&totem, sizeof(totem)); + + // write version + auto version = current_snapshot_version; + snapshot.write((char*)&version, sizeof(version)); +} + +void ostream_snapshot_writer::write_start_section( const std::string& section_name ) +{ + EOS_ASSERT(section_pos == std::streampos(-1), snapshot_exception, "Attempting to write a new section without closing the previous section"); + section_pos = snapshot.tellp(); + row_count = 0; + + uint64_t placeholder = std::numeric_limits::max(); + + // write a placeholder for the section size + snapshot.write((char*)&placeholder, sizeof(placeholder)); + + // write placeholder for row count + snapshot.write((char*)&placeholder, sizeof(placeholder)); + + 
// write the section name (null terminated) + snapshot.write(section_name.data(), section_name.size()); + snapshot.put(0); +} + +void ostream_snapshot_writer::write_row( const detail::abstract_snapshot_row_writer& row_writer ) { + auto restore = snapshot.tellp(); + try { + row_writer.write(snapshot); + } catch (...) { + snapshot.seekp(restore); + throw; + } + row_count++; +} + +void ostream_snapshot_writer::write_end_section( ) { + auto restore = snapshot.tellp(); + + uint64_t section_size = restore - section_pos - sizeof(uint64_t); + + snapshot.seekp(section_pos); + + // write a the section size + snapshot.write((char*)§ion_size, sizeof(section_size)); + + // write the row count + snapshot.write((char*)&row_count, sizeof(row_count)); + + snapshot.seekp(restore); + + section_pos = std::streampos(-1); + row_count = 0; +} + +void ostream_snapshot_writer::finalize() { + uint64_t end_marker = std::numeric_limits::max(); + + // write a placeholder for the section size + snapshot.write((char*)&end_marker, sizeof(end_marker)); +} + +istream_snapshot_reader::istream_snapshot_reader(std::istream& snapshot) +:snapshot(snapshot) +,header_pos(snapshot.tellg()) +,num_rows(0) +,cur_row(0) +{ + +} + +void istream_snapshot_reader::validate() const { + // make sure to restore the read pos + auto restore_pos = fc::make_scoped_exit([this,pos=snapshot.tellg(),ex=snapshot.exceptions()](){ + snapshot.seekg(pos); + snapshot.exceptions(ex); + }); + + snapshot.exceptions(std::istream::failbit|std::istream::eofbit); + + try { + // validate totem + auto expected_totem = ostream_snapshot_writer::magic_number; + decltype(expected_totem) actual_totem; + snapshot.read((char*)&actual_totem, sizeof(actual_totem)); + EOS_ASSERT(actual_totem == expected_totem, snapshot_exception, + "Binary snapshot has unexpected magic number!"); + + // validate version + auto expected_version = current_snapshot_version; + decltype(expected_version) actual_version; + snapshot.read((char*)&actual_version, 
sizeof(actual_version)); + EOS_ASSERT(actual_version == expected_version, snapshot_exception, + "Binary snapshot is an unsuppored version. Expected : ${expected}, Got: ${actual}", + ("expected", expected_version)("actual", actual_version)); + + while (validate_section()) {} + } catch( const std::exception& e ) { \ + snapshot_exception fce(FC_LOG_MESSAGE( warn, "Binary snapshot validation threw IO exception (${what})",("what",e.what()))); + throw fce; + } +} + +bool istream_snapshot_reader::validate_section() const { + uint64_t section_size = 0; + snapshot.read((char*)§ion_size,sizeof(section_size)); + + // stop when we see the end marker + if (section_size == std::numeric_limits::max()) { + return false; + } + + // seek past the section + snapshot.seekg(snapshot.tellg() + std::streamoff(section_size)); + + return true; +} + +void istream_snapshot_reader::set_section( const string& section_name ) { + auto restore_pos = fc::make_scoped_exit([this,pos=snapshot.tellg()](){ + snapshot.seekg(pos); + }); + + const std::streamoff header_size = sizeof(ostream_snapshot_writer::magic_number) + sizeof(current_snapshot_version); + + auto next_section_pos = header_pos + header_size; + + while (true) { + snapshot.seekg(next_section_pos); + uint64_t section_size = 0; + snapshot.read((char*)§ion_size,sizeof(section_size)); + if (section_size == std::numeric_limits::max()) { + break; + } + + next_section_pos = snapshot.tellg() + std::streamoff(section_size); + + uint64_t row_count = 0; + snapshot.read((char*)&row_count,sizeof(row_count)); + + bool match = true; + for(auto c : section_name) { + if(snapshot.get() != c) { + match = false; + break; + } + } + + if (match && snapshot.get() == 0) { + cur_row = 0; + num_rows = row_count; + return; + } + } + + EOS_THROW(snapshot_exception, "Binary snapshot has no section named ${n}", ("n", section_name)); +} + +bool istream_snapshot_reader::read_row( detail::abstract_snapshot_row_reader& row_reader ) { + row_reader.provide(snapshot); + 
return ++cur_row < num_rows; +} + +bool istream_snapshot_reader::empty ( ) { + return num_rows == 0; +} + +void istream_snapshot_reader::clear_section() { + num_rows = 0; + cur_row = 0; +} }} \ No newline at end of file diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp index 4389edf2d78..3df861e5bd6 100644 --- a/unittests/snapshot_tests.cpp +++ b/unittests/snapshot_tests.cpp @@ -4,6 +4,7 @@ */ #include +#include #include #include @@ -11,6 +12,7 @@ #include #include +#include using namespace eosio; using namespace testing; @@ -61,9 +63,52 @@ class snapshotted_tester : public base_tester { bool validate() { return true; } }; +struct variant_snapshot_suite { + using writer_t = variant_snapshot_writer; + using reader_t = variant_snapshot_reader; + using storage_t = fc::mutable_variant_object; +}; + +struct buffered_snapshot_suite { + using writer_t = ostream_snapshot_writer; + using reader_t = istream_snapshot_reader; + using storage_t = std::stringstream; +}; + +template +struct suite_funcs { + struct writer : public SUITE::writer_t { + writer( const std::shared_ptr& storage ) + :SUITE::writer_t(*storage) + ,storage(storage) + {} + + std::shared_ptr storage; + }; + + struct reader : public SUITE::reader_t { + explicit reader(const std::shared_ptr& from) + :SUITE::reader_t(*from->storage) + ,storage(from->storage) + {} + + std::shared_ptr storage; + }; + + static auto get_writer() { + return std::make_shared(std::make_shared()); + } + + static auto get_reader(const std::shared_ptr& w) { + return std::make_shared(w); + } +}; + BOOST_AUTO_TEST_SUITE(snapshot_tests) -BOOST_AUTO_TEST_CASE(test_exhaustive_snapshot) +using snapshot_suites = boost::mpl::list; + +BOOST_AUTO_TEST_CASE_TEMPLATE(test_exhaustive_snapshot, SNAPSHOT_SUITE, snapshot_suites) { tester chain; @@ -79,13 +124,12 @@ BOOST_AUTO_TEST_CASE(test_exhaustive_snapshot) for (int generation = 0; generation < generation_count; generation++) { // create a new snapshot child - 
variant_snapshot_writer writer; - auto writer_p = std::shared_ptr(&writer, [](snapshot_writer *){}); - chain.control->write_snapshot(writer_p); - auto snapshot = writer.finalize(); + auto writer = suite_funcs::get_writer(); + chain.control->write_snapshot(writer); + writer->finalize(); // create a new child at this snapshot - sub_testers.emplace_back(chain.get_config(), std::make_shared(snapshot), generation); + sub_testers.emplace_back(chain.get_config(), suite_funcs::get_reader(writer), generation); // increment the test contract chain.push_action(N(snapshot), N(increment), N(snapshot), mutable_variant_object() @@ -108,7 +152,7 @@ BOOST_AUTO_TEST_CASE(test_exhaustive_snapshot) } } -BOOST_AUTO_TEST_CASE(test_replay_over_snapshot) +BOOST_AUTO_TEST_CASE_TEMPLATE(test_replay_over_snapshot, SNAPSHOT_SUITE, snapshot_suites) { tester chain; @@ -136,13 +180,11 @@ BOOST_AUTO_TEST_CASE(test_replay_over_snapshot) auto expected_pre_integrity_hash = chain.control->calculate_integrity_hash(); // create a new snapshot child - variant_snapshot_writer writer; - auto writer_p = std::shared_ptr(&writer, [](snapshot_writer *){}); - chain.control->write_snapshot(writer_p); - auto snapshot = writer.finalize(); + auto writer = suite_funcs::get_writer(); + chain.control->write_snapshot(writer); // create a new child at this snapshot - snapshotted_tester snap_chain(chain.get_config(), std::make_shared(snapshot), 1); + snapshotted_tester snap_chain(chain.get_config(), suite_funcs::get_reader(writer), 1); BOOST_REQUIRE_EQUAL(expected_pre_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); // push more blocks to build up a block log @@ -162,7 +204,7 @@ BOOST_AUTO_TEST_CASE(test_replay_over_snapshot) BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); // replay the block log from the snapshot child, from the snapshot - snapshotted_tester replay_chain(chain.get_config(), std::make_shared(snapshot), 2, 1); + 
snapshotted_tester replay_chain(chain.get_config(), suite_funcs::get_reader(writer), 2, 1); BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); } From aa2a6931ac16a0e4b6a2a00fa96768c82527ffc1 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Tue, 9 Oct 2018 17:46:38 -0400 Subject: [PATCH 098/161] fix for mutable_variants not being aliasable to variant --- unittests/snapshot_tests.cpp | 41 ++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp index 3df861e5bd6..eb4a9cf1986 100644 --- a/unittests/snapshot_tests.cpp +++ b/unittests/snapshot_tests.cpp @@ -66,41 +66,49 @@ class snapshotted_tester : public base_tester { struct variant_snapshot_suite { using writer_t = variant_snapshot_writer; using reader_t = variant_snapshot_reader; - using storage_t = fc::mutable_variant_object; + using write_storage_t = fc::mutable_variant_object; + using read_storage_t = fc::variant; }; struct buffered_snapshot_suite { using writer_t = ostream_snapshot_writer; using reader_t = istream_snapshot_reader; - using storage_t = std::stringstream; + using write_storage_t = std::stringstream; + using read_storage_t = write_storage_t; + }; template struct suite_funcs { struct writer : public SUITE::writer_t { - writer( const std::shared_ptr& storage ) + writer( const std::shared_ptr& storage ) :SUITE::writer_t(*storage) ,storage(storage) - {} + { + + } - std::shared_ptr storage; + std::shared_ptr storage; }; struct reader : public SUITE::reader_t { - explicit reader(const std::shared_ptr& from) - :SUITE::reader_t(*from->storage) - ,storage(from->storage) + explicit reader(typename SUITE::read_storage_t& buffer) + :SUITE::reader_t(buffer) {} - std::shared_ptr storage; }; static auto get_writer() { - return std::make_shared(std::make_shared()); + return std::make_shared(std::make_shared()); + } + + static auto finalize(const 
std::shared_ptr& w) { + w->finalize(); + return typename SUITE::read_storage_t(std::move(*w->storage)); } - static auto get_reader(const std::shared_ptr& w) { - return std::make_shared(w); + static auto get_reader( typename SUITE::read_storage_t& buffer) { + return std::make_shared(buffer); } }; @@ -126,10 +134,10 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_exhaustive_snapshot, SNAPSHOT_SUITE, snapshot // create a new snapshot child auto writer = suite_funcs::get_writer(); chain.control->write_snapshot(writer); - writer->finalize(); + auto snapshot = suite_funcs::finalize(writer); // create a new child at this snapshot - sub_testers.emplace_back(chain.get_config(), suite_funcs::get_reader(writer), generation); + sub_testers.emplace_back(chain.get_config(), suite_funcs::get_reader(snapshot), generation); // increment the test contract chain.push_action(N(snapshot), N(increment), N(snapshot), mutable_variant_object() @@ -182,9 +190,10 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_replay_over_snapshot, SNAPSHOT_SUITE, snapsho // create a new snapshot child auto writer = suite_funcs::get_writer(); chain.control->write_snapshot(writer); + auto snapshot = suite_funcs::finalize(writer); // create a new child at this snapshot - snapshotted_tester snap_chain(chain.get_config(), suite_funcs::get_reader(writer), 1); + snapshotted_tester snap_chain(chain.get_config(), suite_funcs::get_reader(snapshot), 1); BOOST_REQUIRE_EQUAL(expected_pre_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); // push more blocks to build up a block log @@ -204,7 +213,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_replay_over_snapshot, SNAPSHOT_SUITE, snapsho BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); // replay the block log from the snapshot child, from the snapshot - snapshotted_tester replay_chain(chain.get_config(), suite_funcs::get_reader(writer), 2, 1); + snapshotted_tester replay_chain(chain.get_config(), 
suite_funcs::get_reader(snapshot), 2, 1); BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); } From 13d0a6c2cff74f8d9e37495b3cea6102fb43c31d Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 10 Oct 2018 08:34:01 -0500 Subject: [PATCH 099/161] Turned off automatic retry for buildkite tests. GH #5674 --- .buildkite/long_running_tests.yml | 18 ---------------- .buildkite/pipeline.yml | 36 ------------------------------- 2 files changed, 54 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index ed3e2da2850..e20657a2db6 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -97,9 +97,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":darwin: Tests" agents: - "role=macos-tester" @@ -117,9 +114,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":ubuntu: Tests" agents: - "role=linux-tester" @@ -141,9 +135,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":ubuntu: 18.04 Tests" agents: - "role=linux-tester" @@ -165,9 +156,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":fedora: Tests" agents: - "role=linux-tester" @@ -189,9 +177,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ 
:microscope: Running tests" && \ cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":centos: Tests" agents: - "role=linux-tester" @@ -213,9 +198,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L long_running_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":aws: Tests" agents: - "role=linux-tester" diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index b26a20f569b..d35e401f7b6 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -97,9 +97,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - retry: - automatic: - limit: 1 label: ":darwin: Tests" agents: - "role=macos-tester" @@ -117,9 +114,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":darwin: NP Tests" agents: - "role=macos-tester" @@ -137,9 +131,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - retry: - automatic: - limit: 1 label: ":ubuntu: Tests" agents: - "role=linux-tester" @@ -161,9 +152,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":ubuntu: NP Tests" agents: - "role=linux-tester" @@ -185,9 +173,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -j8 -LE _tests 
--output-on-failure - retry: - automatic: - limit: 1 label: ":ubuntu: 18.04 Tests" agents: - "role=linux-tester" @@ -209,9 +194,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":ubuntu: 18.04 NP Tests" agents: - "role=linux-tester" @@ -233,9 +215,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - retry: - automatic: - limit: 1 label: ":fedora: Tests" agents: - "role=linux-tester" @@ -257,9 +236,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":fedora: NP Tests" agents: - "role=linux-tester" @@ -281,9 +257,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - retry: - automatic: - limit: 1 label: ":centos: Tests" agents: - "role=linux-tester" @@ -305,9 +278,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":centos: NP Tests" agents: - "role=linux-tester" @@ -329,9 +299,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - retry: - automatic: - limit: 1 label: ":aws: Tests" agents: - "role=linux-tester" @@ -353,9 +320,6 @@ steps: $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ cd /data/job/build && ctest -L 
nonparallelizable_tests --output-on-failure - retry: - automatic: - limit: 1 label: ":aws: NP Tests" agents: - "role=linux-tester" From 92052fd1c9b747b41aede0c26036f5172ba01cbe Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 10 Oct 2018 11:13:56 -0400 Subject: [PATCH 100/161] rename cleos sudo command to wrap; also allow user to override contract account --- programs/cleos/main.cpp | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 1428c4d1a38..48002552aa1 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -3053,19 +3053,21 @@ int main( int argc, char** argv ) { } ); - // sudo subcommand - auto sudo = app.add_subcommand("sudo", localized("Sudo contract commands"), false); - sudo->require_subcommand(); + // wrap subcommand + auto wrap = app.add_subcommand("wrap", localized("Wrap contract commands"), false); + wrap->require_subcommand(); - // sudo exec + // wrap exec + con = "eosio.wrap"; executer = ""; string trx_to_exec; - auto sudo_exec = sudo->add_subcommand("exec", localized("Execute a transaction while bypassing authorization checks")); - add_standard_transaction_options(sudo_exec); - sudo_exec->add_option("executer", executer, localized("Account executing the transaction and paying for the deferred transaction RAM"))->required(); - sudo_exec->add_option("transaction", trx_to_exec, localized("The JSON string or filename defining the transaction to execute"))->required(); + auto wrap_exec = wrap->add_subcommand("exec", localized("Execute a transaction while bypassing authorization checks")); + add_standard_transaction_options(wrap_exec); + wrap_exec->add_option("executer", executer, localized("Account executing the transaction and paying for the deferred transaction RAM"))->required(); + wrap_exec->add_option("transaction", trx_to_exec, localized("The JSON string or filename defining the transaction to execute"))->required(); + 
wrap_exec->add_option("--contract,-c", con, localized("The contract which controls the wrap contract")); - sudo_exec->set_callback([&] { + wrap_exec->set_callback([&] { fc::variant trx_var; try { trx_var = json_from_file_or_string(trx_to_exec); @@ -3073,14 +3075,14 @@ int main( int argc, char** argv ) { auto accountPermissions = get_account_permissions(tx_permission); if( accountPermissions.empty() ) { - accountPermissions = vector{{executer, config::active_name}, {"eosio.sudo", config::active_name}}; + accountPermissions = vector{{executer, config::active_name}, {con, config::active_name}}; } auto args = fc::mutable_variant_object() ("executer", executer ) ("trx", trx_var); - send_actions({chain::action{accountPermissions, "eosio.sudo", "exec", variant_to_bin( N(eosio.sudo), N(exec), args ) }}); + send_actions({chain::action{accountPermissions, con, "exec", variant_to_bin( con, N(exec), args ) }}); }); // system subcommand From 45bdd373ec2fd7d631976e00dd00df6f3f5f7f35 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Wed, 10 Oct 2018 11:43:35 -0400 Subject: [PATCH 101/161] fixes to binary serialization caused by overloaded operators --- .../include/eosio/chain/database_utils.hpp | 31 ++++--- .../include/eosio/chain/permission_object.hpp | 2 +- .../chain/include/eosio/chain/snapshot.hpp | 60 ++++++++++++-- libraries/chain/snapshot.cpp | 3 + libraries/fc | 2 +- unittests/snapshot_tests.cpp | 83 +++++++++++++------ 6 files changed, 131 insertions(+), 50 deletions(-) diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp index de4c1a96af3..c3a1bddfd0e 100644 --- a/libraries/chain/include/eosio/chain/database_utils.hpp +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -62,6 +62,17 @@ namespace eosio { namespace chain { } }; + template + DataStream& operator << ( DataStream& ds, const shared_blob& b ) { + fc::raw::pack(ds, static_cast(b)); + return ds; + } + + template + DataStream& 
operator >> ( DataStream& ds, shared_blob& b ) { + fc::raw::unpack(ds, static_cast(b)); + return ds; + } } } namespace fc { @@ -131,30 +142,18 @@ namespace fc { from_variant(v, _v); sv = eosio::chain::shared_vector(_v.begin(), _v.end(), sv.get_allocator()); } - - template - DataStream& operator << ( DataStream& ds, const eosio::chain::shared_blob& b ) { - fc::raw::pack(ds, static_cast(b)); - return ds; - } - - template - DataStream& operator >> ( DataStream& ds, eosio::chain::shared_blob& b ) { - fc::raw::unpack(ds, static_cast(b)); - return ds; - } } namespace chainbase { // overloads for OID packing template - DataStream& operator << ( DataStream& ds, const chainbase::oid& oid ) { + DataStream& operator << ( DataStream& ds, const oid& oid ) { fc::raw::pack(ds, oid._id); return ds; } template - DataStream& operator >> ( DataStream& ds, chainbase::oid& oid ) { + DataStream& operator >> ( DataStream& ds, oid& oid ) { fc::raw::unpack(ds, oid._id); return ds; } @@ -169,7 +168,7 @@ DataStream& operator << ( DataStream& ds, const float64_t& v ) { template DataStream& operator >> ( DataStream& ds, float64_t& v ) { - fc::raw::unpack(ds, *reinterpret_cast(&v)); + fc::raw::unpack(ds, *reinterpret_cast(&v)); return ds; } @@ -181,7 +180,7 @@ DataStream& operator << ( DataStream& ds, const float128_t& v ) { template DataStream& operator >> ( DataStream& ds, float128_t& v ) { - fc::raw::unpack(ds, *reinterpret_cast(&v)); + fc::raw::unpack(ds, *reinterpret_cast(&v)); return ds; } diff --git a/libraries/chain/include/eosio/chain/permission_object.hpp b/libraries/chain/include/eosio/chain/permission_object.hpp index a9e572e404c..81a0f1efe27 100644 --- a/libraries/chain/include/eosio/chain/permission_object.hpp +++ b/libraries/chain/include/eosio/chain/permission_object.hpp @@ -4,6 +4,7 @@ */ #pragma once #include +#include #include "multi_index_includes.hpp" @@ -112,5 +113,4 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::permission_usage_object, eosio::chain::pe 
FC_REFLECT(eosio::chain::permission_object, (usage_id)(parent)(owner)(name)(last_updated)(auth)) -FC_REFLECT(chainbase::oid, (_id)) FC_REFLECT(eosio::chain::permission_usage_object, (id)(last_used)) diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 2b0f2d66248..25a07312828 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -40,9 +40,47 @@ namespace eosio { namespace chain { value = row; } + /** + * Due to a pattern in our code of overloading `operator << ( std::ostream&, ... )` to provide + * human-readable string forms of data, we cannot directly use ostream as those operators will + * be used instead of the expected operators. In otherwords: + * fc::raw::pack(fc::datastream...) + * will end up calling _very_ different operators than + * fc::raw::pack(std::ostream...) + */ + struct ostream_wrapper { + explicit ostream_wrapper(std::ostream& s) + :inner(s) { + + } + + ostream_wrapper(ostream_wrapper &&) = default; + ostream_wrapper(const ostream_wrapper& ) = default; + + auto& write( const char* d, size_t s ) { + return inner.write(d, s); + } + + auto& put(char c) { + return inner.put(c); + } + + auto tellp() const { + return inner.tellp(); + } + + auto& seekp(std::ostream::pos_type p) { + return inner.seekp(p); + } + + std::ostream& inner; + }; + + struct abstract_snapshot_row_writer { - virtual void write(std::ostream& out) const = 0; + virtual void write(ostream_wrapper& out) const = 0; virtual variant to_variant() const = 0; + virtual std::string row_type_name() const = 0; }; template @@ -50,7 +88,7 @@ namespace eosio { namespace chain { explicit snapshot_row_writer( const T& data ) :data(data) {} - void write(std::ostream& out) const override { + void write(ostream_wrapper& out) const override { fc::raw::pack(out, data); } @@ -60,6 +98,10 @@ namespace eosio { namespace chain { return var; } + std::string row_type_name() const 
override { + return boost::core::demangle( typeid( T ).name() ); + } + const T& data; }; @@ -110,6 +152,7 @@ namespace eosio { namespace chain { struct abstract_snapshot_row_reader { virtual void provide(std::istream& in) const = 0; virtual void provide(const fc::variant&) const = 0; + virtual std::string row_type_name() const = 0; }; template @@ -125,6 +168,10 @@ namespace eosio { namespace chain { fc::from_variant(var, data); } + std::string row_type_name() const override { + return boost::core::demangle( typeid( T ).name() ); + } + T& data; }; @@ -231,11 +278,10 @@ namespace eosio { namespace chain { static const uint32_t magic_number = 0x30510550; private: - - std::ostream& snapshot; - std::streampos header_pos; - std::streampos section_pos; - uint64_t row_count; + detail::ostream_wrapper snapshot; + std::streampos header_pos; + std::streampos section_pos; + uint64_t row_count; }; diff --git a/libraries/chain/snapshot.cpp b/libraries/chain/snapshot.cpp index 15ae3abefc8..9002f6073bc 100644 --- a/libraries/chain/snapshot.cpp +++ b/libraries/chain/snapshot.cpp @@ -263,6 +263,9 @@ void istream_snapshot_reader::set_section( const string& section_name ) { if (match && snapshot.get() == 0) { cur_row = 0; num_rows = row_count; + + // leave the stream at the right point + restore_pos.cancel(); return; } } diff --git a/libraries/fc b/libraries/fc index 8114051b088..4e59c679777 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 8114051b088d8242babdf3678ac45dc7ea84edec +Subproject commit 4e59c6797777d3d9a226ac214701a08f52be4451 diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp index eb4a9cf1986..c6100219d21 100644 --- a/unittests/snapshot_tests.cpp +++ b/unittests/snapshot_tests.cpp @@ -67,49 +67,82 @@ struct variant_snapshot_suite { using writer_t = variant_snapshot_writer; using reader_t = variant_snapshot_reader; using write_storage_t = fc::mutable_variant_object; - using read_storage_t = fc::variant; + using snapshot_t = 
fc::variant; + + struct writer : public writer_t { + writer( const std::shared_ptr& storage ) + :writer_t(*storage) + ,storage(storage) + { + + } + + std::shared_ptr storage; + }; + + struct reader : public reader_t { + explicit reader(const snapshot_t& storage) + :reader_t(storage) + {} + }; + + + static auto get_writer() { + return std::make_shared(std::make_shared()); + } + + static auto finalize(const std::shared_ptr& w) { + w->finalize(); + return snapshot_t(*w->storage); + } + + static auto get_reader( const snapshot_t& buffer) { + return std::make_shared(buffer); + } + }; struct buffered_snapshot_suite { using writer_t = ostream_snapshot_writer; using reader_t = istream_snapshot_reader; - using write_storage_t = std::stringstream; - using read_storage_t = write_storage_t; - -}; + using write_storage_t = std::ostringstream; + using snapshot_t = std::string; + using read_storage_t = std::istringstream; -template -struct suite_funcs { - struct writer : public SUITE::writer_t { - writer( const std::shared_ptr& storage ) - :SUITE::writer_t(*storage) + struct writer : public writer_t { + writer( const std::shared_ptr& storage ) + :writer_t(*storage) ,storage(storage) { } - std::shared_ptr storage; + std::shared_ptr storage; }; - struct reader : public SUITE::reader_t { - explicit reader(typename SUITE::read_storage_t& buffer) - :SUITE::reader_t(buffer) + struct reader : public reader_t { + explicit reader(const std::shared_ptr& storage) + :reader_t(*storage) + ,storage(storage) {} + std::shared_ptr storage; }; + static auto get_writer() { - return std::make_shared(std::make_shared()); + return std::make_shared(std::make_shared()); } static auto finalize(const std::shared_ptr& w) { w->finalize(); - return typename SUITE::read_storage_t(std::move(*w->storage)); + return w->storage->str(); } - static auto get_reader( typename SUITE::read_storage_t& buffer) { - return std::make_shared(buffer); + static auto get_reader( const snapshot_t& buffer) { + return 
std::make_shared(std::make_shared(buffer)); } + }; BOOST_AUTO_TEST_SUITE(snapshot_tests) @@ -132,12 +165,12 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_exhaustive_snapshot, SNAPSHOT_SUITE, snapshot for (int generation = 0; generation < generation_count; generation++) { // create a new snapshot child - auto writer = suite_funcs::get_writer(); + auto writer = SNAPSHOT_SUITE::get_writer(); chain.control->write_snapshot(writer); - auto snapshot = suite_funcs::finalize(writer); + auto snapshot = SNAPSHOT_SUITE::finalize(writer); // create a new child at this snapshot - sub_testers.emplace_back(chain.get_config(), suite_funcs::get_reader(snapshot), generation); + sub_testers.emplace_back(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), generation); // increment the test contract chain.push_action(N(snapshot), N(increment), N(snapshot), mutable_variant_object() @@ -188,12 +221,12 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_replay_over_snapshot, SNAPSHOT_SUITE, snapsho auto expected_pre_integrity_hash = chain.control->calculate_integrity_hash(); // create a new snapshot child - auto writer = suite_funcs::get_writer(); + auto writer = SNAPSHOT_SUITE::get_writer(); chain.control->write_snapshot(writer); - auto snapshot = suite_funcs::finalize(writer); + auto snapshot = SNAPSHOT_SUITE::finalize(writer); // create a new child at this snapshot - snapshotted_tester snap_chain(chain.get_config(), suite_funcs::get_reader(snapshot), 1); + snapshotted_tester snap_chain(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), 1); BOOST_REQUIRE_EQUAL(expected_pre_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); // push more blocks to build up a block log @@ -213,7 +246,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_replay_over_snapshot, SNAPSHOT_SUITE, snapsho BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); // replay the block log from the snapshot child, from the snapshot - snapshotted_tester 
replay_chain(chain.get_config(), suite_funcs::get_reader(snapshot), 2, 1); + snapshotted_tester replay_chain(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), 2, 1); BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); } From 93897c6f111a842a01a806ec6927500f2dc2ce99 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 10 Oct 2018 11:54:17 -0400 Subject: [PATCH 102/161] fix bug with cleos wrap change --- programs/cleos/main.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 48002552aa1..fc2aed750c6 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -3058,14 +3058,14 @@ int main( int argc, char** argv ) { wrap->require_subcommand(); // wrap exec - con = "eosio.wrap"; + string wrap_con = "eosio.wrap"; executer = ""; string trx_to_exec; auto wrap_exec = wrap->add_subcommand("exec", localized("Execute a transaction while bypassing authorization checks")); add_standard_transaction_options(wrap_exec); wrap_exec->add_option("executer", executer, localized("Account executing the transaction and paying for the deferred transaction RAM"))->required(); wrap_exec->add_option("transaction", trx_to_exec, localized("The JSON string or filename defining the transaction to execute"))->required(); - wrap_exec->add_option("--contract,-c", con, localized("The contract which controls the wrap contract")); + wrap_exec->add_option("--contract,-c", wrap_con, localized("The account which controls the wrap contract")); wrap_exec->set_callback([&] { fc::variant trx_var; @@ -3075,14 +3075,14 @@ int main( int argc, char** argv ) { auto accountPermissions = get_account_permissions(tx_permission); if( accountPermissions.empty() ) { - accountPermissions = vector{{executer, config::active_name}, {con, config::active_name}}; + accountPermissions = vector{{executer, config::active_name}, {wrap_con, config::active_name}}; } auto args = 
fc::mutable_variant_object() ("executer", executer ) ("trx", trx_var); - send_actions({chain::action{accountPermissions, con, "exec", variant_to_bin( con, N(exec), args ) }}); + send_actions({chain::action{accountPermissions, wrap_con, "exec", variant_to_bin( wrap_con, N(exec), args ) }}); }); // system subcommand From eb4bfe5c02f9499cbfad4bbf71b07b79834e30cd Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Thu, 11 Oct 2018 01:13:25 +0900 Subject: [PATCH 103/161] Remove unused method from controller --- libraries/chain/include/eosio/chain/controller.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 6d2baa9165f..c8f2afdb259 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -153,7 +153,6 @@ namespace eosio { namespace chain { const account_object& get_account( account_name n )const; const global_property_object& get_global_properties()const; const dynamic_global_property_object& get_dynamic_global_properties()const; - const permission_object& get_permission( const permission_level& level )const; const resource_limits_manager& get_resource_limits_manager()const; resource_limits_manager& get_mutable_resource_limits_manager(); const authorization_manager& get_authorization_manager()const; From ca1b97081f505be28bf0da3506e7d803e6d733e7 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Wed, 10 Oct 2018 12:20:08 -0400 Subject: [PATCH 104/161] fix irreversible read mode test --- libraries/chain/controller.cpp | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 9901fbe40d7..36bb0b8caec 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -272,10 +272,20 @@ struct controller_impl { // should already have been loaded from the snapshot so, it cannot be applied if 
(s->block) { if (read_mode == db_read_mode::IRREVERSIBLE) { - apply_block(s->block, controller::block_status::complete); - fork_db.mark_in_current_chain(s, true); - fork_db.set_validity(s, true); - head = s; + // when applying a snapshot, head may not be present + // when not applying a snapshot, make sure this is the next block + if (!head || s->block_num == head->block_num + 1) { + apply_block(s->block, controller::block_status::complete); + head = s; + } else { + // otherwise, assert the one odd case where initializing a chain + // from genesis creates and applies the first block automatically. + // when syncing from another chain, this is pushed in again + EOS_ASSERT(!head || head->block_num == 1, block_validate_exception, "Attempting to re-apply an irreversible block that was not the implied genesis block"); + } + + fork_db.mark_in_current_chain(head, true); + fork_db.set_validity(head, true); } emit(self.irreversible_block, s); } From a9edaa6e61a48c63ab54f07d03c3518bb8c8bc7c Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Thu, 11 Oct 2018 01:21:03 +0900 Subject: [PATCH 105/161] Fix build warning by catching exceptions in proper order --- plugins/producer_plugin/producer_plugin.cpp | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 85f1d4615e7..66bb50468fa 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1357,15 +1357,17 @@ bool producer_plugin_impl::maybe_produce_block() { }); try { - produce_block(); - return true; - } catch ( const guard_exception& e ) { - app().get_plugin().handle_guard_exception(e); - return false; - } catch ( boost::interprocess::bad_alloc& ) { + try { + produce_block(); + return true; + } catch ( const guard_exception& e ) { + app().get_plugin().handle_guard_exception(e); + return false; + } FC_LOG_AND_DROP(); + } catch ( 
boost::interprocess::bad_alloc&) { raise(SIGUSR1); return false; - } FC_LOG_AND_DROP(); + } fc_dlog(_log, "Aborting block due to produce_block error"); chain::controller& chain = app().get_plugin().chain(); From b8dc1a2f9677c472e524d126d17ed054fb0494d2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Oct 2018 11:50:21 -0500 Subject: [PATCH 106/161] Add optional except to action_trace --- libraries/chain/include/eosio/chain/trace.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index ad02baf5bac..516d50b673a 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -37,6 +37,7 @@ namespace eosio { namespace chain { block_timestamp_type block_time; fc::optional producer_block_id; flat_set account_ram_deltas; + fc::optional except; }; struct action_trace : public base_action_trace { @@ -71,7 +72,7 @@ FC_REFLECT( eosio::chain::account_delta, FC_REFLECT( eosio::chain::base_action_trace, (receipt)(act)(context_free)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id) - (block_num)(block_time)(producer_block_id)(account_ram_deltas) ) + (block_num)(block_time)(producer_block_id)(account_ram_deltas)(except) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, (eosio::chain::base_action_trace), (inline_traces) ) From 8245a5170f3c7aed187ab92f26f646a20e548892 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Oct 2018 11:52:02 -0500 Subject: [PATCH 107/161] Fill out action_trace and action_receipt on action execution even when action throws exception. This provides action information even on hard_fail which previously generated a default constructed action_trace. 
--- libraries/chain/apply_context.cpp | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 9b92b8d2a1f..a4e4805e985 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -33,13 +33,14 @@ action_trace apply_context::exec_one() { auto start = fc::time_point::now(); + fc::optional except; const auto& cfg = control.get_global_properties().configuration; try { const auto& a = control.get_account( receiver ); privileged = a.privileged; auto native = control.find_apply_handler( receiver, act.account, act.name ); if( native ) { - if( trx_context.can_subjectively_fail && control.is_producing_block()) { + if( trx_context.can_subjectively_fail && control.is_producing_block() ) { control.check_contract_list( receiver ); control.check_action_list( act.account, act.name ); } @@ -48,8 +49,8 @@ action_trace apply_context::exec_one() if( a.code.size() > 0 && !(act.account == config::system_account_name && act.name == N( setcode ) && - receiver == config::system_account_name)) { - if( trx_context.can_subjectively_fail && control.is_producing_block()) { + receiver == config::system_account_name) ) { + if( trx_context.can_subjectively_fail && control.is_producing_block() ) { control.check_contract_list( receiver ); control.check_action_list( act.account, act.name ); } @@ -57,8 +58,22 @@ action_trace apply_context::exec_one() control.get_wasm_interface().apply( a.code_version, a.code, *this ); } catch( const wasm_exit& ) {} } - - } FC_RETHROW_EXCEPTIONS(warn, "pending console output: ${console}", ("console", _pending_console_output.str())) + } catch( const boost::interprocess::bad_alloc& ) { + throw; + } catch( fc::exception& e ) { + e.append_log( FC_LOG_MESSAGE( warn, "pending console output: ${console}", ("console", _pending_console_output.str()) ) ); + except.emplace( e ); + } catch( std::exception& e ) { + fc::exception fce( 
FC_LOG_MESSAGE( warn, "${what}: pending console output: ${console}", + ("what", e.what())("console", _pending_console_output.str()) ), + fc::std_exception_code, BOOST_CORE_TYPEID( e ).name(), e.what() ); + except.emplace( fce ); + } catch( ... ) { + fc::unhandled_exception fce( FC_LOG_MESSAGE( warn, "unknown exception: pending console output: ${console}" , + ("console", _pending_console_output.str()) ), + std::current_exception() ); + except.emplace( fce ); + } action_receipt r; r.receiver = receiver; @@ -84,6 +99,7 @@ action_trace apply_context::exec_one() t.act = act; t.context_free = context_free; t.console = _pending_console_output.str(); + t.except = std::move( except ); trx_context.executed.emplace_back( move(r) ); @@ -94,6 +110,7 @@ action_trace apply_context::exec_one() reset_console(); t.elapsed = fc::time_point::now() - start; + return t; } @@ -101,9 +118,13 @@ void apply_context::exec() { _notified.push_back(receiver); trace = exec_one(); + if( trace.except.valid() ) + throw *trace.except; for( uint32_t i = 1; i < _notified.size(); ++i ) { receiver = _notified[i]; trace.inline_traces.emplace_back( exec_one() ); + if( trace.inline_traces.back().except.valid() ) + throw *trace.inline_traces.back().except; } if( _cfa_inline_actions.size() > 0 || _inline_actions.size() > 0 ) { From d7a6ed7bf0afad845626d6e870ff38924bbd8c90 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Wed, 10 Oct 2018 14:42:11 -0400 Subject: [PATCH 108/161] fix `block_log::repair_log` which was missing the new separating totem between header info and blocks --- libraries/chain/block_log.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index 9f86b0e7de5..334d6ec57cf 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -421,6 +421,18 @@ namespace eosio { namespace chain { auto data = fc::raw::pack( gs ); new_block_stream.write( data.data(), data.size() ); + if (version != 1) { + auto 
expected_totem = npos; + std::decay_t actual_totem; + old_block_stream.read ( (char*)&actual_totem, sizeof(actual_totem) ); + + EOS_ASSERT(actual_totem == expected_totem, block_log_exception, + "Expected separator between block log header and blocks was not found( expected: ${e}, actual: ${a} )", + ("e", fc::to_hex((char*)&expected_totem, sizeof(expected_totem) ))("a", fc::to_hex((char*)&actual_totem, sizeof(actual_totem) ))); + + new_block_stream.write( (char*)&actual_totem, sizeof(actual_totem) ); + } + std::exception_ptr except_ptr; vector incomplete_block_data; optional bad_block; From e6cecb92cad64a88192443b5e76e3aeb5b27af35 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Oct 2018 15:32:16 -0500 Subject: [PATCH 109/161] Pass action_trace to exec() --- libraries/chain/transaction_context.cpp | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 806ce5d3f8e..fd79a91703c 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -429,14 +429,7 @@ namespace eosio { namespace chain { acontext.context_free = context_free; acontext.receiver = receiver; - try { - acontext.exec(); - } catch( ... 
) { - trace = move(acontext.trace); - throw; - } - - trace = move(acontext.trace); + acontext.exec( trace ); } void transaction_context::schedule_transaction() { From a9c128eef00ef0ab1d5a2fbdad94442f1bf71fc8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Oct 2018 15:33:24 -0500 Subject: [PATCH 110/161] Pass in action_trace instead of attribute of class --- libraries/chain/include/eosio/chain/apply_context.hpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 70fb198b1e7..a253d950358 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -472,8 +472,8 @@ class apply_context { /// Execution methods: public: - action_trace exec_one(); - void exec(); + void exec_one( action_trace& trace ); + void exec( action_trace& trace ); void execute_inline( action&& a ); void execute_context_free_inline( action&& a ); void schedule_deferred_transaction( const uint128_t& sender_id, account_name payer, transaction&& trx, bool replace_existing ); @@ -573,6 +573,7 @@ class apply_context { uint64_t next_auth_sequence( account_name actor ); void add_ram_usage( account_name account, int64_t ram_delta ); + void finalize_trace( action_trace& trace, const fc::time_point& start ); private: @@ -600,8 +601,6 @@ class apply_context { generic_index idx_double; generic_index idx_long_double; - action_trace trace; - private: iterator_cache keyval_cache; From 21763e31c023328ec55979de3ab66591eed771bf Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 10 Oct 2018 15:35:29 -0500 Subject: [PATCH 111/161] Revert back to using FC_RETHROW_EXCEPTIONS macro. Preset action_receipt and action_trace with known values. Add finalize_trace method to set trace values after execution. 
--- libraries/chain/apply_context.cpp | 121 ++++++++++++++---------------- 1 file changed, 56 insertions(+), 65 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index a4e4805e985..4fc3b00c795 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -29,52 +29,10 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { } } -action_trace apply_context::exec_one() +void apply_context::exec_one( action_trace& trace ) { auto start = fc::time_point::now(); - fc::optional except; - const auto& cfg = control.get_global_properties().configuration; - try { - const auto& a = control.get_account( receiver ); - privileged = a.privileged; - auto native = control.find_apply_handler( receiver, act.account, act.name ); - if( native ) { - if( trx_context.can_subjectively_fail && control.is_producing_block() ) { - control.check_contract_list( receiver ); - control.check_action_list( act.account, act.name ); - } - (*native)( *this ); - } - - if( a.code.size() > 0 - && !(act.account == config::system_account_name && act.name == N( setcode ) && - receiver == config::system_account_name) ) { - if( trx_context.can_subjectively_fail && control.is_producing_block() ) { - control.check_contract_list( receiver ); - control.check_action_list( act.account, act.name ); - } - try { - control.get_wasm_interface().apply( a.code_version, a.code, *this ); - } catch( const wasm_exit& ) {} - } - } catch( const boost::interprocess::bad_alloc& ) { - throw; - } catch( fc::exception& e ) { - e.append_log( FC_LOG_MESSAGE( warn, "pending console output: ${console}", ("console", _pending_console_output.str()) ) ); - except.emplace( e ); - } catch( std::exception& e ) { - fc::exception fce( FC_LOG_MESSAGE( warn, "${what}: pending console output: ${console}", - ("what", e.what())("console", _pending_console_output.str()) ), - fc::std_exception_code, BOOST_CORE_TYPEID( e ).name(), e.what() ); - 
except.emplace( fce ); - } catch( ... ) { - fc::unhandled_exception fce( FC_LOG_MESSAGE( warn, "unknown exception: pending console output: ${console}" , - ("console", _pending_console_output.str()) ), - std::current_exception() ); - except.emplace( fce ); - } - action_receipt r; r.receiver = receiver; r.act_digest = digest_type::hash(act); @@ -89,42 +47,75 @@ action_trace apply_context::exec_one() r.auth_sequence[auth.actor] = next_auth_sequence( auth.actor ); } - action_trace t(r); - t.trx_id = trx_context.id; - t.block_num = control.pending_block_state()->block_num; - t.block_time = control.pending_block_time(); - t.producer_block_id = control.pending_producer_block_id(); - t.account_ram_deltas = std::move( _account_ram_deltas ); - _account_ram_deltas.clear(); - t.act = act; - t.context_free = context_free; - t.console = _pending_console_output.str(); - t.except = std::move( except ); + trace = {r}; // reset action_trace + + trace.trx_id = trx_context.id; + trace.block_num = control.pending_block_state()->block_num; + trace.block_time = control.pending_block_time(); + trace.producer_block_id = control.pending_producer_block_id(); + trace.act = act; + trace.context_free = context_free; + + const auto& cfg = control.get_global_properties().configuration; + try { + try { + const auto& a = control.get_account( receiver ); + privileged = a.privileged; + auto native = control.find_apply_handler( receiver, act.account, act.name ); + if( native ) { + if( trx_context.can_subjectively_fail && control.is_producing_block() ) { + control.check_contract_list( receiver ); + control.check_action_list( act.account, act.name ); + } + (*native)( *this ); + } + + if( a.code.size() > 0 + && !(act.account == config::system_account_name && act.name == N( setcode ) && + receiver == config::system_account_name) ) { + if( trx_context.can_subjectively_fail && control.is_producing_block() ) { + control.check_contract_list( receiver ); + control.check_action_list( act.account, act.name ); + 
} + try { + control.get_wasm_interface().apply( a.code_version, a.code, *this ); + } catch( const wasm_exit& ) {} + } + } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output.str()) ) + } catch( fc::exception& e ) { + trace.except = e; + finalize_trace( trace, start ); + throw; + } trx_context.executed.emplace_back( move(r) ); if ( control.contracts_console() ) { - print_debug(receiver, t); + print_debug(receiver, trace); } - reset_console(); + finalize_trace( trace, start ); +} - t.elapsed = fc::time_point::now() - start; +void apply_context::finalize_trace( action_trace& trace, const fc::time_point& start ) +{ + trace.account_ram_deltas = std::move( _account_ram_deltas ); + _account_ram_deltas.clear(); + + trace.console = _pending_console_output.str(); + reset_console(); - return t; + trace.elapsed = fc::time_point::now() - start; } -void apply_context::exec() +void apply_context::exec( action_trace& trace ) { _notified.push_back(receiver); - trace = exec_one(); - if( trace.except.valid() ) - throw *trace.except; + exec_one( trace ); for( uint32_t i = 1; i < _notified.size(); ++i ) { receiver = _notified[i]; - trace.inline_traces.emplace_back( exec_one() ); - if( trace.inline_traces.back().except.valid() ) - throw *trace.inline_traces.back().except; + trace.inline_traces.emplace_back( ); + exec_one( trace.inline_traces.back() ); } if( _cfa_inline_actions.size() > 0 || _inline_actions.size() > 0 ) { From 730f25320bbb352d0cc115bd105d920d4f607db0 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Wed, 10 Oct 2018 17:49:29 -0400 Subject: [PATCH 112/161] add RPC in producer_plugin_api to trigger the creation of a snapshot or return the integrity hash over the database --- .../chain/include/eosio/chain/exceptions.hpp | 4 ++ .../producer_api_plugin.cpp | 4 ++ .../eosio/producer_plugin/producer_plugin.hpp | 17 ++++- plugins/producer_plugin/producer_plugin.cpp | 69 +++++++++++++++++++ 4 files changed, 92 insertions(+), 2 
deletions(-) diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index b0eac36d465..736229a24ad 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -456,6 +456,10 @@ namespace eosio { namespace chain { 3170004, "Producer schedule exception" ) FC_DECLARE_DERIVED_EXCEPTION( producer_not_in_schedule, producer_exception, 3170006, "The producer is not part of current schedule" ) + FC_DECLARE_DERIVED_EXCEPTION( snapshot_directory_not_found_exception, producer_exception, + 3170007, "The configured snapshot directory does not exist" ) + FC_DECLARE_DERIVED_EXCEPTION( snapshot_exists_exception, producer_exception, + 3170008, "The requested snapshot already exists" ) FC_DECLARE_DERIVED_EXCEPTION( reversible_blocks_exception, chain_exception, 3180000, "Reversible Blocks exception" ) diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index f414d2c3924..13599d5f834 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -86,6 +86,10 @@ void producer_api_plugin::plugin_startup() { INVOKE_R_V(producer, get_whitelist_blacklist), 201), CALL(producer, producer, set_whitelist_blacklist, INVOKE_V_R(producer, set_whitelist_blacklist, producer_plugin::whitelist_blacklist), 201), + CALL(producer, producer, get_integrity_hash, + INVOKE_R_V(producer, get_integrity_hash), 201), + CALL(producer, producer, create_snapshot, + INVOKE_R_V(producer, create_snapshot), 201), }); } diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index 5697823d4f7..f2e50e92849 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ 
b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -40,6 +40,16 @@ class producer_plugin : public appbase::plugin { std::vector accounts; }; + struct integrity_hash_information { + chain::block_id_type head_block_id; + chain::digest_type integrity_hash; + }; + + struct snapshot_information { + chain::block_id_type head_block_id; + std::string snapshot_name; + }; + producer_plugin(); virtual ~producer_plugin(); @@ -67,7 +77,9 @@ class producer_plugin : public appbase::plugin { whitelist_blacklist get_whitelist_blacklist() const; void set_whitelist_blacklist(const whitelist_blacklist& params); - + + integrity_hash_information get_integrity_hash() const; + snapshot_information create_snapshot() const; signal confirmed_block; private: @@ -79,5 +91,6 @@ class producer_plugin : public appbase::plugin { FC_REFLECT(eosio::producer_plugin::runtime_options, (max_transaction_time)(max_irreversible_block_age)(produce_time_offset_us)(last_block_time_offset_us)(subjective_cpu_leeway_us)(incoming_defer_ratio)); FC_REFLECT(eosio::producer_plugin::greylist_params, (accounts)); FC_REFLECT(eosio::producer_plugin::whitelist_blacklist, (actor_whitelist)(actor_blacklist)(contract_whitelist)(contract_blacklist)(action_blacklist)(key_blacklist) ) - +FC_REFLECT(eosio::producer_plugin::integrity_hash_information, (head_block_id)(integrity_hash)) +FC_REFLECT(eosio::producer_plugin::snapshot_information, (head_block_id)(snapshot_name)) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 85f1d4615e7..f2e8496044a 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -173,6 +174,10 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader.timestamp <= _last_signed_block_time ) return; if( bsp->header.timestamp <= _start_time ) return; @@ -519,6 +524,8 @@ void 
producer_plugin::set_program_options( "offset of last block producing time in microseconds. Negative number results in blocks to go out sooner, and positive number results in blocks to go out later") ("incoming-defer-ratio", bpo::value()->default_value(1.0), "ratio between incoming transations and deferred transactions when both are exhausted") + ("snapshots-dir", bpo::value()->default_value("snapshots"), + "the location of the snapshots directory (absolute path or relative to application data dir)") ; config_file_options.add(producer_options); } @@ -647,6 +654,21 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ my->_incoming_defer_ratio = options.at("incoming-defer-ratio").as(); + if( options.count( "snapshots-dir" )) { + auto sd = options.at( "snapshots-dir" ).as(); + if( sd.is_relative()) { + my->_snapshots_dir = app().data_dir() / sd; + if (!fc::exists(my->_snapshots_dir)) { + fc::create_directories(my->_snapshots_dir); + } + } else { + my->_snapshots_dir = sd; + } + + EOS_ASSERT( fc::is_directory(my->_snapshots_dir), snapshot_directory_not_found_exception, + "No such directory '${dir}'", ("dir", my->_snapshots_dir.generic_string()) ); + } + my->_incoming_block_subscription = app().get_channel().subscribe([this](const signed_block_ptr& block){ try { my->on_incoming_block(block); @@ -849,6 +871,53 @@ void producer_plugin::set_whitelist_blacklist(const producer_plugin::whitelist_b if(params.key_blacklist.valid()) chain.set_key_blacklist(*params.key_blacklist); } +producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash() const { + chain::controller& chain = app().get_plugin().chain(); + + auto reschedule = fc::make_scoped_exit([this](){ + my->schedule_production_loop(); + }); + + if (chain.pending_block_state()) { + // abort the pending block + chain.abort_block(); + } else { + reschedule.cancel(); + } + + return {chain.head_block_id(), chain.calculate_integrity_hash()}; +} + 
+producer_plugin::snapshot_information producer_plugin::create_snapshot() const { + chain::controller& chain = app().get_plugin().chain(); + + auto reschedule = fc::make_scoped_exit([this](){ + my->schedule_production_loop(); + }); + + if (chain.pending_block_state()) { + // abort the pending block + chain.abort_block(); + } else { + reschedule.cancel(); + } + + auto head_id = chain.head_block_id(); + std::string snapshot_path = (my->_snapshots_dir / fc::format_string("snapshot-${id}.bin", fc::mutable_variant_object()("id", head_id))).generic_string(); + + EOS_ASSERT( !fc::is_regular_file(snapshot_path), snapshot_exists_exception, + "snapshot named ${name} already exists", ("name", snapshot_path)); + + + auto snap_out = std::ofstream(snapshot_path, (std::ios::out | std::ios::binary | std::ios::app)); + auto writer = std::make_shared(snap_out); + chain.write_snapshot(writer); + writer->finalize(); + snap_out.flush(); + snap_out.close(); + + return {head_id, snapshot_path}; +} optional producer_plugin_impl::calculate_next_block_time(const account_name& producer_name, const block_timestamp_type& current_block_time) const { chain::controller& chain = app().get_plugin().chain(); From d0fb3802edb8891a8081421816f63e590aecae21 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Wed, 10 Oct 2018 18:45:39 -0400 Subject: [PATCH 113/161] add genesis information to snapshot, add snapshot option to initialize a node from a snapshot --- libraries/chain/controller.cpp | 4 + plugins/chain_plugin/chain_plugin.cpp | 104 ++++++++++++++------ plugins/producer_plugin/producer_plugin.cpp | 2 +- 3 files changed, 78 insertions(+), 32 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 36bb0b8caec..17f7036f94f 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -425,6 +425,10 @@ struct controller_impl { } void add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { + snapshot->write_section([this]( auto §ion ){ + 
section.add_row(conf.genesis); + }); + snapshot->write_section([this]( auto §ion ){ section.template add_row(*fork_db.head()); }); diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index a9d4ed06392..b3c06aa7cfc 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include @@ -166,6 +167,7 @@ class chain_plugin_impl { //txn_msg_rate_limits rate_limits; fc::optional wasm_runtime; fc::microseconds abi_serializer_max_time_ms; + fc::optional snapshot_path; // retained references to channels for easy publication @@ -284,6 +286,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip ("export-reversible-blocks", bpo::value(), "export reversible block database in portable format into specified file and then exit") ("trusted-producer", bpo::value>()->composing(), "Indicate a producer whose blocks headers signed by it will be fully validated, but transactions in those validated blocks will be trusted.") + ("snapshot", bpo::value(), "File to read Snapshot State from") ; } @@ -530,44 +533,76 @@ void chain_plugin::plugin_initialize(const variables_map& options) { wlog("The --import-reversible-blocks option should be used by itself."); } - if( options.count( "genesis-json" )) { - EOS_ASSERT( !fc::exists( my->blocks_dir / "blocks.log" ), - plugin_config_exception, - "Genesis state can only be set on a fresh blockchain." 
); - - auto genesis_file = options.at( "genesis-json" ).as(); - if( genesis_file.is_relative()) { - genesis_file = bfs::current_path() / genesis_file; + if (options.count( "snapshot" )) { + my->snapshot_path = options.at( "snapshot" ).as(); + EOS_ASSERT( fc::exists(*my->snapshot_path), plugin_config_exception, + "Cannot load snapshot, ${name} does not exist", ("name", my->snapshot_path->generic_string()) ); + + // recover genesis information from the snapshot + auto infile = std::ifstream(my->snapshot_path->generic_string(), (std::ios::in | std::ios::binary)); + auto reader = std::make_shared(infile); + reader->validate(); + reader->read_section([this]( auto §ion ){ + section.read_row(my->chain_config->genesis); + }); + infile.close(); + + EOS_ASSERT( options.count( "genesis-json" ) == 0 && options.count( "genesis-timestamp" ) == 0, + plugin_config_exception, + "--snapshot is incompatible with --genesis-json and --genesis-timestamp as the snapshot contains genesis information"); + + auto shared_mem_path = my->chain_config->state_dir / "shared_memory.bin"; + EOS_ASSERT( !fc::exists(shared_mem_path), + plugin_config_exception, + "Snapshot can only be used to initialize an empty database." ); + + if( fc::is_regular_file( my->blocks_dir / "blocks.log" )) { + auto log_genesis = block_log::extract_genesis_state(my->blocks_dir); + EOS_ASSERT( log_genesis.compute_chain_id() == my->chain_config->genesis.compute_chain_id(), + plugin_config_exception, + "Genesis information in blocks.log does not match genesis information in the snapshot"); } - EOS_ASSERT( fc::is_regular_file( genesis_file ), - plugin_config_exception, - "Specified genesis file '${genesis}' does not exist.", - ("genesis", genesis_file.generic_string())); + } else { + if( options.count( "genesis-json" )) { + EOS_ASSERT( !fc::exists( my->blocks_dir / "blocks.log" ), + plugin_config_exception, + "Genesis state can only be set on a fresh blockchain." 
); + + auto genesis_file = options.at( "genesis-json" ).as(); + if( genesis_file.is_relative()) { + genesis_file = bfs::current_path() / genesis_file; + } + + EOS_ASSERT( fc::is_regular_file( genesis_file ), + plugin_config_exception, + "Specified genesis file '${genesis}' does not exist.", + ("genesis", genesis_file.generic_string())); - my->chain_config->genesis = fc::json::from_file( genesis_file ).as(); + my->chain_config->genesis = fc::json::from_file( genesis_file ).as(); - ilog( "Using genesis state provided in '${genesis}'", ("genesis", genesis_file.generic_string())); + ilog( "Using genesis state provided in '${genesis}'", ("genesis", genesis_file.generic_string())); - if( options.count( "genesis-timestamp" )) { - my->chain_config->genesis.initial_timestamp = calculate_genesis_timestamp( - options.at( "genesis-timestamp" ).as()); - } + if( options.count( "genesis-timestamp" )) { + my->chain_config->genesis.initial_timestamp = calculate_genesis_timestamp( + options.at( "genesis-timestamp" ).as()); + } - wlog( "Starting up fresh blockchain with provided genesis state." ); - } else if( options.count( "genesis-timestamp" )) { - EOS_ASSERT( !fc::exists( my->blocks_dir / "blocks.log" ), - plugin_config_exception, - "Genesis state can only be set on a fresh blockchain." ); + wlog( "Starting up fresh blockchain with provided genesis state." ); + } else if( options.count( "genesis-timestamp" )) { + EOS_ASSERT( !fc::exists( my->blocks_dir / "blocks.log" ), + plugin_config_exception, + "Genesis state can only be set on a fresh blockchain." ); - my->chain_config->genesis.initial_timestamp = calculate_genesis_timestamp( - options.at( "genesis-timestamp" ).as()); + my->chain_config->genesis.initial_timestamp = calculate_genesis_timestamp( + options.at( "genesis-timestamp" ).as()); - wlog( "Starting up fresh blockchain with default genesis state but with adjusted genesis timestamp." 
); - } else if( fc::is_regular_file( my->blocks_dir / "blocks.log" )) { - my->chain_config->genesis = block_log::extract_genesis_state( my->blocks_dir ); - } else { - wlog( "Starting up fresh blockchain with default genesis state." ); + wlog( "Starting up fresh blockchain with default genesis state but with adjusted genesis timestamp." ); + } else if( fc::is_regular_file( my->blocks_dir / "blocks.log" )) { + my->chain_config->genesis = block_log::extract_genesis_state( my->blocks_dir ); + } else { + wlog( "Starting up fresh blockchain with default genesis state." ); + } } if ( options.count("read-mode") ) { @@ -652,7 +687,14 @@ void chain_plugin::plugin_initialize(const variables_map& options) { void chain_plugin::plugin_startup() { try { try { - my->chain->startup(); + if (my->snapshot_path) { + auto infile = std::ifstream(my->snapshot_path->generic_string(), (std::ios::in | std::ios::binary)); + auto reader = std::make_shared(infile); + my->chain->startup(reader); + infile.close(); + } else { + my->chain->startup(); + } } catch (const database_guard_exception& e) { log_guard_exception(e); // make sure to properly close the db diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index f2e8496044a..f8fb3d63e2c 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -909,7 +909,7 @@ producer_plugin::snapshot_information producer_plugin::create_snapshot() const { "snapshot named ${name} already exists", ("name", snapshot_path)); - auto snap_out = std::ofstream(snapshot_path, (std::ios::out | std::ios::binary | std::ios::app)); + auto snap_out = std::ofstream(snapshot_path, (std::ios::out | std::ios::binary)); auto writer = std::make_shared(snap_out); chain.write_snapshot(writer); writer->finalize(); From 9ef0e1378a77de2e2e8d4018549276f5f2bed6fe Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Wed, 10 Oct 2018 19:39:02 -0400 Subject: [PATCH 114/161] initialize 
first_block_num for legacy blocks.log files --- libraries/chain/block_log.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index 334d6ec57cf..4cdcba04b1e 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -147,6 +147,8 @@ namespace eosio { namespace chain { my->first_block_num = 0; my->block_stream.read( (char*)&my->first_block_num, sizeof(my->first_block_num) ); EOS_ASSERT(my->first_block_num > 0, block_log_exception, "Block log is malformed, first recorded block number is 0 but must be greater than or equal to 1"); + } else { + my->first_block_num = 1; } my->head = read_head(); From 0fec3cfc6b3d5c9a5b9356684a183615ddd04275 Mon Sep 17 00:00:00 2001 From: wuyahuang Date: Thu, 11 Oct 2018 12:36:34 +0800 Subject: [PATCH 115/161] change core symbol name length to 7(MEETONE) --- eosio_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eosio_build.sh b/eosio_build.sh index f24e80d4600..7056b01241f 100755 --- a/eosio_build.sh +++ b/eosio_build.sh @@ -85,7 +85,7 @@ DOXYGEN=true ;; s) - if [ "${#OPTARG}" -gt 6 ] || [ -z "${#OPTARG}" ]; then + if [ "${#OPTARG}" -gt 7 ] || [ -z "${#OPTARG}" ]; then printf "\\n\\tInvalid argument: %s\\n" "${OPTARG}" 1>&2 usage exit 1 From 7fb374cad3700a729677b7bf22b0f0b154a87dc7 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Thu, 11 Oct 2018 09:42:43 -0400 Subject: [PATCH 116/161] validate that the block log matches or exceeds the snapshot, track the irreversible block implied by the snapshot --- libraries/chain/controller.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 17f7036f94f..a247ff74144 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -128,6 +128,7 @@ struct controller_impl { bool in_trx_requiring_checks = false; ///< if true, checks that are normally skipped on replay (e.g. 
auth checks) cannot be skipped optional subjective_cpu_leeway; bool trusted_producer_light_validation = false; + uint32_t snapshot_head_block = 0; typedef pair handler_key; map< account_name, map > apply_handlers; @@ -341,8 +342,10 @@ struct controller_impl { blog.reset(conf.genesis, signed_block_ptr(), head->block_num + 1); } else if ( end->block_num() > head->block_num) { replay(); + } else { + EOS_ASSERT(end->block_num() == head->block_num, fork_database_exception, + "Block log is provided with snapshot but does not contain the head block from the snapshot"); } - } else if( !head ) { initialize_fork_db(); // set head to genesis state @@ -455,6 +458,7 @@ struct controller_impl { fork_db.set_validity(head_state, true); fork_db.mark_in_current_chain(head_state, true); head = head_state; + snapshot_head_block = head->block_num; }); controller_index_set::walk_indices([this, &snapshot]( auto utils ){ @@ -1652,7 +1656,7 @@ optional controller::pending_producer_block_id()const { } uint32_t controller::last_irreversible_block_num() const { - return std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum); + return std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum, my->snapshot_head_block); } block_id_type controller::last_irreversible_block_id() const { From e633866c22d357b8b4010eea4af4349754cb670c Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Thu, 11 Oct 2018 09:45:05 -0400 Subject: [PATCH 117/161] no variadic max duh --- libraries/chain/controller.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index a247ff74144..417b611bd26 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1656,7 +1656,7 @@ optional controller::pending_producer_block_id()const { } uint32_t controller::last_irreversible_block_num() const { - return std::max(my->head->bft_irreversible_blocknum, 
my->head->dpos_irreversible_blocknum, my->snapshot_head_block); + return std::max(std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum), my->snapshot_head_block); } block_id_type controller::last_irreversible_block_id() const { From 13ce3dab7b6bfa9f7f0401e3d7d9ab43c74e0298 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Oct 2018 11:08:04 -0500 Subject: [PATCH 118/161] Move receipt info setting after execution --- libraries/chain/apply_context.cpp | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 4fc3b00c795..eb07b83e4c5 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -36,18 +36,6 @@ void apply_context::exec_one( action_trace& trace ) action_receipt r; r.receiver = receiver; r.act_digest = digest_type::hash(act); - r.global_sequence = next_global_sequence(); - r.recv_sequence = next_recv_sequence( receiver ); - - const auto& account_sequence = db.get(act.account); - r.code_sequence = account_sequence.code_sequence; - r.abi_sequence = account_sequence.abi_sequence; - - for( const auto& auth : act.authorization ) { - r.auth_sequence[auth.actor] = next_auth_sequence( auth.actor ); - } - - trace = {r}; // reset action_trace trace.trx_id = trx_context.id; trace.block_num = control.pending_block_state()->block_num; @@ -83,11 +71,25 @@ void apply_context::exec_one( action_trace& trace ) } } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output.str()) ) } catch( fc::exception& e ) { + trace.receipt = r; // fill with known data trace.except = e; finalize_trace( trace, start ); throw; } + const auto& account_sequence = db.get(act.account); + r.code_sequence = account_sequence.code_sequence; // could be modified by action execution above + r.abi_sequence = account_sequence.abi_sequence; // could be modified by action execution 
above + + r.global_sequence = next_global_sequence(); + r.recv_sequence = next_recv_sequence( receiver ); + + for( const auto& auth : act.authorization ) { + r.auth_sequence[auth.actor] = next_auth_sequence( auth.actor ); + } + + trace.receipt = r; + trx_context.executed.emplace_back( move(r) ); if ( control.contracts_console() ) { From 7ae29cfeece5cb83ae7305cd7c28598327d5a7b7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Oct 2018 11:10:17 -0500 Subject: [PATCH 119/161] Move print_debug after trace finalize so it includes console output --- libraries/chain/apply_context.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index eb07b83e4c5..17b8e0558b5 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -92,11 +92,11 @@ void apply_context::exec_one( action_trace& trace ) trx_context.executed.emplace_back( move(r) ); + finalize_trace( trace, start ); + if ( control.contracts_console() ) { print_debug(receiver, trace); } - - finalize_trace( trace, start ); } void apply_context::finalize_trace( action_trace& trace, const fc::time_point& start ) From 77cde9c2d08e0c94872584cd42c419f18b1c86ab Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Oct 2018 11:39:10 -0500 Subject: [PATCH 120/161] Revert to same order of operations before these updates --- libraries/chain/apply_context.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 17b8e0558b5..de1450013d8 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -77,13 +77,13 @@ void apply_context::exec_one( action_trace& trace ) throw; } + r.global_sequence = next_global_sequence(); + r.recv_sequence = next_recv_sequence( receiver ); + const auto& account_sequence = db.get(act.account); r.code_sequence = account_sequence.code_sequence; // could 
be modified by action execution above r.abi_sequence = account_sequence.abi_sequence; // could be modified by action execution above - r.global_sequence = next_global_sequence(); - r.recv_sequence = next_recv_sequence( receiver ); - for( const auto& auth : act.authorization ) { r.auth_sequence[auth.actor] = next_auth_sequence( auth.actor ); } From 7f382ca69ed3f94a4abe0548da9cad7e20b8c386 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Oct 2018 12:12:48 -0500 Subject: [PATCH 121/161] Remove unused attributes --- libraries/chain/include/eosio/chain/trace.hpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 516d50b673a..03750bd512e 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -28,10 +28,8 @@ namespace eosio { namespace chain { action act; bool context_free = false; fc::microseconds elapsed; - uint64_t cpu_usage = 0; string console; - uint64_t total_cpu_usage = 0; /// total of inline_traces[x].cpu_usage + cpu_usage transaction_id_type trx_id; ///< the transaction that generated this action uint32_t block_num = 0; block_timestamp_type block_time; @@ -71,7 +69,7 @@ FC_REFLECT( eosio::chain::account_delta, (account)(delta) ) FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(context_free)(elapsed)(cpu_usage)(console)(total_cpu_usage)(trx_id) + (receipt)(act)(context_free)(elapsed)(console)(trx_id) (block_num)(block_time)(producer_block_id)(account_ram_deltas)(except) ) FC_REFLECT_DERIVED( eosio::chain::action_trace, From 86f3dd61ad76e53644f06bbc06df84bdb3fd78ab Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Thu, 11 Oct 2018 14:08:39 -0400 Subject: [PATCH 122/161] work on fixing the consistency problems with foreign keys and tables optimized for undo --- libraries/chain/authorization_manager.cpp | 87 +++++++++++++++++-- libraries/chain/controller.cpp | 14 +-- 
.../eosio/chain/contract_table_objects.hpp | 29 ++++--- .../include/eosio/chain/database_utils.hpp | 11 +++ .../include/eosio/chain/permission_object.hpp | 16 +++- .../chain/include/eosio/chain/snapshot.hpp | 33 ++++--- libraries/chain/resource_limits.cpp | 8 +- 7 files changed, 151 insertions(+), 47 deletions(-) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index 61f094d816e..e03783c8f9d 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -43,11 +43,79 @@ namespace eosio { namespace chain { }); } + namespace detail { + template<> + struct snapshot_row_traits { + using value_type = permission_object; + using snapshot_type = snapshot_permission_object; + + static snapshot_permission_object to_snapshot_row(const permission_object& value, const chainbase::database& db) { + snapshot_permission_object res; + res.name = value.name; + res.owner = value.owner; + res.last_updated = value.last_updated; + res.auth = value.auth.to_authority(); + + // lookup parent name + const auto& parent = db.get(value.parent); + res.parent = parent.name; + + // lookup the usage object + const auto& usage = db.get(value.usage_id); + res.last_used = usage.last_used; + + return res; + }; + + static void from_snapshot_row(snapshot_permission_object&& row, permission_object& value, chainbase::database& db) { + value.name = row.name; + value.owner = row.owner; + value.last_updated = row.last_updated; + value.auth = row.auth; + + value.parent = 0; + if (value.id == 0) { + EOS_ASSERT(row.parent == permission_name(), snapshot_exception, "Unexpected parent name on reserved permission 0"); + EOS_ASSERT(row.name == permission_name(), snapshot_exception, "Unexpected permission name on reserved permission 0"); + EOS_ASSERT(row.owner == name(), snapshot_exception, "Unexpected owner name on reserved permission 0"); + EOS_ASSERT(row.auth.accounts.size() == 0, snapshot_exception, "Unexpected auth 
accounts on reserved permission 0"); + EOS_ASSERT(row.auth.keys.size() == 0, snapshot_exception, "Unexpected auth keys on reserved permission 0"); + EOS_ASSERT(row.auth.waits.size() == 0, snapshot_exception, "Unexpected auth waits on reserved permission 0"); + EOS_ASSERT(row.auth.threshold == 0, snapshot_exception, "Unexpected auth threshold on reserved permission 0"); + EOS_ASSERT(row.last_updated == time_point(), snapshot_exception, "Unexpected auth last updated on reserved permission 0"); + value.parent = 0; + } else if ( row.parent != permission_name()){ + const auto& parent = db.get(boost::make_tuple(row.owner, row.parent)); + + EOS_ASSERT(parent.id != 0, snapshot_exception, "Unexpected mapping to reserved permission 0"); + value.parent = parent.id; + } + + if (value.id != 0) { + // create the usage object + const auto& usage = db.create([&](auto& p) { + p.last_used = row.last_used; + }); + value.usage_id = usage.id; + } else { + value.usage_id = 0; + } + } + }; + } + void authorization_manager::add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { authorization_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot->write_section([this]( auto& section ){ - decltype(utils)::walk(_db, [§ion]( const auto &row ) { - section.add_row(row); + using section_t = typename decltype(utils)::index_t::value_type; + + // skip the permission_usage_index as its inlined with permission_index + if (std::is_same::value) { + return; + } + + snapshot->write_section([this]( auto& section ){ + decltype(utils)::walk(_db, [this, §ion]( const auto &row ) { + section.add_row(row, _db); }); }); }); @@ -55,11 +123,18 @@ namespace eosio { namespace chain { void authorization_manager::read_from_snapshot( const snapshot_reader_ptr& snapshot ) { authorization_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot->read_section([this]( auto& section ) { + using section_t = typename decltype(utils)::index_t::value_type; + + // skip the permission_usage_index 
as its inlined with permission_index + if (std::is_same::value) { + return; + } + + snapshot->read_section([this]( auto& section ) { bool more = !section.empty(); while(more) { - decltype(utils)::create(_db, [§ion, &more]( auto &row ) { - more = section.read_row(row); + decltype(utils)::create(_db, [this, §ion, &more]( auto &row ) { + more = section.read_row(row, _db); }); } }); diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 417b611bd26..e2d64f9dd11 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -429,17 +429,17 @@ struct controller_impl { void add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { snapshot->write_section([this]( auto §ion ){ - section.add_row(conf.genesis); + section.add_row(conf.genesis, db); }); snapshot->write_section([this]( auto §ion ){ - section.template add_row(*fork_db.head()); + section.template add_row(*fork_db.head(), db); }); controller_index_set::walk_indices([this, &snapshot]( auto utils ){ snapshot->write_section([this]( auto& section ){ - decltype(utils)::walk(db, [§ion]( const auto &row ) { - section.add_row(row); + decltype(utils)::walk(db, [this, §ion]( const auto &row ) { + section.add_row(row, db); }); }); }); @@ -451,7 +451,7 @@ struct controller_impl { void read_from_snapshot( const snapshot_reader_ptr& snapshot ) { snapshot->read_section([this]( auto §ion ){ block_header_state head_header_state; - section.read_row(head_header_state); + section.read_row(head_header_state, db); auto head_state = std::make_shared(head_header_state); fork_db.set(head_state); @@ -465,8 +465,8 @@ struct controller_impl { snapshot->read_section([this]( auto& section ) { bool more = !section.empty(); while(more) { - decltype(utils)::create(db, [§ion, &more]( auto &row ) { - more = section.read_row(row); + decltype(utils)::create(db, [this, §ion, &more]( auto &row ) { + more = section.read_row(row, db); }); } }); diff --git 
a/libraries/chain/include/eosio/chain/contract_table_objects.hpp b/libraries/chain/include/eosio/chain/contract_table_objects.hpp index 0555c28d3ac..0e1ca61e37e 100644 --- a/libraries/chain/include/eosio/chain/contract_table_objects.hpp +++ b/libraries/chain/include/eosio/chain/contract_table_objects.hpp @@ -79,6 +79,13 @@ namespace eosio { namespace chain { > >; + struct snapshot_key_value_object { + uint64_t table_ordinal; + uint64_t primary_key; + account_name payer; + fc::blob value; + }; + struct by_primary; struct by_secondary; @@ -130,6 +137,15 @@ namespace eosio { namespace chain { typedef secondary_index::index_object index256_object; typedef secondary_index::index_index index256_index; + template + struct snapshot_secondary_index + { + uint64_t table_ordinal; + uint64_t primary_key; + account_name payer; + SecondaryKey secondary_key; + }; + struct soft_double_less { bool operator()( const float64_t& lhs, const float64_t& rhs )const { return f64_lt(lhs, rhs); @@ -214,16 +230,5 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::index256_object, eosio::chain::index256_i CHAINBASE_SET_INDEX_TYPE(eosio::chain::index_double_object, eosio::chain::index_double_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::index_long_double_object, eosio::chain::index_long_double_index) -FC_REFLECT(eosio::chain::table_id_object, (id)(code)(scope)(table) ) -FC_REFLECT(eosio::chain::key_value_object, (t_id)(primary_key)(value)(payer) ) - -#define REFLECT_SECONDARY(type)\ - FC_REFLECT(type, (t_id)(primary_key)(payer)(secondary_key) ) - -REFLECT_SECONDARY(eosio::chain::index64_object) -REFLECT_SECONDARY(eosio::chain::index128_object) -REFLECT_SECONDARY(eosio::chain::index256_object) -REFLECT_SECONDARY(eosio::chain::index_double_object) -REFLECT_SECONDARY(eosio::chain::index_long_double_object) - +FC_REFLECT(eosio::chain::table_id_object, (code)(scope)(table) ) diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp index 
c3a1bddfd0e..07ca00130a8 100644 --- a/libraries/chain/include/eosio/chain/database_utils.hpp +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -131,6 +131,17 @@ namespace fc { b = eosio::chain::shared_blob(_s.begin(), _s.end(), b.get_allocator()); } + inline + void to_variant( const blob& b, variant& v ) { + v = variant(base64_encode(b.data.data(), b.data.size())); + } + + inline + void from_variant( const variant& v, blob& b ) { + string _s = base64_decode(v.as_string()); + b.data = std::vector(_s.begin(), _s.end()); + } + template void to_variant( const eosio::chain::shared_vector& sv, variant& v ) { to_variant(std::vector(sv.begin(), sv.end()), v); diff --git a/libraries/chain/include/eosio/chain/permission_object.hpp b/libraries/chain/include/eosio/chain/permission_object.hpp index 81a0f1efe27..ee43f0e52a7 100644 --- a/libraries/chain/include/eosio/chain/permission_object.hpp +++ b/libraries/chain/include/eosio/chain/permission_object.hpp @@ -71,6 +71,19 @@ namespace eosio { namespace chain { } }; + /** + * special cased to abstract the foreign keys for usage and the optimization of using OID for the parent + */ + struct snapshot_permission_object { + permission_name parent; ///< parent permission + account_name owner; ///< the account this permission belongs to + permission_name name; ///< human-readable name for the permission + time_point last_updated; ///< the last time this authority was updated + time_point last_used; ///< when this permission was last used + authority auth; ///< authority required to execute this permission + }; + + struct by_parent; struct by_owner; struct by_name; @@ -112,5 +125,6 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::permission_object, eosio::chain::permissi CHAINBASE_SET_INDEX_TYPE(eosio::chain::permission_usage_object, eosio::chain::permission_usage_index) FC_REFLECT(eosio::chain::permission_object, (usage_id)(parent)(owner)(name)(last_updated)(auth)) +FC_REFLECT(eosio::chain::snapshot_permission_object, 
(parent)(owner)(name)(last_updated)(last_used)(auth)) -FC_REFLECT(eosio::chain::permission_usage_object, (id)(last_used)) +FC_REFLECT(eosio::chain::permission_usage_object, (last_used)) diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 25a07312828..cc70886147b 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -26,20 +26,14 @@ namespace eosio { namespace chain { template struct snapshot_row_traits { - using row_type = std::decay_t; - using value_type = const row_type&; - }; + using value_type = std::decay_t; + using snapshot_type = value_type; - template - auto to_snapshot_row( const T& value ) -> typename snapshot_row_traits::value_type { - return value; + static const snapshot_type& to_snapshot_row( const value_type& value, const chainbase::database& ) { + return value; + }; }; - template - auto from_snapshot_row( typename snapshot_row_traits::value_type&& row, T& value ) { - value = row; - } - /** * Due to a pattern in our code of overloading `operator << ( std::ostream&, ... 
)` to provide * human-readable string forms of data, we cannot directly use ostream as those operators will @@ -116,8 +110,8 @@ namespace eosio { namespace chain { class section_writer { public: template - void add_row( const T& row ) { - _writer.write_row(detail::make_row_writer(detail::to_snapshot_row(row))); + void add_row( const T& row, const chainbase::database& db ) { + _writer.write_row(detail::make_row_writer(detail::snapshot_row_traits::to_snapshot_row(row, db))); } private: @@ -186,17 +180,22 @@ namespace eosio { namespace chain { class section_reader { public: template - auto read_row( T& out ) -> std::enable_if_t, typename detail::snapshot_row_traits::row_type>::value,bool> { + auto read_row( T& out ) -> std::enable_if_t, typename detail::snapshot_row_traits::snapshot_type>::value,bool> { auto reader = detail::make_row_reader(out); return _reader.read_row(reader); } template - auto read_row( T& out ) -> std::enable_if_t, typename detail::snapshot_row_traits::row_type>::value,bool> { - auto temp = typename detail::snapshot_row_traits::row_type(); + auto read_row( T& out, chainbase::database& ) -> std::enable_if_t, typename detail::snapshot_row_traits::snapshot_type>::value,bool> { + return read_row(out); + } + + template + auto read_row( T& out, chainbase::database& db ) -> std::enable_if_t, typename detail::snapshot_row_traits::snapshot_type>::value,bool> { + auto temp = typename detail::snapshot_row_traits::snapshot_type(); auto reader = detail::make_row_reader(temp); bool result = _reader.read_row(reader); - detail::from_snapshot_row(std::move(temp), out); + detail::snapshot_row_traits::from_snapshot_row(std::move(temp), out, db); return result; } diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 3bfaecb61e0..d090631b9c4 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -76,8 +76,8 @@ void resource_limits_manager::calculate_integrity_hash( fc::sha256::encoder& enc 
void resource_limits_manager::add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { resource_index_set::walk_indices([this, &snapshot]( auto utils ){ snapshot->write_section([this]( auto& section ){ - decltype(utils)::walk(_db, [§ion]( const auto &row ) { - section.add_row(row); + decltype(utils)::walk(_db, [this, §ion]( const auto &row ) { + section.add_row(row, _db); }); }); }); @@ -88,8 +88,8 @@ void resource_limits_manager::read_from_snapshot( const snapshot_reader_ptr& sna snapshot->read_section([this]( auto& section ) { bool more = !section.empty(); while(more) { - decltype(utils)::create(_db, [§ion, &more]( auto &row ) { - more = section.read_row(row); + decltype(utils)::create(_db, [this, §ion, &more]( auto &row ) { + more = section.read_row(row, _db); }); } }); From f6ba5ec9d33ddcac8f7bffbea4d3c719d1f04258 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Oct 2018 14:17:12 -0500 Subject: [PATCH 123/161] Apply filter to transaction updates --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 52 ++++++++++++--------- 1 file changed, 31 insertions(+), 21 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index a23eeda311f..d66cdfebf66 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -113,6 +113,7 @@ class mongo_db_plugin_impl { /// @return true if act should be added to mongodb, false to skip it bool filter_include( const account_name& receiver, const action_name& act_name, const vector& authorization ) const; + bool filter_include( const transaction& trx ) const; void init(); void wipe_database(); @@ -262,6 +263,30 @@ bool mongo_db_plugin_impl::filter_include( const account_name& receiver, const a return true; } +bool mongo_db_plugin_impl::filter_include( const transaction& trx ) const +{ + if( !filter_on_star || !filter_out.empty() ) { + bool include = false; + for( const auto& a : trx.actions ) { + if( filter_include( 
a.account, a.name, a.authorization ) ) { + include = true; + break; + } + } + if( !include ) { + for( const auto& a : trx.context_free_actions ) { + if( filter_include( a.account, a.name, a.authorization ) ) { + include = true; + break; + } + } + } + return include; + } + return true; +} + + template void mongo_db_plugin_impl::queue( Queue& queue, const Entry& e ) { boost::mutex::scoped_lock lock( mtx ); @@ -701,25 +726,8 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti const auto& trx = t->trx; - if( !filter_on_star || !filter_out.empty() ) { - bool include = false; - for( const auto& a : trx.actions ) { - if( filter_include( a.account, a.name, a.authorization ) ) { - include = true; - break; - } - } - if( !include ) { - for( const auto& a : trx.context_free_actions ) { - if( filter_include( a.account, a.name, a.authorization ) ) { - include = true; - break; - } - } - } - if( !include ) return; - } - + if( !filter_include( trx ) ) return; + auto trans_doc = bsoncxx::builder::basic::document{}; auto now = std::chrono::duration_cast( @@ -1070,7 +1078,9 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ const auto& pt = receipt.trx.get(); // get id via get_raw_transaction() as packed_transaction.id() mutates internal transaction state const auto& raw = pt.get_raw_transaction(); - const auto& id = fc::raw::unpack( raw ).id(); + const auto& trx = fc::raw::unpack( raw ); + if( !filter_include( trx ) ) continue; + const auto& id = trx.id(); trx_id_str = id.str(); } else { const auto& id = receipt.trx.get(); @@ -1083,7 +1093,7 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ kvp( "updatedAt", b_date{now} ) ) ) ); mongocxx::model::update_one update_op{make_document( kvp( "trx_id", trx_id_str ) ), update_doc.view()}; - update_op.upsert( true ); + update_op.upsert( false ); bulk.append( update_op ); transactions_in_block = true; } From 
25c0dac0647118db86b14c6dda565825fbdb398f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Oct 2018 17:37:20 -0500 Subject: [PATCH 124/161] Update fc --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 29cd7df702e..418101dcc3e 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 29cd7df702e79954076461af0eadad2e9d745d44 +Subproject commit 418101dcc3e9512a61cca3004771f8145c9dfc68 From 6cb64db0224427f32f110e17352e046c73389528 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 11 Oct 2018 21:33:53 -0400 Subject: [PATCH 125/161] Move deadline timer impl to c++ file --- .../eosio/chain/transaction_context.hpp | 39 +++---------------- libraries/chain/transaction_context.cpp | 34 ++++++++++++++++ 2 files changed, 40 insertions(+), 33 deletions(-) diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 572bbba275f..b69a00143e9 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -6,42 +6,15 @@ namespace eosio { namespace chain { struct deadline_timer { - deadline_timer() { - if(initialized) - return; - struct sigaction act; - act.sa_handler = timer_expired; - sigemptyset(&act.sa_mask); - act.sa_flags = 0; - sigaction(SIGALRM, &act, NULL); - initialized = true; - } - - void start(fc::time_point tp) { - microseconds x = tp.time_since_epoch() - fc::time_point::now().time_since_epoch(); - if(x.count() < 18) - expired = 1; - else if(x.count() < 1000000) { - struct itimerval enable = {{0, 0}, {0, (int)x.count()-15}}; - expired = 0; - setitimer(ITIMER_REAL, &enable, NULL); - } - } - - void stop() { - struct itimerval disable = {{0, 0}, {0, 0}}; - setitimer(ITIMER_REAL, &disable, NULL); - } - - ~deadline_timer() { - stop(); - } + deadline_timer(); + 
~deadline_timer(); + + void start(fc::time_point tp); + void stop(); static volatile sig_atomic_t expired; private: - static void timer_expired(int) { - expired = 1; - } + static void timer_expired(int); static bool initialized; }; diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 6887ba1a2bc..e6277e164fa 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -9,6 +9,40 @@ namespace eosio { namespace chain { + deadline_timer::deadline_timer() { + if(initialized) + return; + struct sigaction act; + act.sa_handler = timer_expired; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + sigaction(SIGALRM, &act, NULL); + initialized = true; + } + + void deadline_timer::start(fc::time_point tp) { + microseconds x = tp.time_since_epoch() - fc::time_point::now().time_since_epoch(); + if(x.count() < 18) + expired = 1; + else if(x.count() < 1000000) { + struct itimerval enable = {{0, 0}, {0, (int)x.count()-15}}; + expired = 0; + setitimer(ITIMER_REAL, &enable, NULL); + } + } + + void deadline_timer::stop() { + struct itimerval disable = {{0, 0}, {0, 0}}; + setitimer(ITIMER_REAL, &disable, NULL); + } + + deadline_timer::~deadline_timer() { + stop(); + } + + void deadline_timer::timer_expired(int) { + expired = 1; + } volatile sig_atomic_t deadline_timer::expired = 0; bool deadline_timer::initialized = false; From 599fe50a488d94ec6e7ac18b5a7e56ee3f67ce87 Mon Sep 17 00:00:00 2001 From: Zane Reynolds Date: Fri, 12 Oct 2018 13:43:23 -0400 Subject: [PATCH 126/161] for some reason the cwd changes during the llvm-cov/gcov runs, so switched to absolute path --- CMakeModules/EosioTesterBuild.cmake.in | 2 +- tests/CMakeLists.txt | 2 +- unittests/CMakeLists.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 2c650def39c..be26054cc57 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ 
b/CMakeModules/EosioTesterBuild.cmake.in @@ -173,7 +173,7 @@ if(ENABLE_COVERAGE_TESTING) # Run tests COMMAND ./tools/ctestwrapper.sh -R ${ctest_tests} -E ${ctest_exclude_tests} - COMMAND ${LCOV_PATH} --directory . --capture --gcov-tool ./tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info + COMMAND ${LCOV_PATH} --directory . --capture --gcov-tool ${CMAKE_SOURCE_DIR}/tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info COMMAND ${LCOV_PATH} -remove ${Coverage_NAME}.info '*/boost/*' '/usr/lib/*' '/usr/include/*' '*/externals/*' '*/fc/*' '*/wasm-jit/*' --output-file ${Coverage_NAME}_filtered.info diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index cc9eec4e538..80e17845781 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -128,7 +128,7 @@ if(ENABLE_COVERAGE_TESTING) COMMAND ctest -R ${ctest_tests} -E ${ctest_exclude_tests} COMMENT "Capturing lcov counters and generating report" - COMMAND ${LCOV_PATH} --directory . --capture --gcov-tool ./tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info + COMMAND ${LCOV_PATH} --directory . --capture --gcov-tool ${CMAKE_SOURCE_DIR}/tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info COMMENT "Processing code coverage counters and generating report." COMMAND ${GENHTML_PATH} -o ${Coverage_NAME} ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index c21c9597312..a57e1cb9ed7 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -68,7 +68,7 @@ if(ENABLE_COVERAGE_TESTING) # Run tests COMMAND ./tools/ctestwrapper.sh -R ${ctest_tests} -E ${ctest_exclude_tests} - COMMAND ${LCOV_PATH} --directory . --capture --gcov-tool ./tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info + COMMAND ${LCOV_PATH} --directory . 
--capture --gcov-tool ${CMAKE_SOURCE_DIR}/tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info COMMAND ${LCOV_PATH} -remove ${Coverage_NAME}.info '*/boost/*' '/usr/lib/*' '/usr/include/*' '*/externals/*' '*/fc/*' '*/wasm-jit/*' --output-file ${Coverage_NAME}_filtered.info From 4664c3a52fd91cbce4ea92beeb73071bca0e3b8a Mon Sep 17 00:00:00 2001 From: Kayan Date: Thu, 4 Oct 2018 15:09:23 +0800 Subject: [PATCH 127/161] fix bad merge from release/1.3.x to develop: add back changes from #5920 --- libraries/chain/abi_serializer.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 93cc69fe5a3..b5f67e51059 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -119,7 +119,7 @@ namespace eosio { namespace chain { structs[st.name] = st; for( const auto& td : abi.types ) { - EOS_ASSERT(_is_type(td.type, ctx), invalid_type_inside_abi, "invalid type", ("type",td.type)); + EOS_ASSERT(_is_type(td.type, ctx), invalid_type_inside_abi, "invalid type ${type}", ("type",td.type)); EOS_ASSERT(!_is_type(td.new_type_name, ctx), duplicate_abi_type_def_exception, "type already exists", ("new_type_name",td.new_type_name)); typedefs[td.new_type_name] = td.type; } @@ -231,7 +231,7 @@ namespace eosio { namespace chain { } } FC_CAPTURE_AND_RETHROW( (t) ) } for( const auto& t : typedefs ) { try { - EOS_ASSERT(_is_type(t.second, ctx), invalid_type_inside_abi, "", ("type",t.second) ); + EOS_ASSERT(_is_type(t.second, ctx), invalid_type_inside_abi, "${type}", ("type",t.second) ); } FC_CAPTURE_AND_RETHROW( (t) ) } for( const auto& s : structs ) { try { if( s.second.base != type_name() ) { @@ -247,23 +247,23 @@ namespace eosio { namespace chain { } for( const auto& field : s.second.fields ) { try { ctx.check_deadline(); - EOS_ASSERT(_is_type(_remove_bin_extension(field.type), ctx), invalid_type_inside_abi, "", ("type",field.type) ); + 
EOS_ASSERT(_is_type(_remove_bin_extension(field.type), ctx), invalid_type_inside_abi, "${type}", ("type",field.type) ); } FC_CAPTURE_AND_RETHROW( (field) ) } } FC_CAPTURE_AND_RETHROW( (s) ) } for( const auto& s : variants ) { try { for( const auto& type : s.second.types ) { try { ctx.check_deadline(); - EOS_ASSERT(_is_type(type, ctx), invalid_type_inside_abi, "", ("type",type) ); + EOS_ASSERT(_is_type(type, ctx), invalid_type_inside_abi, "${type}", ("type",type) ); } FC_CAPTURE_AND_RETHROW( (type) ) } } FC_CAPTURE_AND_RETHROW( (s) ) } for( const auto& a : actions ) { try { ctx.check_deadline(); - EOS_ASSERT(_is_type(a.second, ctx), invalid_type_inside_abi, "", ("type",a.second) ); + EOS_ASSERT(_is_type(a.second, ctx), invalid_type_inside_abi, "${type}", ("type",a.second) ); } FC_CAPTURE_AND_RETHROW( (a) ) } for( const auto& t : tables ) { try { ctx.check_deadline(); - EOS_ASSERT(_is_type(t.second, ctx), invalid_type_inside_abi, "", ("type",t.second) ); + EOS_ASSERT(_is_type(t.second, ctx), invalid_type_inside_abi, "${type}", ("type",t.second) ); } FC_CAPTURE_AND_RETHROW( (t) ) } } From 814ee95847c7396886d547107e46f7b132ca079c Mon Sep 17 00:00:00 2001 From: Zane Reynolds Date: Fri, 12 Oct 2018 13:47:35 -0400 Subject: [PATCH 128/161] added an undefined behavior sanitizer --- .buildkite/sanitizers.yml | 65 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 .buildkite/sanitizers.yml diff --git a/.buildkite/sanitizers.yml b/.buildkite/sanitizers.yml new file mode 100644 index 00000000000..492fb4e1e77 --- /dev/null +++ b/.buildkite/sanitizers.yml @@ -0,0 +1,65 @@ +steps: + - command: | + echo "--- :hammer: Building with Undefined Sanitizer" && \ + /usr/bin/cmake -GNinja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_CXX_COMPILER=clang++-4.0 \ + -DCMAKE_C_COMPILER=clang-4.0 \ + -DBOOST_ROOT="${BOOST_ROOT}" \ + -DWASM_ROOT="${WASM_ROOT}" \ + -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ + -DBUILD_MONGO_DB_PLUGIN=true \ + 
-DENABLE_COVERAGE_TESTING=true\ + -DBUILD_DOXYGEN=false -DCMAKE_CXX_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ + -DCMAKE_C_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ + -DCMAKE_EXE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" \ + -DCMAKE_MODULE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" && \ + echo "--- :shinto_shrine: Running ninja" && \ + /usr/bin/ninja | tee ninja.log && \ + echo "--- :compression: Compressing build directory" && \ + tar -pczf build.tar.gz * + echo "--- :beers: Done" + label: ":_: Undefined Sanitizer" + agents: + - "role=automation-builder-large" + artifact_paths: + - "build.tar.gz" + - "ninja.log" + plugins: + docker#v1.4.0: + image: "eosio/ci:ubuntu18" + command: ["--privileged"] + workdir: /data/job + mounts: + - /etc/buildkite-agent/config:/config + environment: + - BOOST_ROOT=/root/opt/boost + - OPENSSL_ROOT_DIR=/usr/include/openssl + - WASM_ROOT=/root/opt/wasm + - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin + - CI=true + timeout: 60 + + - wait + + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":_: Undefined Sanitizer" && \ + tar -zxf build.tar.gz --no-same-owner && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + ctest -j8 -LE _tests -V -O sanitizer.log + label: ":_: Undefined Sanitizer Tests" + agents: + - "role=automation-builder-large" + artifact_paths: + - "mongod.log" + - "sanitizer.log" + plugins: + docker#v1.4.0: + image: "eosio/ci:ubuntu18" + workdir: /data/job + mounts: + - /etc/buildkite-agent/config:/config + timeout: 120 \ No newline at end of file From 17ada50536cf922b866299cfdebf29d71cf71087 Mon Sep 17 00:00:00 2001 From: Zane Reynolds Date: Fri, 12 Oct 2018 13:49:39 -0400 Subject: [PATCH 129/161] first take on a pull request template --- .github/PULL_REQUEST_TEMPLATE.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 .github/PULL_REQUEST_TEMPLATE.md diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..8a745912d67 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,29 @@ + + + + +**Change Description** + + +- [ ] Bug fix +- [ ] User experience enhancement +- [ ] New functionality +- [ ] RPC +- [ ] Deprecation +- [ ] Refactoring + + + +**Consensus Changes** + + + + +**API Changes** + + + + +**Documentation Additions** + + From 39995658a9dbb42a5a96c8e2b3b93950df95d003 Mon Sep 17 00:00:00 2001 From: Zane Reynolds Date: Fri, 12 Oct 2018 13:55:38 -0400 Subject: [PATCH 130/161] added an env variable to help the udef sanitier to spit out line numbers --- .buildkite/sanitizers.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.buildkite/sanitizers.yml b/.buildkite/sanitizers.yml index 492fb4e1e77..db479d25697 100644 --- a/.buildkite/sanitizers.yml +++ b/.buildkite/sanitizers.yml @@ -38,6 +38,7 @@ steps: - WASM_ROOT=/root/opt/wasm - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - 
CI=true + - UBSAN_OPTIONS=print_stacktrace=1 timeout: 60 - wait From 2a52b7512a11dcc58e842bcdbb78731363d53a9e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 12 Oct 2018 14:34:52 -0500 Subject: [PATCH 131/161] Increase the number of producers and total nodes for the distributed transaction test. GH #5983 --- tests/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 80e17845781..a5675072108 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -62,7 +62,7 @@ if(BUILD_MONGO_DB_PLUGIN) set_property(TEST nodeos_run_test-mongodb PROPERTY LABELS nonparallelizable_tests) endif() -add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) From d1c1a670b64660a3ab531bec33a581a782479a79 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 12 Oct 2018 14:44:00 -0500 Subject: [PATCH 132/161] Fixed code for handling errors. 
GH #5983 --- tests/Cluster.py | 6 +++--- tests/nodeos_forked_chain_test.py | 23 +++++++++++++++-------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index ace5ec0e927..24569607117 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -833,7 +833,7 @@ def parseClusterKeys(totalNodes): return producerKeys @staticmethod - def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr): + def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False): """Bootstrap cluster using the bios_boot.sh script generated by eosio-launcher.""" Utils.Print("Starting cluster bootstrap.") @@ -1418,13 +1418,13 @@ def reportStatus(self): def printBlockLogIfNeeded(self): printBlockLog=False - if hasattr(self, "nodes"): + if hasattr(self, "nodes") and self.nodes is not None: for node in self.nodes: if node.missingTransaction: printBlockLog=True break - if hasattr(self, "biosNode") and self.biosNode.missingTransaction: + if hasattr(self, "biosNode") and self.biosNode is not None and self.biosNode.missingTransaction: printBlockLog=True if not printBlockLog: diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 13d2d5cf70e..01115285a6e 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -55,7 +55,10 @@ def analyzeBPs(bps0, bps1, expectDivergence): bpsStr+=str(blockNum0)+"->"+prod0 if index is None: - return + if expectDivergence: + errorInDivergence=True + break + return None bpsStr0=None bpsStr2=None @@ -84,13 +87,17 @@ def analyzeBPs(bps0, bps1, expectDivergence): bpsStr0+=str(blockNum0)+numDiff+"->"+prod0+prodDiff bpsStr1+=str(blockNum1)+numDiff+"->"+prod1+prodDiff if errorInDivergence: - msg="Failed analyzing block producers - " - if expectDivergence: - msg+="nodes indicate different block producers for the same blocks, but did not expect them to diverge." 
- else: - msg+="did not expect nodes to indicate different block producers for the same blocks." - msg+="\n Matching Blocks= %s \n Diverging branch node0= %s \n Diverging branch node1= %s" % (bpsStr,bpsStr0,bpsStr1) - Utils.errorExit(msg) + break + + if errorInDivergence: + msg="Failed analyzing block producers - " + if expectDivergence: + msg+="nodes indicate different block producers for the same blocks, but did not expect them to diverge." + else: + msg+="did not expect nodes to indicate different block producers for the same blocks." + msg+="\n Matching Blocks= %s \n Diverging branch node0= %s \n Diverging branch node1= %s" % (bpsStr,bpsStr0,bpsStr1) + Utils.errorExit(msg) + return firstDivergence def getMinHeadAndLib(prodNodes): From 41c45cadf4ccade341cc3155f17f3349bd03a1e8 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 12 Oct 2018 14:46:35 -0500 Subject: [PATCH 133/161] Added verification of block logs for all nodes to distributed transaction test. GH #5983 --- tests/Cluster.py | 106 +++++++++++++++++++++++-- tests/distributed-transactions-test.py | 9 ++- tests/testUtils.py | 62 +++++++++++++++ 3 files changed, 171 insertions(+), 6 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 24569607117..070edd9b3ba 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -1432,21 +1432,117 @@ def printBlockLogIfNeeded(self): self.printBlockLog() + def getBlockLog(self, nodeExtension): + blockLogDir=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeExtension) + "/blocks/" + return Utils.getBlockLog(blockLogDir, exitOnError=False) + def printBlockLog(self): - blockLogDir=Cluster.__dataDir + Cluster.nodeExtensionToName("bios") + "/blocks/" - blockLogBios=Utils.getBlockLog(blockLogDir, exitOnError=False) + blockLogBios=self.getBlockLog("bios") Utils.Print(Cluster.__fileDivider) Utils.Print("Block log from %s:\n%s" % (blockLogDir, json.dumps(blockLogBios, indent=1))) if not hasattr(self, "nodes"): return - numNodes=len(self.nodes) for i in 
range(numNodes): node=self.nodes[i] - blockLogDir=Cluster.__dataDir + Cluster.nodeExtensionToName(i) + "/blocks/" - blockLog=Utils.getBlockLog(blockLogDir, exitOnError=False) + blockLog=self.getBlockLog(i) Utils.Print(Cluster.__fileDivider) Utils.Print("Block log from %s:\n%s" % (blockLogDir, json.dumps(blockLog, indent=1))) + + def compareBlockLogs(self): + blockLogs=[] + blockNameExtensions=[] + lowestMaxes=[] + + def back(arr): + return arr[len(arr)-1] + + def sortLowest(maxes,max): + for i in range(len(maxes)): + if max < maxes[i]: + maxes.insert(i, max) + return + + maxes.append(max) + + i="bios" + blockLog=self.getBlockLog(i) + if blockLog is None: + Utils.errorExit("Node %s does not have a block log, all nodes must have a block log" % (i)) + blockLogs.append(blockLog) + blockNameExtensions.append(i) + sortLowest(lowestMaxes,back(blockLog)["block_num"]) + + if not hasattr(self, "nodes"): + Utils.errorExit("There are not multiple nodes to compare, this method assumes that two nodes or more are expected") + + numNodes=len(self.nodes) + for i in range(numNodes): + node=self.nodes[i] + blockLog=self.getBlockLog(i) + if blockLog is None: + Utils.errorExit("Node %s does not have a block log, all nodes must have a block log" % (i)) + blockLogs.append(blockLog) + blockNameExtensions.append(i) + sortLowest(lowestMaxes,back(blockLog)["block_num"]) + + numNodes=len(blockLogs) + + if numNodes < 2: + Utils.errorExit("There are not multiple nodes to compare, this method assumes that two nodes or more are expected") + + if lowestMaxes[0] < 2: + Utils.errorExit("One or more nodes only has %d blocks, if that is a valid scenario, then compareBlockLogs shouldn't be called" % (lowestMaxes[0])) + + # create a list of block logs and name extensions for the given common block number span + def identifyCommon(blockLogs, blockNameExtensions, first, last): + commonBlockLogs=[] + commonBlockNameExtensions=[] + for i in range(numNodes): + if (len(blockLogs[i]) >= last): + 
commonBlockLogs.append(blockLogs[i][first:last]) + commonBlockNameExtensions.append(blockNameExtensions[i]) + return (commonBlockLogs,commonBlockNameExtensions) + + # compare the contents of the blockLogs for the given common block number span + def compareCommon(blockLogs, blockNameExtensions, first, last): + if Utils.Debug: Utils.Print("comparing block num %s through %s" % (first, last)) + commonBlockLogs=None + commonBlockNameExtensions=None + (commonBlockLogs,commonBlockNameExtensions) = identifyCommon(blockLogs, blockNameExtensions, first, last) + numBlockLogs=len(commonBlockLogs) + if numBlockLogs < 2: + return False + + ret=None + for i in range(1,numBlockLogs): + context="" % (commonBlockNameExtensions[0], commonBlockNameExtensions[i]) + if Utils.Debug: Utils.Print("context=%s" % (context)) + ret=Utils.compare(commonBlockLogs[0], commonBlockLogs[i], context) + if ret is not None: + blockLogDir1=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" + blockLogDir2=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[i]) + "/blocks/" + Utils.Print(Cluster.__fileDivider) + Utils.Print("Block log from %s:\n%s" % (blockLogDir1, json.dumps(commonBlockLogs[0], indent=1))) + Utils.Print(Cluster.__fileDivider) + Utils.Print("Block log from %s:\n%s" % (blockLogDir2, json.dumps(commonBlockLogs[i], indent=1))) + Utils.Print(Cluster.__fileDivider) + Utils.errorExit("Block logs do not match, difference description -> %s" % (ret)) + + return True + + def stripValues(lowestMaxes,greaterThan): + newLowest=[] + for low in lowestMaxes: + if low > greaterThan: + newLowest.append(low) + return newLowest + + first=0 + while len(lowestMaxes)>0 and compareCommon(blockLogs, blockNameExtensions, first, lowestMaxes[0]): + first=lowestMaxes[0]+1 + lowestMaxes=stripValues(lowestMaxes,lowestMaxes[0]) + diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index 546e93b0de4..c29335b3d9f 100755 
--- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -16,7 +16,7 @@ pnodes=args.p topo=args.s delay=args.d -total_nodes = pnodes if args.n == 0 else args.n +total_nodes = pnodes if args.n < pnodes else args.n debug=args.v nodesFile=args.nodes_file dontLaunch=nodesFile is not None @@ -98,6 +98,13 @@ print("Funds spread validated") + if not dontKill: + cluster.killall(allInstances=killAll) + else: + print("NOTE: Skip killing nodes, block log verification will be limited") + + cluster.compareBlockLogs() + testSuccessful=True finally: TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) diff --git a/tests/testUtils.py b/tests/testUtils.py index 9302aeda8b5..a8dbe0fd4d2 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -249,6 +249,68 @@ def getBlockLog(blockLogLocation, silentErrors=False, exitOnError=False): return rtn + @staticmethod + def compare(obj1,obj2,context): + type1=type(obj1) + type2=type(obj2) + if type1!=type2: + return "obj1(%s) and obj2(%s) are different types, so cannot be compared, context=%s" % (type1,type2,context) + + if obj1 is None and obj2 is None: + return None + + typeName=type1.__name__ + if type1 == str or type1 == int or type1 == float or type1 == bool: + if obj1!=obj2: + return "obj1=%s and obj2=%s are different (type=%s), context=%s" % (obj1,obj2,typeName,context) + return None + + if type1 == list: + len1=len(obj1) + len2=len(obj2) + diffSizes=False + minLen=len1 + if len1!=len2: + diffSizes=True + minLen=min([len1,len2]) + + for i in range(minLen): + nextContext=context + "[%d]" % (i) + ret=Utils.compare(obj1[i],obj2[i], nextContext) + if ret is not None: + return ret + + if diffSizes: + return "left and right side %s comparison have different sizes %d != %d, context=%s" % (typeName,len1,len2,context) + return None + + if type1 == dict: + keys1=sorted(obj1.keys()) + keys2=sorted(obj2.keys()) + len1=len(keys1) + 
len2=len(keys2) + diffSizes=False + minLen=len1 + if len1!=len2: + diffSizes=True + minLen=min([len1,len2]) + + for i in range(minLen): + key=keys1[i] + nextContext=context + "[\"%s\"]" % (key) + if key not in obj2: + return "right side does not contain key=%s (has %s) that left side does, context=%s" % (key,keys2,context) + ret=Utils.compare(obj1[key],obj2[key], nextContext) + if ret is not None: + return ret + + if diffSizes: + return "left and right side %s comparison have different number of keys %d != %d, context=%s" % (typeName,len1,len2,context) + + return None + + return "comparison of %s type is not supported, context=%s" % (typeName,context) + ########################################################################################### class Account(object): # pylint: disable=too-few-public-methods From 2ac3ffc7c163a56b9c232514755189a826109563 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 12 Oct 2018 16:05:04 -0400 Subject: [PATCH 134/161] fix descriptions of the two types of cleos commands to create an account --- programs/cleos/main.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index fc2aed750c6..1f36bd20fe0 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -51,10 +51,10 @@ Usage: ./cleos create SUBCOMMAND Subcommands: key Create a new keypair and print the public and private keys - account Create a new account on the blockchain + account Create a new account on the blockchain (assumes system contract does not restrict RAM usage) $ ./cleos create account -Create a new account on the blockchain +Create a new account on the blockchain (assumes system contract does not restrict RAM usage) Usage: ./cleos create account [OPTIONS] creator name OwnerKey ActiveKey Positionals: @@ -896,7 +896,11 @@ struct create_account_subcommand { bool simple; create_account_subcommand(CLI::App* actionRoot, bool s) : simple(s) { - auto createAccount = 
actionRoot->add_subcommand( (simple ? "account" : "newaccount"), localized("Create an account, buy ram, stake for bandwidth for the account")); + auto createAccount = actionRoot->add_subcommand( + (simple ? "account" : "newaccount"), + (simple ? localized("Create a new account on the blockchain (assumes system contract does not restrict RAM usage)") + : localized("Create a new account on the blockchain with initial resources") ) + ); createAccount->add_option("creator", creator, localized("The name of the account creating the new account"))->required(); createAccount->add_option("name", account_name, localized("The name of the new account"))->required(); createAccount->add_option("OwnerKey", owner_key_str, localized("The owner public key for the new account"))->required(); From e9c60f9209929081dd5e9f404064448cfdd89288 Mon Sep 17 00:00:00 2001 From: Bill Woodward Date: Fri, 12 Oct 2018 16:14:41 -0400 Subject: [PATCH 135/161] Remove config.ini options that are no longer supported by nodeos --- Docker/config.ini | 6 ------ 1 file changed, 6 deletions(-) diff --git a/Docker/config.ini b/Docker/config.ini index c821437ab96..d9871858f19 100644 --- a/Docker/config.ini +++ b/Docker/config.ini @@ -151,12 +151,6 @@ keosd-provider-timeout = 5 # Lag in number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible Block) (eosio::txn_test_gen_plugin) txn-reference-block-lag = 0 -# The path of the wallet files (absolute path or relative to application data dir) (eosio::wallet_plugin) -wallet-dir = "." - -# Timeout for unlocked wallet in seconds (default 900 (15 minutes)). Wallets will automatically lock after specified number of seconds of inactivity. Activity is defined as any wallet command e.g. list-wallets. (eosio::wallet_plugin) -unlock-timeout = 900 - # eosio key that will be imported automatically when a wallet is created. 
(eosio::wallet_plugin) # eosio-key = From 0099d4055539cfc0b0b8d457a85ba09a650ac214 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Fri, 12 Oct 2018 16:41:11 -0400 Subject: [PATCH 136/161] Fixes for foreign keys in the contracts tables * contract tables are now in their own index set * this index set is snapshotted and included in the integrity check through a different process * traversal of the normal index set will include the table_id_objects * traversal of the contracts tables is then free to use those objects to traverse tables in the order of the table_id_objects * this will traverse logical tables instead of the whole combined table * each logical table gets its own section and is ordered by the next most appropriate key depending on its type --- libraries/chain/controller.cpp | 88 +++++++++++++++++-- .../eosio/chain/contract_table_objects.hpp | 48 ++++++---- .../include/eosio/chain/database_utils.hpp | 10 +++ .../chain/include/eosio/chain/snapshot.hpp | 18 +++- 4 files changed, 138 insertions(+), 26 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index e2d64f9dd11..4c9bd566916 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -32,12 +32,6 @@ using controller_index_set = index_set< account_index, account_sequence_index, table_id_multi_index, - key_value_index, - index64_index, - index128_index, - index256_index, - index_double_index, - index_long_double_index, global_property_multi_index, dynamic_global_property_multi_index, block_summary_multi_index, @@ -45,6 +39,15 @@ using controller_index_set = index_set< generated_transaction_multi_index >; +using contract_database_index_set = index_set< + key_value_index, + index64_index, + index128_index, + index256_index, + index_double_index, + index_long_double_index +>; + class maybe_session { public: maybe_session() = default; @@ -397,6 +400,7 @@ struct controller_impl { reversible_blocks.add_index(); 
controller_index_set::add_indices(db); + contract_database_index_set::add_indices(db); authorization.add_indices(); resource_limits.add_indices(); @@ -414,6 +418,72 @@ struct controller_impl { }); } + void calculate_contract_tables_integrity_hash( sha256::encoder& enc ) const { + index_utils::walk(db, [this, &enc]( const table_id_object& table_row ){ + contract_database_index_set::walk_indices([this, &enc, &table_row]( auto utils ) { + using value_t = typename decltype(utils)::index_t::value_type; + using by_table_id = object_to_table_id_tag_t; + + auto tid_key = boost::make_tuple(table_row.id); + auto next_tid_key = boost::make_tuple(table_id_object::id_type(table_row.id._id + 1)); + decltype(utils)::template walk_range(db, tid_key, next_tid_key, [&enc](const auto& row){ + fc::raw::pack(enc, row); + }); + }); + }); + } + + void add_contract_tables_to_snapshot( const snapshot_writer_ptr& snapshot ) const { + index_utils::walk(db, [this, &snapshot]( const table_id_object& table_row ){ + contract_database_index_set::walk_indices([this, &snapshot, &table_row]( auto utils ) { + using utils_t = decltype(utils); + using value_t = typename decltype(utils)::index_t::value_type; + using by_table_id = object_to_table_id_tag_t; + + std::string table_suffix = fc::format_string("[${code}:${scope}:${table}]", + mutable_variant_object() + ("code", table_row.code) + ("scope", table_row.scope) + ("table",table_row.table) + ); + + snapshot->write_section(table_suffix, [this, &table_row]( auto& section ) { + auto tid_key = boost::make_tuple(table_row.id); + auto next_tid_key = boost::make_tuple(table_id_object::id_type(table_row.id._id + 1)); + utils_t::template walk_range(db, tid_key, next_tid_key, [this, §ion]( const auto &row ) { + section.add_row(row, db); + }); + }); + }); + }); + } + + void read_contract_tables_from_snapshot( const snapshot_reader_ptr& snapshot ) { + index_utils::walk(db, [this, &snapshot]( const table_id_object& table_row ){ + 
contract_database_index_set::walk_indices([this, &snapshot, &table_row]( auto utils ) { + using utils_t = decltype(utils); + using value_t = typename decltype(utils)::index_t::value_type; + + std::string table_suffix = fc::format_string("[${code}:${scope}:${table}]", + mutable_variant_object() + ("code", table_row.code) + ("scope", table_row.scope) + ("table",table_row.table) + ); + + snapshot->read_section(table_suffix, [this, t_id = table_row.id]( auto& section ) { + bool more = !section.empty(); + while(more) { + utils_t::create(db, [this, §ion, &more, &t_id]( auto &row ) { + row.t_id = t_id; + more = section.read_row(row, db); + }); + } + }); + }); + }); + } + sha256 calculate_integrity_hash() const { sha256::encoder enc; controller_index_set::walk_indices([this, &enc]( auto utils ){ @@ -422,6 +492,8 @@ struct controller_impl { }); }); + calculate_contract_tables_integrity_hash(enc); + authorization.calculate_integrity_hash(enc); resource_limits.calculate_integrity_hash(enc); return enc.result(); @@ -444,6 +516,8 @@ struct controller_impl { }); }); + add_contract_tables_to_snapshot(snapshot); + authorization.add_to_snapshot(snapshot); resource_limits.add_to_snapshot(snapshot); } @@ -472,6 +546,8 @@ struct controller_impl { }); }); + read_contract_tables_from_snapshot(snapshot); + authorization.read_from_snapshot(snapshot); resource_limits.read_from_snapshot(snapshot); diff --git a/libraries/chain/include/eosio/chain/contract_table_objects.hpp b/libraries/chain/include/eosio/chain/contract_table_objects.hpp index 0e1ca61e37e..8e5a3379d55 100644 --- a/libraries/chain/include/eosio/chain/contract_table_objects.hpp +++ b/libraries/chain/include/eosio/chain/contract_table_objects.hpp @@ -79,13 +79,6 @@ namespace eosio { namespace chain { > >; - struct snapshot_key_value_object { - uint64_t table_ordinal; - uint64_t primary_key; - account_name payer; - fc::blob value; - }; - struct by_primary; struct by_secondary; @@ -137,15 +130,6 @@ namespace eosio { namespace 
chain { typedef secondary_index::index_object index256_object; typedef secondary_index::index_index index256_index; - template - struct snapshot_secondary_index - { - uint64_t table_ordinal; - uint64_t primary_key; - account_name payer; - SecondaryKey secondary_key; - }; - struct soft_double_less { bool operator()( const float64_t& lhs, const float64_t& rhs )const { return f64_lt(lhs, rhs); @@ -174,6 +158,29 @@ namespace eosio { namespace chain { typedef secondary_index::index_object index_long_double_object; typedef secondary_index::index_index index_long_double_index; + /** + * helper template to map from an index type to the best tag + * to use when traversing by table_id + */ + template + struct object_to_table_id_tag; + +#define DECLARE_TABLE_ID_TAG( object, tag ) \ + template<> \ + struct object_to_table_id_tag { \ + using tag_type = tag;\ + }; + + DECLARE_TABLE_ID_TAG(key_value_object, by_scope_primary) + DECLARE_TABLE_ID_TAG(index64_object, by_primary) + DECLARE_TABLE_ID_TAG(index128_object, by_primary) + DECLARE_TABLE_ID_TAG(index256_object, by_primary) + DECLARE_TABLE_ID_TAG(index_double_object, by_primary) + DECLARE_TABLE_ID_TAG(index_long_double_object, by_primary) + + template + using object_to_table_id_tag_t = typename object_to_table_id_tag::tag_type; + namespace config { template<> struct billable_size { @@ -231,4 +238,13 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::index_double_object, eosio::chain::index_ CHAINBASE_SET_INDEX_TYPE(eosio::chain::index_long_double_object, eosio::chain::index_long_double_index) FC_REFLECT(eosio::chain::table_id_object, (code)(scope)(table) ) +FC_REFLECT(eosio::chain::key_value_object, (primary_key)(payer)(value) ) + +#define REFLECT_SECONDARY(type)\ + FC_REFLECT(type, (primary_key)(payer)(secondary_key) ) +REFLECT_SECONDARY(eosio::chain::index64_object) +REFLECT_SECONDARY(eosio::chain::index128_object) +REFLECT_SECONDARY(eosio::chain::index256_object) +REFLECT_SECONDARY(eosio::chain::index_double_object) 
+REFLECT_SECONDARY(eosio::chain::index_long_double_object) diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp index 07ca00130a8..ae14ca06959 100644 --- a/libraries/chain/include/eosio/chain/database_utils.hpp +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -28,6 +28,16 @@ namespace eosio { namespace chain { } } + template + static void walk_range( const chainbase::database& db, const Key& begin_key, const Key& end_key, F function ) { + const auto& idx = db.get_index(); + auto begin_itr = idx.lower_bound(begin_key); + auto end_itr = idx.lower_bound(end_key); + for (auto itr = begin_itr; itr != end_itr; ++itr) { + function(*itr); + } + } + template static void create( chainbase::database& db, F cons ) { db.create(cons); diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index cc70886147b..961d7468191 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -125,13 +125,18 @@ namespace eosio { namespace chain { }; template - void write_section(F f) { - write_start_section(detail::snapshot_section_traits::section_name()); + void write_section(const std::string suffix, F f) { + write_start_section(suffix + detail::snapshot_section_traits::section_name()); auto section = section_writer(*this); f(section); write_end_section(); } + template + void write_section(F f) { + write_section(std::string(), f); + } + virtual ~snapshot_writer(){}; protected: @@ -214,13 +219,18 @@ namespace eosio { namespace chain { }; template - void read_section(F f) { - set_section(detail::snapshot_section_traits::section_name()); + void read_section(const std::string& suffix, F f) { + set_section(suffix + detail::snapshot_section_traits::section_name()); auto section = section_reader(*this); f(section); clear_section(); } + template + void read_section(F f) { + 
read_section(std::string(), f); + } + virtual void validate() const = 0; virtual ~snapshot_reader(){}; From 390f09395da058dc0a929070335a4f8fc0d0c636 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Fri, 12 Oct 2018 18:03:36 -0400 Subject: [PATCH 137/161] protect against future attempts to snapshot internal chainbase OIDs --- .../chain/include/eosio/chain/snapshot.hpp | 39 ++++++++++++++++++- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 961d7468191..4fc13ffbdfc 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -5,6 +5,7 @@ #pragma once #include +#include #include #include #include @@ -154,17 +155,51 @@ namespace eosio { namespace chain { virtual std::string row_type_name() const = 0; }; + template + struct is_chainbase_object { + static constexpr bool value = false; + }; + + template + struct is_chainbase_object> { + static constexpr bool value = true; + }; + + template + constexpr bool is_chainbase_object_v = is_chainbase_object::value; + + struct row_validation_helper { + template + static auto apply(const T& data, F f) -> std::enable_if_t> { + auto orig = data.id; + f(); + EOS_ASSERT(orig == data.id, snapshot_exception, + "Snapshot for ${type} mutates row member \"id\" which is illegal", + ("type",boost::core::demangle( typeid( T ).name() ))); + } + + template + static auto apply(const T&, F f) -> std::enable_if_t> { + f(); + } + }; + template struct snapshot_row_reader : abstract_snapshot_row_reader { explicit snapshot_row_reader( T& data ) :data(data) {} + void provide(std::istream& in) const override { - fc::raw::unpack(in, data); + row_validation_helper::apply(data, [&in,this](){ + fc::raw::unpack(in, data); + }); } void provide(const fc::variant& var) const override { - fc::from_variant(var, data); + row_validation_helper::apply(data, [&var,this]() { + 
fc::from_variant(var, data); + }); } std::string row_type_name() const override { From 0fc253192efbaf09f6201d9fa17dcb987f82ef86 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Fri, 12 Oct 2018 18:25:03 -0400 Subject: [PATCH 138/161] remove empty contract table sections from the snapshot as they just take up space and are not worth it --- libraries/chain/controller.cpp | 16 ++++-- .../include/eosio/chain/database_utils.hpp | 8 +++ .../chain/include/eosio/chain/snapshot.hpp | 8 +++ libraries/chain/snapshot.cpp | 49 +++++++++++++++++++ 4 files changed, 78 insertions(+), 3 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 4c9bd566916..60be60b8ff8 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -447,9 +447,15 @@ struct controller_impl { ("table",table_row.table) ); - snapshot->write_section(table_suffix, [this, &table_row]( auto& section ) { - auto tid_key = boost::make_tuple(table_row.id); - auto next_tid_key = boost::make_tuple(table_id_object::id_type(table_row.id._id + 1)); + auto tid_key = boost::make_tuple(table_row.id); + auto next_tid_key = boost::make_tuple(table_id_object::id_type(table_row.id._id + 1)); + + // don't include empty ranges in the snapshot + if (utils_t::template empty_range(db, tid_key, next_tid_key)) { + return; + } + + snapshot->write_section(table_suffix, [this, &tid_key, &next_tid_key]( auto& section ) { utils_t::template walk_range(db, tid_key, next_tid_key, [this, &section]( const auto &row ) { section.add_row(row, db); }); @@ -471,6 +477,10 @@ struct controller_impl { ("table",table_row.table) ); + if (!snapshot->has_section(table_suffix)) { + return; + } + snapshot->read_section(table_suffix, [this, t_id = table_row.id]( auto& section ) { bool more = !section.empty(); while(more) { diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp index ae14ca06959..31ec54e2c7d 100644 ---
a/libraries/chain/include/eosio/chain/database_utils.hpp +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -38,6 +38,14 @@ namespace eosio { namespace chain { } } + template + static bool empty_range( const chainbase::database& db, const Key& begin_key, const Key& end_key ) { + const auto& idx = db.get_index(); + auto begin_itr = idx.lower_bound(begin_key); + auto end_itr = idx.lower_bound(end_key); + return begin_itr == end_itr; + } + template static void create( chainbase::database& db, F cons ) { db.create(cons); diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 4fc13ffbdfc..1f4847e5f53 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -266,11 +266,17 @@ namespace eosio { namespace chain { read_section(std::string(), f); } + template + bool has_section(const std::string& suffix = std::string()) { + return has_section(suffix + detail::snapshot_section_traits::section_name()); + } + virtual void validate() const = 0; virtual ~snapshot_reader(){}; protected: + virtual bool has_section( const std::string& section_name ) = 0; virtual void set_section( const std::string& section_name ) = 0; virtual bool read_row( detail::abstract_snapshot_row_reader& row_reader ) = 0; virtual bool empty( ) = 0; @@ -299,6 +305,7 @@ namespace eosio { namespace chain { explicit variant_snapshot_reader(const fc::variant& snapshot); void validate() const override; + bool has_section( const string& section_name ) override; void set_section( const string& section_name ) override; bool read_row( detail::abstract_snapshot_row_reader& row_reader ) override; bool empty ( ) override; @@ -334,6 +341,7 @@ namespace eosio { namespace chain { explicit istream_snapshot_reader(std::istream& snapshot); void validate() const override; + bool has_section( const string& section_name ) override; void set_section( const string& section_name ) override; bool 
read_row( detail::abstract_snapshot_row_reader& row_reader ) override; bool empty ( ) override; diff --git a/libraries/chain/snapshot.cpp b/libraries/chain/snapshot.cpp index 9002f6073bc..fb119b40a3a 100644 --- a/libraries/chain/snapshot.cpp +++ b/libraries/chain/snapshot.cpp @@ -75,6 +75,17 @@ void variant_snapshot_reader::validate() const { } } +bool variant_snapshot_reader::has_section( const string& section_name ) { + const auto& sections = snapshot["sections"].get_array(); + for( const auto& section: sections ) { + if (section["name"].as_string() == section_name) { + return true; + } + } + + return false; +} + void variant_snapshot_reader::set_section( const string& section_name ) { const auto& sections = snapshot["sections"].get_array(); for( const auto& section: sections ) { @@ -230,6 +241,44 @@ bool istream_snapshot_reader::validate_section() const { return true; } +bool istream_snapshot_reader::has_section( const string& section_name ) { + auto restore_pos = fc::make_scoped_exit([this,pos=snapshot.tellg()](){ + snapshot.seekg(pos); + }); + + const std::streamoff header_size = sizeof(ostream_snapshot_writer::magic_number) + sizeof(current_snapshot_version); + + auto next_section_pos = header_pos + header_size; + + while (true) { + snapshot.seekg(next_section_pos); + uint64_t section_size = 0; + snapshot.read((char*)&section_size,sizeof(section_size)); + if (section_size == std::numeric_limits::max()) { + break; + } + + next_section_pos = snapshot.tellg() + std::streamoff(section_size); + + uint64_t ignore = 0; + snapshot.read((char*)&ignore,sizeof(ignore)); + + bool match = true; + for(auto c : section_name) { + if(snapshot.get() != c) { + match = false; + break; + } + } + + if (match && snapshot.get() == 0) { + return true; + } + } + + return false; +} + void istream_snapshot_reader::set_section( const string& section_name ) { auto restore_pos = fc::make_scoped_exit([this,pos=snapshot.tellg()](){ snapshot.seekg(pos); From
ef7bd03b6d84d028b836d3092f4943587851318b Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Sun, 14 Oct 2018 17:55:41 -0400 Subject: [PATCH 139/161] interim attempt at another direction --- libraries/chain/controller.cpp | 97 +++++++++++-------- .../eosio/chain/contract_table_objects.hpp | 2 +- .../include/eosio/chain/database_utils.hpp | 8 +- .../chain/include/eosio/chain/snapshot.hpp | 16 +-- 4 files changed, 72 insertions(+), 51 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 60be60b8ff8..9d471508da6 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -31,12 +31,12 @@ using resource_limits::resource_limits_manager; using controller_index_set = index_set< account_index, account_sequence_index, - table_id_multi_index, global_property_multi_index, dynamic_global_property_multi_index, block_summary_multi_index, transaction_multi_index, - generated_transaction_multi_index + generated_transaction_multi_index, + table_id_multi_index >; using contract_database_index_set = index_set< @@ -420,6 +420,8 @@ struct controller_impl { void calculate_contract_tables_integrity_hash( sha256::encoder& enc ) const { index_utils::walk(db, [this, &enc]( const table_id_object& table_row ){ + fc::raw::pack(enc, table_row); + contract_database_index_set::walk_indices([this, &enc, &table_row]( auto utils ) { using value_t = typename decltype(utils)::index_t::value_type; using by_table_id = object_to_table_id_tag_t; @@ -434,28 +436,23 @@ struct controller_impl { } void add_contract_tables_to_snapshot( const snapshot_writer_ptr& snapshot ) const { - index_utils::walk(db, [this, &snapshot]( const table_id_object& table_row ){ - contract_database_index_set::walk_indices([this, &snapshot, &table_row]( auto utils ) { - using utils_t = decltype(utils); - using value_t = typename decltype(utils)::index_t::value_type; - using by_table_id = object_to_table_id_tag_t; + snapshot->write_section("contract_tables", [this]( 
auto& section ) { + index_utils::walk(db, [this, &section]( const table_id_object& table_row ){ + // add a row for the table + section.add_row(table_row, db); - std::string table_suffix = fc::format_string("[${code}:${scope}:${table}]", - mutable_variant_object() - ("code", table_row.code) - ("scope", table_row.scope) - ("table",table_row.table) - ); + // followed by a size row and then N data rows for each type of table + contract_database_index_set::walk_indices([this, &section, &table_row]( auto utils ) { + using utils_t = decltype(utils); + using value_t = typename decltype(utils)::index_t::value_type; + using by_table_id = object_to_table_id_tag_t; - auto tid_key = boost::make_tuple(table_row.id); - auto next_tid_key = boost::make_tuple(table_id_object::id_type(table_row.id._id + 1)); + auto tid_key = boost::make_tuple(table_row.id); + auto next_tid_key = boost::make_tuple(table_id_object::id_type(table_row.id._id + 1)); - // don't include empty ranges in the snapshot - if (utils_t::template empty_range(db, tid_key, next_tid_key)) { - return; - } + unsigned_int size = utils_t::template size_range(db, tid_key, next_tid_key); + section.add_row(size, db); - snapshot->write_section(table_suffix, [this, &tid_key, &next_tid_key]( auto& section ) { utils_t::template walk_range(db, tid_key, next_tid_key, [this, &section]( const auto &row ) { section.add_row(row, db); }); @@ -465,38 +462,44 @@ struct controller_impl { } void read_contract_tables_from_snapshot( const snapshot_reader_ptr& snapshot ) { - index_utils::walk(db, [this, &snapshot]( const table_id_object& table_row ){ - contract_database_index_set::walk_indices([this, &snapshot, &table_row]( auto utils ) { - using utils_t = decltype(utils); - using value_t = typename decltype(utils)::index_t::value_type; + snapshot->read_section("contract_tables", [this]( auto& section ) { + bool more = true; + while (more) { + // read the row for the table + table_id_object::id_type t_id; + index_utils::create(db, [this, &section,
&t_id](auto& row) { + section.read_row(row, db); + t_id = row.id; + }); - std::string table_suffix = fc::format_string("[${code}:${scope}:${table}]", - mutable_variant_object() - ("code", table_row.code) - ("scope", table_row.scope) - ("table",table_row.table) - ); + // read the size and data rows for each type of table + contract_database_index_set::walk_indices([this, &section, &t_id, &more](auto utils) { + using utils_t = decltype(utils); - if (!snapshot->has_section(table_suffix)) { - return; - } + unsigned_int size; + section.read_row(size, db); - snapshot->read_section(table_suffix, [this, t_id = table_row.id]( auto& section ) { - bool more = !section.empty(); - while(more) { - utils_t::create(db, [this, &section, &more, &t_id]( auto &row ) { + for (size_t idx = 0; idx < size.value; idx++) { + utils_t::create(db, [this, &section, &more, &t_id](auto& row) { row.t_id = t_id; more = section.read_row(row, db); }); } }); - }); + } }); } sha256 calculate_integrity_hash() const { sha256::encoder enc; controller_index_set::walk_indices([this, &enc]( auto utils ){ + using value_t = typename decltype(utils)::index_t::value_type; + + // skip the table_id_object as its inlined with contract tables section + if (std::is_same::value) { + return; + } + decltype(utils)::walk(db, [&enc]( const auto &row ) { fc::raw::pack(enc, row); }); @@ -519,7 +522,14 @@ struct controller_impl { }); controller_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot->write_section([this]( auto& section ){ + using value_t = typename decltype(utils)::index_t::value_type; + + // skip the table_id_object as its inlined with contract tables section + if (std::is_same::value) { + return; + } + + snapshot->write_section([this]( auto& section ){ decltype(utils)::walk(db, [this, &section]( const auto &row ) { section.add_row(row, db); }); @@ -546,7 +556,14 @@ struct controller_impl { }); controller_index_set::walk_indices([this, &snapshot]( auto utils ){ - snapshot->read_section([this]( auto& section ) {
+ using value_t = typename decltype(utils)::index_t::value_type; + + // skip the table_id_object as its inlined with contract tables section + if (std::is_same::value) { + return; + } + + snapshot->read_section([this]( auto& section ) { bool more = !section.empty(); while(more) { decltype(utils)::create(db, [this, &section, &more]( auto &row ) { diff --git a/libraries/chain/include/eosio/chain/contract_table_objects.hpp b/libraries/chain/include/eosio/chain/contract_table_objects.hpp index 8e5a3379d55..b3428340823 100644 --- a/libraries/chain/include/eosio/chain/contract_table_objects.hpp +++ b/libraries/chain/include/eosio/chain/contract_table_objects.hpp @@ -237,7 +237,7 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::index256_object, eosio::chain::index256_i CHAINBASE_SET_INDEX_TYPE(eosio::chain::index_double_object, eosio::chain::index_double_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::index_long_double_object, eosio::chain::index_long_double_index) -FC_REFLECT(eosio::chain::table_id_object, (code)(scope)(table) ) +FC_REFLECT(eosio::chain::table_id_object, (code)(scope)(table)(payer)(count) ) FC_REFLECT(eosio::chain::key_value_object, (primary_key)(payer)(value) ) #define REFLECT_SECONDARY(type)\ diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp index 31ec54e2c7d..e2b0c0d487f 100644 --- a/libraries/chain/include/eosio/chain/database_utils.hpp +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -39,11 +39,15 @@ namespace eosio { namespace chain { } template - static bool empty_range( const chainbase::database& db, const Key& begin_key, const Key& end_key ) { + static size_t size_range( const chainbase::database& db, const Key& begin_key, const Key& end_key ) { const auto& idx = db.get_index(); auto begin_itr = idx.lower_bound(begin_key); auto end_itr = idx.lower_bound(end_key); - return begin_itr == end_itr; + size_t res = 0; + while (begin_itr != end_itr) { + res++; ++begin_itr; + }
+ return res; } template diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 1f4847e5f53..b6c7a81bf0a 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -125,9 +125,9 @@ namespace eosio { namespace chain { snapshot_writer& _writer; }; - template - void write_section(const std::string suffix, F f) { - write_start_section(suffix + detail::snapshot_section_traits::section_name()); + template + void write_section(const std::string section_name, F f) { + write_start_section(section_name); auto section = section_writer(*this); f(section); write_end_section(); @@ -135,7 +135,7 @@ namespace eosio { namespace chain { template void write_section(F f) { - write_section(std::string(), f); + write_section(detail::snapshot_section_traits::section_name(), f); } virtual ~snapshot_writer(){}; @@ -253,9 +253,9 @@ namespace eosio { namespace chain { }; - template - void read_section(const std::string& suffix, F f) { - set_section(suffix + detail::snapshot_section_traits::section_name()); + template + void read_section(const std::string& section_name, F f) { + set_section(section_name); auto section = section_reader(*this); f(section); clear_section(); @@ -263,7 +263,7 @@ namespace eosio { namespace chain { template void read_section(F f) { - read_section(std::string(), f); + read_section(detail::snapshot_section_traits::section_name(), f); } template From 716e10a41055d1dec265ea46d3967babe91f9bfd Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 12 Oct 2018 19:18:14 -0400 Subject: [PATCH 140/161] Implement deadline timer performance check & use measured result Before using the deadline timer, do a test at startup to make sure it is reasonably accurate. Then use this measured value as a means of setting the deadline time. 
--- libraries/chain/transaction_context.cpp | 119 ++++++++++++++++++++++-- 1 file changed, 109 insertions(+), 10 deletions(-) diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index e6277e164fa..f3ec93e9264 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -7,31 +7,130 @@ #include #include +#pragma push_macro("N") +#undef N +#include +#include +#include +#include +#include +#include +#pragma pop_macro("N") + +#include + namespace eosio { namespace chain { +namespace bacc = boost::accumulators; + + struct deadline_timer_verify { + deadline_timer_verify() { + //keep longest first in list. You're effectively going to take test_intervals[0]*sizeof(test_intervals[0]) + //time to do the the "calibration" + int test_intervals[] = {50000, 10000, 5000, 1000, 500, 100, 50, 10}; + + struct sigaction act; + sigemptyset(&act.sa_mask); + act.sa_handler = timer_hit; + act.sa_flags = 0; + if(sigaction(SIGALRM, &act, NULL)) + return; + + sigset_t alrm; + sigemptyset(&alrm); + sigaddset(&alrm, SIGALRM); + int dummy; + + for(int& interval : test_intervals) { + unsigned int loops = test_intervals[0]/interval; + + for(unsigned int i = 0; i < loops; ++i) { + struct itimerval enable = {{0, 0}, {0, interval}}; + hit = 0; + auto start = std::chrono::high_resolution_clock::now(); + if(setitimer(ITIMER_REAL, &enable, NULL)) + return; + while(!hit) {} + auto end = std::chrono::high_resolution_clock::now(); + int timer_slop = std::chrono::duration_cast(end-start).count() - interval; + + //since more samples are run for the shorter expirations, weigh the longer expirations accordingly. This + //helps to make a few results more fair. Two such examples: AWS c4&i5 xen instances being rather stable + //down to 100us but then struggling with 50us and 10us. MacOS having performance that seems to correlate + //with expiry length; that is, long expirations have high error, short expirations have low error. 
+ //That said, for these platforms, a tighter tolerance may possibly be achieved by taking performance + //metrics in mulitple bins and appliying the slop based on which bin a deadline resides in. Not clear + //if that's worth the extra complexity at this point. + samples(timer_slop, bacc::weight = interval/(float)test_intervals[0]); + } + } + timer_overhead = bacc::mean(samples) + sqrt(bacc::variance(samples))*2; //target 95% of expirations before deadline + use_deadline_timer = timer_overhead < 1000; + + act.sa_handler = SIG_DFL; + sigaction(SIGALRM, &act, NULL); + } + + static void timer_hit(int) { + hit = 1; + } + static volatile sig_atomic_t hit; + + bacc::accumulator_set, float> samples; + bool use_deadline_timer = false; + int timer_overhead; + }; + volatile sig_atomic_t deadline_timer_verify::hit; + static deadline_timer_verify deadline_timer_verification; + deadline_timer::deadline_timer() { if(initialized) return; - struct sigaction act; - act.sa_handler = timer_expired; - sigemptyset(&act.sa_mask); - act.sa_flags = 0; - sigaction(SIGALRM, &act, NULL); initialized = true; + + #define TIMER_STATS_FORMAT "min:${min}us max:${max}us mean:${mean}us stddev:${stddev}us" + #define TIMER_STATS \ + ("min", bacc::min(deadline_timer_verification.samples))("max", bacc::max(deadline_timer_verification.samples)) \ + ("mean", (int)bacc::mean(deadline_timer_verification.samples))("stddev", (int)sqrt(bacc::variance(deadline_timer_verification.samples))) \ + ("t", deadline_timer_verification.timer_overhead) + + if(deadline_timer_verification.use_deadline_timer) { + struct sigaction act; + act.sa_handler = timer_expired; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + if(sigaction(SIGALRM, &act, NULL) == 0) { + ilog("Using ${t}us deadline timer for checktime: " TIMER_STATS_FORMAT, TIMER_STATS); + return; + } + } + + wlog("Using polled checktime; deadline timer too inaccurate: " TIMER_STATS_FORMAT, TIMER_STATS); + deadline_timer_verification.use_deadline_timer = false; 
//set in case sigaction() fails above } void deadline_timer::start(fc::time_point tp) { + if(tp == fc::time_point::maximum()) { + expired = 0; + return; + } + if(!deadline_timer_verification.use_deadline_timer) { + expired = 1; + return; + } microseconds x = tp.time_since_epoch() - fc::time_point::now().time_since_epoch(); - if(x.count() < 18) + if(x.count() <= deadline_timer_verification.timer_overhead) expired = 1; - else if(x.count() < 1000000) { - struct itimerval enable = {{0, 0}, {0, (int)x.count()-15}}; + else { + struct itimerval enable = {{0, 0}, {0, (int)x.count()-deadline_timer_verification.timer_overhead}}; expired = 0; - setitimer(ITIMER_REAL, &enable, NULL); + expired |= !!setitimer(ITIMER_REAL, &enable, NULL); } } void deadline_timer::stop() { + if(expired) + return; struct itimerval disable = {{0, 0}, {0, 0}}; setitimer(ITIMER_REAL, &disable, NULL); } @@ -41,7 +140,7 @@ namespace eosio { namespace chain { } void deadline_timer::timer_expired(int) { - expired = 1; + expired = 1; } volatile sig_atomic_t deadline_timer::expired = 0; bool deadline_timer::initialized = false; From 55fb79833da4897cb109f5acb02f2360e97a0410 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Sun, 14 Oct 2018 23:56:51 -0400 Subject: [PATCH 141/161] feature/bin_rel --- CMakeLists.txt | 6 +-- CMakeModules/package.cmake | 11 ++++++ CMakeModules/utils.cmake | 4 ++ programs/cleos/CMakeLists.txt | 2 + programs/keosd/CMakeLists.txt | 1 + programs/nodeos/CMakeLists.txt | 1 + scripts/generate_bottle.sh | 67 ++++++++++++++++++++++++++++++++++ scripts/generate_deb.sh | 29 +++++++++++++++ scripts/generate_package.sh.in | 35 ++++++++++++++++++ scripts/generate_rpm.sh | 43 ++++++++++++++++++++++ scripts/generate_tarball.sh | 40 ++++++++++++++++++++ 11 files changed, 236 insertions(+), 3 deletions(-) create mode 100644 CMakeModules/package.cmake create mode 100644 CMakeModules/utils.cmake create mode 100644 scripts/generate_bottle.sh create mode 100644 scripts/generate_deb.sh create mode 
100644 scripts/generate_package.sh.in create mode 100644 scripts/generate_rpm.sh create mode 100644 scripts/generate_tarball.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index e3e9191d930..0ff42a412a4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -186,6 +186,7 @@ if(ENABLE_COVERAGE_TESTING) find_program( GENHTML_PATH NAMES genhtml) endif() +include(utils) add_subdirectory( externals ) if ("${CORE_SYMBOL_NAME}" STREQUAL "") @@ -207,7 +208,6 @@ endif() message( STATUS "Using '${EOSIO_ROOT_KEY}' as public key for 'eosio' account" ) include(wasm) - add_subdirectory( libraries ) add_subdirectory( contracts ) add_subdirectory( plugins ) @@ -240,6 +240,6 @@ install(FILES libraries/fc/secp256k1/upstream/COPYING DESTINATION ${CMAKE_INSTAL install(FILES externals/binaryen/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.binaryen) install(FILES libraries/fc/src/network/LICENSE.go DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ ) -include(installer) - +#include(installer) +include(package) include(doxygen) diff --git a/CMakeModules/package.cmake b/CMakeModules/package.cmake new file mode 100644 index 00000000000..895ce5459f3 --- /dev/null +++ b/CMakeModules/package.cmake @@ -0,0 +1,11 @@ +set(VENDOR "block.one") +set(PROJECT_NAME "eosio") +set(DESC "Software for the EOS.IO network") +set(URL "https://github.com/eosio/eos") +set(EMAIL "support@block.one") + +configure_file(${CMAKE_SOURCE_DIR}/scripts/generate_package.sh.in ${CMAKE_BINARY_DIR}/packages/generate_package.sh @ONLY) +configure_file(${CMAKE_SOURCE_DIR}/scripts/generate_bottle.sh ${CMAKE_BINARY_DIR}/packages/generate_bottle.sh COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/scripts/generate_deb.sh ${CMAKE_BINARY_DIR}/packages/generate_deb.sh COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/scripts/generate_rpm.sh ${CMAKE_BINARY_DIR}/packages/generate_rpm.sh COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/scripts/generate_tarball.sh 
${CMAKE_BINARY_DIR}/packages/generate_tarball.sh COPYONLY) diff --git a/CMakeModules/utils.cmake b/CMakeModules/utils.cmake new file mode 100644 index 00000000000..2b15f3d7d28 --- /dev/null +++ b/CMakeModules/utils.cmake @@ -0,0 +1,4 @@ +macro( copy_bin file ) + add_custom_command( TARGET ${file} POST_BUILD COMMAND mkdir -p ${CMAKE_BINARY_DIR}/bin ) + add_custom_command( TARGET ${file} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_BINARY_DIR}/${file} ${CMAKE_BINARY_DIR}/bin/ ) +endmacro( copy_bin ) diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index 2581a15bd4f..e748581d849 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -36,7 +36,9 @@ target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_D target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE appbase chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ${Intl_LIBRARIES} ) + +copy_bin( ${CLI_CLIENT_EXECUTABLE_NAME} ) install( TARGETS ${CLI_CLIENT_EXECUTABLE_NAME} diff --git a/programs/keosd/CMakeLists.txt b/programs/keosd/CMakeLists.txt index a332f8e26b1..1c294329387 100644 --- a/programs/keosd/CMakeLists.txt +++ b/programs/keosd/CMakeLists.txt @@ -20,6 +20,7 @@ target_include_directories(${KEY_STORE_EXECUTABLE_NAME} PUBLIC ${CMAKE_CURRENT_B mas_sign(${KEY_STORE_EXECUTABLE_NAME}) +copy_bin( ${KEY_STORE_EXECUTABLE_NAME} ) install( TARGETS ${KEY_STORE_EXECUTABLE_NAME} diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 82ce6470789..9e1481c23c3 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -72,6 +72,7 @@ endif() include(additionalPlugins) +copy_bin( ${NODE_EXECUTABLE_NAME} ) install( TARGETS ${NODE_EXECUTABLE_NAME} diff --git a/scripts/generate_bottle.sh b/scripts/generate_bottle.sh new file mode 100644 index 00000000000..f56be136acd --- /dev/null +++ b/scripts/generate_bottle.sh 
@@ -0,0 +1,67 @@ +#! /bin/bash + +VERS=`sw_vers -productVersion | awk '10\.13\..*/{print $0}'` +if [-z $VERS]; +then + VERS=`sw_vers -productVersion | awk '10\.14\..*/{print $0}'` + if [-z $VERS]; + then + echo "Error, unsupported OS X version" + exit -1 + fi + MAC_VERSION="mojave" +else + MAC_VERSION="high_sierra" +fi + +NAME="${PROJECT}-${VERSION}.${MAC_VERSION}.bottle.tar.gz" + +mkdir -p ${PROJECT}/${VERSION}/opt/eosio/lib/cmake + +PREFIX="${PROJECT}/${VERSION}" +SPREFIX="\/usr\/local" +SUBPREFIX="opt/${PROJECT}" +SSUBPREFIX="opt\/${PROJECT}" + +export PREFIX +export SPREFIX +export SUBPREFIX +export SSUBPREFIX + +bash generate_tarball.sh ${NAME} + +hash=`openssl dgst -sha256 ${NAME} | awk 'NF>1{print $NF}'` + +echo "class Eosio < Formula + + homepage \"${URL}\" + revision 0 + url \"https://github.com/eosio/eos/archive/v${VERSION}.tar.gz\" + version \"${VERSION}\" + + option :universal + + depends_on \"cmake\" => :build + depends_on \"automake\" => :build + depends_on \"libtool\" => :build + depends_on \"wget\" => :build + depends_on \"gmp\" => :build + depends_on \"gettext\" => :build + depends_on \"doxygen\" => :build + depends_on \"graphviz\" => :build + depends_on \"lcov\" => :build + depends_on :xcode => :build + depends_on :macos => :high_sierra + depends_on :arch => :intel + + bottle do + root_url \"https://github.com/eosio/eos/releases/download/v${VERSION}\" + sha256 \"${hash}\" => :${MAC_VERSION} + end + def install + raise \"Error, only supporting binary packages at this time\" + end +end +__END__" &> eosio.rb + +rm -r ${PROJECT} diff --git a/scripts/generate_deb.sh b/scripts/generate_deb.sh new file mode 100644 index 00000000000..5ca57978cbd --- /dev/null +++ b/scripts/generate_deb.sh @@ -0,0 +1,29 @@ +#! 
/bin/bash + +NAME="${PROJECT}-${VERSION}.x86_64" +PREFIX="usr" +SPREFIX=${PREFIX} +SUBPREFIX="opt/${PROJECT}/${VERSION}" +SSUBPREFIX="opt\/${PROJECT}\/${VERSION}" + +mkdir -p ${PROJECT}/DEBIAN +echo "Package: ${PROJECT} +Version: ${VERSION} +Section: devel +Priority: optional +Architecture: amd64 +Homepage: ${URL} +Maintainer: ${EMAIL} +Description: ${DESC}" &> ${PROJECT}/DEBIAN/control + +export PREFIX +export SUBPREFIX +export SPREFIX +export SSUBPREFIX + +bash generate_tarball.sh ${NAME}.tar.gz + +tar -xvzf ${NAME}.tar.gz -C ${PROJECT} +dpkg-deb --build ${PROJECT} +mv ${PROJECT}.deb ${NAME}.deb +rm -r ${PROJECT} diff --git a/scripts/generate_package.sh.in b/scripts/generate_package.sh.in new file mode 100644 index 00000000000..909598cf5d0 --- /dev/null +++ b/scripts/generate_package.sh.in @@ -0,0 +1,35 @@ +#! /bin/bash + +VARIANT=$1 + +VERSION="@VERSION_MAJOR@.@VERSION_MINOR@.@VERSION_PATCH@" + +BUILD_DIR="@CMAKE_BINARY_DIR@" + +VENDOR="@VENDOR@" +PROJECT="@PROJECT_NAME@" +DESC="@DESC@" +URL="@URL@" +EMAIL="@EMAIL@" + +export BUILD_DIR +export VERSION +export VENDOR +export PROJECT +export DESC +export URL +export EMAIL + +mkdir tmp + +if [[ ${VARIANT} == "brew" ]]; then + bash generate_bottle.sh +elif [[ ${VARIANT} == "deb" ]]; then + bash generate_deb.sh +elif [[ ${VARIANT} == "rpm" ]]; then + bash generate_rpm.sh +else + echo "Error, unknown package type. Use either ['brew', 'deb', 'rpm']." + exit -1 +fi +rm -r tmp diff --git a/scripts/generate_rpm.sh b/scripts/generate_rpm.sh new file mode 100644 index 00000000000..bc5923b25a5 --- /dev/null +++ b/scripts/generate_rpm.sh @@ -0,0 +1,43 @@ +#! 
/bin/bash + +NAME="${PROJECT}-${VERSION}.x86_64" +PREFIX="usr" +SPREFIX=${PREFIX} +SUBPREFIX="opt/${PROJECT}/${VERSION}" +SSUBPREFIX="opt\/${PROJECT}\/${VERSION}" + +export PREFIX +export SUBPREFIX +export SPREFIX +export SSUBPREFIX + +bash generate_tarball.sh ${NAME}.tar.gz + +RPMBUILD=`realpath ~/rpmbuild/BUILDROOT/${NAME}-0.x86_64` +mkdir -p ${RPMBUILD} +FILES=$(tar -xvzf ${NAME}.tar.gz -C ${RPMBUILD}) +PFILES="" +for f in ${FILES[@]}; do + if [ -f ${RPMBUILD}/${f} ]; then + PFILES="${PFILES}/${f}\n" + fi +done +echo -e ${PFILES} &> ~/rpmbuild/BUILD/filenames.txt + +mkdir -p ${PROJECT} +echo -e "Name: ${PROJECT} +Version: ${VERSION}.x86_64 +License: MIT +Vendor: ${VENDOR} +Source: ${URL} +URL: ${URL} +Packager: ${VENDOR} <${EMAIL}> +Summary: ${DESC} +Release: 0 +%description +${DESC} +%files -f filenames.txt" &> ${PROJECT}.spec + +rpmbuild -bb ${PROJECT}.spec +mv ~/rpmbuild/RPMS/x86_64 ./ +rm -r ${PROJECT} ~/rpmbuild/BUILD/filenames.txt ${PROJECT}.spec diff --git a/scripts/generate_tarball.sh b/scripts/generate_tarball.sh new file mode 100644 index 00000000000..fb93df1179c --- /dev/null +++ b/scripts/generate_tarball.sh @@ -0,0 +1,40 @@ +#! 
/bin/bash + +NAME=$1 +EOS_PREFIX=${PREFIX}/${SUBPREFIX} +mkdir -p ${PREFIX}/bin/ +#mkdir -p ${PREFIX}/lib/cmake/${PROJECT} +mkdir -p ${EOS_PREFIX}/bin +#mkdir -p ${EOS_PREFIX}/include +#mkdir -p ${EOS_PREFIX}/lib/cmake/${PROJECT} +#mkdir -p ${EOS_PREFIX}/cmake +#mkdir -p ${EOS_PREFIX}/scripts + +# install binaries +cp -R ${BUILD_DIR}/bin/* ${EOS_PREFIX}/bin + +# install libraries +#cp -R ${BUILD_DIR}/lib/* ${EOS_PREFIX}/lib + +# install cmake modules +#sed "s/_PREFIX_/\/${SPREFIX}/g" ${BUILD_DIR}/modules/EosioTesterPackage.cmake &> ${EOS_PREFIX}/lib/cmake/${PROJECT}/EosioTester.cmake +#sed "s/_PREFIX_/\/${SPREFIX}\/${SSUBPREFIX}/g" ${BUILD_DIR}/modules/${PROJECT}-config.cmake.package &> ${EOS_PREFIX}/lib/cmake/${PROJECT}/${PROJECT}-config.cmake + +# install includes +#cp -R ${BUILD_DIR}/include/* ${EOS_PREFIX}/include + +# make symlinks +#pushd ${PREFIX}/lib/cmake/${PROJECT} &> /dev/null +#ln -sf ../../../${SUBPREFIX}/lib/cmake/${PROJECT}/${PROJECT}-config.cmake ${PROJECT}-config.cmake +#ln -sf ../../../${SUBPREFIX}/lib/cmake/${PROJECT}/EosioTester.cmake EosioTester.cmake +#popd &> /dev/null + +pushd ${PREFIX}/bin &> /dev/null +for f in `ls ${BUILD_DIR}/bin/`; do + bn=$(basename $f) + ln -sf ../${SUBPREFIX}/bin/$bn $bn +done +popd &> /dev/null + +tar -cvzf $NAME ./${PREFIX}/* +rm -r ${PREFIX} From 1e236159c3f9f7d78eb95e34e3e282d199ebc816 Mon Sep 17 00:00:00 2001 From: Issay <5457407+issayTseng@users.noreply.github.com> Date: Mon, 15 Oct 2018 14:05:33 +0800 Subject: [PATCH 142/161] duplicated error message There is a duplicated error message for database_exception and guard_exception. Maybe if the error were exposed to users like developer, they may be confused about what really happens without checking the exact error code. 
--- libraries/chain/include/eosio/chain/exceptions.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 91467e746f6..3f6bad48215 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -230,7 +230,7 @@ namespace eosio { namespace chain { 3060004, "Contract Query Exception" ) FC_DECLARE_DERIVED_EXCEPTION( guard_exception, database_exception, - 3060100, "Database exception" ) + 3060100, "Guard Exception" ) FC_DECLARE_DERIVED_EXCEPTION( database_guard_exception, guard_exception, 3060101, "Database usage is at unsafe levels" ) From e779c1906025ea38ec0cb463d7a62197c175b167 Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Mon, 15 Oct 2018 19:53:34 +0900 Subject: [PATCH 143/161] Add support extended_asset initialization with '@' --- libraries/chain/asset.cpp | 18 ++++++++++++++++++ libraries/chain/include/eosio/chain/asset.hpp | 18 ++++++++++++++---- 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/libraries/chain/asset.cpp b/libraries/chain/asset.cpp index 142e04cffa3..fde2c635eb3 100644 --- a/libraries/chain/asset.cpp +++ b/libraries/chain/asset.cpp @@ -79,4 +79,22 @@ asset asset::from_string(const string& from) FC_CAPTURE_LOG_AND_RETHROW( (from) ) } +string extended_asset::to_string()const { + return quantity.to_string() + "@" + contract.to_string(); +} + +extended_asset extended_asset::from_string(const string& from) +{ try { + auto s = fc::trim(from); + + // Find at sign in order to split asset and contract + auto at_pos = s.find('@'); + EOS_ASSERT((at_pos != string::npos), asset_type_exception, "Extended asset's asset and contract should be separated with '@'"); + + auto asset_str = s.substr(0, at_pos); + auto contract_str = fc::trim(s.substr(at_pos + 1)); + + return extended_asset(asset::from_string(asset_str), name(contract_str)); +} 
FC_CAPTURE_LOG_AND_RETHROW( (from) ) } + } } // eosio::types diff --git a/libraries/chain/include/eosio/chain/asset.hpp b/libraries/chain/include/eosio/chain/asset.hpp index 2c1f2bb4fc3..a973edbde91 100644 --- a/libraries/chain/include/eosio/chain/asset.hpp +++ b/libraries/chain/include/eosio/chain/asset.hpp @@ -96,10 +96,13 @@ struct asset }; struct extended_asset { - extended_asset(){} - extended_asset( asset a, name n ):quantity(a),contract(n){} - asset quantity; - name contract; + extended_asset(){} + extended_asset( asset a, name n ):quantity(a),contract(n){} + asset quantity; + name contract; + + static extended_asset from_string(const string& from); + string to_string()const; }; bool operator < (const asset& a, const asset& b); @@ -114,5 +117,12 @@ inline void from_variant(const fc::variant& var, eosio::chain::asset& vo) { } } +namespace fc { +inline void to_variant(const eosio::chain::extended_asset& var, fc::variant& vo) { vo = var.to_string(); } +inline void from_variant(const fc::variant& var, eosio::chain::extended_asset& vo) { + vo = eosio::chain::extended_asset::from_string(var.get_string()); +} +} + FC_REFLECT(eosio::chain::asset, (amount)(sym)) FC_REFLECT(eosio::chain::extended_asset, (quantity)(contract) ) From ed3df78711703fd30fd43f8b32be33c9891c5018 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Mon, 15 Oct 2018 11:16:57 -0400 Subject: [PATCH 144/161] fix a few bugs with empty sections and empty table types --- libraries/chain/controller.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 9d471508da6..faa0f6e46d1 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -463,7 +463,7 @@ struct controller_impl { void read_contract_tables_from_snapshot( const snapshot_reader_ptr& snapshot ) { snapshot->read_section("contract_tables", [this]( auto& section ) { - bool more = true; + bool more = !section.empty(); while (more) { // 
read the row for the table table_id_object::id_type t_id; @@ -477,7 +477,7 @@ struct controller_impl { using utils_t = decltype(utils); unsigned_int size; - section.read_row(size, db); + more = section.read_row(size, db); for (size_t idx = 0; idx < size.value; idx++) { utils_t::create(db, [this, §ion, &more, &t_id](auto& row) { From 40d6a4d0ff4ae7019b07793e8e7955a3fe409870 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 15 Oct 2018 15:33:55 -0400 Subject: [PATCH 145/161] added optional param for get_account --- plugins/chain_plugin/chain_plugin.cpp | 5 ++++- .../include/eosio/chain_plugin/chain_plugin.hpp | 6 ++++-- programs/cleos/main.cpp | 16 ++++++++++++---- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index cb5a7ee8325..0fe1f9d479c 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1650,6 +1650,9 @@ read_only::get_account_results read_only::get_account( const get_account_params& auto core_symbol = extract_core_symbol(); + if (params.expected_core_symbol.valid()) + core_symbol = *(params.expected_core_symbol); + const auto* t_id = d.find(boost::make_tuple( token_code, params.account_name, N(accounts) )); if( t_id != nullptr ) { const auto &idx = d.get_index(); @@ -1783,7 +1786,7 @@ namespace detail { } chain::symbol read_only::extract_core_symbol()const { - symbol core_symbol; // Default to CORE_SYMBOL if the appropriate data structure cannot be found in the system contract table data + symbol core_symbol(0); // The following code makes assumptions about the contract deployed on eosio account (i.e. the system contract) and how it stores its data. 
const auto& d = db.db(); diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 89ef3fbeeb7..0b69a6af89a 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -36,6 +36,7 @@ namespace eosio { using fc::optional; using boost::container::flat_set; using chain::asset; + using chain::symbol; using chain::authority; using chain::account_name; using chain::action_name; @@ -137,7 +138,8 @@ class read_only { }; struct get_account_params { - name account_name; + name account_name; + optional expected_core_symbol; }; get_account_results get_account( const get_account_params& params )const; @@ -697,7 +699,7 @@ FC_REFLECT( eosio::chain_apis::read_only::get_account_results, FC_REFLECT( eosio::chain_apis::read_only::get_code_results, (account_name)(code_hash)(wast)(wasm)(abi) ) FC_REFLECT( eosio::chain_apis::read_only::get_code_hash_results, (account_name)(code_hash) ) FC_REFLECT( eosio::chain_apis::read_only::get_abi_results, (account_name)(abi) ) -FC_REFLECT( eosio::chain_apis::read_only::get_account_params, (account_name) ) +FC_REFLECT( eosio::chain_apis::read_only::get_account_params, (account_name)(expected_core_symbol) ) FC_REFLECT( eosio::chain_apis::read_only::get_code_params, (account_name)(code_as_wasm) ) FC_REFLECT( eosio::chain_apis::read_only::get_code_hash_params, (account_name) ) FC_REFLECT( eosio::chain_apis::read_only::get_abi_params, (account_name) ) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 1f36bd20fe0..1f6ba8cf57e 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -1466,10 +1466,16 @@ struct canceldelay_subcommand { } }; -void get_account( const string& accountName, bool json_format ) { - auto json = call(get_account_func, fc::mutable_variant_object("account_name", accountName)); - auto res = json.as(); +void get_account( 
const string& accountName, const string& coresym, bool json_format ) { + fc::variant json; + if (coresym.empty()) { + json = call(get_account_func, fc::mutable_variant_object("account_name", accountName)); + } + else { + json = call(get_account_func, fc::mutable_variant_object("account_name", accountName)("expected_core_symbol", symbol::from_string(coresym))); + } + auto res = json.as(); if (!json_format) { asset staked; asset unstaking; @@ -1912,11 +1918,13 @@ int main( int argc, char** argv ) { // get account string accountName; + string coresym; bool print_json; auto getAccount = get->add_subcommand("account", localized("Retrieve an account from the blockchain"), false); getAccount->add_option("name", accountName, localized("The name of the account to retrieve"))->required(); + getAccount->add_option("core-symbol", coresym, localized("The expected core symbol of the chain you are querying")); getAccount->add_flag("--json,-j", print_json, localized("Output in JSON format") ); - getAccount->set_callback([&]() { get_account(accountName, print_json); }); + getAccount->set_callback([&]() { get_account(accountName, coresym, print_json); }); // get code string codeFilename; From 95068b0300ade7b924d7c9f8b82dbf98ea8d9f17 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 15 Oct 2018 15:47:33 -0400 Subject: [PATCH 146/161] updated generate_bottle.sh --- scripts/generate_bottle.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/generate_bottle.sh b/scripts/generate_bottle.sh index f56be136acd..9f2a2913dfb 100644 --- a/scripts/generate_bottle.sh +++ b/scripts/generate_bottle.sh @@ -1,10 +1,10 @@ #! 
/bin/bash -VERS=`sw_vers -productVersion | awk '10\.13\..*/{print $0}'` -if [-z $VERS]; +VERS=`sw_vers -productVersion | awk '/10\.13\..*/{print $0}'` +if [[ -z "$VERS" ]]; then - VERS=`sw_vers -productVersion | awk '10\.14\..*/{print $0}'` - if [-z $VERS]; + VERS=`sw_vers -productVersion | awk '/10\.14\..*/{print $0}'` + if [[ -z "$VERS" ]]; then echo "Error, unsupported OS X version" exit -1 From 10ca4e6bcad1dd7a930fae7310a76013adae5bf2 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Mon, 15 Oct 2018 16:38:50 -0400 Subject: [PATCH 147/161] bump `libraries/fc` to `master` with changes --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 4e59c679777..3e5ce84852f 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 4e59c6797777d3d9a226ac214701a08f52be4451 +Subproject commit 3e5ce84852f32dce576f2b8d30365326b71c91e2 From 25e1b61bd0b76966edf61401a485fdd61b86e468 Mon Sep 17 00:00:00 2001 From: Zane Reynolds Date: Mon, 15 Oct 2018 17:08:34 -0400 Subject: [PATCH 148/161] removed the checkboxes adn put some instructions closer to where you fill them out --- .github/PULL_REQUEST_TEMPLATE.md | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 8a745912d67..9ffbb5ffb6b 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,18 +1,9 @@ - - + **Change Description** - -- [ ] Bug fix -- [ ] User experience enhancement -- [ ] New functionality -- [ ] RPC -- [ ] Deprecation -- [ ] Refactoring - - + **Consensus Changes** From ee81146136f69a122a37b4d5d78d12bb5934624b Mon Sep 17 00:00:00 2001 From: wanderingbort Date: Mon, 15 Oct 2018 18:23:27 -0400 Subject: [PATCH 149/161] Revert "Add support extended_asset initialization with '@'" --- libraries/chain/asset.cpp | 18 ------------------ libraries/chain/include/eosio/chain/asset.hpp | 18 ++++-------------- 2 files 
changed, 4 insertions(+), 32 deletions(-) diff --git a/libraries/chain/asset.cpp b/libraries/chain/asset.cpp index fde2c635eb3..142e04cffa3 100644 --- a/libraries/chain/asset.cpp +++ b/libraries/chain/asset.cpp @@ -79,22 +79,4 @@ asset asset::from_string(const string& from) FC_CAPTURE_LOG_AND_RETHROW( (from) ) } -string extended_asset::to_string()const { - return quantity.to_string() + "@" + contract.to_string(); -} - -extended_asset extended_asset::from_string(const string& from) -{ try { - auto s = fc::trim(from); - - // Find at sign in order to split asset and contract - auto at_pos = s.find('@'); - EOS_ASSERT((at_pos != string::npos), asset_type_exception, "Extended asset's asset and contract should be separated with '@'"); - - auto asset_str = s.substr(0, at_pos); - auto contract_str = fc::trim(s.substr(at_pos + 1)); - - return extended_asset(asset::from_string(asset_str), name(contract_str)); -} FC_CAPTURE_LOG_AND_RETHROW( (from) ) } - } } // eosio::types diff --git a/libraries/chain/include/eosio/chain/asset.hpp b/libraries/chain/include/eosio/chain/asset.hpp index a973edbde91..2c1f2bb4fc3 100644 --- a/libraries/chain/include/eosio/chain/asset.hpp +++ b/libraries/chain/include/eosio/chain/asset.hpp @@ -96,13 +96,10 @@ struct asset }; struct extended_asset { - extended_asset(){} - extended_asset( asset a, name n ):quantity(a),contract(n){} - asset quantity; - name contract; - - static extended_asset from_string(const string& from); - string to_string()const; + extended_asset(){} + extended_asset( asset a, name n ):quantity(a),contract(n){} + asset quantity; + name contract; }; bool operator < (const asset& a, const asset& b); @@ -117,12 +114,5 @@ inline void from_variant(const fc::variant& var, eosio::chain::asset& vo) { } } -namespace fc { -inline void to_variant(const eosio::chain::extended_asset& var, fc::variant& vo) { vo = var.to_string(); } -inline void from_variant(const fc::variant& var, eosio::chain::extended_asset& vo) { - vo = 
eosio::chain::extended_asset::from_string(var.get_string()); -} -} - FC_REFLECT(eosio::chain::asset, (amount)(sym)) FC_REFLECT(eosio::chain::extended_asset, (quantity)(contract) ) From de4b8d7227c7bfffae3fb0329b65ba03b883ee3d Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Mon, 15 Oct 2018 18:25:53 -0400 Subject: [PATCH 150/161] add basic application level versioning so that the structure of the snapshot datafile and the structure of the snapshotted data are versioned separately --- libraries/chain/controller.cpp | 12 +++++++ .../include/eosio/chain/chain_snapshot.hpp | 34 +++++++++++++++++++ 2 files changed, 46 insertions(+) create mode 100644 libraries/chain/include/eosio/chain/chain_snapshot.hpp diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index faa0f6e46d1..c2063c1316c 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -513,6 +514,10 @@ struct controller_impl { } void add_to_snapshot( const snapshot_writer_ptr& snapshot ) const { + snapshot->write_section([this]( auto §ion ){ + section.add_row(chain_snapshot_header(), db); + }); + snapshot->write_section([this]( auto §ion ){ section.add_row(conf.genesis, db); }); @@ -543,6 +548,13 @@ struct controller_impl { } void read_from_snapshot( const snapshot_reader_ptr& snapshot ) { + snapshot->read_section([this]( auto §ion ){ + chain_snapshot_header header; + section.read_row(header, db); + header.validate(); + }); + + snapshot->read_section([this]( auto §ion ){ block_header_state head_header_state; section.read_row(head_header_state, db); diff --git a/libraries/chain/include/eosio/chain/chain_snapshot.hpp b/libraries/chain/include/eosio/chain/chain_snapshot.hpp new file mode 100644 index 00000000000..7174e69c5b9 --- /dev/null +++ b/libraries/chain/include/eosio/chain/chain_snapshot.hpp @@ -0,0 +1,34 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#pragma once + +#include + 
+namespace eosio { namespace chain { + +struct chain_snapshot_header { + /** + * Version history + * 1: initial version + */ + + static constexpr uint32_t minimum_compatible_version = 1; + static constexpr uint32_t current_version = 1; + + uint32_t version = current_version; + + void validate() const { + auto min = minimum_compatible_version; + auto max = current_version; + EOS_ASSERT(version >= min && version <= max, + snapshot_validation_exception, + "Unsupported version of chain snapshot: ${version}. Supported version must be between ${min} and ${max} inclusive.", + ("version",version)("min",min)("max",max)); + } +}; + +} } + +FC_REFLECT(eosio::chain::chain_snapshot_header,(version)) \ No newline at end of file From 37cac202f432c6e3eda313d9ab485c1398fe6e7b Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 15 Oct 2018 18:26:35 -0400 Subject: [PATCH 151/161] Added support for cmake find_package --- CMakeLists.txt | 12 +++- CMakeModules/EosioTester.cmake.in | 1 + CMakeModules/EosioTesterBuild.cmake.in | 1 + CMakeModules/eosio-config.cmake.in | 96 ++++++++++++++++++++++++++ eosio_install.sh | 8 +++ 5 files changed, 115 insertions(+), 3 deletions(-) create mode 100644 CMakeModules/eosio-config.cmake.in diff --git a/CMakeLists.txt b/CMakeLists.txt index e3e9191d930..d6243f24733 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -228,9 +228,15 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/testnet.template ${CMAKE_CURRENT_BINA configure_file(${CMAKE_CURRENT_SOURCE_DIR}/eosio.version.in ${CMAKE_CURRENT_BINARY_DIR}/eosio.version.hpp) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/eosio.version.hpp DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules/EosioTester.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/EosioTester.cmake @ONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules/EosioTesterBuild.cmake.in ${CMAKE_BINARY_DIR}/lib/cmake/EosioTester.cmake @ONLY) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/EosioTester.cmake 
DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/cmake/) +set(EOS_ROOT_DIR ${CMAKE_BINARY_DIR}) +configure_file(${CMAKE_SOURCE_DIR}/CMakeModules/eosio-config.cmake.in ${CMAKE_BINARY_DIR}/lib/cmake/eosio/eosio-config.cmake @ONLY) +configure_file(${CMAKE_SOURCE_DIR}/CMakeModules/EosioTesterBuild.cmake.in ${CMAKE_BINARY_DIR}/lib/cmake/eosio/EosioTester.cmake @ONLY) + +set(EOS_ROOT_DIR ${CMAKE_INSTALL_PREFIX}) +configure_file(${CMAKE_SOURCE_DIR}/CMakeModules/eosio-config.cmake.in ${CMAKE_BINARY_DIR}/modules/eosio-config.cmake @ONLY) +install(FILES ${CMAKE_BINARY_DIR}/modules/eosio-config.cmake DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/cmake/eosio) +configure_file(${CMAKE_SOURCE_DIR}/CMakeModules/EosioTester.cmake.in ${CMAKE_BINARY_DIR}/modules/EosioTester.cmake @ONLY) +install(FILES ${CMAKE_BINARY_DIR}/modules/EosioTester.cmake DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/cmake/eosio) install(FILES LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/) install(FILES libraries/wabt/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.wabt) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 61e25936b82..f47743fe5cb 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -1,4 +1,5 @@ cmake_minimum_required( VERSION 3.5 ) +message(STATUS "Setting up Eosio Tester @VERSION_MAJOR@.@VERSION_MINOR@.@VERSION_PATCH@ at @EOS_ROOT_DIR@") set(CMAKE_CXX_COMPILER @CMAKE_CXX_COMPILER@) set(CMAKE_C_COMPILER @CMAKE_C_COMPILER@) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index be26054cc57..5618fe0d149 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -1,4 +1,5 @@ cmake_minimum_required( VERSION 3.5 ) +message(STATUS "Setting up Eosio Tester @VERSION_MAJOR@.@VERSION_MINOR@.@VERSION_PATCH@ at @EOS_ROOT_DIR@") set(CMAKE_CXX_COMPILER @CMAKE_CXX_COMPILER@) set(CMAKE_C_COMPILER 
@CMAKE_C_COMPILER@) diff --git a/CMakeModules/eosio-config.cmake.in b/CMakeModules/eosio-config.cmake.in new file mode 100644 index 00000000000..97de49c4568 --- /dev/null +++ b/CMakeModules/eosio-config.cmake.in @@ -0,0 +1,96 @@ +if(EOSIO_ROOT STREQUAL "" OR NOT EOSIO_ROOT) + set(EOSIO_ROOT "@EOS_ROOT_DIR@") +endif() +list(APPEND CMAKE_MODULE_PATH ${EOSIO_ROOT}/lib/cmake/eosio) +include(EosioTester) + +function(EXTRACT_MAJOR_MINOR_FROM_VERSION version success major minor) + string(REGEX REPLACE "^([0-9]+)\\..+$" "\\1" _major "${version}") + if("${_major}" STREQUAL "${version}") + set(${success} FALSE PARENT_SCOPE) + return() + endif() + + string(REGEX REPLACE "^[0-9]+\\.([0-9]+)(\\..*)?$" "\\1" _minor "${version}") + if("${_minor}" STREQUAL "${version}") + set(success FALSE PARENT_SCOPE) + return() + endif() + + set(${major} ${_major} PARENT_SCOPE) + set(${minor} ${_minor} PARENT_SCOPE) + set(${success} TRUE PARENT_SCOPE) +endfunction(EXTRACT_MAJOR_MINOR_FROM_VERSION) + +function(EOSIO_CHECK_VERSION output version hard_min soft_max hard_max) # optional 6th argument for error message + set(${output} "INVALID" PARENT_SCOPE) + + EXTRACT_MAJOR_MINOR_FROM_VERSION("${version}" success major minor) + if(NOT success) + if(${ARGC} GREATER 5) + set(${ARGV5} "version '${version}' is invalid" PARENT_SCOPE) + endif() + return() + endif() + + EXTRACT_MAJOR_MINOR_FROM_VERSION("${hard_min}" success hard_min_major hard_min_minor) + if(NOT success) + if(${ARGC} GREATER 5) + set(${ARGV5} "hard minimum version '${hard_min}' is invalid" PARENT_SCOPE) + endif() + return() + endif() + + if( "${major}.${minor}" VERSION_LESS "${hard_min_major}.${hard_min_minor}" ) + set(${output} "MISMATCH" PARENT_SCOPE) + if(${ARGC} GREATER 5) + set(${ARGV5} "version '${version}' does not meet hard minimum version requirement of ${hard_min_major}.${hard_min_minor}" PARENT_SCOPE) + endif() + return() + endif() + + if(NOT hard_max STREQUAL "") + EXTRACT_MAJOR_MINOR_FROM_VERSION("${hard_max}" success 
hard_max_major hard_max_minor) + if(NOT success) + if(${ARGC} GREATER 5) + set(${ARGV5} "hard maximum version '${hard_max}' is invalid" PARENT_SCOPE) + endif() + return() + endif() + + if( "${major}.${minor}" VERSION_GREATER "${hard_max_major}.${hard_max_minor}" ) + set(${output} "MISMATCH" PARENT_SCOPE) + if(${ARGC} GREATER 5) + set(${ARGV5} "version '${version}' does not meet hard maximum version requirement of ${hard_max_major}.${hard_max_minor}" PARENT_SCOPE) + endif() + return() + endif() + endif() + + EXTRACT_MAJOR_MINOR_FROM_VERSION("${soft_max}" success soft_max_major soft_max_minor) + if(NOT success) + set(${output} "MISMATCH" PARENT_SCOPE) + if(${ARGC} GREATER 5) + set(${ARGV5} "soft maximum version '${soft_max}' is invalid" PARENT_SCOPE) + endif() + return() + endif() + + if( ${major} GREATER ${soft_max_major} ) + set(${output} "MISMATCH" PARENT_SCOPE) + if(${ARGC} GREATER 5) + set(${ARGV5} "version '${version}' must have the same major version as the soft maximum version (${soft_max_major})" PARENT_SCOPE) + endif() + return() + endif() + + if( "${major}.${minor}" VERSION_GREATER "${soft_max_major}.${soft_max_minor}" ) + set(${output} "WARN" PARENT_SCOPE) + if(${ARGC} GREATER 5) + set(${ARGV5} "version '${version}' matches requirements but is greater than the soft maximum version of ${soft_max_major}.${soft_max_minor}" PARENT_SCOPE) + endif() + return() + endif() + + set(${output} "MATCH" PARENT_SCOPE) +endfunction(EOSIO_CHECK_VERSION) diff --git a/eosio_install.sh b/eosio_install.sh index e442ce91373..9ed195df7d0 100755 --- a/eosio_install.sh +++ b/eosio_install.sh @@ -57,6 +57,13 @@ fi popd &> /dev/null } + create_cmake_symlink() { + mkdir -p /usr/local/lib/cmake/eosio + pushd /usr/local/lib/cmake/eosio &> /dev/null + ln -sf ../../../eosio/lib/cmake/eosio/$1 $1 + popd &> /dev/null + } + install_symlinks() { printf "\\n\\tInstalling EOSIO Binary Symlinks\\n\\n" create_symlink "cleos" @@ -94,6 +101,7 @@ fi popd &> /dev/null install_symlinks + 
create_cmake_symlink "eosio-config.cmake" printf "\n\n${bldred}\t _______ _______ _______ _________ _______\n" printf '\t( ____ \( ___ )( ____ \\\\__ __/( ___ )\n' From 7d750e1a2c991fa68181bdebd1c224e1cb515c6f Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Mon, 15 Oct 2018 18:27:22 -0400 Subject: [PATCH 152/161] remove unused variable --- libraries/chain/block_log.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index 4cdcba04b1e..dc769cc9612 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -246,7 +246,7 @@ namespace eosio { namespace chain { my->block_stream.write((char*)&totem, sizeof(totem)); if (first_block) { - auto ret = append(first_block); + append(first_block); } auto pos = my->block_stream.tellp(); From b3ef2b6814e1131cc3338edd1c15e5f8d75a5185 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Tue, 16 Oct 2018 00:48:23 -0400 Subject: [PATCH 153/161] added deps and licenses --- CMakeLists.txt | 16 +++++++++++++++- scripts/generate_bottle.sh | 15 +++++---------- scripts/generate_deb.sh | 5 +++++ scripts/generate_rpm.sh | 1 + scripts/generate_tarball.sh | 4 ++++ 5 files changed, 30 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0ff42a412a4..608a3268489 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -232,6 +232,21 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules/EosioTester.cmake.in ${C configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules/EosioTesterBuild.cmake.in ${CMAKE_BINARY_DIR}/lib/cmake/EosioTester.cmake @ONLY) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/EosioTester.cmake DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/cmake/) +configure_file(${CMAKE_SOURCE_DIR}/LICENSE + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/libraries/wabt/LICENSE + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.wabt COPYONLY) 
+configure_file(${CMAKE_SOURCE_DIR}/libraries/softfloat/COPYING.txt + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.softfloat COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/libraries/wasm-jit/LICENSE + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.wavm COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/libraries/fc/secp256k1/upstream/COPYING + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.secp256k1 COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/libraries/fc/src/network/LICENSE.go + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.go COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/externals/binaryen/LICENSE + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.binaryen COPYONLY) + install(FILES LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/) install(FILES libraries/wabt/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.wabt) install(FILES libraries/softfloat/COPYING.txt DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.softfloat) @@ -240,6 +255,5 @@ install(FILES libraries/fc/secp256k1/upstream/COPYING DESTINATION ${CMAKE_INSTAL install(FILES externals/binaryen/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.binaryen) install(FILES libraries/fc/src/network/LICENSE.go DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ ) -#include(installer) include(package) include(doxygen) diff --git a/scripts/generate_bottle.sh b/scripts/generate_bottle.sh index 9f2a2913dfb..fbec0a7a340 100644 --- a/scripts/generate_bottle.sh +++ b/scripts/generate_bottle.sh @@ -41,16 +41,11 @@ echo "class Eosio < Formula option :universal - depends_on \"cmake\" => :build - depends_on \"automake\" => :build - depends_on \"libtool\" => :build - depends_on \"wget\" => :build - depends_on \"gmp\" => :build - depends_on \"gettext\" => :build - depends_on \"doxygen\" => :build - depends_on \"graphviz\" => :build - depends_on \"lcov\" => :build - depends_on :xcode => :build + depends_on \"gmp\" + 
depends_on \"gettext\" + depends_on \"openssl\" + depends_on \"gmp\" + depends_on :xcode depends_on :macos => :high_sierra depends_on :arch => :intel diff --git a/scripts/generate_deb.sh b/scripts/generate_deb.sh index 5ca57978cbd..e52d4527316 100644 --- a/scripts/generate_deb.sh +++ b/scripts/generate_deb.sh @@ -6,11 +6,16 @@ SPREFIX=${PREFIX} SUBPREFIX="opt/${PROJECT}/${VERSION}" SSUBPREFIX="opt\/${PROJECT}\/${VERSION}" +DEPS_STR="" +for dep in "${DEPS[@]}"; do + DEPS_STR="${DEPS_STR} Depends: ${dep}" +done mkdir -p ${PROJECT}/DEBIAN echo "Package: ${PROJECT} Version: ${VERSION} Section: devel Priority: optional +Depends: libbz2-dev (>= 1.0), libssl-dev (>= 1.0), libgmp3-dev, build-essential, libicu-dev, zlib1g-dev Architecture: amd64 Homepage: ${URL} Maintainer: ${EMAIL} diff --git a/scripts/generate_rpm.sh b/scripts/generate_rpm.sh index bc5923b25a5..5e9be4f5149 100644 --- a/scripts/generate_rpm.sh +++ b/scripts/generate_rpm.sh @@ -30,6 +30,7 @@ Version: ${VERSION}.x86_64 License: MIT Vendor: ${VENDOR} Source: ${URL} +Requires: openssl-devel.x86_64, gmp-devel.x86_64, libstdc++-devel.x86_64, bzip2.x86_64, bzip2-devel.x86_64, mongodb.x86_64, mongodb-server.x86_64 URL: ${URL} Packager: ${VENDOR} <${EMAIL}> Summary: ${DESC} diff --git a/scripts/generate_tarball.sh b/scripts/generate_tarball.sh index fb93df1179c..675f30b4af7 100644 --- a/scripts/generate_tarball.sh +++ b/scripts/generate_tarball.sh @@ -5,6 +5,7 @@ EOS_PREFIX=${PREFIX}/${SUBPREFIX} mkdir -p ${PREFIX}/bin/ #mkdir -p ${PREFIX}/lib/cmake/${PROJECT} mkdir -p ${EOS_PREFIX}/bin +mkdir -p ${EOS_PREFIX}/licenses/eosio #mkdir -p ${EOS_PREFIX}/include #mkdir -p ${EOS_PREFIX}/lib/cmake/${PROJECT} #mkdir -p ${EOS_PREFIX}/cmake @@ -13,6 +14,9 @@ mkdir -p ${EOS_PREFIX}/bin # install binaries cp -R ${BUILD_DIR}/bin/* ${EOS_PREFIX}/bin +# install licenses +cp -R ${BUILD_DIR}/licenses/eosio/* ${EOS_PREFIX}/licenses + # install libraries #cp -R ${BUILD_DIR}/lib/* ${EOS_PREFIX}/lib From 
335054e42b012850dd7741966f774e57b64d80e9 Mon Sep 17 00:00:00 2001 From: liuyujun Date: Tue, 16 Oct 2018 16:58:38 +0800 Subject: [PATCH 154/161] remove unused code --- .../include/eosio/net_plugin/protocol.hpp | 17 ++++++----------- plugins/net_plugin/net_plugin.cpp | 9 --------- 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp index 586c4ae4aa6..a736a9ff464 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp @@ -79,17 +79,12 @@ namespace eosio { fc::sha256 node_id; ///< for duplicate notification }; - typedef std::chrono::system_clock::duration::rep tstamp; - typedef int32_t tdist; - - static_assert(sizeof(std::chrono::system_clock::duration::rep) >= 8, "system_clock is expected to be at least 64 bits"); - - struct time_message { - tstamp org; //!< origin timestamp - tstamp rec; //!< receive timestamp - tstamp xmt; //!< transmit timestamp - mutable tstamp dst; //!< destination timestamp - }; + struct time_message { + tstamp org; //!< origin timestamp + tstamp rec; //!< receive timestamp + tstamp xmt; //!< transmit timestamp + mutable tstamp dst; //!< destination timestamp + }; enum id_list_modes { none, diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 28ee6f47d83..1a862d9530e 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -63,15 +63,6 @@ namespace eosio { using net_message_ptr = shared_ptr; - template - std::string itoh(I n, size_t hlen = sizeof(I)<<1) { - static const char* digits = "0123456789abcdef"; - std::string r(hlen, '0'); - for(size_t i = 0, j = (hlen - 1) * 4 ; i < hlen; ++i, j -= 4) - r[i] = digits[(n>>j) & 0x0f]; - return r; - } - struct node_transaction_state { transaction_id_type id; time_point_sec expires; /// time after which this may be purged. 
From 45fe8b853db368c3e32d21b1caeaed334c60cd28 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 16 Oct 2018 10:41:55 -0400 Subject: [PATCH 155/161] Remove skip_trx_checks() from checktime hot path Checktime can easily be called over 100000 times a block. The benign looking skip_trx_checks() done in checktime ends up running quite a bit of code during contract execution. Refactor the flow a little bit to rely on the new deadline_timer. if skip_trx_checks() is true, never start the deadline_timer, expired will always be 0, and one branch is removed from checktime() --- libraries/chain/transaction_context.cpp | 59 +++++++++++++------------ 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index a352e82a246..476a78d982b 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -268,7 +268,10 @@ namespace bacc = boost::accumulators; checktime(); // Fail early if deadline has already been exceeded - _deadline_timer.start(_deadline); + if(control.skip_trx_checks()) + _deadline_timer.expired = 0; + else + _deadline_timer.start(_deadline); is_initialized = true; } @@ -430,36 +433,34 @@ namespace bacc = boost::accumulators; } void transaction_context::checktime()const { - if (!control.skip_trx_checks()) { - if(BOOST_LIKELY(_deadline_timer.expired == false)) - return; - auto now = fc::time_point::now(); - if( BOOST_UNLIKELY( now > _deadline ) ) { - // edump((now-start)(now-pseudo_start)); - if( explicit_billed_cpu_time || deadline_exception_code == deadline_exception::code_value ) { - EOS_THROW( deadline_exception, "deadline exceeded", ("now", now)("deadline", _deadline)("start", start) ); - } else if( deadline_exception_code == block_cpu_usage_exceeded::code_value ) { - EOS_THROW( block_cpu_usage_exceeded, - "not enough time left in block to complete executing transaction", - 
("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); - } else if( deadline_exception_code == tx_cpu_usage_exceeded::code_value ) { - if (cpu_limit_due_to_greylist) { - EOS_THROW( greylist_cpu_usage_exceeded, - "greylisted transaction was executing for too long", - ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); - } else { - EOS_THROW( tx_cpu_usage_exceeded, - "transaction was executing for too long", - ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); - } - } else if( deadline_exception_code == leeway_deadline_exception::code_value ) { - EOS_THROW( leeway_deadline_exception, - "the transaction was unable to complete by deadline, " - "but it is possible it could have succeeded if it were allowed to run to completion", - ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); + if(BOOST_LIKELY(_deadline_timer.expired == false)) + return; + auto now = fc::time_point::now(); + if( BOOST_UNLIKELY( now > _deadline ) ) { + // edump((now-start)(now-pseudo_start)); + if( explicit_billed_cpu_time || deadline_exception_code == deadline_exception::code_value ) { + EOS_THROW( deadline_exception, "deadline exceeded", ("now", now)("deadline", _deadline)("start", start) ); + } else if( deadline_exception_code == block_cpu_usage_exceeded::code_value ) { + EOS_THROW( block_cpu_usage_exceeded, + "not enough time left in block to complete executing transaction", + ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); + } else if( deadline_exception_code == tx_cpu_usage_exceeded::code_value ) { + if (cpu_limit_due_to_greylist) { + EOS_THROW( greylist_cpu_usage_exceeded, + "greylisted transaction was executing for too long", + ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); + } else { + EOS_THROW( tx_cpu_usage_exceeded, + "transaction was executing 
for too long", + ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); } - EOS_ASSERT( false, transaction_exception, "unexpected deadline exception code" ); + } else if( deadline_exception_code == leeway_deadline_exception::code_value ) { + EOS_THROW( leeway_deadline_exception, + "the transaction was unable to complete by deadline, " + "but it is possible it could have succeeded if it were allowed to run to completion", + ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); } + EOS_ASSERT( false, transaction_exception, "unexpected deadline exception code" ); } } From 40b2ea8ba72a58dbac8129c71fb19aefec23774f Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 16 Oct 2018 12:08:16 -0500 Subject: [PATCH 156/161] Added version of distributed-transaction-test that uses bnet plugin and renamed bnet versions so individual tests could be run without their bnet versions when using ctest -R. GH #4906 --- tests/CMakeLists.txt | 16 ++++++++++------ tests/distributed-transactions-test.py | 7 ++++--- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 80e17845781..3ae4273930f 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -50,10 +50,12 @@ add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_ou add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME nodeos_sanity_bnet_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_sanity_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY 
${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME bnet_nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST bnet_nodeos_run_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME nodeos_run_bnet_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_run_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) @@ -64,6 +66,8 @@ endif() add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME distributed-transactions-bnet-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 --p2p-plugin bnet -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST distributed-transactions-bnet-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -79,8 +83,8 @@ set_property(TEST launcher_test PROPERTY LABELS nonparallelizable_tests) # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v 
--sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME bnet_nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST bnet_nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_sanity_bnet_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_sanity_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_run_check_lr_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_check_lr_test PROPERTY LABELS long_running_tests) @@ -93,8 +97,8 @@ set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME bnet_nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9903 --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST bnet_nodeos_voting_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_voting_bnet_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9903 --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST 
nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index 546e93b0de4..5617ca6d372 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -10,8 +10,8 @@ Print=Utils.Print errorExit=Utils.errorExit -args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed" - ,"--dump-error-details","-v","--leave-running","--clean-run","--keep-logs"}) +args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed","--p2p-plugin" + ,"--dump-error-details","-v","--leave-running","--clean-run","--keep-logs"}) pnodes=args.p topo=args.s @@ -25,6 +25,7 @@ dumpErrorDetails=args.dump_error_details killAll=args.clean_run keepLogs=args.keep_logs +p2pPlugin=args.p2p_plugin killWallet=not dontKill killEosInstances=not dontKill @@ -62,7 +63,7 @@ (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay) is False: + if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") From e3e9852857bfa6d6f5bee5527facaac3608d5997 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 16 Oct 2018 13:57:43 -0400 Subject: [PATCH 157/161] Remove dice contract --- contracts/dice/CMakeLists.txt | 8 - contracts/dice/README.md | 269 --------------------- contracts/dice/dice.abi | 221 ------------------ contracts/dice/dice.cpp | 391 ------------------------------- unittests/dice_tests.cpp | 425 ---------------------------------- 5 files changed, 1314 deletions(-) delete mode 100644 contracts/dice/CMakeLists.txt delete mode 100644 contracts/dice/README.md delete mode 100644 contracts/dice/dice.abi delete mode 100644 contracts/dice/dice.cpp delete mode 100644 unittests/dice_tests.cpp diff --git a/contracts/dice/CMakeLists.txt 
b/contracts/dice/CMakeLists.txt deleted file mode 100644 index 3caf729a2d5..00000000000 --- a/contracts/dice/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -file(GLOB ABI_FILES "*.abi") -configure_file("${ABI_FILES}" "${CMAKE_CURRENT_BINARY_DIR}" COPYONLY) - -add_wast_executable(TARGET dice - INCLUDE_FOLDERS "${STANDARD_INCLUDE_FOLDERS}" - LIBRARIES libc++ libc eosiolib - DESTINATION_FOLDER ${CMAKE_CURRENT_BINARY_DIR} -) diff --git a/contracts/dice/README.md b/contracts/dice/README.md deleted file mode 100644 index 0edd7d5ca34..00000000000 --- a/contracts/dice/README.md +++ /dev/null @@ -1,269 +0,0 @@ -DICE ------------------ - -This contract implements a simple DICE game between two players with 50/50 odds of winning. - -Before playing all players deposit funds into their @dice account just like the @exchange contract - -1. Player 1 proposes to bet 1 EOS and submits SHA256(secret1) -2. Player 2 proposes to bet 1 EOS and submits SHA256(secret2) - -Because Player 1 and 2 bet equal amounts their orders are matched and the game begins. - -3. A Player reveales their secret -4. A 5 minute deadline starts whereby the first to reveal automatically wins unless the other player reveals -5. The other player reveals and a winner is chosen and paid based upon the value of sha256( cat(secret1,secret2) ) -6. After the deadline anyone can trigger a default claim and the rewards - - -Economic Incentive for Interface Developers ------------------ - -A variation on this game would be to add an additional information on offer creation that will get paid -a commission when the player wins. With this commission in place there is financial incentive for a -service provider to continue to execute the game in a timely manner as well as provide quality and -entertaining interfaces on top of this game. - - -Other Games ------------ -This same basic model can be used to build more robust games. - - -Potential Vulnerabilities -------- -1. Block Producers may exclude reveal transaction -2. 
Losers may force winner to wait 5 minutes to get rewards -3. Service providers may fail to auto-reveal on your behalf -4. You may lose internet connectivity mid-game -5. A blockhain reorganization could cause some havock if secrets are revealed too quickly - - @dice could protect users by rejecting reveals until a game creation is irreversible (about 45 seconds max) - - users could take risk themselves by deciding how many confirmations are required - - for small amounts it probably doesn't matter - - under normal operation of DPOS chains there are few if any chain reorganizations - - -Example game session using cleos -------- -#### Prerequisites -* Wallet must be unlock and have at least the following private keys - - **5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3** - **5Jmsawgsp1tQ3GD6JyGCwy1dcvqKZgX6ugMVMdjirx85iv5VyPR** - -##### Upload bios contract -````bash -cleos set contract eosio build/contracts/eosio.bios -p eosio -```` - -##### Ceate eosio.token account -````bash -cleos create account eosio eosio.token EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 -```` - -##### Set eosio.token contract to eosio.token account -````bash -cleos set contract eosio.token build/contracts/eosio.token -p eosio.token -```` - -##### Create dice account -````bash -cleos create account eosio dice EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 -```` - -##### Set dice contract to dice account -````bash -cleos set contract dice build/contracts/dice -p dice -```` - -##### Create native EOS token -````bash -cleos push action eosio.token create '[ "eosio", "1000000000.0000 EOS", 0, 0, 0]' -p eosio.token -```` - -##### Create alice account -````bash -cleos create account eosio alice EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 -```` - -##### Create bob account -````bash -cleos create account eosio 
bob EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4 -```` - -##### Issue 1000 EOS to alice -````bash -cleos push action eosio.token issue '[ "alice", "1000.0000 EOS", "" ]' -p eosio -```` - -##### Issue 1000 EOS to bob -````bash -cleos push action eosio.token issue '[ "bob", "1000.0000 EOS", "" ]' -p eosio -```` - -##### Allow dice contract to make transfers on alice behalf (deposit) -````bash -cleos set account permission alice active '{"threshold": 1,"keys": [{"key": "EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4","weight": 1}],"accounts": [{"permission":{"actor":"dice","permission":"active"},"weight":1}]}' owner -p alice -```` - -##### Allow dice contract to make transfers on bob behalf (deposit) -````bash -cleos set account permission bob active '{"threshold": 1,"keys": [{"key": "EOS7ijWCBmoXBi3CgtK7DJxentZZeTkeUnaSDvyro9dq7Sd1C3dC4","weight": 1}],"accounts": [{"permission":{"actor":"dice","permission":"active"},"weight":1}]}' owner -p bob -```` - -##### Alice deposits 100 EOS into the dice contract -````bash -cleos push action dice deposit '[ "alice", "100.0000 EOS" ]' -p alice -```` - -##### Bob deposits 100 EOS into the dice contract -````bash -cleos push action dice deposit '[ "bob", "100.0000 EOS" ]' -p bob -```` - -##### Alice generates a secret -````bash -openssl rand 32 -hex -28349b1d4bcdc9905e4ef9719019e55743c84efa0c5e9a0b077f0b54fcd84905 -```` - -##### Alice generates sha256(secret) -````bash -echo -n '28349b1d4bcdc9905e4ef9719019e55743c84efa0c5e9a0b077f0b54fcd84905' | xxd -r -p | sha256sum -b | awk '{print $1}' -d533f24d6f28ddcef3f066474f7b8355383e485681ba8e793e037f5cf36e4883 -```` - -##### Alice bets 3 EOS -````bash -cleos push action dice offerbet '[ "3.0000 EOS", "alice", "d533f24d6f28ddcef3f066474f7b8355383e485681ba8e793e037f5cf36e4883" ]' -p alice -```` - -##### Bob generates a secret -````bash -openssl rand 32 -hex 
-15fe76d25e124b08feb835f12e00a879bd15666a33786e64b655891fba7d6c12 -```` - -##### Bob generates sha256(secret) -````bash -echo -n '15fe76d25e124b08feb835f12e00a879bd15666a33786e64b655891fba7d6c12' | xxd -r -p | sha256sum -b | awk '{print $1}' -50ed53fcdaf27f88d51ea4e835b1055efe779bb87e6cfdff47d28c88ffb27129 -```` - -##### Bob also bets 3 EOS (a game is started) -````bash -cleos push action dice offerbet '[ "3.0000 EOS", "bob", "50ed53fcdaf27f88d51ea4e835b1055efe779bb87e6cfdff47d28c88ffb27129" ]' -p bob -```` - -##### Dice contract tables right after the game started -````bash -cleos get table dice dice account -```` -````json -{ - "rows": [{ - "owner": "alice", - "eos_balance": "97.0000 EOS", - "open_offers": 0, - "open_games": 1 - },{ - "owner": "bob", - "eos_balance": "97.0000 EOS", - "open_offers": 0, - "open_games": 1 - } - ], - "more": false -} -```` - -````bash -cleos get table dice dice game -```` -````json -{ - "rows": [{ - "id": 1, - "bet": "3.0000 EOS", - "deadline": "1970-01-01T00:00:00", - "player1": { - "commitment": "d533f24d6f28ddcef3f066474f7b8355383e485681ba8e793e037f5cf36e4883", - "reveal": "0000000000000000000000000000000000000000000000000000000000000000" - }, - "player2": { - "commitment": "50ed53fcdaf27f88d51ea4e835b1055efe779bb87e6cfdff47d28c88ffb27129", - "reveal": "0000000000000000000000000000000000000000000000000000000000000000" - } - } - ], - "more": false -} -```` - -##### Bob reveals his secret -````bash -cleos push action dice reveal '[ "50ed53fcdaf27f88d51ea4e835b1055efe779bb87e6cfdff47d28c88ffb27129", "15fe76d25e124b08feb835f12e00a879bd15666a33786e64b655891fba7d6c12" ]' -p bob -```` - -##### Game table after bob revealed (now the game has a deadline for alice to reveal) -````bash -cleos get table dice dice game -```` -````json -{ - "rows": [{ - "id": 1, - "bet": "3.0000 EOS", - "deadline": "2018-04-17T07:45:49", - "player1": { - "commitment": "d533f24d6f28ddcef3f066474f7b8355383e485681ba8e793e037f5cf36e4883", - "reveal": 
"0000000000000000000000000000000000000000000000000000000000000000" - }, - "player2": { - "commitment": "50ed53fcdaf27f88d51ea4e835b1055efe779bb87e6cfdff47d28c88ffb27129", - "reveal": "15fe76d25e124b08feb835f12e00a879bd15666a33786e64b655891fba7d6c12" - } - } - ], - "more": false -} -```` - -##### Alice reveals her secret (the winner is determined, the game is removed) -````bash -cleos push action dice reveal '[ "d533f24d6f28ddcef3f066474f7b8355383e485681ba8e793e037f5cf36e4883", "28349b1d4bcdc9905e4ef9719019e55743c84efa0c5e9a0b077f0b54fcd84905" ]' -p alice -```` - -##### Balance of the accounts after game ends -````bash -cleos get table dice dice account -```` -````json -{ - "rows": [{ - "owner": "alice", - "eos_balance": "103.0000 EOS", - "open_offers": 0, - "open_games": 0 - },{ - "owner": "bob", - "eos_balance": "97.0000 EOS", - "open_offers": 0, - "open_games": 0 - } - ], - "more": false -} -```` - -##### Alice withdraw from her dice account 103 EOS -````bash -cleos push action dice withdraw '[ "alice", "103.0000 EOS" ]' -p alice -```` - -##### Balance of alice after withdraw -````bash -cleos get currency balance eosio.token alice eos -1003.0000 EOS -```` - diff --git a/contracts/dice/dice.abi b/contracts/dice/dice.abi deleted file mode 100644 index ba47085f1db..00000000000 --- a/contracts/dice/dice.abi +++ /dev/null @@ -1,221 +0,0 @@ -{ - "version": "eosio::abi/1.0", - "types": [{ - "new_type_name": "account_name", - "type": "name" - }], - "structs": [{ - "name": "offer", - "base": "", - "fields": [{ - "name": "id", - "type": "uint64" - },{ - "name": "owner", - "type": "account_name" - },{ - "name": "bet", - "type": "asset" - },{ - "name": "commitment", - "type": "checksum256" - },{ - "name": "gameid", - "type": "uint64" - } - ] - },{ - "name": "player", - "base": "", - "fields": [{ - "name": "commitment", - "type": "checksum256" - },{ - "name": "reveal", - "type": "checksum256" - } - ] - },{ - "name": "game", - "base": "", - "fields": [{ - "name": "id", - 
"type": "uint64" - },{ - "name": "bet", - "type": "asset" - },{ - "name": "deadline", - "type": "time_point_sec" - },{ - "name": "player1", - "type": "player" - },{ - "name": "player2", - "type": "player" - } - ] - },{ - "name": "global_dice", - "base": "", - "fields": [{ - "name": "id", - "type": "uint64" - },{ - "name": "nextgameid", - "type": "uint64" - } - ] - },{ - "name": "account", - "base": "", - "fields": [{ - "name": "owner", - "type": "account_name" - },{ - "name": "eos_balance", - "type": "asset" - },{ - "name": "open_offers", - "type": "uint32" - },{ - "name": "open_games", - "type": "uint32" - } - ] - },{ - "name": "offerbet", - "base": "", - "fields": [{ - "name": "bet", - "type": "asset" - },{ - "name": "player", - "type": "account_name" - },{ - "name": "commitment", - "type": "checksum256" - } - ] - },{ - "name": "canceloffer", - "base": "", - "fields": [{ - "name": "commitment", - "type": "checksum256" - } - ] - },{ - "name": "reveal", - "base": "", - "fields": [{ - "name": "commitment", - "type": "checksum256" - },{ - "name": "source", - "type": "checksum256" - } - ] - },{ - "name": "claimexpired", - "base": "", - "fields": [{ - "name": "gameid", - "type": "uint64" - } - ] - },{ - "name": "deposit", - "base": "", - "fields": [{ - "name": "from", - "type": "account_name" - },{ - "name": "a", - "type": "asset" - } - ] - },{ - "name": "withdraw", - "base": "", - "fields": [{ - "name": "to", - "type": "account_name" - },{ - "name": "a", - "type": "asset" - } - ] - } - ], - "actions": [{ - "name": "offerbet", - "type": "offerbet", - "ricardian_contract": "" - },{ - "name": "canceloffer", - "type": "canceloffer", - "ricardian_contract": "" - },{ - "name": "reveal", - "type": "reveal", - "ricardian_contract": "" - },{ - "name": "claimexpired", - "type": "claimexpired", - "ricardian_contract": "" - },{ - "name": "deposit", - "type": "deposit", - "ricardian_contract": "" - },{ - "name": "withdraw", - "type": "withdraw", - "ricardian_contract": "" - } - ], 
- "tables": [{ - "name": "offer", - "index_type": "i64", - "key_names": [ - "id" - ], - "key_types": [ - "uint64" - ], - "type": "offer" - },{ - "name": "game", - "index_type": "i64", - "key_names": [ - "id" - ], - "key_types": [ - "uint64" - ], - "type": "game" - },{ - "name": "global", - "index_type": "i64", - "key_names": [ - "id" - ], - "key_types": [ - "uint64" - ], - "type": "global_dice" - },{ - "name": "account", - "index_type": "i64", - "key_names": [ - "owner" - ], - "key_types": [ - "account_name" - ], - "type": "account" - } - ], - "ricardian_clauses": [], - "abi_extensions": [] -} diff --git a/contracts/dice/dice.cpp b/contracts/dice/dice.cpp deleted file mode 100644 index 5dbe32b012a..00000000000 --- a/contracts/dice/dice.cpp +++ /dev/null @@ -1,391 +0,0 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ -#include -#include -#include -#include -#include -#include -#include -#include - -using eosio::key256; -using eosio::indexed_by; -using eosio::const_mem_fun; -using eosio::asset; -using eosio::permission_level; -using eosio::action; -using eosio::print; -using eosio::name; - -class dice : public eosio::contract { - public: - const uint32_t FIVE_MINUTES = 5*60; - - dice(account_name self) - :eosio::contract(self), - offers(_self, _self), - games(_self, _self), - global_dices(_self, _self), - accounts(_self, _self) - {} - - //@abi action - void offerbet(const asset& bet, const account_name player, const checksum256& commitment) { - - eosio_assert( bet.symbol == CORE_SYMBOL, "only core token allowed" ); - eosio_assert( bet.is_valid(), "invalid bet" ); - eosio_assert( bet.amount > 0, "must bet positive quantity" ); - - eosio_assert( !has_offer( commitment ), "offer with this commitment already exist" ); - require_auth( player ); - - auto cur_player_itr = accounts.find( player ); - eosio_assert(cur_player_itr != accounts.end(), "unknown account"); - - // Store new offer - auto new_offer_itr = offers.emplace(_self, [&](auto& offer){ - 
offer.id = offers.available_primary_key(); - offer.bet = bet; - offer.owner = player; - offer.commitment = commitment; - offer.gameid = 0; - }); - - // Try to find a matching bet - auto idx = offers.template get_index(); - auto matched_offer_itr = idx.lower_bound( (uint64_t)new_offer_itr->bet.amount ); - - if( matched_offer_itr == idx.end() - || matched_offer_itr->bet != new_offer_itr->bet - || matched_offer_itr->owner == new_offer_itr->owner ) { - - // No matching bet found, update player's account - accounts.modify( cur_player_itr, 0, [&](auto& acnt) { - eosio_assert( acnt.eos_balance >= bet, "insufficient balance" ); - acnt.eos_balance -= bet; - acnt.open_offers++; - }); - - } else { - // Create global game counter if not exists - auto gdice_itr = global_dices.begin(); - if( gdice_itr == global_dices.end() ) { - gdice_itr = global_dices.emplace(_self, [&](auto& gdice){ - gdice.nextgameid=0; - }); - } - - // Increment global game counter - global_dices.modify(gdice_itr, 0, [&](auto& gdice){ - gdice.nextgameid++; - }); - - // Create a new game - auto game_itr = games.emplace(_self, [&](auto& new_game){ - new_game.id = gdice_itr->nextgameid; - new_game.bet = new_offer_itr->bet; - new_game.deadline = eosio::time_point_sec(0); - - new_game.player1.commitment = matched_offer_itr->commitment; - memset(&new_game.player1.reveal, 0, sizeof(checksum256)); - - new_game.player2.commitment = new_offer_itr->commitment; - memset(&new_game.player2.reveal, 0, sizeof(checksum256)); - }); - - // Update player's offers - idx.modify(matched_offer_itr, 0, [&](auto& offer){ - offer.bet.amount = 0; - offer.gameid = game_itr->id; - }); - - offers.modify(new_offer_itr, 0, [&](auto& offer){ - offer.bet.amount = 0; - offer.gameid = game_itr->id; - }); - - // Update player's accounts - accounts.modify( accounts.find( matched_offer_itr->owner ), 0, [&](auto& acnt) { - acnt.open_offers--; - acnt.open_games++; - }); - - accounts.modify( cur_player_itr, 0, [&](auto& acnt) { - eosio_assert( 
acnt.eos_balance >= bet, "insufficient balance" ); - acnt.eos_balance -= bet; - acnt.open_games++; - }); - } - } - - //@abi action - void canceloffer( const checksum256& commitment ) { - - auto idx = offers.template get_index(); - auto offer_itr = idx.find( offer::get_commitment(commitment) ); - - eosio_assert( offer_itr != idx.end(), "offer does not exists" ); - eosio_assert( offer_itr->gameid == 0, "unable to cancel offer" ); - require_auth( offer_itr->owner ); - - auto acnt_itr = accounts.find(offer_itr->owner); - accounts.modify(acnt_itr, 0, [&](auto& acnt){ - acnt.open_offers--; - acnt.eos_balance += offer_itr->bet; - }); - - idx.erase(offer_itr); - } - - //@abi action - void reveal( const checksum256& commitment, const checksum256& source ) { - - assert_sha256( (char *)&source, sizeof(source), (const checksum256 *)&commitment ); - - auto idx = offers.template get_index(); - auto curr_revealer_offer = idx.find( offer::get_commitment(commitment) ); - - eosio_assert(curr_revealer_offer != idx.end(), "offer not found"); - eosio_assert(curr_revealer_offer->gameid > 0, "unable to reveal"); - - auto game_itr = games.find( curr_revealer_offer->gameid ); - - player curr_reveal = game_itr->player1; - player prev_reveal = game_itr->player2; - - if( !is_equal(curr_reveal.commitment, commitment) ) { - std::swap(curr_reveal, prev_reveal); - } - - eosio_assert( is_zero(curr_reveal.reveal) == true, "player already revealed"); - - if( !is_zero(prev_reveal.reveal) ) { - - checksum256 result; - sha256( (char *)&game_itr->player1, sizeof(player)*2, &result); - - auto prev_revealer_offer = idx.find( offer::get_commitment(prev_reveal.commitment) ); - - int winner = result.hash[1] < result.hash[0] ? 
0 : 1; - - if( winner ) { - pay_and_clean(*game_itr, *curr_revealer_offer, *prev_revealer_offer); - } else { - pay_and_clean(*game_itr, *prev_revealer_offer, *curr_revealer_offer); - } - - } else { - games.modify(game_itr, 0, [&](auto& game){ - - if( is_equal(curr_reveal.commitment, game.player1.commitment) ) - game.player1.reveal = source; - else - game.player2.reveal = source; - - game.deadline = eosio::time_point_sec(now() + FIVE_MINUTES); - }); - } - } - - //@abi action - void claimexpired( const uint64_t gameid ) { - - auto game_itr = games.find(gameid); - - eosio_assert(game_itr != games.end(), "game not found"); - eosio_assert(game_itr->deadline != eosio::time_point_sec(0) && eosio::time_point_sec(now()) > game_itr->deadline, "game not expired"); - - auto idx = offers.template get_index(); - auto player1_offer = idx.find( offer::get_commitment(game_itr->player1.commitment) ); - auto player2_offer = idx.find( offer::get_commitment(game_itr->player2.commitment) ); - - if( !is_zero(game_itr->player1.reveal) ) { - eosio_assert( is_zero(game_itr->player2.reveal), "game error"); - pay_and_clean(*game_itr, *player1_offer, *player2_offer); - } else { - eosio_assert( is_zero(game_itr->player1.reveal), "game error"); - pay_and_clean(*game_itr, *player2_offer, *player1_offer); - } - - } - - //@abi action - void deposit( const account_name from, const asset& quantity ) { - - eosio_assert( quantity.is_valid(), "invalid quantity" ); - eosio_assert( quantity.amount > 0, "must deposit positive quantity" ); - - auto itr = accounts.find(from); - if( itr == accounts.end() ) { - itr = accounts.emplace(_self, [&](auto& acnt){ - acnt.owner = from; - }); - } - - action( - permission_level{ from, N(active) }, - N(eosio.token), N(transfer), - std::make_tuple(from, _self, quantity, std::string("")) - ).send(); - - accounts.modify( itr, 0, [&]( auto& acnt ) { - acnt.eos_balance += quantity; - }); - } - - //@abi action - void withdraw( const account_name to, const asset& quantity ) { - 
require_auth( to ); - - eosio_assert( quantity.is_valid(), "invalid quantity" ); - eosio_assert( quantity.amount > 0, "must withdraw positive quantity" ); - - auto itr = accounts.find( to ); - eosio_assert(itr != accounts.end(), "unknown account"); - - accounts.modify( itr, 0, [&]( auto& acnt ) { - eosio_assert( acnt.eos_balance >= quantity, "insufficient balance" ); - acnt.eos_balance -= quantity; - }); - - action( - permission_level{ _self, N(active) }, - N(eosio.token), N(transfer), - std::make_tuple(_self, to, quantity, std::string("")) - ).send(); - - if( itr->is_empty() ) { - accounts.erase(itr); - } - } - - private: - //@abi table offer i64 - struct offer { - uint64_t id; - account_name owner; - asset bet; - checksum256 commitment; - uint64_t gameid = 0; - - uint64_t primary_key()const { return id; } - - uint64_t by_bet()const { return (uint64_t)bet.amount; } - - key256 by_commitment()const { return get_commitment(commitment); } - - static key256 get_commitment(const checksum256& commitment) { - const uint64_t *p64 = reinterpret_cast(&commitment); - return key256::make_from_word_sequence(p64[0], p64[1], p64[2], p64[3]); - } - - EOSLIB_SERIALIZE( offer, (id)(owner)(bet)(commitment)(gameid) ) - }; - - typedef eosio::multi_index< N(offer), offer, - indexed_by< N(bet), const_mem_fun >, - indexed_by< N(commitment), const_mem_fun > - > offer_index; - - struct player { - checksum256 commitment; - checksum256 reveal; - - EOSLIB_SERIALIZE( player, (commitment)(reveal) ) - }; - - //@abi table game i64 - struct game { - uint64_t id; - asset bet; - eosio::time_point_sec deadline; - player player1; - player player2; - - uint64_t primary_key()const { return id; } - - EOSLIB_SERIALIZE( game, (id)(bet)(deadline)(player1)(player2) ) - }; - - typedef eosio::multi_index< N(game), game> game_index; - - //@abi table global i64 - struct global_dice { - uint64_t id = 0; - uint64_t nextgameid = 0; - - uint64_t primary_key()const { return id; } - - EOSLIB_SERIALIZE( global_dice, 
(id)(nextgameid) ) - }; - - typedef eosio::multi_index< N(global), global_dice> global_dice_index; - - //@abi table account i64 - struct account { - account( account_name o = account_name() ):owner(o){} - - account_name owner; - asset eos_balance; - uint32_t open_offers = 0; - uint32_t open_games = 0; - - bool is_empty()const { return !( eos_balance.amount | open_offers | open_games ); } - - uint64_t primary_key()const { return owner; } - - EOSLIB_SERIALIZE( account, (owner)(eos_balance)(open_offers)(open_games) ) - }; - - typedef eosio::multi_index< N(account), account> account_index; - - offer_index offers; - game_index games; - global_dice_index global_dices; - account_index accounts; - - bool has_offer( const checksum256& commitment )const { - auto idx = offers.template get_index(); - auto itr = idx.find( offer::get_commitment(commitment) ); - return itr != idx.end(); - } - - bool is_equal(const checksum256& a, const checksum256& b)const { - return memcmp((void *)&a, (const void *)&b, sizeof(checksum256)) == 0; - } - - bool is_zero(const checksum256& a)const { - const uint64_t *p64 = reinterpret_cast(&a); - return p64[0] == 0 && p64[1] == 0 && p64[2] == 0 && p64[3] == 0; - } - - void pay_and_clean(const game& g, const offer& winner_offer, - const offer& loser_offer) { - - // Update winner account balance and game count - auto winner_account = accounts.find(winner_offer.owner); - accounts.modify( winner_account, 0, [&]( auto& acnt ) { - acnt.eos_balance += 2*g.bet; - acnt.open_games--; - }); - - // Update losser account game count - auto loser_account = accounts.find(loser_offer.owner); - accounts.modify( loser_account, 0, [&]( auto& acnt ) { - acnt.open_games--; - }); - - if( loser_account->is_empty() ) { - accounts.erase(loser_account); - } - - games.erase(g); - offers.erase(winner_offer); - offers.erase(loser_offer); - } -}; - -EOSIO_ABI( dice, (offerbet)(canceloffer)(reveal)(claimexpired)(deposit)(withdraw) ) diff --git a/unittests/dice_tests.cpp 
b/unittests/dice_tests.cpp deleted file mode 100644 index cb63511d39c..00000000000 --- a/unittests/dice_tests.cpp +++ /dev/null @@ -1,425 +0,0 @@ -#include -#include -#include -#include - -#include -#include - -#include -#include - -#include - -#include - -#ifdef NON_VALIDATING_TEST -#define TESTER tester -#else -#define TESTER validating_tester -#endif - -using namespace eosio; -using namespace eosio::chain; -using namespace eosio::testing; -using namespace fc; -using namespace std; -using mvo = fc::mutable_variant_object; - -struct offer_bet_t { - asset bet; - account_name player; - checksum256_type commitment; - - static account_name get_account() { return N(dice); } - static action_name get_name() {return N(offerbet); } -}; -FC_REFLECT(offer_bet_t, (bet)(player)(commitment)); - -struct cancel_offer_t { - checksum256_type commitment; - - static account_name get_account() { return N(dice); } - static action_name get_name() {return N(canceloffer); } -}; -FC_REFLECT(cancel_offer_t, (commitment)); - -struct reveal_t { - checksum256_type commitment; - checksum256_type source; - - static account_name get_account() { return N(dice); } - static action_name get_name() {return N(reveal); } -}; -FC_REFLECT(reveal_t, (commitment)(source)); - -struct deposit_t { - account_name from; - asset amount; - - static account_name get_account() { return N(dice); } - static action_name get_name() {return N(deposit); } -}; -FC_REFLECT( deposit_t, (from)(amount) ); - -struct withdraw_t { - account_name to; - asset amount; - - static account_name get_account() { return N(dice); } - static action_name get_name() {return N(withdraw); } -}; -FC_REFLECT( withdraw_t, (to)(amount) ); - -struct __attribute((packed)) account_t { - account_name owner; - asset eos_balance; - uint32_t open_offers; - uint32_t open_games; -}; -FC_REFLECT(account_t, (owner)(eos_balance)(open_offers)(open_games)); - -struct player_t { - checksum_type commitment; - checksum_type reveal; -}; -FC_REFLECT(player_t, 
(commitment)(reveal)); - -struct __attribute((packed)) game_t { - uint64_t gameid; - asset bet; - fc::time_point_sec deadline; - player_t player1; - player_t player2; -}; -FC_REFLECT(game_t, (gameid)(bet)(deadline)(player1)(player2)); - -struct dice_tester : TESTER { - - template - const auto& get_index() { - return control->db().get_index(); - } - - void offer_bet(account_name account, asset amount, const checksum_type& commitment) { - signed_transaction trx; - action act( {{account, config::active_name}}, - offer_bet_t{amount, account, commitment} ); - trx.actions.push_back(act); - set_transaction_headers(trx); - trx.sign(get_private_key( account, "active" ), control->get_chain_id()); - auto ptrx = packed_transaction(trx,packed_transaction::none); - push_transaction(ptrx); - } - - void cancel_offer(account_name account, const checksum_type& commitment) { - signed_transaction trx; - action act( {{account, config::active_name}}, - cancel_offer_t{commitment} ); - trx.actions.push_back(act); - set_transaction_headers(trx); - trx.sign(get_private_key( account, "active" ), control->get_chain_id()); - auto ptrx = packed_transaction(trx,packed_transaction::none); - push_transaction(ptrx); - } - - void deposit(account_name account, asset amount) { - signed_transaction trx; - action act( {{account, config::active_name}}, - deposit_t{account, amount} ); - trx.actions.push_back(act); - set_transaction_headers(trx); - trx.sign(get_private_key( account, "active" ), control->get_chain_id()); - auto ptrx = packed_transaction(trx,packed_transaction::none); - push_transaction(ptrx); - } - - void withdraw(account_name account, asset amount) { - signed_transaction trx; - action act( {{account, config::active_name}}, - withdraw_t{account, amount} ); - trx.actions.push_back(act); - set_transaction_headers(trx); - trx.sign(get_private_key( account, "active" ), control->get_chain_id()); - auto ptrx = packed_transaction(trx,packed_transaction::none); - push_transaction(ptrx); - } - - 
void reveal(account_name account, const checksum_type& commitment, const checksum_type& source ) { - signed_transaction trx; - action act( {{account, config::active_name}}, - reveal_t{commitment, source} ); - trx.actions.push_back(act); - set_transaction_headers(trx); - trx.sign(get_private_key( account, "active" ), control->get_chain_id()); - auto ptrx = packed_transaction(trx,packed_transaction::none); - push_transaction(ptrx); - } - - bool dice_account(account_name account, account_t& acnt) { - auto* maybe_tid = find_table(N(dice), N(dice), N(account)); - if(maybe_tid == nullptr) return false; - - auto* o = control->db().find(boost::make_tuple(maybe_tid->id, account)); - if(o == nullptr) { - return false; - } - - fc::raw::unpack(o->value.data(), o->value.size(), acnt); - return true; - } - - bool dice_game(uint64_t game_id, game_t& game) { - const bool not_required = false; - return get_table_entry(game, N(dice), N(dice), N(game), game_id, not_required); - } - - uint32_t open_games(account_name account) { - account_t acnt; - if(!dice_account(account, acnt)) return 0; - return acnt.open_games; - } - - asset game_bet(uint64_t game_id) { - game_t game; - if(!dice_game(game_id, game)) return asset(); - return game.bet; - } - - uint32_t open_offers(account_name account) { - account_t acnt; - if(!dice_account(account, acnt)) return 0; - return acnt.open_offers; - } - - asset balance_of(account_name account) { - account_t acnt; - if(!dice_account(account, acnt)) return asset(); - return acnt.eos_balance; - } - - checksum_type commitment_for( const char* secret ) { - return commitment_for(checksum_type(secret)); - } - - checksum_type commitment_for( const checksum_type& secret ) { - return fc::sha256::hash( secret.data(), sizeof(secret) ); - } - - void add_dice_authority(account_name account) { - auto auth = authority{ - 1, - { - {.key = get_public_key(account,"active"), .weight = 1} - }, - { - {.permission = {N(dice),N(active)}, .weight = 1} - } - }; - 
set_authority(account, N(active), auth, N(owner) ); - } -}; - -BOOST_AUTO_TEST_SUITE(dice_tests) - -BOOST_FIXTURE_TEST_CASE( dice_test, dice_tester ) try { - - create_accounts( {N(eosio.token), N(dice),N(alice),N(bob),N(carol),N(david)}, false); - - set_code(N(eosio.token), eosio_token_wast); - set_abi(N(eosio.token), eosio_token_abi); - - produce_block(); - - add_dice_authority(N(alice)); - add_dice_authority(N(bob)); - add_dice_authority(N(carol)); - - push_action(N(eosio.token), N(create), N(eosio.token), mvo() - ("issuer", "eosio.token") - ("maximum_supply", core_from_string("1000000000.0000")) - ); - - push_action(N(eosio.token), N(issue), N(eosio.token), mvo() - ("to", "eosio") - ("quantity", core_from_string("1000000000.0000")) - ("memo", "") - ); - - transfer( config::system_account_name, N(alice), core_from_string("10000.0000"), "", N(eosio.token) ); - transfer( config::system_account_name, N(bob), core_from_string("10000.0000"), "", N(eosio.token) ); - transfer( config::system_account_name, N(carol), core_from_string("10000.0000"), "", N(eosio.token) ); - - produce_block(); - - set_code(N(dice), dice_wast); - set_abi(N(dice), dice_abi); - - produce_block(); - - // Alice deposits 1000 - deposit( N(alice), core_from_string("1000.0000")); - produce_block(); - - BOOST_REQUIRE_EQUAL( balance_of(N(alice)), core_from_string("1000.0000")); - BOOST_REQUIRE_EQUAL( open_games(N(alice)), 0); - - // Alice tries to bet 0 (fail) - // secret : 9b886346e1351d4144d0b8392a975612eb0f8b6de7eae1cc9bcc55eb52be343c - BOOST_CHECK_THROW( offer_bet( N(alice), core_from_string("0.0000"), - commitment_for("9b886346e1351d4144d0b8392a975612eb0f8b6de7eae1cc9bcc55eb52be343c") - ), fc::exception); - - // Alice bets 10 (success) - // secret : 0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46 - offer_bet( N(alice), core_from_string("10.0000"), - commitment_for("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ); - produce_block(); - - // Bob tries to bet 
using a secret previously used by Alice (fail) - // secret : 00000000000000000000000000000002c334abe6ce13219a4cf3b862abb03c46 - BOOST_CHECK_THROW( offer_bet( N(bob), core_from_string("10.0000"), - commitment_for("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ), fc::exception); - produce_block(); - - // Alice tries to bet 1000 (fail) - // secret : a512f6b1b589a8906d574e9de74a529e504a5c53a760f0991a3e00256c027971 - BOOST_CHECK_THROW( offer_bet( N(alice), core_from_string("1000.0000"), - commitment_for("a512f6b1b589a8906d574e9de74a529e504a5c53a760f0991a3e00256c027971") - ), fc::exception); - produce_block(); - - // Bob tries to bet 90 without deposit - // secret : 4facfc98932dde46fdc4403125a16337f6879a842a7ff8b0dc8e1ecddd59f3c8 - BOOST_CHECK_THROW( offer_bet( N(bob), core_from_string("90.0000"), - commitment_for("4facfc98932dde46fdc4403125a16337f6879a842a7ff8b0dc8e1ecddd59f3c8") - ), fc::exception); - produce_block(); - - // Bob deposits 500 - deposit( N(bob), core_from_string("500.0000")); - BOOST_REQUIRE_EQUAL( balance_of(N(bob)), core_from_string("500.0000")); - - // Bob bets 11 (success) - // secret : eec3272712d974c474a3e7b4028b53081344a5f50008e9ccf918ba0725a8d784 - offer_bet( N(bob), core_from_string("11.0000"), - commitment_for("eec3272712d974c474a3e7b4028b53081344a5f50008e9ccf918ba0725a8d784") - ); - produce_block(); - - // Bob cancels (success) - BOOST_REQUIRE_EQUAL( open_offers(N(bob)), 1); - cancel_offer( N(bob), commitment_for("eec3272712d974c474a3e7b4028b53081344a5f50008e9ccf918ba0725a8d784") ); - BOOST_REQUIRE_EQUAL( open_offers(N(bob)), 0); - - // Carol deposits 300 - deposit( N(carol), core_from_string("300.0000")); - - // Carol bets 10 (success) - // secret : 3efb4bd5e19b780f4980c919330c0306f8157f93db1fc72c7cefec63e0e7f37a - offer_bet( N(carol), core_from_string("10.0000"), - commitment_for("3efb4bd5e19b780f4980c919330c0306f8157f93db1fc72c7cefec63e0e7f37a") - ); - produce_block(); - - BOOST_REQUIRE_EQUAL( open_games(N(alice)), 
1); - BOOST_REQUIRE_EQUAL( open_offers(N(alice)), 0); - - BOOST_REQUIRE_EQUAL( open_games(N(carol)), 1); - BOOST_REQUIRE_EQUAL( open_offers(N(carol)), 0); - - BOOST_REQUIRE_EQUAL( game_bet(1), core_from_string("10.0000")); - - - // Alice tries to cancel a nonexistent bet (fail) - BOOST_CHECK_THROW( cancel_offer( N(alice), - commitment_for("00000000000000000000000000000000000000000000000000000000abb03c46") - ), fc::exception); - - // Alice tries to cancel an in-game bet (fail) - BOOST_CHECK_THROW( cancel_offer( N(alice), - commitment_for("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ), fc::exception); - - // Alice reveals secret (success) - reveal( N(alice), - commitment_for("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46"), - checksum_type("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ); - produce_block(); - - // Alice tries to reveal again (fail) - BOOST_CHECK_THROW( reveal( N(alice), - commitment_for("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46"), - checksum_type("0ba044d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ), fc::exception); - - // Bob tries to reveal an invalid (secret,commitment) pair (fail) - BOOST_CHECK_THROW( reveal( N(bob), - commitment_for("121344d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46"), - checksum_type("141544d2833758ee2c8f24d8a3f70c82c334abe6ce13219a4cf3b862abb03c46") - ), fc::exception); - - // Bob tries to reveal a valid (secret,commitment) pair that has no offer/game (fail) - BOOST_CHECK_THROW( reveal( N(bob), - commitment_for("e48c6884bb97ac5f5951df6012ce79f63bb8549ad0111315ad9ecbaf4c9b1eb8"), - checksum_type("e48c6884bb97ac5f5951df6012ce79f63bb8549ad0111315ad9ecbaf4c9b1eb8") - ), fc::exception); - - // Bob reveals Carol's secret (success) - reveal( N(bob), - commitment_for("3efb4bd5e19b780f4980c919330c0306f8157f93db1fc72c7cefec63e0e7f37a"), - 
checksum_type("3efb4bd5e19b780f4980c919330c0306f8157f93db1fc72c7cefec63e0e7f37a") - ); - - BOOST_REQUIRE_EQUAL( open_games(N(alice)), 0); - BOOST_REQUIRE_EQUAL( open_offers(N(alice)), 0); - BOOST_REQUIRE_EQUAL( balance_of(N(alice)), core_from_string("1010.0000")); - - BOOST_REQUIRE_EQUAL( open_games(N(carol)), 0); - BOOST_REQUIRE_EQUAL( open_offers(N(carol)), 0); - BOOST_REQUIRE_EQUAL( balance_of(N(carol)), core_from_string("290.0000")); - - // Alice withdraw 1009 (success) - withdraw( N(alice), core_from_string("1009.0000")); - BOOST_REQUIRE_EQUAL( balance_of(N(alice)), core_from_string("1.0000")); - - BOOST_REQUIRE_EQUAL( - get_currency_balance(N(eosio.token), symbol(CORE_SYMBOL), N(alice)), - core_from_string("10009.0000") - ); - - // Alice withdraw 2 (fail) - BOOST_CHECK_THROW( withdraw( N(alice), core_from_string("2.0000")), - fc::exception); - - // Alice withdraw 1 (success) - withdraw( N(alice), core_from_string("1.0000")); - - BOOST_REQUIRE_EQUAL( - get_currency_balance(N(eosio.token), symbol(CORE_SYMBOL), N(alice)), - core_from_string("10010.0000") - ); - - // Verify alice account was deleted - account_t alice_account; - BOOST_CHECK(dice_account(N(alice), alice_account) == false); - - // No games in table - auto* game_tid = find_table(N(dice), N(dice), N(game)); - BOOST_CHECK(game_tid == nullptr); - - // No offers in table - auto* offer_tid = find_table(N(dice), N(dice), N(offer)); - BOOST_CHECK(offer_tid == nullptr); - - // 2 records in account table (Bob & Carol) - auto* account_tid = find_table(N(dice), N(dice), N(account)); - BOOST_CHECK(account_tid != nullptr); - BOOST_CHECK(account_tid->count == 2); - -} FC_LOG_AND_RETHROW() /// basic_test - -BOOST_AUTO_TEST_SUITE_END() From bab07419d6fd848ddb96c4b9b153b116424dcafe Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 16 Oct 2018 13:58:02 -0400 Subject: [PATCH 158/161] Remove dice contract --- contracts/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/contracts/CMakeLists.txt 
b/contracts/CMakeLists.txt index c6eb0903d63..c4a13225737 100644 --- a/contracts/CMakeLists.txt +++ b/contracts/CMakeLists.txt @@ -30,7 +30,6 @@ add_subdirectory(test_ram_limit) #add_subdirectory(social) add_subdirectory(eosio.bios) add_subdirectory(noop) -add_subdirectory(dice) add_subdirectory(tic_tac_toe) add_subdirectory(payloadless) add_subdirectory(integration_test) From 21d8996ab52e4691df8a0611f083785a9c33c232 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 16 Oct 2018 13:58:30 -0400 Subject: [PATCH 159/161] Remove dice tests --- tests/CMakeLists.txt | 2 +- unittests/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index a5675072108..d5121373003 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -21,7 +21,7 @@ target_link_libraries( plugin_test eosio_testing eosio_chain chainbase eos_utili target_include_directories( plugin_test PUBLIC ${CMAKE_SOURCE_DIR}/plugins/net_plugin/include ${CMAKE_SOURCE_DIR}/plugins/chain_plugin/include ) -add_dependencies(plugin_test asserter test_api test_api_mem test_api_db test_api_multi_index proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig) +add_dependencies(plugin_test asserter test_api test_api_mem test_api_db test_api_multi_index proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop eosio.msig) # configure_file(${CMAKE_CURRENT_SOURCE_DIR}/core_symbol.py.in ${CMAKE_CURRENT_BINARY_DIR}/core_symbol.py) diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index a57e1cb9ed7..662ee984cfd 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -28,7 +28,7 @@ target_include_directories( unit_test PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/contracts ${CMAKE_CURRENT_BINARY_DIR}/contracts ${CMAKE_CURRENT_BINARY_DIR}/include ) -add_dependencies(unit_test asserter test_api 
test_api_mem test_api_db test_ram_limit test_api_multi_index eosio.token proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig payloadless tic_tac_toe deferred_test) +add_dependencies(unit_test asserter test_api test_api_mem test_api_db test_ram_limit test_api_multi_index eosio.token proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop eosio.msig payloadless tic_tac_toe deferred_test) #Manually run unit_test for all supported runtimes #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. unit_test -- --verbose From 27c4831e48dd52a8f4371f50ac49d5a69885620e Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 16 Oct 2018 16:24:55 -0400 Subject: [PATCH 160/161] On keosd auto-launch force unix socket path cleos will only autolaunch keosd when cleos' wallet url is left at the default of unix:///home/user/eosio-wallet/keosd.sock When it does the autolaunch it was launching keosd as keosd --http-server-address --https-server-address to explicitly disable HTTP & HTTPS. However, there is a possibility that a user's ~/eosio-wallet/config.ini may have another unix-socket-path set in it that prevents cleos and the auto launched keosd to be able to communicate. Now force set unix-socket-path to the default value. 
--- programs/cleos/main.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 1f36bd20fe0..9dd1cfb9b86 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -829,6 +829,8 @@ void ensure_keosd_running(CLI::App* app) { pargs.push_back(""); pargs.push_back("--https-server-address"); pargs.push_back(""); + pargs.push_back("--unix-socket-path"); + pargs.push_back(string(key_store_executable_name) + ".sock"); ::boost::process::child keos(binPath, pargs, bp::std_in.close(), From f1987688464148f80230b22508d00dfb43a6b3a0 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Tue, 16 Oct 2018 17:39:02 -0400 Subject: [PATCH 161/161] bump version to 1.4.0 --- CMakeLists.txt | 4 ++-- Docker/README.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f2246490155..3ec2ba0d0bc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,8 +26,8 @@ set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) -set(VERSION_MINOR 3) -set(VERSION_PATCH 2) +set(VERSION_MINOR 4) +set(VERSION_PATCH 0) set( CLI_CLIENT_EXECUTABLE_NAME cleos ) set( NODE_EXECUTABLE_NAME nodeos ) diff --git a/Docker/README.md b/Docker/README.md index 55982cc7ee4..ae6c0add6fd 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.3.2 tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. 
For example, if you wished to generate a docker image based off of the v1.4.0 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.3.2 --build-arg branch=v1.3.2 . +docker build -t eosio/eos:v1.4.0 --build-arg branch=v1.4.0 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image.