
Commit

resolve conflict
jmjatlanta committed Jan 21, 2019
2 parents 46e248e + bbcbed0 commit 1bad92a
Showing 16 changed files with 130 additions and 1,317 deletions.
1 change: 0 additions & 1 deletion gui_version

This file was deleted.

26 changes: 0 additions & 26 deletions libraries/app/application.cpp
@@ -971,7 +971,6 @@ void application::set_program_options(boost::program_options::options_descriptio
("genesis-json", bpo::value<boost::filesystem::path>(), "File to read Genesis State from")
("dbg-init-key", bpo::value<string>(), "Block signing key to use for init witnesses, overrides genesis file")
("api-access", bpo::value<boost::filesystem::path>(), "JSON file specifying API permissions")
("plugins", bpo::value<string>(), "Space-separated list of plugins to activate")
("io-threads", bpo::value<uint16_t>()->implicit_value(0), "Number of IO threads, default to 0 for auto-configuration")
("enable-subscribe-to-all", bpo::value<bool>()->implicit_value(true),
"Whether allow API clients to subscribe to universal object creation and removal events")
@@ -1002,31 +1001,6 @@ void application::initialize(const fc::path& data_dir, const boost::program_opti
const uint16_t num_threads = options["io-threads"].as<uint16_t>();
fc::asio::default_io_service_scope::set_num_threads(num_threads);
}

std::vector<string> wanted;
if( options.count("plugins") )
{
boost::split(wanted, options.at("plugins").as<std::string>(), [](char c){return c == ' ';});
}
else
{
wanted.push_back("witness");
wanted.push_back("account_history");
wanted.push_back("market_history");
wanted.push_back("grouped_orders");
}
int es_ah_conflict_counter = 0;
for (auto& it : wanted)
{
if(it == "account_history")
++es_ah_conflict_counter;
if(it == "elasticsearch")
++es_ah_conflict_counter;

FC_ASSERT(es_ah_conflict_counter <= 1, "Can't start program with elasticsearch and account_history plugin at the same time");

if (!it.empty()) enable_plugin(it);
}
}

void application::startup()
18 changes: 11 additions & 7 deletions libraries/app/include/graphene/app/application.hpp
@@ -48,18 +48,17 @@ namespace graphene { namespace app {
application();
~application();

void set_program_options( boost::program_options::options_description& command_line_options,
boost::program_options::options_description& configuration_file_options )const;
void initialize(const fc::path& data_dir, const boost::program_options::variables_map&options);
void initialize_plugins( const boost::program_options::variables_map& options );
void set_program_options(boost::program_options::options_description& command_line_options,
boost::program_options::options_description& configuration_file_options)const;
void initialize(const fc::path& data_dir, const boost::program_options::variables_map& options);
void initialize_plugins(const boost::program_options::variables_map& options);
void startup();
void shutdown();
void startup_plugins();
void shutdown_plugins();

template<typename PluginType>
std::shared_ptr<PluginType> register_plugin()
{
std::shared_ptr<PluginType> register_plugin(bool auto_load = false) {
auto plug = std::make_shared<PluginType>();
plug->plugin_set_app(this);

@@ -72,6 +71,10 @@ namespace graphene { namespace app {
_cfg_options.add(plugin_cfg_options);

add_available_plugin( plug );

if (auto_load)
enable_plugin(plug->plugin_name());

return plug;
}
std::shared_ptr<abstract_plugin> get_plugin( const string& name )const;
@@ -98,8 +101,9 @@ namespace graphene { namespace app {

const application_options& get_options();

private:
void enable_plugin( const string& name );

private:
void add_available_plugin( std::shared_ptr<abstract_plugin> p );
std::shared_ptr<detail::application_impl> my;

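For context, a minimal sketch (not part of this commit) of how a front-end program might use the revised interface: register_plugin now accepts an auto_load flag that enables the plugin as soon as it is registered, and enable_plugin is public so plugins chosen at run time can be activated by name. MyPlugin and MyOtherPlugin are hypothetical placeholder types assumed to derive from abstract_plugin.

// Sketch only: MyPlugin / MyOtherPlugin are hypothetical plugin types, not from this commit.
#include <graphene/app/application.hpp>
#include <string>
#include <vector>

void configure_node( graphene::app::application& node,
                     const std::vector<std::string>& wanted_plugins )
{
   // Register a plugin and enable it immediately (auto_load = true).
   auto core_plugin = node.register_plugin<MyPlugin>( true );

   // Register a plugin without enabling it; activate it only if requested,
   // e.g. from a space-separated --plugins option parsed by the program.
   auto optional_plugin = node.register_plugin<MyOtherPlugin>();
   for( const auto& name : wanted_plugins )
      if( !name.empty() )
         node.enable_plugin( name );
}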
134 changes: 64 additions & 70 deletions libraries/chain/db_block.cpp
@@ -129,77 +129,74 @@ bool database::push_block(const signed_block& new_block, uint32_t skip)
bool database::_push_block(const signed_block& new_block)
{ try {
uint32_t skip = get_node_properties().skip_flags;
if( !(skip&skip_fork_db) )
{
/// TODO: if the block is greater than the head block and before the next maitenance interval
// verify that the block signer is in the current set of active witnesses.
// TODO: If the block is greater than the head block and before the next maintenance interval
// verify that the block signer is in the current set of active witnesses.

shared_ptr<fork_item> new_head = _fork_db.push_block(new_block);
//If the head block from the longest chain does not build off of the current head, we need to switch forks.
if( new_head->data.previous != head_block_id() )
shared_ptr<fork_item> new_head = _fork_db.push_block(new_block);
//If the head block from the longest chain does not build off of the current head, we need to switch forks.
if( new_head->data.previous != head_block_id() )
{
//If the newly pushed block is the same height as head, we get head back in new_head
//Only switch forks if new_head is actually higher than head
if( new_head->data.block_num() > head_block_num() )
{
//If the newly pushed block is the same height as head, we get head back in new_head
//Only switch forks if new_head is actually higher than head
if( new_head->data.block_num() > head_block_num() )
wlog( "Switching to fork: ${id}", ("id",new_head->data.id()) );
auto branches = _fork_db.fetch_branch_from(new_head->data.id(), head_block_id());

// pop blocks until we hit the forked block
while( head_block_id() != branches.second.back()->data.previous )
{
ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) );
pop_block();
}

// push all blocks on the new fork
for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr )
{
wlog( "Switching to fork: ${id}", ("id",new_head->data.id()) );
auto branches = _fork_db.fetch_branch_from(new_head->data.id(), head_block_id());

// pop blocks until we hit the forked block
while( head_block_id() != branches.second.back()->data.previous )
{
ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) );
pop_block();
}

// push all blocks on the new fork
for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr )
{
ilog( "pushing block from fork #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) );
optional<fc::exception> except;
try {
undo_database::session session = _undo_db.start_undo_session();
apply_block( (*ritr)->data, skip );
_block_id_to_block.store( (*ritr)->id, (*ritr)->data );
session.commit();
}
catch ( const fc::exception& e ) { except = e; }
if( except )
{
wlog( "exception thrown while switching forks ${e}", ("e",except->to_detail_string() ) );
// remove the rest of branches.first from the fork_db, those blocks are invalid
while( ritr != branches.first.rend() )
{
ilog( "removing block from fork_db #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) );
_fork_db.remove( (*ritr)->id );
++ritr;
}
_fork_db.set_head( branches.second.front() );

// pop all blocks from the bad fork
while( head_block_id() != branches.second.back()->data.previous )
{
ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) );
pop_block();
}

ilog( "Switching back to fork: ${id}", ("id",branches.second.front()->data.id()) );
// restore all blocks from the good fork
for( auto ritr2 = branches.second.rbegin(); ritr2 != branches.second.rend(); ++ritr2 )
{
ilog( "pushing block #${n} ${id}", ("n",(*ritr2)->data.block_num())("id",(*ritr2)->id) );
auto session = _undo_db.start_undo_session();
apply_block( (*ritr2)->data, skip );
_block_id_to_block.store( (*ritr2)->id, (*ritr2)->data );
session.commit();
}
throw *except;
}
}
return true;
ilog( "pushing block from fork #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) );
optional<fc::exception> except;
try {
undo_database::session session = _undo_db.start_undo_session();
apply_block( (*ritr)->data, skip );
_block_id_to_block.store( (*ritr)->id, (*ritr)->data );
session.commit();
}
catch ( const fc::exception& e ) { except = e; }
if( except )
{
wlog( "exception thrown while switching forks ${e}", ("e",except->to_detail_string() ) );
// remove the rest of branches.first from the fork_db, those blocks are invalid
while( ritr != branches.first.rend() )
{
ilog( "removing block from fork_db #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) );
_fork_db.remove( (*ritr)->id );
++ritr;
}
_fork_db.set_head( branches.second.front() );

// pop all blocks from the bad fork
while( head_block_id() != branches.second.back()->data.previous )
{
ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) );
pop_block();
}

ilog( "Switching back to fork: ${id}", ("id",branches.second.front()->data.id()) );
// restore all blocks from the good fork
for( auto ritr2 = branches.second.rbegin(); ritr2 != branches.second.rend(); ++ritr2 )
{
ilog( "pushing block #${n} ${id}", ("n",(*ritr2)->data.block_num())("id",(*ritr2)->id) );
auto session = _undo_db.start_undo_session();
apply_block( (*ritr2)->data, skip );
_block_id_to_block.store( (*ritr2)->id, (*ritr2)->data );
session.commit();
}
throw *except;
}
}
else return false;
return true;
}
else return false;
}

try {
@@ -209,10 +206,7 @@ bool database::_push_block(const signed_block& new_block)
session.commit();
} catch ( const fc::exception& e ) {
elog("Failed to push new block:\n${e}", ("e", e.to_detail_string()));
if( !(skip&skip_fork_db) )
{
_fork_db.remove( new_block.id() );
}
_fork_db.remove( new_block.id() );
throw;
}

1 change: 0 additions & 1 deletion libraries/chain/include/graphene/chain/database.hpp
@@ -66,7 +66,6 @@ namespace graphene { namespace chain {
skip_witness_signature = 1 << 0, ///< used while reindexing
skip_transaction_signatures = 1 << 1, ///< used by non-witness nodes
skip_transaction_dupe_check = 1 << 2, ///< used while reindexing
skip_fork_db = 1 << 3, ///< used while reindexing
skip_block_size_check = 1 << 4, ///< used when applying locally generated transactions
skip_tapos_check = 1 << 5, ///< used while reindexing -- note this skips expiration check as well
// skip_authority_check = 1 << 6, ///< removed because effectively identical to skip_transaction_signatures
8 changes: 6 additions & 2 deletions libraries/chain/market_evaluator.cpp
@@ -49,9 +49,13 @@ void_result limit_order_create_evaluator::do_evaluate(const limit_order_create_o
_receive_asset = &op.min_to_receive.asset_id(d);

if( _sell_asset->options.whitelist_markets.size() )
FC_ASSERT( _sell_asset->options.whitelist_markets.find(_receive_asset->id) != _sell_asset->options.whitelist_markets.end() );
FC_ASSERT( _sell_asset->options.whitelist_markets.find(_receive_asset->id)
!= _sell_asset->options.whitelist_markets.end(),
"This market has not been whitelisted." );
if( _sell_asset->options.blacklist_markets.size() )
FC_ASSERT( _sell_asset->options.blacklist_markets.find(_receive_asset->id) == _sell_asset->options.blacklist_markets.end() );
FC_ASSERT( _sell_asset->options.blacklist_markets.find(_receive_asset->id)
== _sell_asset->options.blacklist_markets.end(),
"This market has been blacklisted." );

FC_ASSERT( is_authorized_asset( d, *_seller, *_sell_asset ) );
FC_ASSERT( is_authorized_asset( d, *_seller, *_receive_asset ) );
36 changes: 22 additions & 14 deletions programs/delayed_node/main.cpp
@@ -64,9 +64,11 @@ int main(int argc, char** argv) {
bpo::options_description app_options("Graphene Delayed Node");
bpo::options_description cfg_options("Graphene Delayed Node");
app_options.add_options()
("help,h", "Print this help message and exit.")
("data-dir,d", bpo::value<boost::filesystem::path>()->default_value("delayed_node_data_dir"), "Directory containing databases, configuration file, etc.")
;
("help,h", "Print this help message and exit.")
("data-dir,d", bpo::value<boost::filesystem::path>()->default_value("delayed_node_data_dir"), "Directory containing databases, configuration file, etc.")
("plugins", bpo::value<std::string>()->default_value("delayed_node account_history market_history"),
"Space-separated list of plugins to activate");
;

bpo::variables_map options;

@@ -84,8 +86,8 @@ int main(int argc, char** argv) {
}
catch (const boost::program_options::error& e)
{
std::cerr << "Error parsing command line: " << e.what() << "\n";
return 1;
std::cerr << "Error parsing command line: " << e.what() << "\n";
return 1;
}

if( options.count("help") )
Expand Down Expand Up @@ -160,9 +162,15 @@ int main(int argc, char** argv) {
elog("Error parsing configuration file: ${e}", ("e", e.what()));
return 1;
}
if( !options.count("plugins") )
options.insert( std::make_pair( "plugins", bpo::variable_value(std::string("delayed_node account_history market_history"), true) ) );

std::set<std::string> plugins;
boost::split(plugins, options.at("plugins").as<std::string>(), [](char c){return c == ' ';});

std::for_each(plugins.begin(), plugins.end(), [&](const std::string& plug) mutable {
if (!plug.empty()) {
node.enable_plugin(plug);
}
});
node.initialize(data_dir, options);
node.initialize_plugins( options );

@@ -172,7 +180,7 @@ int main(int argc, char** argv) {

fc::promise<int>::ptr exit_promise = new fc::promise<int>("UNIX Signal Handler");
fc::set_signal_handler([&exit_promise](int signal) {
exit_promise->set_value(signal);
exit_promise->set_value(signal);
}, SIGINT);

ilog("Started delayed node on a chain with ${h} blocks.", ("h", node.chain_database()->head_block_num()));
Expand Down Expand Up @@ -243,14 +251,14 @@ fc::optional<fc::logging_config> load_logging_config_from_ini_file(const fc::pat
// stdout/stderr will be taken from ini file, everything else hard-coded here
fc::console_appender::config console_appender_config;
console_appender_config.level_colors.emplace_back(
fc::console_appender::level_color(fc::log_level::debug,
fc::console_appender::color::green));
fc::console_appender::level_color(fc::log_level::debug,
fc::console_appender::color::green));
console_appender_config.level_colors.emplace_back(
fc::console_appender::level_color(fc::log_level::warn,
fc::console_appender::color::brown));
fc::console_appender::level_color(fc::log_level::warn,
fc::console_appender::color::brown));
console_appender_config.level_colors.emplace_back(
fc::console_appender::level_color(fc::log_level::error,
fc::console_appender::color::cyan));
fc::console_appender::level_color(fc::log_level::error,
fc::console_appender::color::cyan));
console_appender_config.stream = fc::variant(stream_name, 1).as<fc::console_appender::stream::type>(1);
logging_config.appenders.push_back(fc::appender_config(console_appender_name, "console", fc::variant(console_appender_config, GRAPHENE_MAX_NESTED_OBJECTS)));
found_logging_config = true;
25 changes: 21 additions & 4 deletions programs/witness_node/main.cpp
@@ -40,6 +40,7 @@
#include <boost/filesystem.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/container/flat_set.hpp>
#include <boost/algorithm/string.hpp>

#include <graphene/utilities/git_revision.hpp>
#include <boost/algorithm/string/replace.hpp>
@@ -64,9 +65,11 @@ int main(int argc, char** argv) {
bpo::options_description cfg_options("Graphene Witness Node");
app_options.add_options()
("help,h", "Print this help message and exit.")
("data-dir,d", bpo::value<boost::filesystem::path>()->default_value("witness_node_data_dir"), "Directory containing databases, configuration file, etc.")
("data-dir,d", bpo::value<boost::filesystem::path>()->default_value("witness_node_data_dir"),
"Directory containing databases, configuration file, etc.")
("version,v", "Display version information")
;
("plugins", bpo::value<std::string>()->default_value("witness account_history market_history grouped_orders"),
"Space-separated list of plugins to activate");

bpo::variables_map options;

@@ -90,10 +93,24 @@
}
catch (const boost::program_options::error& e)
{
std::cerr << "Error parsing command line: " << e.what() << "\n";
return 1;
std::cerr << "Error parsing command line: " << e.what() << "\n";
return 1;
}

std::set<std::string> plugins;
boost::split(plugins, options.at("plugins").as<std::string>(), [](char c){return c == ' ';});

if(plugins.count("account_history") && plugins.count("elasticsearch")) {
std::cerr << "Plugin conflict: Cannot load both account_history plugin and elasticsearch plugin\n";
return 1;
}

std::for_each(plugins.begin(), plugins.end(), [node](const std::string& plug) mutable {
if (!plug.empty()) {
node->enable_plugin(plug);
}
});

if( options.count("help") )
{
std::cout << app_options << "\n";
(Diffs for the remaining 8 changed files were not loaded.)
