From 5ba783b2ff6de338993d30bb0076e77720cd6422 Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Tue, 19 Nov 2019 11:49:18 +0100 Subject: [PATCH 01/14] Removed unused verify_parallel --- libraries/protocol/include/graphene/protocol/base.hpp | 3 --- libraries/protocol/operations.cpp | 6 ------ 2 files changed, 9 deletions(-) diff --git a/libraries/protocol/include/graphene/protocol/base.hpp b/libraries/protocol/include/graphene/protocol/base.hpp index f7cb4ef594..85090e8d28 100644 --- a/libraries/protocol/include/graphene/protocol/base.hpp +++ b/libraries/protocol/include/graphene/protocol/base.hpp @@ -26,8 +26,6 @@ #include #include -#include - namespace graphene { namespace protocol { struct asset; struct authority; @@ -97,7 +95,6 @@ namespace graphene { namespace protocol { void get_required_active_authorities( flat_set& )const{} void get_required_owner_authorities( flat_set& )const{} void validate()const{} - fc::optional< fc::future > validate_parallel( uint32_t skip )const; static uint64_t calculate_data_fee( uint64_t bytes, uint64_t price_per_kbyte ); }; diff --git a/libraries/protocol/operations.cpp b/libraries/protocol/operations.cpp index d1b71232c6..cb434729ab 100644 --- a/libraries/protocol/operations.cpp +++ b/libraries/protocol/operations.cpp @@ -37,12 +37,6 @@ uint64_t base_operation::calculate_data_fee( uint64_t bytes, uint64_t price_per_ return static_cast(result); } -fc::optional< fc::future > base_operation::validate_parallel( uint32_t skip )const -{ - validate(); - return fc::optional< fc::future >(); -} - void balance_claim_operation::validate()const { FC_ASSERT( fee == asset() ); From 210d1a518b98ef37158717829c2a340f32ec4433 Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Tue, 19 Nov 2019 17:15:59 +0100 Subject: [PATCH 02/14] Removed unused simulated_network --- libraries/net/include/graphene/net/node.hpp | 25 --------- libraries/net/node.cpp | 57 --------------------- 2 files changed, 82 deletions(-) diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index fe03ac0cb6..9f7824558d 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -296,32 +296,7 @@ namespace graphene { namespace net { std::unique_ptr my; }; - class simulated_network : public node - { - public: - ~simulated_network(); - simulated_network(const std::string& user_agent) : node(user_agent) {} - void listen_to_p2p_network() override {} - void connect_to_p2p_network() override {} - void connect_to_endpoint(const fc::ip::endpoint& ep) override {} - - fc::ip::endpoint get_actual_listening_endpoint() const override { return fc::ip::endpoint(); } - - void sync_from(const item_id& current_head_block, const std::vector& hard_fork_block_numbers) override {} - void broadcast(const message& item_to_broadcast) override; - void add_node_delegate(node_delegate* node_delegate_to_add); - - virtual uint32_t get_connection_count() const override { return 8; } - private: - struct node_info; - void message_sender(node_info* destination_node); - std::list network_nodes; - }; - - typedef std::shared_ptr node_ptr; - typedef std::shared_ptr simulated_network_ptr; - } } // graphene::net FC_REFLECT(graphene::net::message_propagation_data, (received_time)(validated_time)(originating_peer)); diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 06aebf6fbd..aa83e5f98e 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4858,63 +4858,6 @@ namespace graphene { namespace net { namespace detail { 
INVOKE_IN_IMPL(close); } - struct simulated_network::node_info - { - node_delegate* delegate; - fc::future message_sender_task_done; - std::queue messages_to_deliver; - node_info(node_delegate* delegate) : delegate(delegate) {} - }; - - simulated_network::~simulated_network() - { - for( node_info* network_node_info : network_nodes ) - { - network_node_info->message_sender_task_done.cancel_and_wait("~simulated_network()"); - delete network_node_info; - } - } - - void simulated_network::message_sender(node_info* destination_node) - { - while (!destination_node->messages_to_deliver.empty()) - { - try - { - const message& message_to_deliver = destination_node->messages_to_deliver.front(); - if (message_to_deliver.msg_type.value() == trx_message_type) - destination_node->delegate->handle_transaction(message_to_deliver.as()); - else if (message_to_deliver.msg_type.value() == block_message_type) - { - std::vector contained_transaction_message_ids; - destination_node->delegate->handle_block(message_to_deliver.as(), false, contained_transaction_message_ids); - } - else - destination_node->delegate->handle_message(message_to_deliver); - } - catch ( const fc::exception& e ) - { - elog( "${r}", ("r",e) ); - } - destination_node->messages_to_deliver.pop(); - } - } - - void simulated_network::broadcast( const message& item_to_broadcast ) - { - for (node_info* network_node_info : network_nodes) - { - network_node_info->messages_to_deliver.emplace(item_to_broadcast); - if (!network_node_info->message_sender_task_done.valid() || network_node_info->message_sender_task_done.ready()) - network_node_info->message_sender_task_done = fc::async([=](){ message_sender(network_node_info); }, "simulated_network_sender"); - } - } - - void simulated_network::add_node_delegate( node_delegate* node_delegate_to_add ) - { - network_nodes.push_back(new node_info(node_delegate_to_add)); - } - namespace detail { #define ROLLING_WINDOW_SIZE 1000 From b25f2554b3a7b2396a494998e54567ae74d08aa0 Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Wed, 27 Nov 2019 12:50:53 +0100 Subject: [PATCH 03/14] Implemented framework for recurring tasks that can be cancelled --- libraries/utilities/CMakeLists.txt | 3 +- .../graphene/utilities/recurring_task.hpp | 85 ++++++++++++ libraries/utilities/recurring_task.cpp | 122 ++++++++++++++++++ 3 files changed, 209 insertions(+), 1 deletion(-) create mode 100644 libraries/utilities/include/graphene/utilities/recurring_task.hpp create mode 100644 libraries/utilities/recurring_task.cpp diff --git a/libraries/utilities/CMakeLists.txt b/libraries/utilities/CMakeLists.txt index 66ed0358f5..f312841e60 100644 --- a/libraries/utilities/CMakeLists.txt +++ b/libraries/utilities/CMakeLists.txt @@ -15,7 +15,8 @@ set(sources tempdir.cpp words.cpp elasticsearch.cpp - ${HEADERS}) + recurring_task.cpp + ) configure_file("${CMAKE_CURRENT_SOURCE_DIR}/git_revision.cpp.in" "${CMAKE_CURRENT_BINARY_DIR}/git_revision.cpp" @ONLY) list(APPEND sources "${CMAKE_CURRENT_BINARY_DIR}/git_revision.cpp") diff --git a/libraries/utilities/include/graphene/utilities/recurring_task.hpp b/libraries/utilities/include/graphene/utilities/recurring_task.hpp new file mode 100644 index 0000000000..b516d88028 --- /dev/null +++ b/libraries/utilities/include/graphene/utilities/recurring_task.hpp @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2019 BitShares Blockchain Foundation + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the 
"Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#pragma once + +#include + +#include +#include + +namespace graphene { namespace utilities { + +/** This class implements a framework for potentially long-running background tasks. + * Subclasses must override run() to do the actual work. run() implementations should use the provided + * sleep() method for waiting, and should regularly call check_cancelled(). Both sleep() and check_cancelled() + * will throw when cancelled. + */ +class recurring_task +{ + std::thread::id _runner; + bool _cancelled = false; + bool _triggered = false; + boost::fibers::mutex _mtx; + boost::fibers::condition_variable _cv; + boost::fibers::future _worker; + + /** Waits for the given duration. Waiting can be interrupted by trigger() or cancel(). + * Throws when cancelled. + */ + void _sleep( std::chrono::microseconds how_long ); +protected: + /** Must be overridden to perform the actual work. + */ + virtual void run() {} + + /** Waits for the given duration. Waiting can be interrupted by trigger() or cancel(). + * Throws when cancelled. + */ + template< class Rep, class Period > + void sleep( std::chrono::duration< Rep, Period > how_long ) + { + _sleep( std::chrono::duration_cast< std::chrono::microseconds >( how_long ) ); + } + + /** Checks if the task has been cancelled, and throws if so. + */ + void check_cancelled(); +public: + explicit recurring_task( const std::string& name = "" ); + explicit recurring_task( std::thread::id runner, const std::string& name = "" ); + virtual ~recurring_task(); + + /** Throws when cancelled. + * If no active fiber is running, starts a new one. Will wake up a sleeping fiber. + */ + void trigger(); + + /** Cancels the running task. Future calls to trigger() and wait() will throw. */ + void cancel(); + + /** Waits for the running task to complete. Throws when cancelled. 
*/ + void wait(); +}; + +} } // end namespace graphene::utilities diff --git a/libraries/utilities/recurring_task.cpp b/libraries/utilities/recurring_task.cpp new file mode 100644 index 0000000000..4c861b3f6f --- /dev/null +++ b/libraries/utilities/recurring_task.cpp @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2019 BitShares Blockchain Foundation + * + * The MIT License + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include + +#include + +namespace graphene { namespace utilities { + +recurring_task::recurring_task( const std::string& name ) +{ + fc::set_fiber_name( name ); +} + +recurring_task::recurring_task( std::thread::id runner, const std::string& name ) : _runner( runner ) +{ + fc::set_fiber_name( name ); +} + +recurring_task::~recurring_task() +{ + if( _worker.valid() && _worker.wait_for( std::chrono::seconds(0) ) != boost::fibers::future_status::ready ) + { + cancel(); + try + { + wait(); + } + catch( const fc::canceled_exception ) {} + } +} + +void recurring_task::_sleep( std::chrono::microseconds how_long ) +{ + static const auto cycle = std::chrono::microseconds(2000000); + static const auto zero = std::chrono::microseconds(0); + std::unique_lock lock(_mtx); + if( !_triggered ) + { + do + { + if( how_long > cycle ) + { + _cv.wait_for( lock, cycle ); + how_long -= cycle; // FIXME: total can be longer than originally desired + } + else + { + _cv.wait_for( lock, how_long ); + how_long = zero; + } + check_cancelled(); + } + while( how_long > zero && !_triggered ); + } + _triggered = false; + check_cancelled(); +} + +void recurring_task::check_cancelled() +{ + if( _cancelled ) + FC_THROW_EXCEPTION( fc::canceled_exception, "Task '${n}' was cancelled!", ("n",fc::get_fiber_name()) ); +} + +void recurring_task::trigger() +{ + std::unique_lock lock(_mtx); + check_cancelled(); + if( !_worker.valid() || _worker.wait_for( std::chrono::seconds(0) ) == boost::fibers::future_status::ready ) + { + _worker = _runner != std::thread::id() ? 
fc::async( std::bind( &recurring_task::run, this ), _runner ) + : fc::async( std::bind( &recurring_task::run, this ) ); + } + else + { + _triggered = true; + _cv.notify_all(); + } +} + +void recurring_task::cancel() +{ + std::unique_lock lock(_mtx); + _cancelled = true; + _cv.notify_all(); +} + +void recurring_task::wait() +{ + std::unique_lock lock(_mtx); + if( !_worker.valid() ) + check_cancelled(); + else + { + lock.unlock(); + _worker.wait(); + } +}; + +} } // end namespace graphene::utilities From 92755f49e34b53501435ca1228b086074d215366 Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Wed, 27 Nov 2019 13:04:38 +0100 Subject: [PATCH 04/14] Removed unused code --- libraries/net/node.cpp | 25 ------------------------- libraries/net/node_impl.hxx | 1 - 2 files changed, 26 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index aa83e5f98e..f561085913 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -439,30 +439,7 @@ namespace graphene { namespace net { namespace detail { // if we broke out of the while loop, that means either we have connected to enough nodes, or // we don't have any good candidates to connect to right now. -#if 0 - try - { - _retrigger_connect_loop_promise = fc::promise::create("graphene::net::retrigger_connect_loop"); - if( is_wanting_new_connections() || !_add_once_node_list.empty() ) - { - if( is_wanting_new_connections() ) - dlog( "Still want to connect to more nodes, but I don't have any good candidates. Trying again in 15 seconds" ); - else - dlog( "I still have some \"add once\" nodes to connect to. Trying again in 15 seconds" ); - _retrigger_connect_loop_promise->wait_until( fc::time_point::now() + fc::seconds(GRAPHENE_PEER_DATABASE_RETRY_DELAY ) ); - } - else - { - dlog( "I don't need any more connections, waiting forever until something changes" ); - _retrigger_connect_loop_promise->wait(); - } - } - catch ( fc::timeout_exception& ) //intentionally not logged - { - } // catch -#else fc::usleep(fc::seconds(10)); -#endif } catch (const fc::canceled_exception&) { @@ -477,8 +454,6 @@ namespace graphene { namespace net { namespace detail { VERIFY_CORRECT_THREAD(); dlog( "Triggering connect loop now" ); _potential_peer_database_updated = true; - //if( _retrigger_connect_loop_promise ) - // _retrigger_connect_loop_promise->set_value(); } bool node_impl::have_already_received_sync_item( const item_hash_t& item_hash ) diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 7d31d16eea..57380b0e0b 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -193,7 +193,6 @@ class node_impl : public peer_connection_delegate std::list _add_once_node_list; /// list of peers we want to connect to as soon as possible peer_database _potential_peer_db; - fc::promise::ptr _retrigger_connect_loop_promise; bool _potential_peer_database_updated; fc::future _p2p_network_connect_loop_done; // @} From 422a9443a017aaad61ea9d229ac0df6849bd9012 Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Wed, 4 Dec 2019 16:49:14 +0100 Subject: [PATCH 05/14] Intermediate --- libraries/chain/db_block.cpp | 15 +- libraries/chain/db_management.cpp | 4 +- .../chain/include/graphene/chain/database.hpp | 9 +- libraries/db/object_database.cpp | 4 +- libraries/fc | 2 +- libraries/net/CMakeLists.txt | 2 +- libraries/net/include/graphene/net/node.hpp | 3 +- .../include/graphene/net/peer_connection.hpp | 5 +- libraries/net/node.cpp | 416 +++++++++--------- libraries/net/node_impl.hxx | 177 ++++++-- 
libraries/net/peer_connection.cpp | 17 +- 11 files changed, 373 insertions(+), 281 deletions(-) diff --git a/libraries/chain/db_block.cpp b/libraries/chain/db_block.cpp index be9ba48ede..a5dc8dc5f7 100644 --- a/libraries/chain/db_block.cpp +++ b/libraries/chain/db_block.cpp @@ -39,6 +39,7 @@ #include +#include #include #include @@ -822,9 +823,9 @@ void database::_precompute_parallel( const Trx* trx, const size_t count, const u } } -fc::future database::precompute_parallel( const signed_block& block, const uint32_t skip )const +boost::fibers::future database::precompute_parallel( const signed_block& block, const uint32_t skip )const { try { - std::vector> workers; + std::vector> workers; if( !block.transactions.empty() ) { if( (skip & skip_expensive) == skip_expensive ) @@ -850,16 +851,20 @@ fc::future database::precompute_parallel( const signed_block& block, const block.id(); if( workers.empty() ) - return fc::future< void >( fc::promise< void >::create( true ) ); + { + boost::fibers::promise< void > done; + done.set_value(); + return done.get_future(); + } auto first = workers.begin(); auto worker = first; while( ++worker != workers.end() ) worker->wait(); - return *first; + return std::move( *first ); } FC_LOG_AND_RETHROW() } -fc::future database::precompute_parallel( const precomputable_transaction& trx )const +boost::fibers::future database::precompute_parallel( const precomputable_transaction& trx )const { return fc::do_parallel([this,&trx] () { _precompute_parallel( &trx, 1, skip_nothing ); diff --git a/libraries/chain/db_management.cpp b/libraries/chain/db_management.cpp index 9ca657ef6f..6002ad7442 100644 --- a/libraries/chain/db_management.cpp +++ b/libraries/chain/db_management.cpp @@ -80,7 +80,7 @@ void database::reindex( fc::path data_dir ) size_t total_block_size = _block_id_to_block.total_block_size(); const auto& gpo = get_global_properties(); - std::queue< std::tuple< size_t, signed_block, fc::future< void > > > blocks; + std::queue< std::tuple< size_t, signed_block, boost::fibers::future< void > > > blocks; uint32_t next_block_num = head_block_num() + 1; uint32_t i = next_block_num; while( next_block_num <= last_block_num || !blocks.empty() ) @@ -93,7 +93,7 @@ void database::reindex( fc::path data_dir ) { if( block->timestamp >= last_block->timestamp - gpo.parameters.maximum_time_until_expiration ) skip &= ~skip_transaction_dupe_check; - blocks.emplace( processed_block_size, std::move(*block), fc::future() ); + blocks.emplace( processed_block_size, std::move(*block), boost::fibers::future() ); std::get<2>(blocks.back()) = precompute_parallel( std::get<1>(blocks.back()), skip ); } else diff --git a/libraries/chain/include/graphene/chain/database.hpp b/libraries/chain/include/graphene/chain/database.hpp index 5afb83ab1e..d80538c9b1 100644 --- a/libraries/chain/include/graphene/chain/database.hpp +++ b/libraries/chain/include/graphene/chain/database.hpp @@ -37,10 +37,12 @@ #include #include #include -#include +#include #include +#include + #include namespace graphene { namespace chain { @@ -453,7 +455,8 @@ namespace graphene { namespace chain { * @return a future that will resolve to the input block with * precomputations applied */ - fc::future precompute_parallel( const signed_block& block, const uint32_t skip = skip_nothing )const; + boost::fibers::future precompute_parallel( const signed_block& block, + const uint32_t skip = skip_nothing )const; /** Precomputes digests, signatures and operation validations. * "Expensive" computations may be done in a parallel thread. 
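(A minimal caller sketch for the fiber-based interface declared above, for illustration only. The helper name, the `db` reference and the block `blk` are assumptions and not part of this patch; only precompute_parallel() and push_block() are actual database members.)

    #include <graphene/chain/database.hpp>
    #include <graphene/protocol/block.hpp>
    #include <boost/fiber/future/future.hpp>

    void precompute_then_push( graphene::chain::database& db, const graphene::protocol::signed_block& blk )
    {
       // kick off the (possibly parallel) precomputation and obtain a fiber future
       boost::fibers::future< void > done = db.precompute_parallel( blk );
       // wait() suspends only the calling fiber; other fibers on this thread keep running
       done.wait();
       // push the block once precomputation has finished
       db.push_block( blk );
    }
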
@@ -462,7 +465,7 @@ namespace graphene { namespace chain { * @return a future that will resolve to the input transaction with * precomputations applied */ - fc::future precompute_parallel( const precomputable_transaction& trx )const; + boost::fibers::future precompute_parallel( const precomputable_transaction& trx )const; private: template void _precompute_parallel( const Trx* trx, const size_t count, const uint32_t skip )const; diff --git a/libraries/db/object_database.cpp b/libraries/db/object_database.cpp index 75b7090be4..9133f46f1f 100644 --- a/libraries/db/object_database.cpp +++ b/libraries/db/object_database.cpp @@ -72,7 +72,7 @@ void object_database::flush() { // ilog("Save object_database in ${d}", ("d", _data_dir)); fc::create_directories( _data_dir / "object_database.tmp" / "lock" ); - std::vector> tasks; + std::vector> tasks; tasks.reserve(200); for( uint32_t space = 0; space < _index.size(); ++space ) { @@ -109,7 +109,7 @@ void object_database::open(const fc::path& data_dir) wlog("Ignoring locked object_database"); return; } - std::vector> tasks; + std::vector> tasks; tasks.reserve(200); ilog("Opening object database from ${d} ...", ("d", data_dir)); for( uint32_t space = 0; space < _index.size(); ++space ) diff --git a/libraries/fc b/libraries/fc index 2f776301cd..dc672fa449 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 2f776301cd8410525d8b6f16127fcdaa0a16074d +Subproject commit dc672fa449830f6719f5ad32d7b3c90a4b1249bf diff --git a/libraries/net/CMakeLists.txt b/libraries/net/CMakeLists.txt index b533e61a35..33d0424634 100644 --- a/libraries/net/CMakeLists.txt +++ b/libraries/net/CMakeLists.txt @@ -12,7 +12,7 @@ set(SOURCES node.cpp add_library( graphene_net ${SOURCES} ${HEADERS} ) target_link_libraries( graphene_net - PUBLIC fc graphene_db graphene_protocol ) + PUBLIC fc graphene_db graphene_protocol graphene_utilities ) target_include_directories( graphene_net PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" PRIVATE "${CMAKE_SOURCE_DIR}/libraries/chain/include" diff --git a/libraries/net/include/graphene/net/node.hpp b/libraries/net/include/graphene/net/node.hpp index 9f7824558d..e3f461fe42 100644 --- a/libraries/net/include/graphene/net/node.hpp +++ b/libraries/net/include/graphene/net/node.hpp @@ -30,6 +30,7 @@ #include #include +#include namespace graphene { namespace net { @@ -293,7 +294,7 @@ namespace graphene { namespace net { void disable_peer_advertising(); fc::variant_object get_call_statistics() const; private: - std::unique_ptr my; + std::unique_ptr my; }; typedef std::shared_ptr node_ptr; diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index a00e43dcbf..4ce978b3ea 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -39,7 +39,6 @@ #include #include -#include namespace graphene { namespace net { @@ -166,7 +165,7 @@ namespace graphene { namespace net size_t _total_queued_messages_size = 0; std::queue, std::list > > _queued_messages; - fc::future _send_queued_messages_done; + boost::fibers::future _send_queued_messages_done; public: fc::time_point connection_initiation_time; fc::time_point connection_closed_time; @@ -260,7 +259,7 @@ namespace graphene { namespace net uint32_t last_known_fork_block_number = 0; - fc::future accept_or_connect_task_done; + boost::fibers::future accept_or_connect_task_done; firewall_check_state_data *firewall_check_state = nullptr; private: diff --git 
a/libraries/net/node.cpp b/libraries/net/node.cpp index f561085913..48ad5721e3 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -58,11 +58,7 @@ #include #include -#include -#include -#include -#include -#include +#include #include #include #include @@ -87,6 +83,9 @@ #include +#include +#include + //#define ENABLE_DEBUG_ULOGS #ifdef DEFAULT_LOGGER @@ -256,28 +255,16 @@ FC_REFLECT(graphene::net::detail::node_configuration, (listen_endpoint) namespace graphene { namespace net { namespace detail { - void node_impl_deleter::operator()(node_impl* impl_to_delete) - { + node_task::node_task( node_impl& node, const std::string& name ) : #ifdef P2P_IN_DEDICATED_THREAD - std::weak_ptr weak_thread; - if (impl_to_delete) - { - std::shared_ptr impl_thread(impl_to_delete->_thread); - weak_thread = impl_thread; - impl_thread->async([impl_to_delete](){ delete impl_to_delete; }, "delete node_impl").wait(); - dlog("deleting the p2p thread"); - } - if (weak_thread.expired()) - dlog("done deleting the p2p thread"); - else - dlog("failed to delete the p2p thread, we must be leaking a smart pointer somewhere"); -#else // P2P_IN_DEDICATED_THREAD - delete impl_to_delete; + recurring_task( node._thread.get_id(), name ) +#else + recurring_task( name ) #endif // P2P_IN_DEDICATED_THREAD - } + , _node( &node ) {} #ifdef P2P_IN_DEDICATED_THREAD -# define VERIFY_CORRECT_THREAD() assert(_thread->is_current()) +# define VERIFY_CORRECT_THREAD() assert( _thread.get_id() == std::this_thread::get_id() ) #else # define VERIFY_CORRECT_THREAD() do {} while (0) #endif @@ -286,16 +273,19 @@ namespace graphene { namespace net { namespace detail { #define MAXIMUM_NUMBER_OF_BLOCKS_TO_PREFETCH (10 * MAXIMUM_NUMBER_OF_BLOCKS_TO_HANDLE_AT_ONE_TIME) node_impl::node_impl(const std::string& user_agent) : -#ifdef P2P_IN_DEDICATED_THREAD - _thread(std::make_shared("p2p")), -#endif // P2P_IN_DEDICATED_THREAD _delegate(nullptr), _is_firewalled(firewalled_state::unknown), _potential_peer_database_updated(false), + _p2p_network_connect_loop(*this), _sync_items_to_fetch_updated(false), + _fetch_sync_items_loop(*this), + _process_backlog_of_sync_blocks(*this), _suspend_fetching_sync_blocks(false), _items_to_fetch_updated(false), + _fetch_item_loop(*this), _items_to_fetch_sequence_counter(0), + _advertise_inventory_loop(*this), + _terminate_inactive_connections_loop(*this), _recent_block_interval_in_seconds(GRAPHENE_MAX_BLOCK_INTERVAL), _user_agent_string(user_agent), _desired_number_of_connections(GRAPHENE_NET_DEFAULT_DESIRED_CONNECTIONS), @@ -322,10 +312,31 @@ namespace graphene { namespace net { namespace detail { { _rate_limiter.set_actual_rate_time_constant(fc::seconds(2)); fc::rand_bytes((char*) _node_id.data(), (int)_node_id.size()); +#ifdef P2P_IN_DEDICATED_THREAD + _thread = std::thread( [this] () { + fc::initialize_fibers(); + fc::set_thread_name( "p2p" ); + while( !_node_is_shutting_down ) + boost::this_fiber::sleep_for( std::chrono::seconds(2) ); + }); +#endif // P2P_IN_DEDICATED_THREAD } node_impl::~node_impl() { +#ifdef P2P_IN_DEDICATED_THREAD + if( _thread.joinable() ) + { + fc::async( [this] () { _shutdown(); }, _thread.get_id(), "shutdown" ); + _thread.join(); + } + else +#endif // P2P_IN_DEDICATED_THREAD + _shutdown(); + } + + void node_impl::_shutdown() + { VERIFY_CORRECT_THREAD(); ilog( "cleaning up node" ); _node_is_shutting_down = true; @@ -378,21 +389,21 @@ namespace graphene { namespace net { namespace detail { } } - void node_impl::p2p_network_connect_loop() + void p2p_network_connect_task::run() { 
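      // The former node_impl::p2p_network_connect_loop() is now p2p_network_connect_task,
      // a node_task -- i.e. a graphene::utilities::recurring_task that is pinned to the
      // dedicated p2p thread when P2P_IN_DEDICATED_THREAD is defined.  Under the
      // recurring_task contract (PATCH 03), run() performs the work, waits via the
      // protected sleep(), and both sleep() and check_cancelled() throw
      // fc::canceled_exception once cancel() has been called.  A minimal, purely
      // illustrative subclass (heartbeat_task and its body are hypothetical, not part
      // of this patch):
      //
      //    class heartbeat_task : public graphene::utilities::recurring_task
      //    {
      //    protected:
      //       void run() override
      //       {
      //          while( true )
      //          {
      //             check_cancelled();                 // throws once cancel() was called
      //             /* ... one round of periodic work ... */
      //             sleep( std::chrono::seconds(5) );  // woken early by trigger() or cancel()
      //          }
      //       }
      //    public:
      //       heartbeat_task() : recurring_task( "heartbeat" ) {}
      //    };
      //
      //    heartbeat_task hb;
      //    hb.trigger();    // starts the worker fiber, or wakes a sleeping one
      //    // ...
      //    hb.cancel();     // later calls to trigger()/wait() throw; the destructor
      //                     // also cancels and waits for the worker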
VERIFY_CORRECT_THREAD(); - while (!_p2p_network_connect_loop_done.canceled()) + while ( true ) { try { dlog("Starting an iteration of p2p_network_connect_loop()."); - display_current_connections(); + _node->display_current_connections(); // add-once peers bypass our checks on the maximum/desired number of connections (but they will still be counted against the totals once they're connected) - if (!_add_once_node_list.empty()) + if (!_node->_add_once_node_list.empty()) { std::list add_once_node_list; - add_once_node_list.swap(_add_once_node_list); + add_once_node_list.swap(_node->_add_once_node_list); dlog("Processing \"add once\" node list containing ${count} peers:", ("count", add_once_node_list.size())); for (const potential_peer_record& add_once_peer : add_once_node_list) { @@ -402,51 +413,52 @@ namespace graphene { namespace net { namespace detail { { // see if we have an existing connection to that peer. If we do, disconnect them and // then try to connect the next time through the loop - peer_connection_ptr existing_connection_ptr = get_connection_to_endpoint( add_once_peer.endpoint ); + peer_connection_ptr existing_connection_ptr = _node->get_connection_to_endpoint( add_once_peer.endpoint ); if(!existing_connection_ptr) - connect_to_endpoint(add_once_peer.endpoint); + _node->connect_to_endpoint(add_once_peer.endpoint); } dlog("Done processing \"add once\" node list"); } - while (is_wanting_new_connections()) + while (_node->is_wanting_new_connections()) { bool initiated_connection_this_pass = false; - _potential_peer_database_updated = false; + _node->_potential_peer_database_updated = false; - for (peer_database::iterator iter = _potential_peer_db.begin(); - iter != _potential_peer_db.end() && is_wanting_new_connections(); + for (peer_database::iterator iter = _node->_potential_peer_db.begin(); + iter != _node->_potential_peer_db.end() && _node->is_wanting_new_connections(); ++iter) { - fc::microseconds delay_until_retry = fc::seconds((iter->number_of_failed_connection_attempts + 1) * _peer_connection_retry_timeout); + fc::microseconds delay_until_retry = fc::seconds((iter->number_of_failed_connection_attempts + 1) + * _node->_peer_connection_retry_timeout); - if (!is_connection_to_endpoint_in_progress(iter->endpoint) && + if (!_node->is_connection_to_endpoint_in_progress(iter->endpoint) && ((iter->last_connection_disposition != last_connection_failed && iter->last_connection_disposition != last_connection_rejected && iter->last_connection_disposition != last_connection_handshaking_failed) || (fc::time_point::now() - iter->last_connection_attempt_time) > delay_until_retry)) { - connect_to_endpoint(iter->endpoint); + _node->connect_to_endpoint(iter->endpoint); initiated_connection_this_pass = true; } } - if (!initiated_connection_this_pass && !_potential_peer_database_updated) + if (!initiated_connection_this_pass && !_node->_potential_peer_database_updated) break; } - display_current_connections(); + _node->display_current_connections(); // if we broke out of the while loop, that means either we have connected to enough nodes, or // we don't have any good candidates to connect to right now. 
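      // Unlike the fc::usleep() it replaces, recurring_task::sleep() is an interruptible
      // wait: trigger_p2p_network_connect_loop() -- which now calls
      // _p2p_network_connect_loop.trigger() -- wakes it early, and it throws
      // fc::canceled_exception once the task has been cancelled.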
- fc::usleep(fc::seconds(10)); + sleep( std::chrono::seconds(10) ); } catch (const fc::canceled_exception&) { throw; } FC_CAPTURE_AND_LOG( (0) ) - }// while(!canceled) + }// while(true) } void node_impl::trigger_p2p_network_connect_loop() @@ -454,6 +466,7 @@ namespace graphene { namespace net { namespace detail { VERIFY_CORRECT_THREAD(); dlog( "Triggering connect loop now" ); _potential_peer_database_updated = true; + _p2p_network_connect_loop.trigger(); } bool node_impl::have_already_received_sync_item( const item_hash_t& item_hash ) @@ -490,24 +503,24 @@ namespace graphene { namespace net { namespace detail { peer->send_message(fetch_items_message(graphene::net::block_message_type, items_to_request)); } - void node_impl::fetch_sync_items_loop() + void fetch_sync_items_task::run() { VERIFY_CORRECT_THREAD(); - while( !_fetch_sync_items_loop_done.canceled() ) + while( _node->_sync_items_to_fetch_updated ) { - _sync_items_to_fetch_updated = false; + _node->_sync_items_to_fetch_updated = false; dlog( "beginning another iteration of the sync items loop" ); - if (!_suspend_fetching_sync_blocks) + if (!_node->_suspend_fetching_sync_blocks) { std::map > sync_item_requests_to_send; { - ASSERT_TASK_NOT_PREEMPTED(); + // FIXME ASSERT_TASK_NOT_PREEMPTED(); std::set sync_items_to_request; // for each idle peer that we're syncing with - for( const peer_connection_ptr& peer : _active_connections ) + for( const peer_connection_ptr& peer : _node->_active_connections ) { if( peer->we_need_sync_items_from_peer && sync_item_requests_to_send.find(peer) == sync_item_requests_to_send.end() && // if we've already scheduled a request for this peer, don't consider scheduling another @@ -520,14 +533,14 @@ namespace graphene { namespace net { namespace detail { { item_hash_t item_to_potentially_request = peer->ids_of_items_to_get[i]; // if we don't already have this item in our temporary storage and we haven't requested from another syncing peer - if( !have_already_received_sync_item(item_to_potentially_request) && // already got it, but for some reson it's still in our list of items to fetch + if( !_node->have_already_received_sync_item(item_to_potentially_request) && // already got it, but for some reson it's still in our list of items to fetch sync_items_to_request.find(item_to_potentially_request) == sync_items_to_request.end() && // we have already decided to request it from another peer during this iteration - _active_sync_requests.find(item_to_potentially_request) == _active_sync_requests.end() ) // we've requested it in a previous iteration and we're still waiting for it to arrive + _node->_active_sync_requests.find(item_to_potentially_request) == _node->_active_sync_requests.end() ) // we've requested it in a previous iteration and we're still waiting for it to arrive { // then schedule a request from this peer sync_item_requests_to_send[peer].push_back(item_to_potentially_request); sync_items_to_request.insert( item_to_potentially_request ); - if (sync_item_requests_to_send[peer].size() >= _maximum_blocks_per_peer_during_syncing) + if (sync_item_requests_to_send[peer].size() >= _node->_maximum_blocks_per_peer_during_syncing) break; } } @@ -538,19 +551,11 @@ namespace graphene { namespace net { namespace detail { // make all the requests we scheduled in the loop above for( auto sync_item_request : sync_item_requests_to_send ) - request_sync_items_from_peer( sync_item_request.first, sync_item_request.second ); + _node->request_sync_items_from_peer( sync_item_request.first, sync_item_request.second ); 
sync_item_requests_to_send.clear(); } else dlog("fetch_sync_items_loop is suspended pending backlog processing"); - - if( !_sync_items_to_fetch_updated ) - { - dlog( "no sync items to fetch right now, going to sleep" ); - _retrigger_fetch_sync_items_loop_promise = fc::promise::create("graphene::net::retrigger_fetch_sync_items_loop"); - _retrigger_fetch_sync_items_loop_promise->wait(); - _retrigger_fetch_sync_items_loop_promise.reset(); - } } // while( !canceled ) } @@ -559,8 +564,7 @@ namespace graphene { namespace net { namespace detail { VERIFY_CORRECT_THREAD(); dlog( "Triggering fetch sync items loop now" ); _sync_items_to_fetch_updated = true; - if( _retrigger_fetch_sync_items_loop_promise ) - _retrigger_fetch_sync_items_loop_promise->set_value(); + _fetch_sync_items_loop.trigger(); } bool node_impl::is_item_in_any_peers_inventory(const item_id& item) const @@ -573,16 +577,17 @@ namespace graphene { namespace net { namespace detail { return false; } - void node_impl::fetch_items_loop() + void fetch_items_task::run() { VERIFY_CORRECT_THREAD(); - while (!_fetch_item_loop_done.canceled()) + while ( true ) { - _items_to_fetch_updated = false; + _node->_items_to_fetch_updated = false; dlog("beginning an iteration of fetch items (${count} items to fetch)", - ("count", _items_to_fetch.size())); + ("count", _node->_items_to_fetch.size())); - fc::time_point oldest_timestamp_to_fetch = fc::time_point::now() - fc::seconds(_recent_block_interval_in_seconds * GRAPHENE_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS); + fc::time_point oldest_timestamp_to_fetch = fc::time_point::now() + - fc::seconds(_node->_recent_block_interval_in_seconds * GRAPHENE_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS); fc::time_point next_peer_unblocked_time = fc::time_point::maximum(); // we need to construct a list of items to request from each peer first, @@ -604,12 +609,12 @@ namespace graphene { namespace net { namespace detail { fetch_messages_to_send_set items_by_peer; // initialize the fetch_messages_to_send with an empty set of items for all idle peers - for (const peer_connection_ptr& peer : _active_connections) + for (const peer_connection_ptr& peer : _node->_active_connections) if (peer->idle()) items_by_peer.insert(peer_and_items_to_fetch(peer)); // now loop over all items we want to fetch - for (auto item_iter = _items_to_fetch.begin(); item_iter != _items_to_fetch.end();) + for (auto item_iter = _node->_items_to_fetch.begin(); item_iter != _node->_items_to_fetch.end();) { if (item_iter->timestamp < oldest_timestamp_to_fetch) { @@ -617,7 +622,7 @@ namespace graphene { namespace net { namespace detail { // this can happen during flooding, and the _items_to_fetch could otherwise get clogged // with a bunch of items that we'll never be able to request from any peer wlog("Unable to fetch item ${item} before its likely expiration time, removing it from our list of items to fetch", ("item", item_iter->item)); - item_iter = _items_to_fetch.erase(item_iter); + item_iter = _node->_items_to_fetch.erase(item_iter); } else { @@ -630,15 +635,17 @@ namespace graphene { namespace net { namespace detail { if (peer_iter->item_ids.size() < GRAPHENE_NET_MAX_ITEMS_PER_PEER_DURING_NORMAL_OPERATION && peer->inventory_peer_advertised_to_us.find(item_iter->item) != peer->inventory_peer_advertised_to_us.end()) { - if (item_iter->item.item_type == graphene::net::trx_message_type && peer->is_transaction_fetching_inhibited()) - next_peer_unblocked_time = std::min(peer->transaction_fetching_inhibited_until, next_peer_unblocked_time); + if 
(item_iter->item.item_type == graphene::net::trx_message_type + && peer->is_transaction_fetching_inhibited()) + next_peer_unblocked_time = std::min( peer->transaction_fetching_inhibited_until, + next_peer_unblocked_time); else { //dlog("requesting item ${hash} from peer ${endpoint}", // ("hash", iter->item.item_hash)("endpoint", peer->get_remote_endpoint())); item_id item_id_to_fetch = item_iter->item; peer->items_requested_from_peer.insert(peer_connection::item_to_time_map_type::value_type(item_id_to_fetch, fc::time_point::now())); - item_iter = _items_to_fetch.erase(item_iter); + item_iter = _node->_items_to_fetch.erase(item_iter); item_fetched = true; items_by_peer.get().modify(peer_iter, [&item_id_to_fetch](peer_and_items_to_fetch& peer_and_items) { peer_and_items.item_ids.push_back(item_id_to_fetch); @@ -672,22 +679,13 @@ namespace graphene { namespace net { namespace detail { } items_by_peer.clear(); - if (!_items_to_fetch_updated) + if (!_node->_items_to_fetch_updated) { - _retrigger_fetch_item_loop_promise = fc::promise::create("graphene::net::retrigger_fetch_item_loop"); fc::microseconds time_until_retrigger = fc::microseconds::maximum(); - if (next_peer_unblocked_time != fc::time_point::maximum()) - time_until_retrigger = next_peer_unblocked_time - fc::time_point::now(); - try - { - if (time_until_retrigger > fc::microseconds(0)) - _retrigger_fetch_item_loop_promise->wait(time_until_retrigger); - } - catch (const fc::timeout_exception&) - { - dlog("Resuming fetch_items_loop due to timeout -- one of our peers should no longer be throttled"); - } - _retrigger_fetch_item_loop_promise.reset(); + if (next_peer_unblocked_time < fc::time_point::maximum()) + sleep( std::chrono::microseconds( (next_peer_unblocked_time - fc::time_point::now()).count() ) ); + else if (time_until_retrigger > fc::microseconds(0)) + sleep( std::chrono::microseconds( time_until_retrigger.count() ) ); } } // while (!canceled) } @@ -696,26 +694,25 @@ namespace graphene { namespace net { namespace detail { { VERIFY_CORRECT_THREAD(); _items_to_fetch_updated = true; - if( _retrigger_fetch_item_loop_promise ) - _retrigger_fetch_item_loop_promise->set_value(); + _fetch_item_loop.trigger(); } - void node_impl::advertise_inventory_loop() + void advertise_inventory_task::run() { VERIFY_CORRECT_THREAD(); - while (!_advertise_inventory_loop_done.canceled()) + while ( !_node->_new_inventory.empty() ) { dlog("beginning an iteration of advertise inventory"); // swap inventory into local variable, clearing the node's copy std::unordered_set inventory_to_advertise; - inventory_to_advertise.swap(_new_inventory); + inventory_to_advertise.swap(_node->_new_inventory); // process all inventory to advertise and construct the inventory messages we'll send // first, then send them all in a batch (to avoid any fiber interruption points while // we're computing the messages) std::list > inventory_messages_to_send; - for (const peer_connection_ptr& peer : _active_connections) + for (const peer_connection_ptr& peer : _node->_active_connections) { // only advertise to peers who are in sync with us idump((peer->peer_needs_sync_items_from_us)); @@ -763,32 +760,25 @@ namespace graphene { namespace net { namespace detail { for (auto iter = inventory_messages_to_send.begin(); iter != inventory_messages_to_send.end(); ++iter) iter->first->send_message(iter->second); inventory_messages_to_send.clear(); - - if (_new_inventory.empty()) - { - _retrigger_advertise_inventory_loop_promise = 
fc::promise::create("graphene::net::retrigger_advertise_inventory_loop"); - _retrigger_advertise_inventory_loop_promise->wait(); - _retrigger_advertise_inventory_loop_promise.reset(); - } - } // while(!canceled) + } // while(!_new_inventory.empty()) } void node_impl::trigger_advertise_inventory_loop() { VERIFY_CORRECT_THREAD(); - if( _retrigger_advertise_inventory_loop_promise ) - _retrigger_advertise_inventory_loop_promise->set_value(); + _advertise_inventory_loop.trigger(); } - void node_impl::terminate_inactive_connections_loop() + void terminate_inactive_connections_task::run() { VERIFY_CORRECT_THREAD(); + while( true ) { std::list peers_to_disconnect_gently; std::list peers_to_disconnect_forcibly; std::list peers_to_send_keep_alive; std::list peers_to_terminate; - _recent_block_interval_in_seconds = _delegate->get_current_block_interval_in_seconds(); + _node->_recent_block_interval_in_seconds = _node->_delegate->get_current_block_interval_in_seconds(); // Disconnect peers that haven't sent us any data recently // These numbers are just guesses and we need to think through how this works better. @@ -802,11 +792,11 @@ namespace graphene { namespace net { namespace detail { // As usual, the first step is to walk through all our peers and figure out which // peers need action (disconneting, sending keepalives, etc), then we walk through // those lists yielding at our leisure later. - ASSERT_TASK_NOT_PREEMPTED(); + // FIXME ASSERT_TASK_NOT_PREEMPTED(); - uint32_t handshaking_timeout = _peer_inactivity_timeout; + uint32_t handshaking_timeout = _node->_peer_inactivity_timeout; fc::time_point handshaking_disconnect_threshold = fc::time_point::now() - fc::seconds(handshaking_timeout); - for( const peer_connection_ptr handshaking_peer : _handshaking_connections ) + for( const peer_connection_ptr handshaking_peer : _node->_handshaking_connections ) if( handshaking_peer->connection_initiation_time < handshaking_disconnect_threshold && handshaking_peer->get_last_message_received_time() < handshaking_disconnect_threshold && handshaking_peer->get_last_message_sent_time() < handshaking_disconnect_threshold ) @@ -827,7 +817,7 @@ namespace graphene { namespace net { namespace detail { } // timeout for any active peers is two block intervals - uint32_t active_disconnect_timeout = 10 * _recent_block_interval_in_seconds; + uint32_t active_disconnect_timeout = 10 * _node->_recent_block_interval_in_seconds; uint32_t active_send_keepalive_timeout = active_disconnect_timeout / 2; // set the ignored request time out to 1 second. 
When we request a block @@ -844,7 +834,7 @@ namespace graphene { namespace net { namespace detail { fc::time_point active_disconnect_threshold = fc::time_point::now() - fc::seconds(active_disconnect_timeout); fc::time_point active_send_keepalive_threshold = fc::time_point::now() - fc::seconds(active_send_keepalive_timeout); fc::time_point active_ignored_request_threshold = fc::time_point::now() - active_ignored_request_timeout; - for( const peer_connection_ptr& active_peer : _active_connections ) + for( const peer_connection_ptr& active_peer : _node->_active_connections ) { if( active_peer->connection_initiation_time < active_disconnect_threshold && active_peer->get_last_message_received_time() < active_disconnect_threshold ) @@ -912,7 +902,7 @@ namespace graphene { namespace net { namespace detail { } fc::time_point closing_disconnect_threshold = fc::time_point::now() - fc::seconds(GRAPHENE_NET_PEER_DISCONNECT_TIMEOUT); - for( const peer_connection_ptr& closing_peer : _closing_connections ) + for( const peer_connection_ptr& closing_peer : _node->_closing_connections ) if( closing_peer->connection_closed_time < closing_disconnect_threshold ) { // we asked this peer to close their connectoin to us at least GRAPHENE_NET_PEER_DISCONNECT_TIMEOUT @@ -924,7 +914,7 @@ namespace graphene { namespace net { namespace detail { uint32_t failed_terminate_timeout_seconds = 120; fc::time_point failed_terminate_threshold = fc::time_point::now() - fc::seconds(failed_terminate_timeout_seconds); - for (const peer_connection_ptr& peer : _terminating_connections ) + for (const peer_connection_ptr& peer : _node->_terminating_connections ) if (peer->get_connection_terminated_time() != fc::time_point::min() && peer->get_connection_terminated_time() < failed_terminate_threshold) { @@ -941,8 +931,8 @@ namespace graphene { namespace net { namespace detail { for (const peer_connection_ptr& peer : peers_to_terminate ) { assert(_terminating_connections.find(peer) != _terminating_connections.end()); - _terminating_connections.erase(peer); - schedule_peer_for_deletion(peer); + _node->_terminating_connections.erase(peer); + _node->schedule_peer_for_deletion(peer); } peers_to_terminate.clear(); @@ -951,7 +941,7 @@ namespace graphene { namespace net { namespace detail { // moved to the yielding section for( const peer_connection_ptr& peer : peers_to_disconnect_forcibly ) { - move_peer_to_terminating_list(peer); + _node->move_peer_to_terminating_list(peer); peer->close_connection(); } peers_to_disconnect_forcibly.clear(); @@ -964,8 +954,8 @@ namespace graphene { namespace net { namespace detail { fc::exception detailed_error( FC_LOG_MESSAGE(warn, "Disconnecting due to inactivity", ( "last_message_received_seconds_ago", (peer->get_last_message_received_time() - fc::time_point::now() ).count() / fc::seconds(1 ).count() ) ( "last_message_sent_seconds_ago", (peer->get_last_message_sent_time() - fc::time_point::now() ).count() / fc::seconds(1 ).count() ) - ( "inactivity_timeout", _active_connections.find(peer ) != _active_connections.end() ? _peer_inactivity_timeout * 10 : _peer_inactivity_timeout ) ) ); - disconnect_from_peer( peer.get(), "Disconnecting due to inactivity", false, detailed_error ); + ( "inactivity_timeout", _node->_active_connections.find(peer ) != _node->_active_connections.end() ? 
_node->_peer_inactivity_timeout * 10 : _node->_peer_inactivity_timeout ) ) ); + _node->disconnect_from_peer( peer.get(), "Disconnecting due to inactivity", false, detailed_error ); } peers_to_disconnect_gently.clear(); @@ -974,46 +964,45 @@ namespace graphene { namespace net { namespace detail { offsetof(current_time_request_message, request_sent_time)); peers_to_send_keep_alive.clear(); - if (!_node_is_shutting_down && !_terminate_inactive_connections_loop_done.canceled()) - _terminate_inactive_connections_loop_done = fc::schedule( [this](){ terminate_inactive_connections_loop(); }, - fc::time_point::now() + fc::seconds(GRAPHENE_NET_PEER_HANDSHAKE_INACTIVITY_TIMEOUT / 2), - "terminate_inactive_connections_loop" ); + sleep( std::chrono::seconds(GRAPHENE_NET_PEER_HANDSHAKE_INACTIVITY_TIMEOUT / 2) ); + } // while( true ) } - void node_impl::fetch_updated_peer_lists_loop() + void fetch_updated_peer_lists_task::run() { VERIFY_CORRECT_THREAD(); - std::list original_active_peers(_active_connections.begin(), _active_connections.end()); - for( const peer_connection_ptr& active_peer : original_active_peers ) + while( true ) { - try - { - active_peer->send_message(address_request_message()); - } - catch ( const fc::canceled_exception& ) - { - throw; - } - catch (const fc::exception& e) - { - dlog("Caught exception while sending address request message to peer ${peer} : ${e}", - ("peer", active_peer->get_remote_endpoint())("e", e)); - } + std::list original_active_peers(_node->_active_connections.begin(), _node->_active_connections.end()); + for( const peer_connection_ptr& active_peer : original_active_peers ) + { + try + { + active_peer->send_message(address_request_message()); + } + catch ( const fc::canceled_exception& ) + { + throw; + } + catch (const fc::exception& e) + { + dlog("Caught exception while sending address request message to peer ${peer} : ${e}", + ("peer", active_peer->get_remote_endpoint())("e", e)); + } + } + + // this has nothing to do with updating the peer list, but we need to prune this list + // at regular intervals, this is a fine place to do it. + fc::time_point_sec oldest_failed_ids_to_keep(fc::time_point::now() - fc::minutes(15)); + auto oldest_failed_ids_to_keep_iter = _node->_recently_failed_items.get().lower_bound(oldest_failed_ids_to_keep); + auto begin_iter = _node->_recently_failed_items.get().begin(); + _node->_recently_failed_items.get().erase(begin_iter, oldest_failed_ids_to_keep_iter); + + sleep( std::chrono::minutes(15) ); } - - // this has nothing to do with updating the peer list, but we need to prune this list - // at regular intervals, this is a fine place to do it. 
- fc::time_point_sec oldest_failed_ids_to_keep(fc::time_point::now() - fc::minutes(15)); - auto oldest_failed_ids_to_keep_iter = _recently_failed_items.get().lower_bound(oldest_failed_ids_to_keep); - auto begin_iter = _recently_failed_items.get().begin(); - _recently_failed_items.get().erase(begin_iter, oldest_failed_ids_to_keep_iter); - - if (!_node_is_shutting_down && !_fetch_updated_peer_lists_loop_done.canceled() ) - _fetch_updated_peer_lists_loop_done = fc::schedule( [this](){ fetch_updated_peer_lists_loop(); }, - fc::time_point::now() + fc::minutes(15), - "fetch_updated_peer_lists_loop" ); } + void node_impl::update_bandwidth_data(uint32_t bytes_read_this_second, uint32_t bytes_written_this_second) { VERIFY_CORRECT_THREAD(); @@ -1038,53 +1027,54 @@ namespace graphene { namespace net { namespace detail { } } } - void node_impl::bandwidth_monitor_loop() + void bandwidth_monitor_task::run() { VERIFY_CORRECT_THREAD(); - fc::time_point_sec current_time = fc::time_point::now(); + while( true ) + { + fc::time_point_sec current_time = fc::time_point::now(); - if (_bandwidth_monitor_last_update_time == fc::time_point_sec::min()) - _bandwidth_monitor_last_update_time = current_time; + if (_node->_bandwidth_monitor_last_update_time == fc::time_point_sec::min()) + _node->_bandwidth_monitor_last_update_time = current_time; - uint32_t seconds_since_last_update = current_time.sec_since_epoch() - _bandwidth_monitor_last_update_time.sec_since_epoch(); - seconds_since_last_update = std::max(UINT32_C(1), seconds_since_last_update); - uint32_t bytes_read_this_second = _rate_limiter.get_actual_download_rate(); - uint32_t bytes_written_this_second = _rate_limiter.get_actual_upload_rate(); - for (uint32_t i = 0; i < seconds_since_last_update - 1; ++i) - update_bandwidth_data(0, 0); - update_bandwidth_data(bytes_read_this_second, bytes_written_this_second); - _bandwidth_monitor_last_update_time = current_time; + uint32_t seconds_since_last_update = current_time.sec_since_epoch() - _node->_bandwidth_monitor_last_update_time.sec_since_epoch(); + seconds_since_last_update = std::max(UINT32_C(1), seconds_since_last_update); + uint32_t bytes_read_this_second = _node->_rate_limiter.get_actual_download_rate(); + uint32_t bytes_written_this_second = _node->_rate_limiter.get_actual_upload_rate(); + for (uint32_t i = 0; i < seconds_since_last_update - 1; ++i) + _node->update_bandwidth_data(0, 0); + _node->update_bandwidth_data(bytes_read_this_second, bytes_written_this_second); + _node->_bandwidth_monitor_last_update_time = current_time; - if (!_node_is_shutting_down && !_bandwidth_monitor_loop_done.canceled()) - _bandwidth_monitor_loop_done = fc::schedule( [=](){ bandwidth_monitor_loop(); }, - fc::time_point::now() + fc::seconds(1), - "bandwidth_monitor_loop" ); + sleep( std::chrono::seconds(1) ); + } } - void node_impl::dump_node_status_task() + void dump_node_status_task::run() { VERIFY_CORRECT_THREAD(); - dump_node_status(); - if (!_node_is_shutting_down && !_dump_node_status_task_done.canceled()) - _dump_node_status_task_done = fc::schedule([=](){ dump_node_status_task(); }, - fc::time_point::now() + fc::minutes(1), - "dump_node_status_task"); + while( true ) + { + _node->dump_node_status(); + sleep( std::chrono::minutes(1) ); + } } - void node_impl::delayed_peer_deletion_task() + void delayed_peer_deletion_task::run() { VERIFY_CORRECT_THREAD(); + while (!_node->_peers_to_delete.empty()) + { #ifdef USE_PEERS_TO_DELETE_MUTEX - fc::scoped_lock lock(_peers_to_delete_mutex); - dlog("in delayed_peer_deletion_task 
with ${count} in queue", ("count", _peers_to_delete.size())); - _peers_to_delete.clear(); - dlog("_peers_to_delete cleared"); + std::unique_lock lock(_peers_to_delete_mutex); + dlog("in delayed_peer_deletion_task with ${count} in queue", ("count", _node->_peers_to_delete.size())); + _node->_peers_to_delete.clear(); + dlog("_peers_to_delete cleared"); #else - while (!_peers_to_delete.empty()) - { std::list peers_to_delete_copy; - dlog("beginning an iteration of delayed_peer_deletion_task with ${count} in queue", ("count", _peers_to_delete.size())); - peers_to_delete_copy.swap(_peers_to_delete); + dlog( "beginning an iteration of delayed_peer_deletion_task with ${count} in queue", + ("count", _node->_peers_to_delete.size()) ); + peers_to_delete_copy.swap(_node->_peers_to_delete); } dlog("leaving delayed_peer_deletion_task"); #endif @@ -1099,50 +1089,36 @@ namespace graphene { namespace net { namespace detail { assert(_closing_connections.find(peer_to_delete) == _closing_connections.end()); assert(_terminating_connections.find(peer_to_delete) == _terminating_connections.end()); + unsigned number_of_peers_to_delete; #ifdef USE_PEERS_TO_DELETE_MUTEX dlog("scheduling peer for deletion: ${peer} (may block on a mutex here)", ("peer", peer_to_delete->get_remote_endpoint())); - unsigned number_of_peers_to_delete; { - fc::scoped_lock lock(_peers_to_delete_mutex); + std::unique_lock lock(_peers_to_delete_mutex); _peers_to_delete.emplace_back(peer_to_delete); number_of_peers_to_delete = _peers_to_delete.size(); } dlog("peer scheduled for deletion: ${peer}", ("peer", peer_to_delete->get_remote_endpoint())); - if (!_node_is_shutting_down && - (!_delayed_peer_deletion_task_done.valid() || _delayed_peer_deletion_task_done.ready())) - { - dlog("asyncing delayed_peer_deletion_task to delete ${size} peers", ("size", number_of_peers_to_delete)); - _delayed_peer_deletion_task_done = fc::async([this](){ delayed_peer_deletion_task(); }, "delayed_peer_deletion_task" ); - } - else - dlog("delayed_peer_deletion_task is already scheduled (current size of _peers_to_delete is ${size})", ("size", number_of_peers_to_delete)); #else dlog("scheduling peer for deletion: ${peer} (this will not block)", ("peer", peer_to_delete->get_remote_endpoint())); _peers_to_delete.push_back(peer_to_delete); - if (!_node_is_shutting_down && - (!_delayed_peer_deletion_task_done.valid() || _delayed_peer_deletion_task_done.ready())) - { - dlog("asyncing delayed_peer_deletion_task to delete ${size} peers", ("size", _peers_to_delete.size())); - _delayed_peer_deletion_task_done = fc::async([this](){ delayed_peer_deletion_task(); }, "delayed_peer_deletion_task" ); - } - else - dlog("delayed_peer_deletion_task is already scheduled (current size of _peers_to_delete is ${size})", ("size", _peers_to_delete.size())); - + number_of_peers_to_delete = _peers_to_delete.size(); #endif + dlog("asyncing delayed_peer_deletion_task to delete ${size} peers", ("size", number_of_peers_to_delete)); + _delayed_peer_deletion_task.trigger(); } bool node_impl::is_accepting_new_connections() { VERIFY_CORRECT_THREAD(); - return !_p2p_network_connect_loop_done.canceled() && get_number_of_connections() <= _maximum_number_of_connections; + return !_node_is_shutting_down && get_number_of_connections() <= _maximum_number_of_connections; } bool node_impl::is_wanting_new_connections() { VERIFY_CORRECT_THREAD(); - return !_p2p_network_connect_loop_done.canceled() && get_number_of_connections() < _desired_number_of_connections; + return !_node_is_shutting_down && 
get_number_of_connections() < _desired_number_of_connections; } uint32_t node_impl::get_number_of_connections() @@ -2699,7 +2675,7 @@ namespace graphene { namespace net { namespace detail { "process_backlog_of_sync_blocks"); } - void node_impl::process_backlog_of_sync_blocks() + void process_backlog_of_sync_blocks_task::run() { VERIFY_CORRECT_THREAD(); // garbage-collect the list of async tasks here for lack of a better place @@ -3687,7 +3663,7 @@ namespace graphene { namespace net { namespace detail { { #ifdef USE_PEERS_TO_DELETE_MUTEX - fc::scoped_lock lock(_peers_to_delete_mutex); + std::unique_lock lock(_peers_to_delete_mutex); #endif try { @@ -3772,11 +3748,12 @@ namespace graphene { namespace net { namespace detail { send_hello_message(new_peer); } - void node_impl::accept_loop() + void accept_task::run() { VERIFY_CORRECT_THREAD(); - while ( !_accept_loop_complete.canceled() ) + while ( true ) { + check_cancelled(); peer_connection_ptr new_peer(peer_connection::make_shared(this)); try @@ -3798,7 +3775,7 @@ namespace graphene { namespace net { namespace detail { }, "accept_connection_task" ); // limit the rate at which we accept connections to mitigate DOS attacks - fc::usleep( fc::milliseconds(10) ); + sleep( std::chrono::milliseconds(10) ); } FC_CAPTURE_AND_LOG( (0) ) } } // accept_loop() @@ -4671,14 +4648,15 @@ namespace graphene { namespace net { namespace detail { #ifdef P2P_IN_DEDICATED_THREAD # define INVOKE_IN_IMPL(method_name, ...) \ - return my->_thread->async([&](){ return my->method_name(__VA_ARGS__); }, "thread invoke for method " BOOST_PP_STRINGIZE(method_name)).wait() + return fc::async([&](){ return my->method_name(__VA_ARGS__); }, + my->_thread.get_id(), "thread invoke for method " BOOST_PP_STRINGIZE(method_name)).wait() #else # define INVOKE_IN_IMPL(method_name, ...) 
\ return my->method_name(__VA_ARGS__) #endif // P2P_IN_DEDICATED_THREAD node::node(const std::string& user_agent) : - my(new detail::node_impl(user_agent)) + my( std::make_unique( user_agent ) ) { } @@ -4688,8 +4666,7 @@ namespace graphene { namespace net { namespace detail { void node::set_node_delegate( node_delegate* del ) { - fc::thread* delegate_thread = &fc::thread::current(); - INVOKE_IN_IMPL(set_node_delegate, del, delegate_thread); + INVOKE_IN_IMPL(set_node_delegate, del, std::this_thread::get_id()); } void node::load_configuration( const fc::path& configuration_directory ) @@ -4842,7 +4819,8 @@ namespace graphene { namespace net { namespace detail { , BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _delay_after_accumulator))(boost::accumulators::tag::rolling_window::window_size = ROLLING_WINDOW_SIZE) - statistics_gathering_node_delegate_wrapper::statistics_gathering_node_delegate_wrapper(node_delegate* delegate, fc::thread* thread_for_delegate_calls) : + statistics_gathering_node_delegate_wrapper::statistics_gathering_node_delegate_wrapper(node_delegate* delegate, + std::thread::id thread_for_delegate_calls) : _node_delegate(delegate), _thread(thread_for_delegate_calls) BOOST_PP_SEQ_FOR_EACH(INITIALIZE_ACCUMULATOR, unused, NODE_DELEGATE_METHOD_NAMES) @@ -4890,16 +4868,16 @@ namespace graphene { namespace net { namespace detail { &_ ## method_name ## _execution_accumulator, \ &_ ## method_name ## _delay_before_accumulator, \ &_ ## method_name ## _delay_after_accumulator); \ - if (_thread->is_current()) \ + if (_thread == std::this_thread::get_id()) \ { \ call_statistics_collector::actual_execution_measurement_helper helper(statistics_collector); \ return _node_delegate->method_name(__VA_ARGS__); \ } \ else \ - return _thread->async([&, statistics_collector](){ \ + return fc::async([&, statistics_collector](){ \ call_statistics_collector::actual_execution_measurement_helper helper(statistics_collector); \ return _node_delegate->method_name(__VA_ARGS__); \ - }, "invoke " BOOST_STRINGIZE(method_name)).wait(); \ + }, _thread, "invoke " BOOST_STRINGIZE(method_name)).get(); \ } \ catch (const fc::exception& e) \ { \ @@ -4923,16 +4901,16 @@ namespace graphene { namespace net { namespace detail { &_ ## method_name ## _execution_accumulator, \ &_ ## method_name ## _delay_before_accumulator, \ &_ ## method_name ## _delay_after_accumulator); \ - if (_thread->is_current()) \ + if (_thread == std::this_thread::get_id()) \ { \ call_statistics_collector::actual_execution_measurement_helper helper(statistics_collector); \ return _node_delegate->method_name(__VA_ARGS__); \ } \ else \ - return _thread->async([&, statistics_collector](){ \ + return fc::async([&, statistics_collector](){ \ call_statistics_collector::actual_execution_measurement_helper helper(statistics_collector); \ return _node_delegate->method_name(__VA_ARGS__); \ - }, "invoke " BOOST_STRINGIZE(method_name)).wait() + }, _thread, "invoke " BOOST_STRINGIZE(method_name)).get() #endif bool statistics_gathering_node_delegate_wrapper::has_item( const net::item_id& id ) diff --git a/libraries/net/node_impl.hxx b/libraries/net/node_impl.hxx index 57380b0e0b..cf864c25a4 100644 --- a/libraries/net/node_impl.hxx +++ b/libraries/net/node_impl.hxx @@ -1,6 +1,7 @@ #pragma once #include -#include +#include + #include #include #include @@ -8,6 +9,7 @@ #include #include #include +#include namespace graphene { namespace net { namespace detail { @@ -38,7 +40,7 @@ class statistics_gathering_node_delegate_wrapper : public node_delegate { private: 
node_delegate *_node_delegate; - fc::thread *_thread; + std::thread::id _thread; typedef boost::accumulators::accumulator_set _thread; + std::thread _thread; #endif // P2P_IN_DEDICATED_THREAD std::unique_ptr _delegate; fc::sha256 _chain_id; @@ -194,14 +320,13 @@ class node_impl : public peer_connection_delegate peer_database _potential_peer_db; bool _potential_peer_database_updated; - fc::future _p2p_network_connect_loop_done; + p2p_network_connect_task _p2p_network_connect_loop; // @} /// used by the task that fetches sync items during synchronization // @{ - fc::promise::ptr _retrigger_fetch_sync_items_loop_promise; bool _sync_items_to_fetch_updated; - fc::future _fetch_sync_items_loop_done; + fetch_sync_items_task _fetch_sync_items_loop; typedef std::unordered_map active_sync_requests_map; @@ -210,14 +335,13 @@ class node_impl : public peer_connection_delegate std::list _received_sync_items; /// list of sync blocks we've received, but can't yet process because we are still missing blocks that come earlier in the chain // @} - fc::future _process_backlog_of_sync_blocks_done; + process_backlog_of_sync_blocks_task _process_backlog_of_sync_blocks; bool _suspend_fetching_sync_blocks; /// used by the task that fetches items during normal operation // @{ - fc::promise::ptr _retrigger_fetch_item_loop_promise; bool _items_to_fetch_updated; - fc::future _fetch_item_loop_done; + fetch_items_task _fetch_item_loop; struct item_id_index{}; typedef boost::multi_index_container::ptr _retrigger_advertise_inventory_loop_promise; - fc::future _advertise_inventory_loop_done; + advertise_inventory_task _advertise_inventory_loop; std::unordered_set _new_inventory; /// list of items we have received but not yet advertised to our peers // @} - fc::future _terminate_inactive_connections_loop_done; + terminate_inactive_connections_task _terminate_inactive_connections_loop; uint8_t _recent_block_interval_in_seconds; // a cached copy of the block interval, to avoid a thread hop to the blockchain to get the current value std::string _user_agent_string; @@ -267,7 +390,7 @@ class node_impl : public peer_connection_delegate uint32_t _peer_inactivity_timeout; fc::tcp_server _tcp_server; - fc::future _accept_loop_complete; + accept_task _accept_loop; /** Stores all connections which have not yet finished key exchange or are still sending initial handshaking messages * back and forth (not yet ready to initiate syncing) */ @@ -293,7 +416,7 @@ class node_impl : public peer_connection_delegate bool _peer_advertising_disabled; - fc::future _fetch_updated_peer_lists_loop_done; + fetch_updated_peer_lists_task _fetch_updated_peer_lists_loop; boost::circular_buffer _average_network_read_speed_seconds; boost::circular_buffer _average_network_write_speed_seconds; @@ -305,9 +428,9 @@ class node_impl : public peer_connection_delegate unsigned _average_network_usage_minute_counter; fc::time_point_sec _bandwidth_monitor_last_update_time; - fc::future _bandwidth_monitor_loop_done; + bandwidth_monitor_task _bandwidth_monitor_loop; - fc::future _dump_node_status_task_done; + dump_node_status_task _dump_node_status_task; /* We have two alternate paths through the schedule_peer_for_deletion code -- one that * uses a mutex to prevent one fiber from adding items to the queue while another is deleting @@ -319,10 +442,10 @@ class node_impl : public peer_connection_delegate */ //#define USE_PEERS_TO_DELETE_MUTEX 1 #ifdef USE_PEERS_TO_DELETE_MUTEX - fc::mutex _peers_to_delete_mutex; + boost::fibers::mutex _peers_to_delete_mutex; #endif std::list 
_peers_to_delete; - fc::future _delayed_peer_deletion_task_done; + delayed_peer_deletion_task _delayed_peer_deletion_task; #ifdef ENABLE_P2P_DEBUGGING_API std::set _allowed_peers; @@ -334,35 +457,28 @@ class node_impl : public peer_connection_delegate unsigned _maximum_number_of_sync_blocks_to_prefetch; unsigned _maximum_blocks_per_peer_during_syncing; - std::list > _handle_message_calls_in_progress; + std::list > _handle_message_calls_in_progress; node_impl(const std::string& user_agent); virtual ~node_impl(); + void _shutdown(); void save_node_configuration(); - void p2p_network_connect_loop(); void trigger_p2p_network_connect_loop(); bool have_already_received_sync_item( const item_hash_t& item_hash ); void request_sync_item_from_peer( const peer_connection_ptr& peer, const item_hash_t& item_to_request ); void request_sync_items_from_peer( const peer_connection_ptr& peer, const std::vector& items_to_request ); - void fetch_sync_items_loop(); void trigger_fetch_sync_items_loop(); bool is_item_in_any_peers_inventory(const item_id& item) const; - void fetch_items_loop(); + void trigger_fetch_items_loop(); - void advertise_inventory_loop(); void trigger_advertise_inventory_loop(); - void terminate_inactive_connections_loop(); - - void fetch_updated_peer_lists_loop(); void update_bandwidth_data(uint32_t bytes_read_this_second, uint32_t bytes_written_this_second); - void bandwidth_monitor_loop(); - void dump_node_status_task(); bool is_accepting_new_connections(); bool is_wanting_new_connections(); @@ -467,7 +583,6 @@ class node_impl : public peer_connection_delegate void dump_node_status(); - void delayed_peer_deletion_task(); void schedule_peer_for_deletion(const peer_connection_ptr& peer_to_delete); void disconnect_from_peer( peer_connection* originating_peer, @@ -476,7 +591,7 @@ class node_impl : public peer_connection_delegate const fc::oexception& additional_data = fc::oexception() ); // methods implementing node's public interface - void set_node_delegate(node_delegate* del, fc::thread* thread_for_delegate_calls); + void set_node_delegate(node_delegate* del, std::thread::id thread_for_delegate_calls); void load_configuration( const fc::path& configuration_directory ); void listen_to_p2p_network(); void connect_to_p2p_network(); diff --git a/libraries/net/peer_connection.cpp b/libraries/net/peer_connection.cpp index 12a0eccdb4..4092658f00 100644 --- a/libraries/net/peer_connection.cpp +++ b/libraries/net/peer_connection.cpp @@ -27,17 +27,18 @@ #include #include -#include #include +#include + #ifdef DEFAULT_LOGGER # undef DEFAULT_LOGGER #endif #define DEFAULT_LOGGER "p2p" #ifndef NDEBUG -# define VERIFY_CORRECT_THREAD() assert(_thread->is_current()) +# define VERIFY_CORRECT_THREAD() assert( _thread == std::this_thread::get_id() ) #else # define VERIFY_CORRECT_THREAD() do {} while (0) #endif @@ -90,7 +91,7 @@ namespace graphene { namespace net last_known_fork_block_number(0), firewall_check_state(nullptr), #ifndef NDEBUG - _thread(&fc::thread::current()), + _thread(std::this_thread::get_id()), _send_message_queue_tasks_running(0), #endif _currently_handling_message(false) @@ -115,16 +116,6 @@ namespace graphene { namespace net { VERIFY_CORRECT_THREAD(); -#if 0 // this gets too verbose -#ifndef NDEBUG - struct scope_logger { - fc::optional endpoint; - scope_logger(const fc::optional& endpoint) : endpoint(endpoint) { dlog("entering peer_connection::destroy() for peer ${endpoint}", ("endpoint", endpoint)); } - ~scope_logger() { dlog("leaving peer_connection::destroy() for peer ${endpoint}", 
("endpoint", endpoint)); } - } send_message_scope_logger(get_remote_endpoint()); -#endif -#endif - try { dlog("calling close_connection()"); From ce794b370e2f289d575d083ac9f4f0147a7a2da2 Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Wed, 11 Dec 2019 13:25:53 +0100 Subject: [PATCH 06/14] net builds --- .../include/graphene/net/peer_connection.hpp | 28 ++- libraries/net/message_oriented_connection.cpp | 98 +++++---- libraries/net/node.cpp | 198 +++++++++--------- libraries/net/peer_connection.cpp | 49 ++--- 4 files changed, 191 insertions(+), 182 deletions(-) diff --git a/libraries/net/include/graphene/net/peer_connection.hpp b/libraries/net/include/graphene/net/peer_connection.hpp index 4ce978b3ea..641fbc592b 100644 --- a/libraries/net/include/graphene/net/peer_connection.hpp +++ b/libraries/net/include/graphene/net/peer_connection.hpp @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -40,8 +41,24 @@ #include #include -namespace graphene { namespace net - { +namespace graphene { namespace net { + class peer_connection; + + namespace detail { + + class send_queued_messages_task : public graphene::utilities::recurring_task { + public: + send_queued_messages_task() : _conn(nullptr) {} + explicit send_queued_messages_task( peer_connection& conn ) : _conn(&conn) {} + + protected: + virtual void run(); + + peer_connection* _conn; + }; + + } // detail + struct firewall_check_state_data { node_id_t expected_node_id; @@ -57,7 +74,6 @@ namespace graphene { namespace net node_id_t requesting_peer; }; - class peer_connection; class peer_connection_delegate { public: @@ -68,7 +84,6 @@ namespace graphene { namespace net virtual message get_message_for_item(const item_id& item) = 0; }; - class peer_connection; typedef std::shared_ptr peer_connection_ptr; class peer_connection : public message_oriented_connection_delegate, public std::enable_shared_from_this @@ -165,7 +180,7 @@ namespace graphene { namespace net size_t _total_queued_messages_size = 0; std::queue, std::list > > _queued_messages; - boost::fibers::future _send_queued_messages_done; + detail::send_queued_messages_task _send_queued_messages; public: fc::time_point connection_initiation_time; fc::time_point connection_closed_time; @@ -259,8 +274,6 @@ namespace graphene { namespace net uint32_t last_known_fork_block_number = 0; - boost::fibers::future accept_or_connect_task_done; - firewall_check_state_data *firewall_check_state = nullptr; private: #ifndef NDEBUG @@ -312,6 +325,7 @@ namespace graphene { namespace net void send_queued_messages_task(); void accept_connection_task(); void connect_to_task(const fc::ip::endpoint& remote_endpoint); + friend class detail::send_queued_messages_task; }; typedef std::shared_ptr peer_connection_ptr; diff --git a/libraries/net/message_oriented_connection.cpp b/libraries/net/message_oriented_connection.cpp index b62651fa76..07e7e7184a 100644 --- a/libraries/net/message_oriented_connection.cpp +++ b/libraries/net/message_oriented_connection.cpp @@ -21,16 +21,13 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ -#include -#include -#include -#include #include #include #include #include #include +#include #include @@ -48,14 +45,33 @@ namespace graphene { namespace net { namespace detail { + class message_oriented_connection_impl; + + class connection_task : public graphene::utilities::recurring_task + { + public: + connection_task() : _conn(nullptr) {} + explicit connection_task( message_oriented_connection_impl& conn ) : _conn(&conn) {} + + protected: + virtual void run(); + + message_oriented_connection_impl* _conn; + }; + class message_oriented_connection_impl { private: message_oriented_connection* _self; message_oriented_connection_delegate *_delegate; stcp_socket _sock; - fc::promise::ptr _ready_for_sending; - fc::future _read_loop_done; + + bool _ready_for_sending = false; + bool _destroyed = false; + boost::fibers::condition_variable _cv; + boost::fibers::mutex _mtx; + + connection_task _read_loop; uint64_t _bytes_received; uint64_t _bytes_sent; @@ -64,13 +80,10 @@ namespace graphene { namespace net { fc::time_point _last_message_sent_time; std::atomic_bool _send_message_in_progress; - std::atomic_bool _read_loop_in_progress; #ifndef NDEBUG fc::thread* _thread; #endif - void read_loop(); - void start_read_loop(); public: fc::tcp_socket& get_socket(); void accept(); @@ -92,17 +105,18 @@ namespace graphene { namespace net { fc::time_point get_last_message_received_time() const; fc::time_point get_connection_time() const { return _connected_time; } fc::sha512 get_shared_secret() const; + + friend class connection_task; }; message_oriented_connection_impl::message_oriented_connection_impl(message_oriented_connection* self, message_oriented_connection_delegate* delegate) : _self(self), _delegate(delegate), - _ready_for_sending(fc::promise::create()), + _read_loop(*this), _bytes_received(0), _bytes_sent(0), - _send_message_in_progress(false), - _read_loop_in_progress(false) + _send_message_in_progress(false) #ifndef NDEBUG ,_thread(&fc::thread::current()) #endif @@ -124,18 +138,18 @@ namespace graphene { namespace net { { VERIFY_CORRECT_THREAD(); _sock.accept(); - assert(!_read_loop_done.valid()); // check to be sure we never launch two read loops - _read_loop_done = fc::async([=](){ read_loop(); }, "message read_loop"); - _ready_for_sending->set_value(); + _read_loop.trigger(); + _ready_for_sending = true; + _cv.notify_all(); } void message_oriented_connection_impl::connect_to(const fc::ip::endpoint& remote_endpoint) { VERIFY_CORRECT_THREAD(); _sock.connect_to(remote_endpoint); - assert(!_read_loop_done.valid()); // check to be sure we never launch two read loops - _read_loop_done = fc::async([=](){ read_loop(); }, "message read_loop"); - _ready_for_sending->set_value(); + _read_loop.trigger(); + _ready_for_sending = true; + _cv.notify_all(); } void message_oriented_connection_impl::bind(const fc::ip::endpoint& local_endpoint) @@ -159,16 +173,14 @@ namespace graphene { namespace net { } }; - void message_oriented_connection_impl::read_loop() + void connection_task::run() { VERIFY_CORRECT_THREAD(); const int BUFFER_SIZE = 16; const int LEFTOVER = BUFFER_SIZE - sizeof(message_header); static_assert(BUFFER_SIZE >= sizeof(message_header), "insufficient buffer"); - no_parallel_execution_guard guard( &_read_loop_in_progress ); - - _connected_time = fc::time_point::now(); + _conn->_connected_time = fc::time_point::now(); fc::oexception exception_to_rethrow; bool call_on_connection_closed = false; @@ -179,27 +191,28 @@ namespace graphene { namespace net { char buffer[BUFFER_SIZE]; while( true ) { - 
_sock.read(buffer, BUFFER_SIZE); - _bytes_received += BUFFER_SIZE; + _conn->_sock.read(buffer, BUFFER_SIZE); + _conn->_bytes_received += BUFFER_SIZE; memcpy((char*)&m, buffer, sizeof(message_header)); - FC_ASSERT( m.size.value() <= MAX_MESSAGE_SIZE, "", ("m.size",m.size.value())("MAX_MESSAGE_SIZE",MAX_MESSAGE_SIZE) ); + FC_ASSERT( m.size.value() <= MAX_MESSAGE_SIZE, "", + ("m.size",m.size.value())("MAX_MESSAGE_SIZE",MAX_MESSAGE_SIZE) ); size_t remaining_bytes_with_padding = 16 * ((m.size.value() - LEFTOVER + 15) / 16); m.data.resize(LEFTOVER + remaining_bytes_with_padding); //give extra 16 bytes to allow for padding added in send call std::copy(buffer + sizeof(message_header), buffer + sizeof(buffer), m.data.begin()); if (remaining_bytes_with_padding) { - _sock.read(&m.data[LEFTOVER], remaining_bytes_with_padding); - _bytes_received += remaining_bytes_with_padding; + _conn->_sock.read(&m.data[LEFTOVER], remaining_bytes_with_padding); + _conn->_bytes_received += remaining_bytes_with_padding; } m.data.resize(m.size.value()); // truncate off the padding bytes - _last_message_received_time = fc::time_point::now(); + _conn->_last_message_received_time = fc::time_point::now(); try { // message handling errors are warnings... - _delegate->on_message(_self, m); + _conn->_delegate->on_message(_conn->_self, m); } /// Dedicated catches needed to distinguish from general fc::exception catch ( const fc::canceled_exception& e ) { throw; } @@ -214,7 +227,8 @@ namespace graphene { namespace net { } catch ( const fc::canceled_exception& e ) { - wlog( "caught a canceled_exception in read_loop. this should mean we're in the process of deleting this object already, so there's no need to notify the delegate: ${e}", ("e", e.to_detail_string() ) ); + wlog( "caught a canceled_exception in read_loop. this should mean we're in the process of deleting this object already, so there's no need to notify the delegate: ${e}", + ("e", e.to_detail_string() ) ); throw; } catch ( const fc::eof_exception& e ) @@ -226,23 +240,26 @@ namespace graphene { namespace net { { elog( "disconnected ${er}", ("er", e.to_detail_string() ) ); call_on_connection_closed = true; - exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE(warn, "disconnected: ${e}", ("e", e.to_detail_string()))); + exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE( warn, "disconnected: ${e}", + ("e", e.to_detail_string()))); } catch ( const std::exception& e ) { elog( "disconnected ${er}", ("er", e.what() ) ); call_on_connection_closed = true; - exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE(warn, "disconnected: ${e}", ("e", e.what()))); + exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE( warn, "disconnected: ${e}", + ("e", e.what()))); } catch ( ... 
) { elog( "unexpected exception" ); call_on_connection_closed = true; - exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE(warn, "disconnected: ${e}", ("e", fc::except_str()))); + exception_to_rethrow = fc::unhandled_exception(FC_LOG_MESSAGE( warn, "disconnected: ${e}", + ("e", fc::except_str()))); } if (call_on_connection_closed) - _delegate->on_connection_closed(_self); + _conn->_delegate->on_connection_closed(_conn->_self); if (exception_to_rethrow) throw *exception_to_rethrow; @@ -264,7 +281,12 @@ namespace graphene { namespace net { #endif #endif no_parallel_execution_guard guard( &_send_message_in_progress ); - _ready_for_sending->wait(); + + { + std::unique_lock lock(_mtx); + _cv.wait( lock, [this] () { return _ready_for_sending || _destroyed; } ); + } + FC_ASSERT( !_destroyed, "Connection was closed!" ); try { @@ -308,7 +330,8 @@ namespace graphene { namespace net { try { - _read_loop_done.cancel_and_wait(__FUNCTION__); + _read_loop.cancel(); + _read_loop.wait(); } catch ( const fc::exception& e ) { @@ -318,7 +341,8 @@ namespace graphene { namespace net { { wlog( "Exception thrown while canceling message_oriented_connection's read_loop, ignoring" ); } - _ready_for_sending->set_exception( std::make_shared() ); + _destroyed = true; + _cv.notify_all(); } uint64_t message_oriented_connection_impl::get_total_bytes_sent() const diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 48ad5721e3..73738c6d1e 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -85,6 +85,7 @@ #include #include +#include //#define ENABLE_DEBUG_ULOGS @@ -2558,7 +2559,7 @@ namespace graphene { namespace net { namespace detail { bool is_fork_block = is_hard_fork_block(block_message_to_send.block.block_num()); for (const peer_connection_ptr& peer : _active_connections) { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections + //ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections bool disconnecting_this_peer = false; if (is_fork_block) { @@ -2631,7 +2632,7 @@ namespace graphene { namespace net { namespace detail { // invalid message received for (const peer_connection_ptr& peer : _active_connections) { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections + //ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections if (peer->ids_of_items_being_processed.find(block_message_to_send.block_id) != peer->ids_of_items_being_processed.end()) @@ -2668,40 +2669,38 @@ namespace graphene { namespace net { namespace detail { dlog("Leaving send_sync_block_to_node_delegate"); - if (// _suspend_fetching_sync_blocks && <-- you can use this if "maximum_number_of_blocks_to_handle_at_one_time" == "maximum_number_of_sync_blocks_to_prefetch" - !_node_is_shutting_down && - (!_process_backlog_of_sync_blocks_done.valid() || _process_backlog_of_sync_blocks_done.ready())) - _process_backlog_of_sync_blocks_done = fc::async([=](){ process_backlog_of_sync_blocks(); }, - "process_backlog_of_sync_blocks"); + if ( !_node_is_shutting_down ) + _process_backlog_of_sync_blocks.trigger(); } void process_backlog_of_sync_blocks_task::run() { VERIFY_CORRECT_THREAD(); // garbage-collect the list of async tasks here for lack of a better place - for (auto calls_iter = _handle_message_calls_in_progress.begin(); - calls_iter != _handle_message_calls_in_progress.end();) + for (auto calls_iter = _node->_handle_message_calls_in_progress.begin(); + calls_iter != 
_node->_handle_message_calls_in_progress.end();) { - if (calls_iter->ready()) - calls_iter = _handle_message_calls_in_progress.erase(calls_iter); + if (calls_iter->wait_for(std::chrono::seconds(0)) == boost::fibers::future_status::ready) + calls_iter = _node->_handle_message_calls_in_progress.erase(calls_iter); else ++calls_iter; } dlog("in process_backlog_of_sync_blocks"); - if (_handle_message_calls_in_progress.size() >= _maximum_number_of_blocks_to_handle_at_one_time) + if (_node->_handle_message_calls_in_progress.size() >= _node->_maximum_number_of_blocks_to_handle_at_one_time) { dlog("leaving process_backlog_of_sync_blocks because we're already processing too many blocks"); return; // we will be rescheduled when the next block finishes its processing } - dlog("currently ${count} blocks in the process of being handled", ("count", _handle_message_calls_in_progress.size())); + dlog("currently ${count} blocks in the process of being handled", + ("count", _node->_handle_message_calls_in_progress.size())); - if (_suspend_fetching_sync_blocks) + if (_node->_suspend_fetching_sync_blocks) { dlog("resuming processing sync block backlog because we only ${count} blocks in progress", - ("count", _handle_message_calls_in_progress.size())); - _suspend_fetching_sync_blocks = false; + ("count", _node->_handle_message_calls_in_progress.size())); + _node->_suspend_fetching_sync_blocks = false; } @@ -2720,23 +2719,23 @@ namespace graphene { namespace net { namespace detail { do { - std::copy(std::make_move_iterator(_new_received_sync_items.begin()), - std::make_move_iterator(_new_received_sync_items.end()), - std::front_inserter(_received_sync_items)); - _new_received_sync_items.clear(); - dlog("currently ${count} sync items to consider", ("count", _received_sync_items.size())); + std::copy(std::make_move_iterator(_node->_new_received_sync_items.begin()), + std::make_move_iterator(_node->_new_received_sync_items.end()), + std::front_inserter(_node->_received_sync_items)); + _node->_new_received_sync_items.clear(); + dlog("currently ${count} sync items to consider", ("count", _node->_received_sync_items.size())); block_processed_this_iteration = false; - for (auto received_block_iter = _received_sync_items.begin(); - received_block_iter != _received_sync_items.end(); + for (auto received_block_iter = _node->_received_sync_items.begin(); + received_block_iter != _node->_received_sync_items.end(); ++received_block_iter) { // find out if this block is the next block on the active chain or one of the forks bool potential_first_block = false; - for (const peer_connection_ptr& peer : _active_connections) + for (const peer_connection_ptr& peer : _node->_active_connections) { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections + //ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections if (!peer->ids_of_items_to_get.empty() && peer->ids_of_items_to_get.front() == received_block_iter->block_id) { @@ -2755,14 +2754,14 @@ namespace graphene { namespace net { namespace detail { // block through the sync mechanism. Further, we must request both blocks because // we don't know they're the same (for the peer in normal operation, it has only told us the // message id, for the peer in the sync case we only known the block_id). 
- if (std::find(_most_recent_blocks_accepted.begin(), _most_recent_blocks_accepted.end(), - received_block_iter->block_id) == _most_recent_blocks_accepted.end()) + if (std::find(_node->_most_recent_blocks_accepted.begin(), _node->_most_recent_blocks_accepted.end(), + received_block_iter->block_id) == _node->_most_recent_blocks_accepted.end()) { graphene::net::block_message block_message_to_process = *received_block_iter; - _received_sync_items.erase(received_block_iter); - _handle_message_calls_in_progress.emplace_back(fc::async([this, block_message_to_process](){ - send_sync_block_to_node_delegate(block_message_to_process); - }, "send_sync_block_to_node_delegate")); + _node->_received_sync_items.erase(received_block_iter); + _node->_handle_message_calls_in_progress.emplace_back(fc::async([this, block_message_to_process](){ + _node->send_sync_block_to_node_delegate(block_message_to_process); + }, std::this_thread::get_id(), "send_sync_block_to_node_delegate")); ++blocks_processed; block_processed_this_iteration = true; } @@ -2770,7 +2769,7 @@ namespace graphene { namespace net { namespace detail { { dlog("Already received and accepted this block (presumably through normal inventory mechanism), treating it as accepted"); std::vector< peer_connection_ptr > peers_needing_next_batch; - for (const peer_connection_ptr& peer : _active_connections) + for (const peer_connection_ptr& peer : _node->_active_connections) { auto items_being_processed_iter = peer->ids_of_items_being_processed.find(received_block_iter->block_id); if (items_being_processed_iter != peer->ids_of_items_being_processed.end()) @@ -2786,42 +2785,42 @@ namespace graphene { namespace net { namespace detail { peer->number_of_unfetched_item_ids == 0 && peer->ids_of_items_being_processed.empty()) { - dlog("We received last item in our list for peer ${endpoint}, setup to do a sync check", ("endpoint", peer->get_remote_endpoint())); + dlog("We received last item in our list for peer ${endpoint}, setup to do a sync check", + ("endpoint", peer->get_remote_endpoint())); peers_needing_next_batch.push_back( peer ); } } } for( const peer_connection_ptr& peer : peers_needing_next_batch ) - fetch_next_batch_of_item_ids_from_peer(peer.get()); + _node->fetch_next_batch_of_item_ids_from_peer(peer.get()); } break; // start iterating _received_sync_items from the beginning } // end if potential_first_block } // end for each block in _received_sync_items - if (_handle_message_calls_in_progress.size() >= _maximum_number_of_blocks_to_handle_at_one_time) + if (_node->_handle_message_calls_in_progress.size() >= _node->_maximum_number_of_blocks_to_handle_at_one_time) { dlog("stopping processing sync block backlog because we have ${count} blocks in progress", - ("count", _handle_message_calls_in_progress.size())); + ("count", _node->_handle_message_calls_in_progress.size())); //ulog("stopping processing sync block backlog because we have ${count} blocks in progress, total on hand: ${received}", // ("count", _handle_message_calls_in_progress.size())("received", _received_sync_items.size())); - if (_received_sync_items.size() >= _maximum_number_of_sync_blocks_to_prefetch) - _suspend_fetching_sync_blocks = true; + if (_node->_received_sync_items.size() >= _node->_maximum_number_of_sync_blocks_to_prefetch) + _node->_suspend_fetching_sync_blocks = true; break; } } while (block_processed_this_iteration); dlog("leaving process_backlog_of_sync_blocks, ${count} processed", ("count", blocks_processed)); - if (!_suspend_fetching_sync_blocks) - 
trigger_fetch_sync_items_loop(); + if (!_node->_suspend_fetching_sync_blocks) + _node->trigger_fetch_sync_items_loop(); } void node_impl::trigger_process_backlog_of_sync_blocks() { - if (!_node_is_shutting_down && - (!_process_backlog_of_sync_blocks_done.valid() || _process_backlog_of_sync_blocks_done.ready())) - _process_backlog_of_sync_blocks_done = fc::async([=](){ process_backlog_of_sync_blocks(); }, "process_backlog_of_sync_blocks"); + if (!_node_is_shutting_down) + _process_backlog_of_sync_blocks.trigger(); } void node_impl::process_block_during_sync( peer_connection* originating_peer, @@ -2896,7 +2895,7 @@ namespace graphene { namespace net { namespace detail { for (const peer_connection_ptr& peer : _active_connections) { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections + //ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections auto iter = peer->inventory_peer_advertised_to_us.find(block_message_item_id); if (iter != peer->inventory_peer_advertised_to_us.end()) @@ -3288,7 +3287,7 @@ namespace graphene { namespace net { namespace detail { fc::time_point now = fc::time_point::now(); for (const peer_connection_ptr& peer : _active_connections) { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections + //ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections current_connection_data data_for_this_peer; data_for_this_peer.connection_duration = now.sec_since_epoch() - peer->connection_initiation_time.sec_since_epoch(); @@ -3482,7 +3481,8 @@ namespace graphene { namespace net { namespace detail { try { - _accept_loop_complete.cancel_and_wait("node_impl::close()"); + _accept_loop.cancel(); + _accept_loop.wait(); dlog("P2P accept loop terminated"); } catch ( const fc::exception& e ) @@ -3497,10 +3497,8 @@ namespace graphene { namespace net { namespace detail { // terminate all of our long-running loops (these run continuously instead of rescheduling themselves) try { - _p2p_network_connect_loop_done.cancel("node_impl::close()"); - // cancel() is currently broken, so we need to wake up the task to allow it to finish - trigger_p2p_network_connect_loop(); - _p2p_network_connect_loop_done.wait(); + _p2p_network_connect_loop.cancel(); + _p2p_network_connect_loop.wait(); dlog("P2P connect loop terminated"); } catch ( const fc::canceled_exception& ) @@ -3518,7 +3516,8 @@ namespace graphene { namespace net { namespace detail { try { - _process_backlog_of_sync_blocks_done.cancel_and_wait("node_impl::close()"); + _process_backlog_of_sync_blocks.cancel(); + _process_backlog_of_sync_blocks.wait(); dlog("Process backlog of sync items task terminated"); } catch ( const fc::canceled_exception& ) @@ -3540,12 +3539,15 @@ namespace graphene { namespace net { namespace detail { auto it = _handle_message_calls_in_progress.begin(); if( it == _handle_message_calls_in_progress.end() ) break; - if( it->ready() || it->error() || it->canceled() ) + if( it->wait_for(std::chrono::seconds(0)) == boost::fibers::future_status::ready ) { _handle_message_calls_in_progress.erase( it ); continue; } ++handle_message_call_count; + // FIXME: can't cancel message handlers + boost::this_fiber::yield(); + /* try { it->cancel_and_wait("node_impl::close()"); @@ -3563,14 +3565,13 @@ namespace graphene { namespace net { namespace detail { { wlog("Exception thrown while terminating handle_message call #${count} task, ignoring",("count", handle_message_call_count)); } + */ } try { - 
_fetch_sync_items_loop_done.cancel("node_impl::close()"); - // cancel() is currently broken, so we need to wake up the task to allow it to finish - trigger_fetch_sync_items_loop(); - _fetch_sync_items_loop_done.wait(); + _fetch_sync_items_loop.cancel(); + _fetch_sync_items_loop.wait(); dlog("Fetch sync items loop terminated"); } catch ( const fc::canceled_exception& ) @@ -3588,10 +3589,8 @@ namespace graphene { namespace net { namespace detail { try { - _fetch_item_loop_done.cancel("node_impl::close()"); - // cancel() is currently broken, so we need to wake up the task to allow it to finish - trigger_fetch_items_loop(); - _fetch_item_loop_done.wait(); + _fetch_item_loop.cancel(); + _fetch_item_loop.wait(); dlog("Fetch items loop terminated"); } catch ( const fc::canceled_exception& ) @@ -3609,10 +3608,8 @@ namespace graphene { namespace net { namespace detail { try { - _advertise_inventory_loop_done.cancel("node_impl::close()"); - // cancel() is currently broken, so we need to wake up the task to allow it to finish - trigger_advertise_inventory_loop(); - _advertise_inventory_loop_done.wait(); + _advertise_inventory_loop.cancel(); + _advertise_inventory_loop.wait(); dlog("Advertise inventory loop terminated"); } catch ( const fc::canceled_exception& ) @@ -3667,7 +3664,8 @@ namespace graphene { namespace net { namespace detail { #endif try { - _delayed_peer_deletion_task_done.cancel_and_wait("node_impl::close()"); + _delayed_peer_deletion_task.cancel(); + _delayed_peer_deletion_task.wait(); dlog("Delayed peer deletion task terminated"); } catch ( const fc::exception& e ) @@ -3686,7 +3684,8 @@ namespace graphene { namespace net { namespace detail { // our loops now try { - _terminate_inactive_connections_loop_done.cancel_and_wait("node_impl::close()"); + _terminate_inactive_connections_loop.cancel(); + _terminate_inactive_connections_loop.wait(); dlog("Terminate inactive connections loop terminated"); } catch ( const fc::exception& e ) @@ -3700,7 +3699,8 @@ namespace graphene { namespace net { namespace detail { try { - _fetch_updated_peer_lists_loop_done.cancel_and_wait("node_impl::close()"); + _fetch_updated_peer_lists_loop.cancel(); + _fetch_updated_peer_lists_loop.wait(); dlog("Fetch updated peer lists loop terminated"); } catch ( const fc::exception& e ) @@ -3714,7 +3714,8 @@ namespace graphene { namespace net { namespace detail { try { - _bandwidth_monitor_loop_done.cancel_and_wait("node_impl::close()"); + _bandwidth_monitor_loop.cancel(); + _bandwidth_monitor_loop.wait(); dlog("Bandwidth monitor loop terminated"); } catch ( const fc::exception& e ) @@ -3728,7 +3729,8 @@ namespace graphene { namespace net { namespace detail { try { - _dump_node_status_task_done.cancel_and_wait("node_impl::close()"); + _dump_node_status_task.cancel(); + _dump_node_status_task.wait(); dlog("Dump node status task terminated"); } catch ( const fc::exception& e ) @@ -3754,25 +3756,26 @@ namespace graphene { namespace net { namespace detail { while ( true ) { check_cancelled(); - peer_connection_ptr new_peer(peer_connection::make_shared(this)); + peer_connection_ptr new_peer(peer_connection::make_shared(_node)); try { - _tcp_server.accept( new_peer->get_socket() ); - ilog( "accepted inbound connection from ${remote_endpoint}", ("remote_endpoint", new_peer->get_socket().remote_endpoint() ) ); - if (_node_is_shutting_down) + _node->_tcp_server.accept( new_peer->get_socket() ); + ilog( "accepted inbound connection from ${remote_endpoint}", + ("remote_endpoint", new_peer->get_socket().remote_endpoint() ) ); + if 
(_node->_node_is_shutting_down) return; new_peer->connection_initiation_time = fc::time_point::now(); - _handshaking_connections.insert( new_peer ); - _rate_limiter.add_tcp_socket( &new_peer->get_socket() ); + _node->_handshaking_connections.insert( new_peer ); + _node->_rate_limiter.add_tcp_socket( &new_peer->get_socket() ); std::weak_ptr new_weak_peer(new_peer); new_peer->accept_or_connect_task_done = fc::async( [this, new_weak_peer]() { peer_connection_ptr new_peer(new_weak_peer.lock()); assert(new_peer); if (!new_peer) return; - accept_connection_task(new_peer); - }, "accept_connection_task" ); + _node->accept_connection_task(new_peer); + }, std::this_thread::get_id(), "accept_connection_task" ); // limit the rate at which we accept connections to mitigate DOS attacks sleep( std::chrono::milliseconds(10) ); @@ -3932,7 +3935,7 @@ namespace graphene { namespace net { namespace detail { } // methods implementing node's public interface - void node_impl::set_node_delegate(node_delegate* del, fc::thread* thread_for_delegate_calls) + void node_impl::set_node_delegate(node_delegate* del, std::thread::id thread_for_delegate_calls) { VERIFY_CORRECT_THREAD(); _delegate.reset(); @@ -4076,7 +4079,7 @@ namespace graphene { namespace net { namespace detail { wlog(error_message); std::cout << "\033[31m" << error_message; _delegate->error_encountered( error_message, fc::oexception() ); - fc::usleep( fc::seconds(5 ) ); + boost::this_fiber::sleep_for( std::chrono::seconds(5 ) ); } else // don't wait, just find a random port { @@ -4115,25 +4118,16 @@ namespace graphene { namespace net { namespace detail { VERIFY_CORRECT_THREAD(); assert(_node_public_key != fc::ecc::public_key_data()); - assert(!_accept_loop_complete.valid() && - !_p2p_network_connect_loop_done.valid() && - !_fetch_sync_items_loop_done.valid() && - !_fetch_item_loop_done.valid() && - !_advertise_inventory_loop_done.valid() && - !_terminate_inactive_connections_loop_done.valid() && - !_fetch_updated_peer_lists_loop_done.valid() && - !_bandwidth_monitor_loop_done.valid() && - !_dump_node_status_task_done.valid()); if (_node_configuration.accept_incoming_connections) - _accept_loop_complete = fc::async( [=](){ accept_loop(); }, "accept_loop"); - _p2p_network_connect_loop_done = fc::async( [=]() { p2p_network_connect_loop(); }, "p2p_network_connect_loop" ); - _fetch_sync_items_loop_done = fc::async( [=]() { fetch_sync_items_loop(); }, "fetch_sync_items_loop" ); - _fetch_item_loop_done = fc::async( [=]() { fetch_items_loop(); }, "fetch_items_loop" ); - _advertise_inventory_loop_done = fc::async( [=]() { advertise_inventory_loop(); }, "advertise_inventory_loop" ); - _terminate_inactive_connections_loop_done = fc::async( [=]() { terminate_inactive_connections_loop(); }, "terminate_inactive_connections_loop" ); - _fetch_updated_peer_lists_loop_done = fc::async([=](){ fetch_updated_peer_lists_loop(); }, "fetch_updated_peer_lists_loop"); - _bandwidth_monitor_loop_done = fc::async([=](){ bandwidth_monitor_loop(); }, "bandwidth_monitor_loop"); - _dump_node_status_task_done = fc::async([=](){ dump_node_status_task(); }, "dump_node_status_task"); + _accept_loop.trigger(); + _p2p_network_connect_loop.trigger(); + _fetch_sync_items_loop.trigger(); + _fetch_item_loop.trigger(); + _advertise_inventory_loop.trigger(); + _terminate_inactive_connections_loop.trigger(); + _fetch_updated_peer_lists_loop.trigger(); + _bandwidth_monitor_loop.trigger(); + _dump_node_status_task.trigger(); } void node_impl::add_node(const fc::ip::endpoint& ep) @@ -4169,7 +4163,7 @@ 
namespace graphene { namespace net { namespace detail { if (!new_peer) return; connect_to_task(new_peer, *new_peer->get_remote_endpoint()); - }, "connect_to_task"); + }, std::this_thread::get_id(), "connect_to_task"); } void node_impl::connect_to_endpoint(const fc::ip::endpoint& remote_endpoint) @@ -4366,7 +4360,7 @@ namespace graphene { namespace net { namespace detail { std::vector statuses; for (const peer_connection_ptr& peer : _active_connections) { - ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections + //ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections peer_status this_peer_status; this_peer_status.version = 0; @@ -4648,8 +4642,8 @@ namespace graphene { namespace net { namespace detail { #ifdef P2P_IN_DEDICATED_THREAD # define INVOKE_IN_IMPL(method_name, ...) \ - return fc::async([&](){ return my->method_name(__VA_ARGS__); }, - my->_thread.get_id(), "thread invoke for method " BOOST_PP_STRINGIZE(method_name)).wait() + return fc::async([&](){ return my->method_name(__VA_ARGS__); }, \ + my->_thread.get_id(), "thread invoke for method " BOOST_PP_STRINGIZE(method_name)).get() #else # define INVOKE_IN_IMPL(method_name, ...) \ return my->method_name(__VA_ARGS__) @@ -4968,7 +4962,7 @@ namespace graphene { namespace net { namespace detail { uint32_t statistics_gathering_node_delegate_wrapper::get_block_number(const item_hash_t& block_id) { // this function doesn't need to block, - ASSERT_TASK_NOT_PREEMPTED(); + //ASSERT_TASK_NOT_PREEMPTED(); return _node_delegate->get_block_number(block_id); } diff --git a/libraries/net/peer_connection.cpp b/libraries/net/peer_connection.cpp index 4092658f00..cc086525a5 100644 --- a/libraries/net/peer_connection.cpp +++ b/libraries/net/peer_connection.cpp @@ -76,6 +76,7 @@ namespace graphene { namespace net _node(delegate), _message_connection(this), _total_queued_messages_size(0), + _send_queued_messages(*this), direction(peer_connection_direction::unknown), is_firewalled(firewalled_state::unknown), our_state(our_connection_state::disconnected), @@ -134,7 +135,8 @@ namespace graphene { namespace net try { dlog("canceling _send_queued_messages task"); - _send_queued_messages_done.cancel_and_wait(__FUNCTION__); + _send_queued_messages.cancel(); + _send_queued_messages.wait(); dlog("cancel_and_wait completed normally"); } catch( const fc::exception& e ) @@ -146,21 +148,6 @@ namespace graphene { namespace net wlog("Unexpected exception from peer_connection's send_queued_messages_task"); } - try - { - dlog("canceling accept_or_connect_task"); - accept_or_connect_task_done.cancel_and_wait(__FUNCTION__); - dlog("accept_or_connect_task completed normally"); - } - catch( const fc::exception& e ) - { - wlog("Unexpected exception from peer_connection's accept_or_connect_task : ${e}", ("e", e)); - } - catch( ... 
) - { - wlog("Unexpected exception from peer_connection's accept_or_connect_task"); - } - _message_connection.destroy_connection(); // shut down the read loop } @@ -273,7 +260,7 @@ namespace graphene { namespace net _node->on_connection_closed( this ); } - void peer_connection::send_queued_messages_task() + void detail::send_queued_messages_task::run() { VERIFY_CORRECT_THREAD(); #ifndef NDEBUG @@ -283,16 +270,16 @@ namespace graphene { namespace net ~counter() { assert(_send_message_queue_tasks_counter == 1); --_send_message_queue_tasks_counter; /* dlog("leaving peer_connection::send_queued_messages_task()"); */ } } concurrent_invocation_counter(_send_message_queue_tasks_running); #endif - while (!_queued_messages.empty()) + while (!_conn->_queued_messages.empty()) { - _queued_messages.front()->transmission_start_time = fc::time_point::now(); - message message_to_send = _queued_messages.front()->get_message(_node); + _conn->_queued_messages.front()->transmission_start_time = fc::time_point::now(); + message message_to_send = _conn->_queued_messages.front()->get_message(_conn->_node); try { //dlog("peer_connection::send_queued_messages_task() calling message_oriented_connection::send_message() " // "to send message of type ${type} for peer ${endpoint}", // ("type", message_to_send.msg_type)("endpoint", get_remote_endpoint())); - _message_connection.send_message(message_to_send); + _conn->_message_connection.send_message(message_to_send); //dlog("peer_connection::send_queued_messages_task()'s call to message_oriented_connection::send_message() completed normally for peer ${endpoint}", // ("endpoint", get_remote_endpoint())); } @@ -306,7 +293,7 @@ namespace graphene { namespace net wlog("Error sending message: ${exception}. Closing connection.", ("exception", send_error)); try { - close_connection(); + _conn->close_connection(); } catch (const fc::exception& close_error) { @@ -322,11 +309,10 @@ namespace graphene { namespace net { wlog("message_oriented_exception::send_message() threw an unhandled exception"); } - _queued_messages.front()->transmission_finish_time = fc::time_point::now(); - _total_queued_messages_size -= _queued_messages.front()->get_size_in_queue(); - _queued_messages.pop(); + _conn->_queued_messages.front()->transmission_finish_time = fc::time_point::now(); + _conn->_total_queued_messages_size -= _conn->_queued_messages.front()->get_size_in_queue(); + _conn->_queued_messages.pop(); } - //dlog("leaving peer_connection::send_queued_messages_task() due to queue exhaustion"); } void peer_connection::send_queueable_message(std::unique_ptr&& message_to_send) @@ -349,16 +335,7 @@ namespace graphene { namespace net return; } - if( _send_queued_messages_done.valid() && _send_queued_messages_done.canceled() ) - FC_THROW_EXCEPTION(fc::exception, "Attempting to send a message on a connection that is being shut down"); - - if (!_send_queued_messages_done.valid() || _send_queued_messages_done.ready()) - { - //dlog("peer_connection::send_message() is firing up send_queued_message_task"); - _send_queued_messages_done = fc::async([this](){ send_queued_messages_task(); }, "send_queued_messages_task"); - } - //else - // dlog("peer_connection::send_message() doesn't need to fire up send_queued_message_task, it's already running"); + _send_queued_messages.trigger(); } void peer_connection::send_message(const message& message_to_send, size_t message_send_time_field_offset) From 0f9fed4de123b43242fbaaed8d79679dbc2d893e Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Wed, 11 Dec 2019 17:01:41 
+0100 Subject: [PATCH 07/14] witness_node and cli_wallet building --- libraries/app/api.cpp | 20 +++++------ libraries/app/database_api.cpp | 1 + .../app/include/graphene/app/application.hpp | 2 -- .../account_history_plugin.cpp | 2 -- .../account_history_plugin.hpp | 2 -- .../plugins/debug_witness/debug_witness.cpp | 2 -- .../graphene/debug_witness/debug_witness.hpp | 1 - .../delayed_node/delayed_node_plugin.cpp | 9 +++-- .../elasticsearch/elasticsearch_plugin.cpp | 17 ++++++++++ .../elasticsearch/elasticsearch_plugin.hpp | 9 +++++ .../market_history/market_history_plugin.hpp | 1 - .../market_history/market_history_plugin.cpp | 2 -- .../include/graphene/witness/witness.hpp | 21 +++++++++--- libraries/plugins/witness/witness.cpp | 34 ++++++++++--------- libraries/wallet/wallet.cpp | 15 ++++---- programs/cli_wallet/main.cpp | 14 ++++---- programs/witness_node/main.cpp | 12 ++++--- 17 files changed, 102 insertions(+), 62 deletions(-) diff --git a/libraries/app/api.cpp b/libraries/app/api.cpp index 99564106a2..b4b8b057ee 100644 --- a/libraries/app/api.cpp +++ b/libraries/app/api.cpp @@ -39,7 +39,9 @@ #include #include #include -#include +#include + +#include template class fc::api; template class fc::api; @@ -187,12 +189,13 @@ namespace graphene { namespace app { fc::variant network_broadcast_api::broadcast_transaction_synchronous(const precomputable_transaction& trx) { - fc::promise::ptr prom = fc::promise::create(); - broadcast_transaction_with_callback( [prom]( const fc::variant& v ){ - prom->set_value(v); + boost::fibers::promise prom; + boost::fibers::future result = prom.get_future(); + broadcast_transaction_with_callback( [&prom]( const fc::variant& v ) { + prom.set_value(v); }, trx ); - return fc::future(prom).wait(); + return result.get(); } void network_broadcast_api::broadcast_block( const signed_block& b ) @@ -356,12 +359,7 @@ namespace graphene { namespace app { if(_app.is_plugin_enabled("elasticsearch")) { auto es = _app.get_plugin("elasticsearch"); if(es.get()->get_running_mode() != elasticsearch::mode::only_save) { - if(!_app.elasticsearch_thread) - _app.elasticsearch_thread= std::make_shared("elasticsearch"); - - return _app.elasticsearch_thread->async([&es, &account, &stop, &limit, &start]() { - return es->get_account_history(account, stop, limit, start); - }, "thread invoke for method " BOOST_PP_STRINGIZE(method_name)).wait(); + return es->get_account_history(account, stop, limit, start); } } diff --git a/libraries/app/database_api.cpp b/libraries/app/database_api.cpp index adbe87fc7f..117379fd7f 100644 --- a/libraries/app/database_api.cpp +++ b/libraries/app/database_api.cpp @@ -31,6 +31,7 @@ #include #include +#include #include diff --git a/libraries/app/include/graphene/app/application.hpp b/libraries/app/include/graphene/app/application.hpp index b9c37fb6c6..1b054798ce 100644 --- a/libraries/app/include/graphene/app/application.hpp +++ b/libraries/app/include/graphene/app/application.hpp @@ -139,8 +139,6 @@ namespace graphene { namespace app { bool is_plugin_enabled(const string& name) const; - std::shared_ptr elasticsearch_thread; - private: void add_available_plugin( std::shared_ptr p ); std::shared_ptr my; diff --git a/libraries/plugins/account_history/account_history_plugin.cpp b/libraries/plugins/account_history/account_history_plugin.cpp index c3d0826077..289260fd1a 100644 --- a/libraries/plugins/account_history/account_history_plugin.cpp +++ b/libraries/plugins/account_history/account_history_plugin.cpp @@ -34,8 +34,6 @@ #include #include -#include - namespace 
graphene { namespace account_history { namespace detail diff --git a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp index 99492768ce..41d1631a94 100644 --- a/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp +++ b/libraries/plugins/account_history/include/graphene/account_history/account_history_plugin.hpp @@ -28,8 +28,6 @@ #include -#include - namespace graphene { namespace account_history { using namespace chain; //using namespace graphene::db; diff --git a/libraries/plugins/debug_witness/debug_witness.cpp b/libraries/plugins/debug_witness/debug_witness.cpp index 7268006d3b..296abbbfd3 100644 --- a/libraries/plugins/debug_witness/debug_witness.cpp +++ b/libraries/plugins/debug_witness/debug_witness.cpp @@ -28,8 +28,6 @@ #include -#include - #include using namespace graphene::debug_witness_plugin; diff --git a/libraries/plugins/debug_witness/include/graphene/debug_witness/debug_witness.hpp b/libraries/plugins/debug_witness/include/graphene/debug_witness/debug_witness.hpp index 4b5369211a..a1dd7730c7 100644 --- a/libraries/plugins/debug_witness/include/graphene/debug_witness/debug_witness.hpp +++ b/libraries/plugins/debug_witness/include/graphene/debug_witness/debug_witness.hpp @@ -27,7 +27,6 @@ #include #include -#include #include namespace graphene { namespace debug_witness_plugin { diff --git a/libraries/plugins/delayed_node/delayed_node_plugin.cpp b/libraries/plugins/delayed_node/delayed_node_plugin.cpp index 01f4e48b31..e75d42245c 100644 --- a/libraries/plugins/delayed_node/delayed_node_plugin.cpp +++ b/libraries/plugins/delayed_node/delayed_node_plugin.cpp @@ -30,6 +30,11 @@ #include #include #include +#include + +#include + +#include namespace graphene { namespace delayed_node { namespace bpo = boost::program_options; @@ -119,7 +124,7 @@ void delayed_node_plugin::mainloop() { try { - fc::usleep( fc::microseconds( 296645 ) ); // wake up a little over 3Hz + boost::this_fiber::sleep_for( std::chrono::microseconds( 296645 ) ); // wake up a little over 3Hz if( my->last_received_remote_head == my->last_processed_remote_head ) continue; @@ -160,7 +165,7 @@ void delayed_node_plugin::plugin_startup() void delayed_node_plugin::connection_failed() { elog("Connection to trusted node failed; retrying in 5 seconds..."); - fc::schedule([this]{connect();}, fc::time_point::now() + fc::seconds(5)); + fc::schedule([this]{connect();}, std::chrono::system_clock::now() + std::chrono::seconds(5)); } } } diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp index 92a5f41f19..f92926191d 100644 --- a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp +++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp @@ -25,6 +25,9 @@ #include #include #include + +#include + #include namespace graphene { namespace elasticsearch { @@ -407,10 +410,21 @@ void elasticsearch_plugin_impl::populateESstruct() elasticsearch_plugin::elasticsearch_plugin() : my( new detail::elasticsearch_plugin_impl(*this) ) { + _elasticsearch_thread = std::thread( [this] () { + fc::initialize_fibers(); + std::unique_lock lock(_mtx); + _cv.wait( lock, [this] () { return _shutting_down; }); + }); } elasticsearch_plugin::~elasticsearch_plugin() { + { + std::unique_lock lock(_mtx); + _shutting_down = true; + _cv.notify_all(); + } + _elasticsearch_thread.join(); } std::string 
elasticsearch_plugin::plugin_name()const @@ -544,6 +558,8 @@ vector elasticsearch_plugin::get_account_history( unsigned limit = 100, operation_history_id_type start = operation_history_id_type()) { + return fc::async([this, account_id, stop, limit, start]() { + const string account_id_string = std::string(object_id_type(account_id)); const auto stop_number = stop.instance.value; @@ -592,6 +608,7 @@ vector elasticsearch_plugin::get_account_history( result.push_back(fromEStoOperation(source)); } return result; + }, _elasticsearch_thread.get_id(), "thread invoke for method " BOOST_PP_STRINGIZE(method_name)).get(); } operation_history_object elasticsearch_plugin::fromEStoOperation(variant source) diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp index 7b08e1d73f..0bccc375fd 100644 --- a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp +++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp @@ -28,6 +28,10 @@ #include #include +#include + +#include + namespace graphene { namespace elasticsearch { using namespace chain; @@ -77,6 +81,11 @@ class elasticsearch_plugin : public graphene::app::plugin private: operation_history_object fromEStoOperation(variant source); graphene::utilities::ES prepareHistoryQuery(string query); + + std::thread _elasticsearch_thread; + boost::fibers::mutex _mtx; + boost::fibers::condition_variable _cv; + bool _shutting_down; }; diff --git a/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp b/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp index 51eca0d857..70e06d15c1 100644 --- a/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp +++ b/libraries/plugins/market_history/include/graphene/market_history/market_history_plugin.hpp @@ -26,7 +26,6 @@ #include #include -#include #include #include diff --git a/libraries/plugins/market_history/market_history_plugin.cpp b/libraries/plugins/market_history/market_history_plugin.cpp index 6b4a3558c4..cc05fa923e 100644 --- a/libraries/plugins/market_history/market_history_plugin.cpp +++ b/libraries/plugins/market_history/market_history_plugin.cpp @@ -33,8 +33,6 @@ #include #include -#include - namespace graphene { namespace market_history { namespace detail diff --git a/libraries/plugins/witness/include/graphene/witness/witness.hpp b/libraries/plugins/witness/include/graphene/witness/witness.hpp index 8ca09a5b27..c1830f1f78 100644 --- a/libraries/plugins/witness/include/graphene/witness/witness.hpp +++ b/libraries/plugins/witness/include/graphene/witness/witness.hpp @@ -25,8 +25,7 @@ #include #include - -#include +#include namespace graphene { namespace witness_plugin { @@ -46,8 +45,22 @@ namespace block_production_condition }; } +class witness_plugin; + +class block_production_task : public graphene::utilities::recurring_task +{ +public: + block_production_task() : _witness(nullptr) {} + explicit block_production_task( witness_plugin& witness ) : _witness(&witness) {} +protected: + virtual void run(); + + witness_plugin* _witness; +}; + class witness_plugin : public graphene::app::plugin { public: + witness_plugin() : _block_production_task(*this) {} ~witness_plugin() { stop_block_production(); } std::string plugin_name()const override; @@ -68,7 +81,6 @@ class witness_plugin : public 
graphene::app::plugin { { return _witness_key_cache; } private: - void schedule_production_loop(); block_production_condition::block_production_condition_enum block_production_loop(); block_production_condition::block_production_condition_enum maybe_produce_block( fc::limited_mutable_variant_object& capture ); void add_private_key(const std::string& key_id_to_wif_pair_string); @@ -84,11 +96,12 @@ class witness_plugin : public graphene::app::plugin { std::map _private_keys; std::set _witnesses; - fc::future _block_production_task; + block_production_task _block_production_task; /// For tracking signing keys of specified witnesses, only update when applied a block fc::flat_map< chain::witness_id_type, fc::optional > _witness_key_cache; + friend class block_production_task; }; } } //graphene::witness_plugin diff --git a/libraries/plugins/witness/witness.cpp b/libraries/plugins/witness/witness.cpp index d2609625ab..350b18c3a0 100644 --- a/libraries/plugins/witness/witness.cpp +++ b/libraries/plugins/witness/witness.cpp @@ -28,11 +28,11 @@ #include -#include #include #include +#include #include using namespace graphene::witness_plugin; @@ -186,7 +186,7 @@ void witness_plugin::plugin_startup() { refresh_witness_key_cache(); }); - schedule_production_loop(); + _block_production_task.trigger(); } else { @@ -205,8 +205,8 @@ void witness_plugin::stop_block_production() _shutting_down = true; try { - if( _block_production_task.valid() ) - _block_production_task.cancel_and_wait(__FUNCTION__); + _block_production_task.cancel(); + _block_production_task.wait(); } catch(fc::canceled_exception&) { //Expected exception. Move along. } catch(fc::exception& e) { @@ -227,21 +227,24 @@ void witness_plugin::refresh_witness_key_cache() } } -void witness_plugin::schedule_production_loop() +void block_production_task::run() { - if (_shutting_down) return; + while( true ) + { + check_cancelled(); + if (_witness->_shutting_down) return; - //Schedule for the next second's tick regardless of chain state - // If we would wait less than 50ms, wait for the whole second. - fc::time_point now = fc::time_point::now(); - int64_t time_to_next_second = 1000000 - (now.time_since_epoch().count() % 1000000); - if( time_to_next_second < 50000 ) // we must sleep for at least 50ms - time_to_next_second += 1000000; + _witness->block_production_loop(); - fc::time_point next_wakeup( now + fc::microseconds( time_to_next_second ) ); + //Schedule for the next second's tick regardless of chain state + // If we would wait less than 50ms, wait for the whole second. 
+ fc::time_point now = fc::time_point::now(); + int64_t time_to_next_second = 1000000 - (now.time_since_epoch().count() % 1000000); + if( time_to_next_second < 50000 ) // we must sleep for at least 50ms + time_to_next_second += 1000000; - _block_production_task = fc::schedule([this]{block_production_loop();}, - next_wakeup, "Witness Block Production"); + sleep( std::chrono::microseconds(time_to_next_second) ); + } } block_production_condition::block_production_condition_enum witness_plugin::block_production_loop() @@ -305,7 +308,6 @@ block_production_condition::block_production_condition_enum witness_plugin::bloc break; } - schedule_production_loop(); return result; } diff --git a/libraries/wallet/wallet.cpp b/libraries/wallet/wallet.cpp index 1c0ecb683e..b2294097b9 100644 --- a/libraries/wallet/wallet.cpp +++ b/libraries/wallet/wallet.cpp @@ -33,7 +33,7 @@ #include #include #include - +#include #include #include #include @@ -51,16 +51,16 @@ #include #include #include +#include #include #include #include #include #include -#include -#include #include #include #include +#include #include #include @@ -83,6 +83,9 @@ # include #endif +#include +#include + // explicit instantiation for later use namespace fc { template class api; @@ -374,10 +377,10 @@ class wallet_api_impl _wallet.pending_witness_registrations.erase(iter); } - fc::mutex _resync_mutex; + boost::fibers::mutex _resync_mutex; void resync() { - fc::scoped_lock lock(_resync_mutex); + std::unique_lock lock(_resync_mutex); // this method is used to update wallet_data annotations // e.g. wallet has been restarted and was not notified // of events while it was down @@ -546,7 +549,7 @@ class wallet_api_impl void on_block_applied( const variant& block_id ) { - fc::async([this]{resync();}, "Resync after block"); + fc::async([this]{resync();}, std::this_thread::get_id(), "Resync after block"); } bool copy_wallet_file( string destination_filename ) diff --git a/programs/cli_wallet/main.cpp b/programs/cli_wallet/main.cpp index abe317cd31..630e86d643 100644 --- a/programs/cli_wallet/main.cpp +++ b/programs/cli_wallet/main.cpp @@ -53,6 +53,8 @@ #include #include +#include + #ifdef WIN32 # include #else @@ -350,30 +352,30 @@ int main( int argc, char** argv ) } else { - fc::promise::ptr exit_promise = fc::promise::create("UNIX Signal Handler"); + boost::fibers::promise exit_promise; fc::set_signal_handler( [&exit_promise](int signal) { ilog( "Captured SIGINT in daemon mode, exiting" ); - exit_promise->set_value(signal); + exit_promise.set_value(signal); }, SIGINT ); fc::set_signal_handler( [&exit_promise](int signal) { ilog( "Captured SIGTERM in daemon mode, exiting" ); - exit_promise->set_value(signal); + exit_promise.set_value(signal); }, SIGTERM ); #ifdef SIGQUIT fc::set_signal_handler( [&exit_promise](int signal) { ilog( "Captured SIGQUIT in daemon mode, exiting" ); - exit_promise->set_value(signal); + exit_promise.set_value(signal); }, SIGQUIT ); #endif boost::signals2::scoped_connection closed_connection( con->closed.connect( [&exit_promise] { elog( "Server has disconnected us." 
); - exit_promise->set_value(0); + exit_promise.set_value(0); })); ilog( "Entering Daemon Mode, ^C to exit" ); - exit_promise->wait(); + exit_promise.get_future().wait(); closed_connection.disconnect(); } diff --git a/programs/witness_node/main.cpp b/programs/witness_node/main.cpp index c3c76ae2d4..dfdafaf87c 100644 --- a/programs/witness_node/main.cpp +++ b/programs/witness_node/main.cpp @@ -36,14 +36,15 @@ #include #include -#include #include #include +#include #include #include #include #include +#include #include #include @@ -62,6 +63,7 @@ namespace bpo = boost::program_options; int main(int argc, char** argv) { fc::print_stacktrace_on_segfault(); + fc::initialize_fibers(); app::application* node = new app::application(); fc::oexception unhandled_exception; try { @@ -172,22 +174,22 @@ int main(int argc, char** argv) { node->startup(); node->startup_plugins(); - fc::promise::ptr exit_promise = fc::promise::create("UNIX Signal Handler"); + boost::fibers::promise exit_promise; fc::set_signal_handler([&exit_promise](int signal) { elog( "Caught SIGINT attempting to exit cleanly" ); - exit_promise->set_value(signal); + exit_promise.set_value(signal); }, SIGINT); fc::set_signal_handler([&exit_promise](int signal) { elog( "Caught SIGTERM attempting to exit cleanly" ); - exit_promise->set_value(signal); + exit_promise.set_value(signal); }, SIGTERM); ilog("Started BitShares node on a chain with ${h} blocks.", ("h", node->chain_database()->head_block_num())); ilog("Chain ID is ${id}", ("id", node->chain_database()->get_chain_id()) ); - int signal = exit_promise->wait(); + int signal = exit_promise.get_future().get(); ilog("Exiting from signal ${n}", ("n", signal)); node->shutdown_plugins(); node->shutdown(); From 0fbc1a3badc1136457898c98cb4dd5492af7b8aa Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Wed, 11 Dec 2019 18:10:03 +0100 Subject: [PATCH 08/14] Compiles all --- programs/delayed_node/main.cpp | 8 ++--- programs/network_mapper/network_mapper.cpp | 38 ++++++++++----------- tests/app/main.cpp | 10 +++--- tests/cli/main.cpp | 16 ++++----- tests/elasticsearch/main.cpp | 23 +++++++------ tests/tests/database_api_tests.cpp | 33 ++++++++++-------- tests/tests/grouped_orders_api_tests.cpp | 3 +- tests/tests/history_api_tests.cpp | 13 +++---- tests/tests/market_rounding_tests.cpp | 5 +-- tests/tests/network_broadcast_api_tests.cpp | 3 +- tests/tests/settle_tests.cpp | 9 ++--- 11 files changed, 85 insertions(+), 76 deletions(-) diff --git a/programs/delayed_node/main.cpp b/programs/delayed_node/main.cpp index 4af5207645..0e73ea1798 100644 --- a/programs/delayed_node/main.cpp +++ b/programs/delayed_node/main.cpp @@ -28,7 +28,6 @@ #include #include -#include #include #include #include @@ -43,6 +42,7 @@ #include #include #include +#include #include #include @@ -184,15 +184,15 @@ int main(int argc, char** argv) { node.startup_plugins(); - fc::promise::ptr exit_promise = fc::promise::create("UNIX Signal Handler"); + boost::fibers::promise exit_promise; fc::set_signal_handler([&exit_promise](int signal) { - exit_promise->set_value(signal); + exit_promise.set_value(signal); }, SIGINT); ilog("Started delayed node on a chain with ${h} blocks.", ("h", node.chain_database()->head_block_num())); ilog("Chain ID is ${id}", ("id", node.chain_database()->get_chain_id()) ); - int signal = exit_promise->wait(); + int signal = exit_promise.get_future().get(); ilog("Exiting from signal ${n}", ("n", signal)); node.shutdown_plugins(); node.shutdown(); diff --git a/programs/network_mapper/network_mapper.cpp 
b/programs/network_mapper/network_mapper.cpp index 68b5f526f2..38361e9653 100644 --- a/programs/network_mapper/network_mapper.cpp +++ b/programs/network_mapper/network_mapper.cpp @@ -4,7 +4,9 @@ #include #include #include -#include +#include + +#include #include #include #include @@ -13,6 +15,8 @@ #include #include +#include + class peer_probe : public graphene::net::peer_connection_delegate { public: @@ -24,7 +28,8 @@ class peer_probe : public graphene::net::peer_connection_delegate fc::ip::endpoint _remote; bool _connection_was_rejected; bool _done; - fc::promise::ptr _probe_complete_promise; + boost::fibers::promise _probe_complete_promise; + boost::fibers::future _probe_complete_future = _probe_complete_promise.get_future(); public: peer_probe() : @@ -32,24 +37,24 @@ class peer_probe : public graphene::net::peer_connection_delegate _we_closed_connection(false), _connection(graphene::net::peer_connection::make_shared(this)), _connection_was_rejected(false), - _done(false), - _probe_complete_promise(fc::promise::create("probe_complete")) + _done(false) {} void start(const fc::ip::endpoint& endpoint_to_probe, const fc::ecc::private_key& my_node_id, const graphene::chain::chain_id_type& chain_id) { - _remote = endpoint_to_probe; - fc::future connect_task = fc::async([this](){ _connection->connect_to(_remote); }, "connect_task"); + boost::fibers::future connect_task = fc::async( [_connection=_connection,endpoint_to_probe](){ + _connection->connect_to(endpoint_to_probe); + }, std::this_thread::get_id(), "connect_task"); try { - connect_task.wait(fc::seconds(10)); + connect_task.wait_for(std::chrono::seconds(10)); } catch (const fc::timeout_exception&) { ilog("timeout connecting to node ${endpoint}", ("endpoint", endpoint_to_probe)); - connect_task.cancel(__FUNCTION__); + _connection->close_connection(); // this should cancel the async connect_to operation throw; } @@ -151,18 +156,13 @@ class peer_probe : public graphene::net::peer_connection_delegate void on_connection_closed(graphene::net::peer_connection* originating_peer) override { _done = true; - _probe_complete_promise->set_value(); + _probe_complete_promise.set_value(); } graphene::net::message get_message_for_item(const graphene::net::item_id& item) override { return graphene::net::item_not_available_message(item); } - - void wait( const fc::microseconds& timeout_us ) - { - _probe_complete_promise->wait( timeout_us ); - } }; int main(int argc, char** argv) @@ -223,17 +223,17 @@ int main(int argc, char** argv) if (!probes.empty()) { - fc::yield(); + boost::this_fiber::yield(); std::vector> running; for ( auto& probe : probes ) { - if (probe->_probe_complete_promise->error()) + if ( probe->_probe_complete_future.wait_for(std::chrono::seconds(0)) != boost::fibers::future_status::ready ) { - std::cerr << fc::string(probe->_remote) << " ran into an error!\n"; + running.push_back( probe ); continue; } - if (!probe->_probe_complete_promise->ready()) + if ( probe->_probe_complete_future.get_exception_ptr() != std::exception_ptr() ) { - running.push_back( probe ); + std::cerr << fc::string(probe->_remote) << " ran into an error!\n"; continue; } diff --git a/tests/app/main.cpp b/tests/app/main.cpp index bd7905870d..d02a26c7c5 100644 --- a/tests/app/main.cpp +++ b/tests/app/main.cpp @@ -34,11 +34,11 @@ #include #include -#include #include #include #include +#include #include "../../libraries/app/application_impl.hxx" @@ -234,7 +234,7 @@ BOOST_AUTO_TEST_CASE( two_node_network ) app1.initialize(app_dir.path(), cfg); BOOST_TEST_MESSAGE( "Starting 
app1 and waiting 500 ms" ); app1.startup(); - fc::usleep(fc::milliseconds(500)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(500)); BOOST_TEST_MESSAGE( "Creating and initializing app2" ); @@ -255,7 +255,7 @@ BOOST_AUTO_TEST_CASE( two_node_network ) BOOST_TEST_MESSAGE( "Starting app2 and waiting 500 ms" ); app2.startup(); - fc::usleep(fc::milliseconds(500)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(500)); BOOST_REQUIRE_EQUAL(app1.p2p_node()->get_connection_count(), 1u); BOOST_CHECK_EQUAL(std::string(app1.p2p_node()->get_connected_peers().front().host.get_address()), "127.0.0.1"); @@ -303,7 +303,7 @@ BOOST_AUTO_TEST_CASE( two_node_network ) BOOST_TEST_MESSAGE( "Broadcasting tx" ); app1.p2p_node()->broadcast(graphene::net::trx_message(trx)); - fc::usleep(fc::milliseconds(500)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(500)); BOOST_CHECK_EQUAL( db1->get_balance( GRAPHENE_NULL_ACCOUNT, asset_id_type() ).amount.value, 1000000 ); BOOST_CHECK_EQUAL( db2->get_balance( GRAPHENE_NULL_ACCOUNT, asset_id_type() ).amount.value, 1000000 ); @@ -320,7 +320,7 @@ BOOST_AUTO_TEST_CASE( two_node_network ) BOOST_TEST_MESSAGE( "Broadcasting block" ); app2.p2p_node()->broadcast(graphene::net::block_message( block_1 )); - fc::usleep(fc::milliseconds(500)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(500)); BOOST_TEST_MESSAGE( "Verifying nodes are still connected" ); BOOST_CHECK_EQUAL(app1.p2p_node()->get_connection_count(), 1u); BOOST_CHECK_EQUAL(app1.chain_database()->head_block_num(), 1u); diff --git a/tests/cli/main.cpp b/tests/cli/main.cpp index baf3852ba7..ec54efebf6 100644 --- a/tests/cli/main.cpp +++ b/tests/cli/main.cpp @@ -34,12 +34,12 @@ #include #include -#include #include #include #include #include #include +#include #include @@ -148,7 +148,7 @@ std::shared_ptr start_application(fc::temp_directory app1->startup_plugins(); app1->startup(); - fc::usleep(fc::milliseconds(500)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(500)); return app1; } @@ -249,7 +249,7 @@ class client_connection ~client_connection() { // wait for everything to finish up - fc::usleep(fc::milliseconds(500)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(500)); } public: fc::http::websocket_client websocket_client; @@ -275,7 +275,7 @@ struct cli_fixture ~dummy() { // wait for everything to finish up - fc::usleep(fc::milliseconds(500)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(500)); } }; dummy dmy; @@ -318,7 +318,7 @@ struct cli_fixture BOOST_TEST_MESSAGE("Cleanup cli_wallet::boost_fixture_test_case"); // wait for everything to finish up - fc::usleep(fc::seconds(1)); + boost::this_fiber::sleep_for(std::chrono::seconds(1)); app1->shutdown(); #ifdef _WIN32 @@ -860,7 +860,7 @@ BOOST_AUTO_TEST_CASE( cli_multisig_transaction ) } // wait for everything to finish up - fc::usleep(fc::seconds(1)); + boost::this_fiber::sleep_for(std::chrono::seconds(1)); } catch( fc::exception& e ) { edump((e.to_detail_string())); throw; @@ -895,7 +895,7 @@ BOOST_AUTO_TEST_CASE( saving_keys_wallet_test ) { BOOST_CHECK( pk.keys.size() == 1 ); // nathan key BOOST_CHECK( generate_block( cli.app1 ) ); - fc::usleep( fc::seconds(1) ); + boost::this_fiber::sleep_for(std::chrono::seconds(1) ); wallet = fc::json::from_file( path ).as( 2 * GRAPHENE_MAX_NESTED_OBJECTS ); BOOST_CHECK( wallet.extra_keys.size() == 2 ); // nathan + account1 @@ -1084,7 +1084,7 @@ BOOST_AUTO_TEST_CASE( cli_create_htlc ) } // wait for everything to finish up - fc::usleep(fc::seconds(1)); + 
boost::this_fiber::sleep_for(std::chrono::seconds(1)); } catch( fc::exception& e ) { edump((e.to_detail_string())); throw; diff --git a/tests/elasticsearch/main.cpp b/tests/elasticsearch/main.cpp index 24eb9382e8..e0ed684896 100644 --- a/tests/elasticsearch/main.cpp +++ b/tests/elasticsearch/main.cpp @@ -33,6 +33,7 @@ #define BOOST_TEST_MODULE Elastic Search Database Tests #include +#include using namespace graphene::chain; using namespace graphene::chain::test; @@ -54,7 +55,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { // delete all first auto delete_account_history = graphene::utilities::deleteAll(es); - fc::usleep(fc::milliseconds(1000)); // this is because index.refresh_interval, nothing to worry + boost::this_fiber::sleep_for(std::chrono::milliseconds(1000)); // this is because index.refresh_interval, nothing to worry if(delete_account_history) { // all records deleted @@ -64,7 +65,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { auto bob = create_account("bob"); generate_block(); - fc::usleep(fc::milliseconds(1000)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(1000)); // for later use //int asset_create_op_id = operation::tag::value; @@ -89,7 +90,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { auto willie = create_account("willie"); generate_block(); - fc::usleep(fc::milliseconds(1000)); // index.refresh_interval + boost::this_fiber::sleep_for(std::chrono::milliseconds(1000)); // index.refresh_interval es.endpoint = es.index_prefix + "*/data/_count"; res = graphene::utilities::simpleQuery(es); @@ -104,7 +105,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_account_history) { transfer(account_id_type()(db), bob, asset(300)); generate_block(); - fc::usleep(fc::milliseconds(1000)); // index.refresh_interval + boost::this_fiber::sleep_for(std::chrono::milliseconds(1000)); // index.refresh_interval res = graphene::utilities::simpleQuery(es); j = fc::json::from_string(res); @@ -145,14 +146,14 @@ BOOST_AUTO_TEST_CASE(elasticsearch_objects) { auto delete_objects = graphene::utilities::deleteAll(es); generate_block(); - fc::usleep(fc::milliseconds(1000)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(1000)); if(delete_objects) { // all records deleted // asset and bitasset create_bitasset("USD", account_id_type()); generate_block(); - fc::usleep(fc::milliseconds(1000)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(1000)); string query = "{ \"query\" : { \"bool\" : { \"must\" : [{\"match_all\": {}}] } } }"; es.endpoint = es.index_prefix + "*/data/_count"; @@ -195,10 +196,10 @@ BOOST_AUTO_TEST_CASE(elasticsearch_suite) { es.elasticsearch_url = "http://localhost:9200/"; es.index_prefix = "bitshares-"; auto delete_account_history = graphene::utilities::deleteAll(es); - fc::usleep(fc::milliseconds(1000)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(1000)); es.index_prefix = "objects-"; auto delete_objects = graphene::utilities::deleteAll(es); - fc::usleep(fc::milliseconds(1000)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(1000)); if(delete_account_history && delete_objects) { // all records deleted @@ -224,7 +225,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_history_api) { auto delete_account_history = graphene::utilities::deleteAll(es); generate_block(); - fc::usleep(fc::milliseconds(1000)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(1000)); if(delete_account_history) { @@ -237,7 +238,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_history_api) { create_bitasset("OIL", dan.id); // create op 6 generate_block(); 
- fc::usleep(fc::milliseconds(1000)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(1000)); graphene::app::history_api hist_api(app); app.enable_plugin("elasticsearch"); @@ -506,7 +507,7 @@ BOOST_AUTO_TEST_CASE(elasticsearch_history_api) { create_account("alice"); generate_block(); - fc::usleep(fc::milliseconds(1000)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(1000)); // f(C, 0, 4, 10) = { 7 } histories = hist_api.get_account_history("alice", operation_history_id_type(0), 4, operation_history_id_type(10)); diff --git a/tests/tests/database_api_tests.cpp b/tests/tests/database_api_tests.cpp index 31f03e7781..7266e080be 100644 --- a/tests/tests/database_api_tests.cpp +++ b/tests/tests/database_api_tests.cpp @@ -30,8 +30,11 @@ #include #include +#include + #include "../common/database_fixture.hpp" +#include #include using namespace graphene::chain; @@ -772,7 +775,7 @@ BOOST_AUTO_TEST_CASE( subscription_key_collision_test ) db_api.get_accounts( collision_ids ); generate_block(); - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread BOOST_CHECK_EQUAL( objects_changed, 0 ); // did not subscribe to UIATEST, so no notification @@ -781,7 +784,7 @@ BOOST_AUTO_TEST_CASE( subscription_key_collision_test ) db_api.get_assets( asset_names ); generate_block(); - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread BOOST_CHECK_EQUAL( objects_changed, 0 ); // UIATEST did not change in this block, so no notification } @@ -964,7 +967,7 @@ BOOST_AUTO_TEST_CASE( subscription_notification_test ) ++expected_objects_changed18; // db_api18 subscribed to HTLC object, notify object creation ++expected_objects_changed48; // db_api48 subscribed to HTLC object, notify object creation - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread check_results(); transfer( account_id_type(), alice_id, asset(1) ); @@ -978,7 +981,7 @@ BOOST_AUTO_TEST_CASE( subscription_notification_test ) // db_api6 didn't subscribe to anything, nothing would be notified // db_api7: no change on UIA, nothing would be notified - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread check_results(); vector obj_ids; @@ -1007,7 +1010,7 @@ BOOST_AUTO_TEST_CASE( subscription_notification_test ) // db_api6 didn't subscribe to anything, nothing would be notified // db_api7: no change on UIA, nothing would be notified - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread check_results(); db_api6.set_auto_subscription( false ); @@ -1022,7 +1025,7 @@ BOOST_AUTO_TEST_CASE( subscription_notification_test ) // db_api6 didn't subscribe to anything, nothing would be notified // db_api7: no change on UIA, nothing would be notified - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + 
boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread check_results(); account_names.clear(); @@ -1043,7 +1046,7 @@ BOOST_AUTO_TEST_CASE( subscription_notification_test ) // db_api6 didn't subscribe to anything, nothing would be notified // db_api7: no change on UIA, nothing would be notified - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread check_results(); db_api6.set_auto_subscription( true ); @@ -1058,7 +1061,7 @@ BOOST_AUTO_TEST_CASE( subscription_notification_test ) ++expected_objects_changed6; // db_api6 subscribed to dynamic global properties, would be notified // db_api7: no change on UIA, nothing would be notified - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread check_results(); db_api5.set_subscribe_callback( callback5, false ); // reset subscription @@ -1079,7 +1082,7 @@ BOOST_AUTO_TEST_CASE( subscription_notification_test ) // db_api6 subscribed to anything, nothing notified // db_api7: no change on UIA, nothing would be notified - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread check_results(); } FC_LOG_AND_RETHROW() @@ -1565,7 +1568,7 @@ BOOST_AUTO_TEST_CASE( api_limit_get_limit_orders ){ create_account("bob"); asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; generate_block(); - fc::usleep(fc::milliseconds(100)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(100)); GRAPHENE_CHECK_THROW(db_api.get_limit_orders(std::string(static_cast(asset_id_type())), std::string(static_cast(bit_jmj_id)), 370), fc::exception); vector limit_orders =db_api.get_limit_orders(std::string( @@ -1588,7 +1591,7 @@ BOOST_AUTO_TEST_CASE( api_limit_get_call_orders ){ asset_id_type bitusd_id = create_bitasset( "USDBIT", nathan_id, 100, disable_force_settle).id; generate_block(); - fc::usleep(fc::milliseconds(100)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(100)); BOOST_CHECK( bitusd_id(db).is_market_issued() ); GRAPHENE_CHECK_THROW(db_api.get_call_orders(std::string(static_cast(bitusd_id)), 370), fc::exception); @@ -1610,7 +1613,7 @@ BOOST_AUTO_TEST_CASE( api_limit_get_settle_orders ){ asset_id_type bitusd_id = create_bitasset( "USDBIT", nathan_id, 100, disable_force_settle).id; generate_block(); - fc::usleep(fc::milliseconds(100)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(100)); GRAPHENE_CHECK_THROW(db_api.get_settle_orders( std::string(static_cast(bitusd_id)), 370), fc::exception); vector result =db_api.get_settle_orders( @@ -1635,7 +1638,7 @@ BOOST_AUTO_TEST_CASE( api_limit_get_order_book ){ asset_id_type bitdan_id = create_bitasset( "DANBIT", dan_id, 100, disable_force_settle).id; generate_block(); - fc::usleep(fc::milliseconds(100)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(100)); GRAPHENE_CHECK_THROW(db_api.get_order_book(std::string(static_cast(bitusd_id)), std::string(static_cast(bitdan_id)),89), fc::exception); graphene::app::order_book result =db_api.get_order_book(std::string( @@ -1692,7 +1695,7 @@ BOOST_AUTO_TEST_CASE( asset_in_collateral ) BOOST_CHECK_EQUAL( 0, 
oassets[3]->total_backing_collateral->value ); generate_block(); - fc::usleep(fc::milliseconds(100)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(100)); const auto& bitusd = bitusd_id( db ); const auto& bitdan = bitdan_id( db ); @@ -1763,7 +1766,7 @@ BOOST_AUTO_TEST_CASE( asset_in_collateral ) force_settle( dan_id(db), bitusd.amount(100) ); // settles against nathan, receives 500 CORE collateral generate_blocks( db.head_block_time() + fc::days(2) ); - fc::usleep(fc::milliseconds(100)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(100)); auto assets = db_api.list_assets( GRAPHENE_SYMBOL, 1 ); BOOST_REQUIRE( !assets.empty() ); diff --git a/tests/tests/grouped_orders_api_tests.cpp b/tests/tests/grouped_orders_api_tests.cpp index e08c7d5d0e..2b42af9789 100644 --- a/tests/tests/grouped_orders_api_tests.cpp +++ b/tests/tests/grouped_orders_api_tests.cpp @@ -23,6 +23,7 @@ */ #include +#include #include #include @@ -48,7 +49,7 @@ BOOST_AUTO_TEST_CASE(api_limit_get_grouped_limit_orders) { create_account("bob"); asset_id_type bit_jmj_id = create_bitasset("JMJBIT").id; generate_block(); - fc::usleep(fc::milliseconds(100)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(100)); GRAPHENE_CHECK_THROW(orders_api.get_grouped_limit_orders(std::string( static_cast(asset_id_type())), std::string( static_cast(asset_id_type())),10, start,260), fc::exception); vector< limit_order_group > orders =orders_api.get_grouped_limit_orders(std::string( static_cast(asset_id_type())), std::string( static_cast(bit_jmj_id)), 10,start,240); BOOST_REQUIRE_EQUAL( orders.size(), 0u); diff --git a/tests/tests/history_api_tests.cpp b/tests/tests/history_api_tests.cpp index e8856fda28..064ee9c166 100644 --- a/tests/tests/history_api_tests.cpp +++ b/tests/tests/history_api_tests.cpp @@ -23,6 +23,7 @@ */ #include +#include #include @@ -47,7 +48,7 @@ BOOST_AUTO_TEST_CASE(get_account_history) { create_account("bob"); generate_block(); - fc::usleep(fc::milliseconds(2000)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(2000)); int asset_create_op_id = operation::tag::value; int account_create_op_id = operation::tag::value; @@ -542,7 +543,7 @@ BOOST_AUTO_TEST_CASE(get_account_history_operations) { create_account("alice"); generate_block(); - fc::usleep(fc::milliseconds(2000)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(2000)); int asset_create_op_id = operation::tag::value; int account_create_op_id = operation::tag::value; @@ -610,7 +611,7 @@ BOOST_AUTO_TEST_CASE(api_limit_get_account_history_operations) { create_account("alice"); generate_block(); - fc::usleep(fc::milliseconds(100)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(100)); int asset_create_op_id = operation::tag::value; int account_create_op_id = operation::tag::value; @@ -676,7 +677,7 @@ BOOST_AUTO_TEST_CASE(api_limit_get_account_history) { create_account("bob"); generate_block(); - fc::usleep(fc::milliseconds(100)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(100)); int asset_create_op_id = operation::tag::value; int account_create_op_id = operation::tag::value; @@ -729,7 +730,7 @@ BOOST_AUTO_TEST_CASE(api_limit_get_relative_account_history) { create_account("bob"); generate_block(); - fc::usleep(fc::milliseconds(100)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(100)); GRAPHENE_CHECK_THROW(hist_api.get_relative_account_history("1.2.0", 126, 260, 0), fc::exception); vector histories = hist_api.get_relative_account_history("1.2.0", 126, 210, 0); @@ -750,7 +751,7 @@ 
BOOST_AUTO_TEST_CASE(api_limit_get_account_history_by_operations) { create_account("dan"); create_account("bob"); generate_block(); - fc::usleep(fc::milliseconds(100)); + boost::this_fiber::sleep_for(std::chrono::milliseconds(100)); GRAPHENE_CHECK_THROW(hist_api.get_account_history_by_operations("1.2.0", operation_types, 0, 260), fc::exception); history_operation_detail histories = hist_api.get_account_history_by_operations("1.2.0", operation_types, 0, 210); BOOST_REQUIRE_EQUAL( histories.total_count, 3u ); diff --git a/tests/tests/market_rounding_tests.cpp b/tests/tests/market_rounding_tests.cpp index 66698bee45..62b5df48c3 100644 --- a/tests/tests/market_rounding_tests.cpp +++ b/tests/tests/market_rounding_tests.cpp @@ -23,6 +23,7 @@ */ #include +#include #include @@ -74,7 +75,7 @@ BOOST_AUTO_TEST_CASE( trade_amount_equals_zero ) BOOST_CHECK_EQUAL(get_balance(core_seller, test), 3); generate_block(); - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread auto result = get_market_order_history(core_id, test_id); BOOST_CHECK_EQUAL(result.size(), 4u); @@ -130,7 +131,7 @@ BOOST_AUTO_TEST_CASE( trade_amount_equals_zero_after_hf_184 ) BOOST_CHECK_EQUAL(get_balance(core_seller, test), 2); generate_block(); - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread auto result = get_market_order_history(core_id, test_id); BOOST_CHECK_EQUAL(result.size(), 2u); diff --git a/tests/tests/network_broadcast_api_tests.cpp b/tests/tests/network_broadcast_api_tests.cpp index a40c112662..9bfa1a2b7e 100644 --- a/tests/tests/network_broadcast_api_tests.cpp +++ b/tests/tests/network_broadcast_api_tests.cpp @@ -23,6 +23,7 @@ */ #include +#include #include #include @@ -65,7 +66,7 @@ BOOST_AUTO_TEST_CASE( broadcast_transaction_with_callback_test ) { generate_block(); - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread BOOST_CHECK_EQUAL( called, 1u ); diff --git a/tests/tests/settle_tests.cpp b/tests/tests/settle_tests.cpp index 6310493ae5..f9c7b8f0d2 100644 --- a/tests/tests/settle_tests.cpp +++ b/tests/tests/settle_tests.cpp @@ -23,6 +23,7 @@ */ #include +#include #include @@ -1534,7 +1535,7 @@ BOOST_AUTO_TEST_CASE( global_settle_ticker_test ) force_global_settle( pmark, pmark.amount(1) / core.amount(1) ); generate_block(); - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread { BOOST_CHECK_EQUAL( meta_idx.size(), 1 ); @@ -1553,7 +1554,7 @@ BOOST_AUTO_TEST_CASE( global_settle_ticker_test ) } generate_blocks( db.head_block_time() + 86000 ); // less than a day - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread // nothing changes { @@ -1573,7 +1574,7 @@ BOOST_AUTO_TEST_CASE( global_settle_ticker_test ) } generate_blocks( db.head_block_time() + 4000 ); // now more than 24 hours - fc::usleep(fc::milliseconds(200)); // sleep a while to execute 
callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread // the history is rolled out, new 24h volume should be 0 { @@ -1593,7 +1594,7 @@ BOOST_AUTO_TEST_CASE( global_settle_ticker_test ) } generate_block(); - fc::usleep(fc::milliseconds(200)); // sleep a while to execute callback in another thread + boost::this_fiber::sleep_for(std::chrono::milliseconds(200)); // sleep a while to execute callback in another thread // nothing changes { From 1ff48dfea6667de541e1a6620ccb55ef44933953 Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Thu, 12 Dec 2019 12:15:53 +0100 Subject: [PATCH 09/14] Fixup fiber name in recurring_task --- .../graphene/utilities/recurring_task.hpp | 1 + libraries/utilities/recurring_task.cpp | 20 +++++++++---------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/libraries/utilities/include/graphene/utilities/recurring_task.hpp b/libraries/utilities/include/graphene/utilities/recurring_task.hpp index b516d88028..c67ae9ffa5 100644 --- a/libraries/utilities/include/graphene/utilities/recurring_task.hpp +++ b/libraries/utilities/include/graphene/utilities/recurring_task.hpp @@ -43,6 +43,7 @@ class recurring_task boost::fibers::mutex _mtx; boost::fibers::condition_variable _cv; boost::fibers::future _worker; + std::string _name; /** Waits for the given duration. Waiting can be interrupted by trigger() or cancel(). * Throws when cancelled. diff --git a/libraries/utilities/recurring_task.cpp b/libraries/utilities/recurring_task.cpp index 4c861b3f6f..3ba8dbb440 100644 --- a/libraries/utilities/recurring_task.cpp +++ b/libraries/utilities/recurring_task.cpp @@ -28,15 +28,10 @@ namespace graphene { namespace utilities { -recurring_task::recurring_task( const std::string& name ) -{ - fc::set_fiber_name( name ); -} +recurring_task::recurring_task( const std::string& name ) : _name(name) {} -recurring_task::recurring_task( std::thread::id runner, const std::string& name ) : _runner( runner ) -{ - fc::set_fiber_name( name ); -} +recurring_task::recurring_task( std::thread::id runner, const std::string& name ) + : _runner( runner ), _name(name) {} recurring_task::~recurring_task() { @@ -90,8 +85,13 @@ void recurring_task::trigger() check_cancelled(); if( !_worker.valid() || _worker.wait_for( std::chrono::seconds(0) ) == boost::fibers::future_status::ready ) { - _worker = _runner != std::thread::id() ? fc::async( std::bind( &recurring_task::run, this ), _runner ) - : fc::async( std::bind( &recurring_task::run, this ) ); + std::function lambda; + if( _name.empty() ) + lambda = std::bind( &recurring_task::run, this ); + else + lambda = [this] () { fc::set_fiber_name(_name); run(); }; + _worker = _runner != std::thread::id() ? 
fc::async( std::move(lambda), _runner ) + : fc::async( std::move(lambda) ); } else { From 2e6e1e26c6cef187f3468c663f35108a8c9d9736 Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Wed, 18 Dec 2019 12:08:36 +0100 Subject: [PATCH 10/14] Fixup node --- libraries/net/node.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index 73738c6d1e..e16c87e845 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -3769,7 +3769,7 @@ namespace graphene { namespace net { namespace detail { _node->_handshaking_connections.insert( new_peer ); _node->_rate_limiter.add_tcp_socket( &new_peer->get_socket() ); std::weak_ptr new_weak_peer(new_peer); - new_peer->accept_or_connect_task_done = fc::async( [this, new_weak_peer]() { + fc::async( [this, new_weak_peer]() { peer_connection_ptr new_peer(new_weak_peer.lock()); assert(new_peer); if (!new_peer) @@ -4157,7 +4157,7 @@ namespace graphene { namespace net { namespace detail { return; std::weak_ptr new_weak_peer(new_peer); - new_peer->accept_or_connect_task_done = fc::async([this, new_weak_peer](){ + fc::async([this, new_weak_peer](){ peer_connection_ptr new_peer(new_weak_peer.lock()); assert(new_peer); if (!new_peer) From 8e6f5a5c9743b4ebf2f36ca8a02f2a6b40c77549 Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Wed, 18 Dec 2019 14:25:17 +0100 Subject: [PATCH 11/14] Dont trigger p2p connect loop as a side effect of load_config --- libraries/net/node.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index e16c87e845..b29c658356 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -4007,8 +4007,6 @@ namespace graphene { namespace net { namespace detail { fc::time_point::now() - fc::seconds(_peer_connection_retry_timeout)); _potential_peer_db.update_entry(updated_peer_record); } - - trigger_p2p_network_connect_loop(); } catch (fc::exception& except) { From cd9907e6aff39c5537e088517370f5a0c5976b88 Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Wed, 18 Dec 2019 14:25:33 +0100 Subject: [PATCH 12/14] Fixup accept_loop init --- libraries/net/node.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/libraries/net/node.cpp b/libraries/net/node.cpp index b29c658356..fe64442800 100644 --- a/libraries/net/node.cpp +++ b/libraries/net/node.cpp @@ -293,6 +293,7 @@ namespace graphene { namespace net { namespace detail { _maximum_number_of_connections(GRAPHENE_NET_DEFAULT_MAX_CONNECTIONS), _peer_connection_retry_timeout(GRAPHENE_NET_DEFAULT_PEER_CONNECTION_RETRY_TIME), _peer_inactivity_timeout(GRAPHENE_NET_PEER_HANDSHAKE_INACTIVITY_TIMEOUT), + _accept_loop(*this), _most_recent_blocks_accepted(_maximum_number_of_connections), _total_number_of_unfetched_items(0), _rate_limiter(0, 0), From f469d4c348b02ceba09c04d4561c6a43d350eb42 Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Wed, 18 Dec 2019 14:25:43 +0100 Subject: [PATCH 13/14] Bump fc --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index dc672fa449..93bab405d8 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit dc672fa449830f6719f5ad32d7b3c90a4b1249bf +Subproject commit 93bab405d8aeda79e44f13f37a4a88c7913d825f From e437cb76f4bf2366472d380a5347474a41a41bcb Mon Sep 17 00:00:00 2001 From: Peter Conrad Date: Wed, 18 Dec 2019 14:26:03 +0100 Subject: [PATCH 14/14] Initialize fibers properly --- tests/app/main.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/app/main.cpp 
b/tests/app/main.cpp
index d02a26c7c5..5a92ad11e9 100644
--- a/tests/app/main.cpp
+++ b/tests/app/main.cpp
@@ -36,6 +36,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -219,6 +220,8 @@ BOOST_AUTO_TEST_CASE( two_node_network )
 try {
    BOOST_TEST_MESSAGE( "Creating and initializing app1" );
+   fc::initialize_fibers();
+
    fc::temp_directory app_dir( graphene::utilities::temp_directory_path() );
    graphene::app::application app1;
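
Taken together, the series swaps fc's cooperative-task primitives for Boost.Fiber equivalents: fc::promise/fc::future become boost::fibers::promise/future, fc::usleep becomes boost::this_fiber::sleep_for, and the fc::schedule-driven witness production loop becomes a cancellable recurring_task running in its own fiber. A minimal stand-alone sketch of that substitution pattern follows; it is illustrative only (not part of any patch above), uses only stock Boost.Fiber, and its names and timeouts are invented:

   #include <boost/fiber/all.hpp>
   #include <chrono>
   #include <iostream>

   int main()
   {
      // was: fc::promise<int>::ptr exit_promise = fc::promise<int>::create("UNIX Signal Handler");
      boost::fibers::promise<int> exit_promise;
      boost::fibers::future<int>  exit_future = exit_promise.get_future();

      // A worker fiber that sleeps cooperatively instead of calling fc::usleep(),
      // then fulfils the promise the way the signal handlers in the patches do.
      boost::fibers::fiber signal_source( [&exit_promise]() {
         boost::this_fiber::sleep_for( std::chrono::milliseconds(200) );
         exit_promise.set_value( 2 );
      } );

      // was: int signal = exit_promise->wait();
      int signal = exit_future.get();
      std::cout << "Exiting from signal " << signal << std::endl;

      signal_source.join();
      return 0;
   }

The fc-specific pieces that remain in the patches — fc::initialize_fibers(), fc::async() targeting a std::thread::id, fc::set_fiber_name() — presumably come from the bumped fc submodule (patch 13) rather than from Boost itself, so they are deliberately not used in the sketch.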